/*
- * (C) Gražvydas "notaz" Ignotas, 2011
+ * (C) Gražvydas "notaz" Ignotas, 2011-2012
*
* This work is licensed under the terms of any of these licenses
* (at your option):
struct psx_gpu gpu __attribute__((aligned(2048)));
static noinline int do_cmd_buffer(uint32_t *data, int count);
+static void finish_vram_transfer(int is_read);
static noinline void do_cmd_reset(void)
{
if (unlikely(gpu.cmd_len > 0))
do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
-
gpu.cmd_len = 0;
+
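+  /* a reset may arrive while a VRAM transfer is still in progress;
+   * complete its bookkeeping so the renderer's caches stay consistent */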
+ if (unlikely(gpu.dma.h > 0))
+ finish_vram_transfer(gpu.dma_start.is_read);
gpu.dma.h = 0;
}
do_vram_line(x, y, sdata, w, is_read);
}
- if (h > 0 && count > 0) {
- y &= 511;
- do_vram_line(x, y, sdata, count, is_read);
- o = count;
- count = 0;
+ if (h > 0) {
+ if (count > 0) {
+ y &= 511;
+ do_vram_line(x, y, sdata, count, is_read);
+ o = count;
+ count = 0;
+ }
}
+ else
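+    /* no lines left - the whole transfer has been performed */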
+ finish_vram_transfer(is_read);
gpu.dma.y = y;
gpu.dma.h = h;
gpu.dma.offset = o;
gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
gpu.dma.offset = 0;
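+  /* gpu.dma is consumed as the transfer progresses; keep a copy of the
+   * initial state for finish_vram_transfer() */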
+ gpu.dma.is_read = is_read;
+ gpu.dma_start = gpu.dma;
renderer_flush_queues();
if (is_read) {
memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
gpu.state.last_vram_read_frame = *gpu.state.frame_count;
}
- else {
- renderer_invalidate_caches(gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
- }
log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}
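+/* wrap up a transfer started by start_vram_transfer(): a completed read
+ * drops status.img, a completed write hands the affected VRAM rect to
+ * the renderer so it can refresh its texture caches */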
+static void finish_vram_transfer(int is_read)
+{
+ if (is_read)
+ gpu.status.img = 0;
+ else
+ renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
+ gpu.dma_start.w, gpu.dma_start.h);
+}
+
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
int len, cmd, start, pos;
freeze->ulStatus = gpu.status.reg;
break;
case 0: // load
- renderer_invalidate_caches(0, 0, 1024, 512);
memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
}
renderer_sync_ecmds(gpu.ex_regs);
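+    /* all of VRAM was replaced by the memcpy above */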
+ renderer_update_caches(0, 0, 1024, 512);
break;
}
return mask;
}
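+/* a VRAM rect (x1, y1)-(x2, y2) has just been written; bring the cached
+ * texture data back in sync.  8bpp caches are simply marked dirty; the
+ * 4bpp cache is patched in place when the write fits a single aligned
+ * 4x16 halfword block, and marked dirty otherwise. */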
+void update_texture_cache_region(psx_gpu_struct *psx_gpu, u32 x1, u32 y1,
+ u32 x2, u32 y2)
+{
+ u32 mask = texture_region_mask(x1, y1, x2, y2);
+ u32 texture_page;
+ u8 *texture_page_ptr;
+ u16 *vram_ptr;
+ u32 texel_block;
+ u32 sub_x, sub_y;
+
+ psx_gpu->dirty_textures_8bpp_mask |= mask;
+ psx_gpu->dirty_textures_8bpp_alternate_mask |= mask;
+
+ if ((psx_gpu->dirty_textures_4bpp_mask & mask) == 0 &&
+ (x1 & 3) == 0 && (y1 & 15) == 0 && x2 - x1 < 4 && y2 - y1 < 16)
+ {
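+    /* the page is still cached and the write fits one 16x16-texel block
+     * (4 halfwords x 16 lines) - patch the cached texels in place */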
+ texture_page = ((x1 / 64) & 15) + (y1 / 256) * 16;
+ texture_page_ptr = psx_gpu->texture_4bpp_cache[texture_page];
+    texture_page_ptr += ((x1 / 4) & 15) * 16*16 + ((y1 / 16) & 15) * 16*16*16;
+ vram_ptr = psx_gpu->vram_ptr + x1 + y1 * 1024;
+ sub_x = 4;
+ sub_y = 16;
+
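+    /* each halfword holds four 4bpp texels; expand them to a byte each */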
+ while(sub_y)
+ {
+ while(sub_x)
+ {
+ texel_block = *vram_ptr;
+
+ texture_page_ptr[0] = texel_block & 0xF;
+ texture_page_ptr[1] = (texel_block >> 4) & 0xF;
+ texture_page_ptr[2] = (texel_block >> 8) & 0xF;
+ texture_page_ptr[3] = texel_block >> 12;
+
+ vram_ptr++;
+ texture_page_ptr += 4;
+
+ sub_x--;
+ }
+
+ vram_ptr -= 4;
+ sub_x = 4;
+
+ sub_y--;
+ vram_ptr += 1024;
+ }
+ }
+ else
+ {
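+    /* unaligned or multi-block write - mark the affected pages dirty so
+     * they are rebuilt on next use */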
+ psx_gpu->dirty_textures_4bpp_mask |= mask;
+ }
+}
void update_texture_8bpp_cache_slice(psx_gpu_struct *psx_gpu,
u32 texture_page);
gpu_parse(&egpu, ecmds + 1, 6 * 4);
}
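+/* called by gpulib when VRAM is modified behind the renderer's back
+ * (CPU->VRAM transfer, savestate load) */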
-void renderer_invalidate_caches(int x, int y, int w, int h)
+void renderer_update_caches(int x, int y, int w, int h)
{
- invalidate_texture_cache_region(&egpu, x, y, x + w - 1, y + h - 1);
+ update_texture_cache_region(&egpu, x, y, x + w - 1, y + h - 1);
}
void renderer_flush_queues(void)