From 36da9c1305e300fce0d41236bae4533851d490d3 Mon Sep 17 00:00:00 2001
From: notaz
Date: Sat, 9 Sep 2023 00:39:34 +0300
Subject: [PATCH] gpulib: handle vram copy in gpulib

use internal buffering according to mednafen

notaz/pcsx_rearmed#289
---
 plugins/dfxvideo/gpulib_if.c             |  2 +-
 plugins/gpu_neon/psx_gpu/psx_gpu.c       |  3 +-
 plugins/gpu_neon/psx_gpu/psx_gpu_parse.c | 41 ++++-------
 plugins/gpu_unai/gpulib_if.cpp           |  7 +-
 plugins/gpulib/gpu.c                     | 89 ++++++++++++++++++++----
 5 files changed, 97 insertions(+), 45 deletions(-)

diff --git a/plugins/dfxvideo/gpulib_if.c b/plugins/dfxvideo/gpulib_if.c
index ee1f68a4..ac86f379 100644
--- a/plugins/dfxvideo/gpulib_if.c
+++ b/plugins/dfxvideo/gpulib_if.c
@@ -337,7 +337,7 @@ int do_cmd_list(uint32_t *list, int list_len, int *last_cmd)
     }
 
 #ifndef TEST
-    if (cmd == 0xa0 || cmd == 0xc0)
+    if (0x80 <= cmd && cmd < 0xe0)
       break; // image i/o, forward to upper layer
     else if ((cmd & 0xf8) == 0xe0)
       gpu.ex_regs[cmd & 7] = GETLE32(list);
diff --git a/plugins/gpu_neon/psx_gpu/psx_gpu.c b/plugins/gpu_neon/psx_gpu/psx_gpu.c
index a0bff3e9..fbacbd5f 100644
--- a/plugins/gpu_neon/psx_gpu/psx_gpu.c
+++ b/plugins/gpu_neon/psx_gpu/psx_gpu.c
@@ -4888,6 +4888,7 @@ void render_block_fill_enh(psx_gpu_struct *psx_gpu, u32 color, u32 x, u32 y,
   }
 }
 
+#ifndef PCSX
 void render_block_copy(psx_gpu_struct *psx_gpu, u16 *source, u32 x, u32 y,
  u32 width, u32 height, u32 pitch)
 {
@@ -4919,7 +4920,7 @@ void render_block_move(psx_gpu_struct *psx_gpu, u32 source_x, u32 source_y,
   render_block_copy(psx_gpu, psx_gpu->vram_ptr + source_x + (source_y * 1024),
    dest_x, dest_y, width, height, 1024);
 }
-
+#endif
 
 void initialize_reciprocal_table(void)
 {
diff --git a/plugins/gpu_neon/psx_gpu/psx_gpu_parse.c b/plugins/gpu_neon/psx_gpu/psx_gpu_parse.c
index c7562993..5badf6b9 100644
--- a/plugins/gpu_neon/psx_gpu/psx_gpu_parse.c
+++ b/plugins/gpu_neon/psx_gpu/psx_gpu_parse.c
@@ -606,7 +606,13 @@ u32 gpu_parse(psx_gpu_struct *psx_gpu, u32 *list, u32 size, u32 *last_command)
         break;
       }
 
-      case 0x80:          //  vid -> vid
+#ifdef PCSX
+      case 0x80 ... 0x9F: //  vid -> vid
+      case 0xA0 ... 0xBF: //  sys -> vid
+      case 0xC0 ... 0xDF: //  vid -> sys
+        goto breakloop;
+#else
+      case 0x80 ... 0x9F: //  vid -> vid
       {
         u32 sx = list_s16[2] & 0x3FF;
         u32 sy = list_s16[3] & 0x1FF;
@@ -622,12 +628,7 @@ u32 gpu_parse(psx_gpu_struct *psx_gpu, u32 *list, u32 size, u32 *last_command)
         break;
       }
 
-#ifdef PCSX
-      case 0xA0:          //  sys -> vid
-      case 0xC0:          //  vid -> sys
-        goto breakloop;
-#else
-      case 0xA0:          //  sys -> vid
+      case 0xA0 ... 0xBF: //  sys -> vid
       {
         u32 load_x = list_s16[2] & 0x3FF;
         u32 load_y = list_s16[3] & 0x1FF;
@@ -645,8 +646,8 @@ u32 gpu_parse(psx_gpu_struct *psx_gpu, u32 *list, u32 size, u32 *last_command)
         break;
       }
 
-      case 0xC0:          //  vid -> sys
-        break;
+      case 0xC0 ... 0xDF: //  vid -> sys
+        break;
 #endif
 
       case 0xE1:
@@ -1575,26 +1576,10 @@ u32 gpu_parse_enhanced(psx_gpu_struct *psx_gpu, u32 *list, u32 size,
         do_sprite_enhanced(psx_gpu, x, y, u, v, 16, 16, list[0]);
         break;
       }
-
-      case 0x80:          //  vid -> vid
-      {
-        u32 sx = list_s16[2] & 0x3FF;
-        u32 sy = list_s16[3] & 0x1FF;
-        u32 dx = list_s16[4] & 0x3FF;
-        u32 dy = list_s16[5] & 0x1FF;
-        u32 w = ((list_s16[6] - 1) & 0x3FF) + 1;
-        u32 h = ((list_s16[7] - 1) & 0x1FF) + 1;
-        if (sx == dx && sy == dy && psx_gpu->mask_msb == 0)
-          break;
-
-        render_block_move(psx_gpu, sx, sy, dx, dy, w, h);
-        sync_enhancement_buffers(dx, dy, w, h);
-        break;
-      }
-
-      case 0xA0:          //  sys -> vid
-      case 0xC0:          //  vid -> sys
+      case 0x80 ... 0x9F: //  vid -> vid
+      case 0xA0 ... 0xBF: //  sys -> vid
+      case 0xC0 ... 0xDF: //  vid -> sys
         goto breakloop;
 
       case 0xE1:
diff --git a/plugins/gpu_unai/gpulib_if.cpp b/plugins/gpu_unai/gpulib_if.cpp
index 453cc7a6..960ad89d 100644
--- a/plugins/gpu_unai/gpulib_if.cpp
+++ b/plugins/gpu_unai/gpulib_if.cpp
@@ -754,11 +754,11 @@ int do_cmd_list(u32 *_list, int list_len, int *last_cmd)
         gpuDrawS(packet, driver);
       }
       break;
 
+#ifdef TEST
       case 0x80:          //  vid -> vid
        gpuMoveImage(packet);
        break;
-#ifdef TEST
      case 0xA0:          //  sys -> vid
      {
        u32 load_width = list[2] & 0xffff;
@@ -771,8 +771,9 @@ int do_cmd_list(u32 *_list, int list_len, int *last_cmd)
       case 0xC0:
         break;
 #else
-      case 0xA0:          //  sys ->vid
-      case 0xC0:          //  vid -> sys
+      case 0x80 ... 0x9F: //  vid -> vid
+      case 0xA0 ... 0xBF: //  sys -> vid
+      case 0xC0 ... 0xDF: //  vid -> sys
         // Handled by gpulib
         goto breakloop;
 #endif
diff --git a/plugins/gpulib/gpu.c b/plugins/gpulib/gpu.c
index 5f69c9d2..5e2e9e1b 100644
--- a/plugins/gpulib/gpu.c
+++ b/plugins/gpulib/gpu.c
@@ -410,23 +410,33 @@ const unsigned char cmd_lengths[256] =
   3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
   2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
   1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
-  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
+  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
+  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
+  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 };
 
 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
 
-static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
+static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
+{
+  int i;
+  for (i = 0; i < l; i++)
+    dst[i] = src[i] | msb;
+}
+
+static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
+  int is_read, uint16_t msb)
 {
   uint16_t *vram = VRAM_MEM_XY(x, y);
-  if (is_read)
+  if (unlikely(is_read))
     memcpy(mem, vram, l * 2);
+  else if (unlikely(msb))
+    cpy_msb(vram, mem, l, msb);
   else
     memcpy(vram, mem, l * 2);
 }
@@ -434,6 +444,7 @@ static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
 static int do_vram_io(uint32_t *data, int count, int is_read)
 {
   int count_initial = count;
+  uint16_t msb = gpu.ex_regs[6] << 15;
   uint16_t *sdata = (uint16_t *)data;
   int x = gpu.dma.x, y = gpu.dma.y;
   int w = gpu.dma.w, h = gpu.dma.h;
@@ -448,7 +459,7 @@ static int do_vram_io(uint32_t *data, int count, int is_read)
     if (count < l)
       l = count;
 
-    do_vram_line(x + o, y, sdata, l, is_read);
+    do_vram_line(x + o, y, sdata, l, is_read, msb);
 
     if (o + l < w)
       o += l;
@@ -463,13 +474,13 @@ static int do_vram_io(uint32_t *data, int count, int is_read)
   for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--)
   {
     y &= 511;
-    do_vram_line(x, y, sdata, w, is_read);
+    do_vram_line(x, y, sdata, w, is_read, msb);
   }
 
   if (h > 0) {
     if (count > 0) {
       y &= 511;
-      do_vram_line(x, y, sdata, count, is_read);
+      do_vram_line(x, y, sdata, count, is_read, msb);
       o = count;
       count = 0;
     }
@@ -517,6 +528,51 @@ static void finish_vram_transfer(int is_read)
       gpu.dma_start.w, gpu.dma_start.h, 0);
 }
 
+static void do_vram_copy(const uint32_t *params)
+{
+  const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
+  const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
+  const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
+  const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
+  uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
+  uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
+  uint16_t msb = gpu.ex_regs[6] << 15;
+  uint16_t lbuf[128];
+  uint32_t x, y;
+
+  if (sx == dx && sy == dy && msb == 0)
+    return;
+
+  renderer_flush_queues();
+
+  if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
+  {
+    for (y = 0; y < h; y++)
+    {
+      const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
+      uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
+      for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
+      {
+        uint32_t x1, w1 = w - x;
+        if (w1 > ARRAY_SIZE(lbuf))
+          w1 = ARRAY_SIZE(lbuf);
+        for (x1 = 0; x1 < w1; x1++)
+          lbuf[x1] = src[(sx + x + x1) & 0x3ff];
+        for (x1 = 0; x1 < w1; x1++)
+          dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
+      }
+    }
+  }
+  else
+  {
+    uint32_t sy1 = sy, dy1 = dy;
+    for (y = 0; y < h; y++, sy1++, dy1++)
+      memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
+  }
+
+  renderer_update_caches(dx, dy, w, h, 0);
+}
+
 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
 {
   int cmd = 0, pos = 0, len, dummy, v;
@@ -572,7 +628,7 @@ static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
       cmd = -1;
       break; // incomplete cmd
     }
-    if (0xa0 <= cmd && cmd <= 0xdf)
+    if (0x80 <= cmd && cmd <= 0xdf)
       break; // image i/o
 
     pos += len;
@@ -612,6 +668,15 @@ static noinline int do_cmd_buffer(uint32_t *data, int count)
       pos += 3;
       continue;
     }
+    else if ((cmd & 0xe0) == 0x80) {
+      if (unlikely((pos+3) >= count)) {
+        cmd = -1; // incomplete cmd, can't consume yet
+        break;
+      }
+      do_vram_copy(data + pos + 1);
+      pos += 4;
+      continue;
+    }
     // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
     if (gpu.frameskip.active &&
         (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
-- 
2.39.5
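
Note (not part of the patch): the sketch below is a minimal, self-contained approximation of the buffering idea that do_vram_copy() above borrows from mednafen. For simplicity it always stages each chunk of a row in a small local buffer before writing it back, so overlapping source/destination rectangles, wrap-around past x=1023 and a forced mask bit are all handled by one path; the real do_vram_copy() keeps a plain row-by-row memcpy() fast path for the common case. The names demo_vram_copy(), VRAM_W/VRAM_H and the 16x16 test pattern in main() are made up for this illustration and do not exist in gpulib.

/* illustrative sketch only -- hypothetical names, not gpulib code */
#include <stdint.h>
#include <stdio.h>

#define VRAM_W 1024
#define VRAM_H 512

static uint16_t vram[VRAM_W * VRAM_H];

/* copy a w x h rectangle from (sx,sy) to (dx,dy), forcing msb into each
 * written pixel; coordinates wrap like the PSX VRAM copy command */
static void demo_vram_copy(uint32_t sx, uint32_t sy, uint32_t dx, uint32_t dy,
  uint32_t w, uint32_t h, uint16_t msb)
{
  uint16_t lbuf[128];
  uint32_t x, y, x1, w1;

  for (y = 0; y < h; y++) {
    const uint16_t *src = &vram[((sy + y) & (VRAM_H - 1)) * VRAM_W];
    uint16_t *dst = &vram[((dy + y) & (VRAM_H - 1)) * VRAM_W];
    for (x = 0; x < w; x += w1) {
      w1 = w - x;
      if (w1 > sizeof(lbuf) / sizeof(lbuf[0]))
        w1 = sizeof(lbuf) / sizeof(lbuf[0]);
      /* stage the chunk first so an overlapping copy reads the old data */
      for (x1 = 0; x1 < w1; x1++)
        lbuf[x1] = src[(sx + x + x1) & (VRAM_W - 1)];
      for (x1 = 0; x1 < w1; x1++)
        dst[(dx + x + x1) & (VRAM_W - 1)] = lbuf[x1] | msb;
    }
  }
}

int main(void)
{
  uint32_t x, y;

  /* fill a 16x16 block at (0,0) with a recognizable pattern */
  for (y = 0; y < 16; y++)
    for (x = 0; x < 16; x++)
      vram[y * VRAM_W + x] = (uint16_t)(y * 16 + x);

  /* overlapping copy 8 pixels to the right, forcing the mask bit */
  demo_vram_copy(0, 0, 8, 0, 16, 16, 0x8000);

  printf("pixel (8,0) = 0x%04x (expect 0x8000)\n", vram[8]);
  return 0;
}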