X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?p=pcsx_rearmed.git;a=blobdiff_plain;f=plugins%2Fgpulib%2Fgpu.c;h=b61bff607d32b84add3269a5f7ec716deaae2f22;hp=99b8edac6311a1be3dbc58b1ebdd45d45ce26ef1;hb=06bc35c833797ce9f6f3287abf954f037bb12319;hpb=7890a708c71f94c549b3e87f7471647a014d4038

diff --git a/plugins/gpulib/gpu.c b/plugins/gpulib/gpu.c
index 99b8edac..b61bff60 100644
--- a/plugins/gpulib/gpu.c
+++ b/plugins/gpulib/gpu.c
@@ -24,7 +24,7 @@
 //#define log_anomaly gpu_log
 #define log_anomaly(...)
 
-struct psx_gpu gpu __attribute__((aligned(2048)));
+struct psx_gpu gpu;
 
 static noinline int do_cmd_buffer(uint32_t *data, int count);
 static void finish_vram_transfer(int is_read);
@@ -92,9 +92,15 @@ static noinline void decide_frameskip(void)
     gpu.frameskip.active = 1;
   else
     gpu.frameskip.active = 0;
+
+  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
+    int dummy;
+    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
+    gpu.frameskip.pending_fill[0] = 0;
+  }
 }
 
-static noinline void decide_frameskip_allow(uint32_t cmd_e3)
+static noinline int decide_frameskip_allow(uint32_t cmd_e3)
 {
   // no frameskip if it decides to draw to display area,
   // but not for interlace since it'll most likely always do that
@@ -103,6 +109,7 @@ static noinline void decide_frameskip_allow(uint32_t cmd_e3)
   gpu.frameskip.allow = gpu.status.interlace ||
     (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
     (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
+  return gpu.frameskip.allow;
 }
 
 static noinline void get_gpu_info(uint32_t data)
@@ -126,6 +133,22 @@ static noinline void get_gpu_info(uint32_t data)
   }
 }
 
+// double, for overdraw guard
+#define VRAM_SIZE (1024 * 512 * 2 * 2)
+
+static int map_vram(void)
+{
+  gpu.vram = gpu.mmap(VRAM_SIZE);
+  if (gpu.vram != NULL) {
+    gpu.vram += 4096 / 2;
+    return 0;
+  }
+  else {
+    fprintf(stderr, "could not map vram, expect crashes\n");
+    return -1;
+  }
+}
+
 long GPUinit(void)
 {
   int ret;
@@ -138,12 +161,26 @@ long GPUinit(void)
   gpu.cmd_len = 0;
   do_reset();
 
+  if (gpu.mmap != NULL) {
+    if (map_vram() != 0)
+      ret = -1;
+  }
   return ret;
 }
 
 long GPUshutdown(void)
 {
-  return vout_finish();
+  long ret;
+
+  renderer_finish();
+  ret = vout_finish();
+  if (gpu.vram != NULL) {
+    gpu.vram -= 4096 / 2;
+    gpu.munmap(gpu.vram, VRAM_SIZE);
+  }
+  gpu.vram = NULL;
+
+  return ret;
 }
 
 void GPUwriteStatus(uint32_t data)
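Note: map_vram() above obtains VRAM through a frontend-supplied gpu.mmap callback instead of a static array, then steps the pointer by 4096/2 elements (gpu.vram is a uint16_t pointer, so 4096 bytes) to leave a guard band at the start of the mapping; VRAM_SIZE itself is doubled "for overdraw guard" per the comment. A minimal sketch of host-side callbacks matching the call sites in this diff, assuming a POSIX frontend; the names psx_gpu_mmap/psx_gpu_munmap are hypothetical, and the only contract visible here is "return NULL on failure":

    #include <stddef.h>
    #include <sys/mman.h>

    /* hypothetical frontend-side callbacks; map_vram() above only
     * checks the returned pointer against NULL */
    static void *psx_gpu_mmap(unsigned int size)
    {
      void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return p != MAP_FAILED ? p : NULL;
    }

    static void psx_gpu_munmap(void *ptr, unsigned int size)
    {
      munmap(ptr, size);
    }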
@@ -200,6 +237,7 @@ void GPUwriteStatus(uint32_t data)
       gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
       update_width();
       update_height();
+      renderer_notify_res_change();
       break;
     default:
       if ((cmd & 0xf0) == 0x10)
@@ -327,70 +365,90 @@ static void finish_vram_transfer(int is_read)
                            gpu.dma_start.w, gpu.dma_start.h);
 }
 
+static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
+{
+  int cmd = 0, pos = 0, len, dummy;
+  int skip = 1;
+
+  gpu.frameskip.pending_fill[0] = 0;
+
+  // XXX: polylines are not properly handled
+  while (pos < count && skip) {
+    uint32_t *list = data + pos;
+    cmd = list[0] >> 24;
+    len = 1 + cmd_lengths[cmd];
+
+    if (cmd == 0x02) {
+      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
+        // clearing something large, don't skip
+        do_cmd_list(list, 3, &dummy);
+      else
+        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
+    }
+    else if ((cmd & 0xf4) == 0x24) {
+      // flat textured prim
+      gpu.ex_regs[1] &= ~0x1ff;
+      gpu.ex_regs[1] |= list[4] & 0x1ff;
+    }
+    else if ((cmd & 0xf4) == 0x34) {
+      // shaded textured prim
+      gpu.ex_regs[1] &= ~0x1ff;
+      gpu.ex_regs[1] |= list[5] & 0x1ff;
+    }
+    else if (cmd == 0xe3)
+      skip = decide_frameskip_allow(list[0]);
+
+    if ((cmd & 0xf8) == 0xe0)
+      gpu.ex_regs[cmd & 7] = list[0];
+
+    if (pos + len > count) {
+      cmd = -1;
+      break; // incomplete cmd
+    }
+    if (cmd == 0xa0 || cmd == 0xc0)
+      break; // image i/o
+    pos += len;
+  }
+
+  renderer_sync_ecmds(gpu.ex_regs);
+  *last_cmd = cmd;
+  return pos;
+}
+
 static noinline int do_cmd_buffer(uint32_t *data, int count)
 {
-  int len, cmd, start, pos;
+  int cmd, pos;
+  uint32_t old_e3 = gpu.ex_regs[3];
   int vram_dirty = 0;
 
   // process buffer
-  for (start = pos = 0; pos < count; )
+  for (pos = 0; pos < count; )
   {
-    cmd = -1;
-    len = 0;
-
-    if (gpu.dma.h) {
+    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
+      vram_dirty = 1;
       pos += do_vram_io(data + pos, count - pos, 0);
       if (pos == count)
         break;
-      start = pos;
-    }
-
-    // do look-ahead pass to detect SR changes and VRAM i/o
-    while (pos < count) {
-      uint32_t *list = data + pos;
-      cmd = list[0] >> 24;
-      len = 1 + cmd_lengths[cmd];
-
-      //printf("  %3d: %02x %d\n", pos, cmd, len);
-      if ((cmd & 0xf4) == 0x24) {
-        // flat textured prim
-        gpu.ex_regs[1] &= ~0x1ff;
-        gpu.ex_regs[1] |= list[4] & 0x1ff;
-      }
-      else if ((cmd & 0xf4) == 0x34) {
-        // shaded textured prim
-        gpu.ex_regs[1] &= ~0x1ff;
-        gpu.ex_regs[1] |= list[5] & 0x1ff;
-      }
-      else if (cmd == 0xe3)
-        decide_frameskip_allow(list[0]);
-
-      if (2 <= cmd && cmd < 0xc0)
-        vram_dirty = 1;
-      else if ((cmd & 0xf8) == 0xe0)
-        gpu.ex_regs[cmd & 7] = list[0];
-
-      if (pos + len > count) {
-        cmd = -1;
-        break; // incomplete cmd
-      }
-      if (cmd == 0xa0 || cmd == 0xc0)
-        break; // image i/o
-      pos += len;
-    }
-
-    if (pos - start > 0) {
-      if (!gpu.frameskip.active || !gpu.frameskip.allow)
-        do_cmd_list(data + start, pos - start);
-      start = pos;
     }
 
+    cmd = data[pos] >> 24;
     if (cmd == 0xa0 || cmd == 0xc0) {
       // consume vram write/read cmd
       start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
-      pos += len;
+      pos += 3;
+      continue;
     }
-    else if (cmd == -1)
+
+    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
+    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
+      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
+    else {
+      pos += do_cmd_list(data + pos, count - pos, &cmd);
+      vram_dirty = 1;
+    }
+
+    if (cmd == -1)
+      // incomplete cmd
       break;
   }
 
@@ -398,10 +456,11 @@
   gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
   gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;
 
-  if (gpu.frameskip.active)
-    renderer_sync_ecmds(gpu.ex_regs);
   gpu.state.fb_dirty |= vram_dirty;
 
+  if (old_e3 != gpu.ex_regs[3])
+    decide_frameskip_allow(gpu.ex_regs[3]);
+
   return count - pos;
 }
 
@@ -554,16 +613,17 @@ long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
     case 1: // save
       if (gpu.cmd_len > 0)
         flush_cmd_buffer();
-      memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
+      memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
       memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
       freeze->ulStatus = gpu.status.reg;
       break;
     case 0: // load
-      memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
+      memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
       memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
       memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
       gpu.status.reg = freeze->ulStatus;
+      gpu.cmd_len = 0;
       for (i = 8; i > 0; i--) {
         gpu.regs[i] ^= 1; // avoid reg change detection
         GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
       }
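Note: in do_cmd_list_skip above, small fills (cmd 0x02) are parked in frameskip.pending_fill and replayed by decide_frameskip once skipping ends, while fills larger than the displayed area run immediately. The size test it applies, restated standalone for clarity (fill_covers_screen is illustrative only, not part of gpulib):

    #include <stdint.h>

    /* mirrors the check in do_cmd_list_skip: word 2 of a fill packet
     * carries width in the low halfword, height in the high halfword */
    static int fill_covers_screen(const uint32_t *list,
                                  uint32_t screen_w, uint32_t screen_h)
    {
      uint32_t w = list[2] & 0x3ff;
      uint32_t h = (list[2] >> 16) & 0x1ff;
      return w > screen_w || h > screen_h;
    }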
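Note: decide_frameskip_allow, which do_cmd_buffer now also re-runs when the e3 register changed (the old_e3 check above), tests whether the draw offset lands inside the display area with a single unsigned comparison per axis. The idiom in isolation (in_range is a hypothetical helper, not in gpu.c):

    #include <stdint.h>

    /* one unsigned compare covers both bounds: when v < lo the
     * subtraction wraps to a value >= len and the test fails */
    static inline int in_range(uint32_t v, uint32_t lo, uint32_t len)
    {
      return (uint32_t)(v - lo) < len;
    }
    /* decide_frameskip_allow uses the negated form: frameskip is
     * allowed when the e3 offset lies outside the screen rectangle */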
@@ -582,7 +642,16 @@ void GPUupdateLace(void)
     flush_cmd_buffer();
   renderer_flush_queues();
 
-  if (gpu.status.blanking || !gpu.state.fb_dirty)
+  if (gpu.status.blanking) {
+    if (!gpu.state.blanked) {
+      vout_blank();
+      gpu.state.blanked = 1;
+      gpu.state.fb_dirty = 1;
+    }
+    return;
+  }
+
+  if (!gpu.state.fb_dirty)
     return;
 
   if (gpu.frameskip.set) {
@@ -596,6 +665,7 @@ void GPUupdateLace(void)
 
   vout_update();
   gpu.state.fb_dirty = 0;
+  gpu.state.blanked = 0;
 }
 
 void GPUvBlank(int is_vblank, int lcf)
@@ -630,6 +700,14 @@ void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
   gpu.state.hcnt = cbs->gpu_hcnt;
   gpu.state.frame_count = cbs->gpu_frame_count;
   gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
+  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
+
+  gpu.mmap = cbs->mmap;
+  gpu.munmap = cbs->munmap;
+
+  // delayed vram mmap
+  if (gpu.vram == NULL)
+    map_vram();
 
   if (cbs->pl_vout_set_raw_vram)
     cbs->pl_vout_set_raw_vram(gpu.vram);
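Note: with the cbs->mmap/cbs->munmap fields added above, a frontend has to install the callbacks before gpulib can perform its delayed VRAM map in GPUrearmedCallbacks. A minimal sketch of that wiring, reusing the hypothetical psx_gpu_mmap/psx_gpu_munmap callbacks from the earlier note; setup_gpu_plugin is likewise hypothetical, and all other rearmed_cbs fields are omitted:

    /* hypothetical frontend wiring; struct rearmed_cbs is the callback
     * struct this plugin already receives from the frontend */
    static struct rearmed_cbs cbs;

    static void setup_gpu_plugin(void)
    {
      cbs.mmap   = psx_gpu_mmap;
      cbs.munmap = psx_gpu_munmap;
      GPUrearmedCallbacks(&cbs);  /* gpulib maps vram here if still NULL */
    }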