X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?p=pcsx_rearmed.git;a=blobdiff_plain;f=plugins%2Fgpu_neon%2Fgpu.c;h=f1d8993fde6819756e060c712e45d6c276a213f3;hp=12417e6e0adaf3a39b27df90dd9bcc8a1f6c7f42;hb=652c6b8b676be0172612da811b38dd5f87fa2870;hpb=24de2dd4dbdd50e44c91c40ebbc7d59ee1c0ac9b

diff --git a/plugins/gpu_neon/gpu.c b/plugins/gpu_neon/gpu.c
index 12417e6e..f1d8993f 100644
--- a/plugins/gpu_neon/gpu.c
+++ b/plugins/gpu_neon/gpu.c
@@ -17,18 +17,19 @@
 #define noinline __attribute__((noinline))
 
 #define gpu_log(fmt, ...) \
-  printf("%d:%03d: " fmt, gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)
+  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)
 
 //#define log_io gpu_log
 #define log_io(...)
 //#define log_anomaly gpu_log
 #define log_anomaly(...)
 
-struct psx_gpu gpu __attribute__((aligned(64)));
+struct psx_gpu gpu __attribute__((aligned(2048)));
 
 static noinline void do_reset(void)
 {
   memset(gpu.regs, 0, sizeof(gpu.regs));
+  memset(gpu.ex_regs, 0, sizeof(gpu.ex_regs));
   gpu.status.reg = 0x14802000;
   gpu.gp0 = 0;
   gpu.regs[3] = 1;
@@ -59,14 +60,32 @@ static noinline void update_height(void)
 
 static noinline void decide_frameskip(void)
 {
-  gpu.frameskip.frame_ready = !gpu.frameskip.active;
+  if (gpu.frameskip.active)
+    gpu.frameskip.cnt++;
+  else {
+    gpu.frameskip.cnt = 0;
+    gpu.frameskip.frame_ready = 1;
+  }
 
-  if (!gpu.frameskip.active && (*gpu.frameskip.advice || gpu.frameskip.set == 1))
+  if (!gpu.frameskip.active && *gpu.frameskip.advice)
+    gpu.frameskip.active = 1;
+  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
     gpu.frameskip.active = 1;
   else
     gpu.frameskip.active = 0;
 }
 
+static noinline void decide_frameskip_allow(uint32_t cmd_e3)
+{
+  // no frameskip if it decides to draw to display area,
+  // but not for interlace since it'll most likely always do that
+  uint32_t x = cmd_e3 & 0x3ff;
+  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
+  gpu.frameskip.allow = gpu.status.interlace ||
+    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
+    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
+}
+
 static noinline void get_gpu_info(uint32_t data)
 {
   switch (data & 0x0f) {
@@ -94,7 +113,7 @@ long GPUinit(void)
   ret  = vout_init();
   ret |= renderer_init();
 
-  gpu.state.frame_count = 0;
+  gpu.state.frame_count = &gpu.zero;
   gpu.state.hcnt = &gpu.zero;
   do_reset();
   return ret;
@@ -132,8 +151,13 @@ void GPUwriteStatus(uint32_t data)
     case 0x05:
       gpu.screen.x = data & 0x3ff;
       gpu.screen.y = (data >> 10) & 0x3ff;
-      if (gpu.frameskip.set)
-        decide_frameskip();
+      if (gpu.frameskip.set) {
+        decide_frameskip_allow(gpu.ex_regs[3]);
+        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
+          decide_frameskip();
+          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
+        }
+      }
       break;
     case 0x06:
       gpu.screen.x1 = data & 0xfff;
@@ -165,9 +189,9 @@ const unsigned char cmd_lengths[256] =
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
   5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
-  2, 2, 2, 2, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, // 40
-  3, 3, 3, 3, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4,
-  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 2, 2, 2, 2, // 60
+  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
+  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
   1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
   3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -241,16 +265,22 @@ static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_re
   if (gpu.dma.h)
     log_anomaly("start_vram_transfer while old unfinished\n");
 
-  gpu.dma.x = pos_word & 1023;
-  gpu.dma.y = (pos_word >> 16) & 511;
-  gpu.dma.w = size_word & 0xffff; // ?
-  gpu.dma.h = size_word >> 16;
+  gpu.dma.x = pos_word & 0x3ff;
+  gpu.dma.y = (pos_word >> 16) & 0x1ff;
+  gpu.dma.w = size_word & 0x3ff;
+  gpu.dma.h = (size_word >> 16) & 0x1ff;
   gpu.dma.offset = 0;
 
-  if (is_read)
+  renderer_flush_queues();
+  if (is_read) {
     gpu.status.img = 1;
-  else
+    // XXX: wrong for width 1
+    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
+    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
+  }
+  else {
     renderer_invalidate_caches(gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
+  }
 
   log_io("start_vram_transfer %c (%d, %d) %dx%d\n",
     is_read ? 'r' : 'w', gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
@@ -283,25 +313,17 @@ static int check_cmd(uint32_t *data, int count)
     //printf(" %3d: %02x %d\n", pos, cmd, len);
     if ((cmd & 0xf4) == 0x24) {
       // flat textured prim
-      gpu.status.reg &= ~0x1ff;
-      gpu.status.reg |= list[4] & 0x1ff;
+      gpu.ex_regs[1] &= ~0x1ff;
+      gpu.ex_regs[1] |= list[4] & 0x1ff;
     }
     else if ((cmd & 0xf4) == 0x34) {
       // shaded textured prim
-      gpu.status.reg &= ~0x1ff;
-      gpu.status.reg |= list[5] & 0x1ff;
-    }
-    else switch (cmd)
-    {
-      case 0xe1:
-        gpu.status.reg &= ~0x7ff;
-        gpu.status.reg |= list[0] & 0x7ff;
-        break;
-      case 0xe6:
-        gpu.status.reg &= ~0x1800;
-        gpu.status.reg |= (list[0] & 3) << 11;
-        break;
+      gpu.ex_regs[1] &= ~0x1ff;
+      gpu.ex_regs[1] |= list[5] & 0x1ff;
     }
+    else if (cmd == 0xe3)
+      decide_frameskip_allow(list[0]);
+
     if (2 <= cmd && cmd < 0xc0)
       vram_dirty = 1;
     else if ((cmd & 0xf8) == 0xe0)
@@ -317,7 +339,7 @@ static int check_cmd(uint32_t *data, int count)
     }
 
     if (pos - start > 0) {
-      if (!gpu.frameskip.active)
+      if (!gpu.frameskip.active || !gpu.frameskip.allow)
        do_cmd_list(data + start, pos - start);
      start = pos;
    }
@@ -331,6 +353,10 @@
      break;
  }
 
+  gpu.status.reg &= ~0x1fff;
+  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
+  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;
+
   if (gpu.frameskip.active)
     renderer_sync_ecmds(gpu.ex_regs);
   gpu.state.fb_dirty |= vram_dirty;
@@ -373,15 +399,15 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
   uint32_t addr, *list;
   uint32_t *llist_entry = NULL;
   int len, left, count;
-  long dma_words = 0;
+  long cpu_cycles = 0;
 
   if (unlikely(gpu.cmd_len > 0))
     flush_cmd_buffer();
 
   // ff7 sends it's main list twice, detect this
-  if (gpu.state.frame_count == gpu.state.last_list.frame &&
-      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
-      gpu.state.last_list.words > 1024)
+  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
+      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
+      gpu.state.last_list.cycles > 2048)
   {
     llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
     *llist_entry |= 0x800000;
@@ -394,7 +420,9 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
     list = rambase + (addr & 0x1fffff) / 4;
     len = list[0] >> 24;
     addr = list[0] & 0xffffff;
-    dma_words += 1 + len;
+    cpu_cycles += 10;
+    if (len > 0)
+      cpu_cycles += 5 + len;
 
     log_io(".chain %08x #%d\n", (list - rambase) * 4, len);
 
@@ -423,12 +451,12 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
   if (llist_entry)
     *llist_entry &= ~0x800000;
 
-  gpu.state.last_list.frame = gpu.state.frame_count;
+  gpu.state.last_list.frame = *gpu.state.frame_count;
   gpu.state.last_list.hcnt = *gpu.state.hcnt;
-  gpu.state.last_list.words = dma_words;
+  gpu.state.last_list.cycles = cpu_cycles;
   gpu.state.last_list.addr = start_addr;
 
-  return dma_words;
+  return cpu_cycles;
 }
 
 void GPUreadDataMem(uint32_t *mem, int count)
@@ -444,15 +472,17 @@
 
 uint32_t GPUreadData(void)
 {
-  log_io("gpu_read\n");
+  uint32_t ret;
 
   if (unlikely(gpu.cmd_len > 0))
     flush_cmd_buffer();
 
+  ret = gpu.gp0;
   if (gpu.dma.h)
-    do_vram_io(&gpu.gp0, 1, 1);
+    do_vram_io(&ret, 1, 1);
 
-  return gpu.gp0;
+  log_io("gpu_read %08x\n", ret);
+  return ret;
 }
 
 uint32_t GPUreadStatus(void)
@@ -467,15 +497,15 @@
   return ret;
 }
 
-typedef struct GPUFREEZETAG
+struct GPUFreeze
 {
   uint32_t ulFreezeVersion;           // should be always 1 for now (set by main emu)
   uint32_t ulStatus;                  // current gpu status
   uint32_t ulControl[256];            // latest control register values
   unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
-} GPUFreeze_t;
+};
 
-long GPUfreeze(uint32_t type, GPUFreeze_t *freeze)
+long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
 {
   int i;
 
@@ -505,4 +535,65 @@
   return 1;
 }
 
+void GPUupdateLace(void)
+{
+  if (gpu.cmd_len > 0)
+    flush_cmd_buffer();
+  renderer_flush_queues();
+
+  if (gpu.status.blanking || !gpu.state.fb_dirty)
+    return;
+
+  if (gpu.frameskip.set) {
+    if (!gpu.frameskip.frame_ready) {
+      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
+        return;
+      gpu.frameskip.active = 0;
+    }
+    gpu.frameskip.frame_ready = 0;
+  }
+
+  vout_update();
+  gpu.state.fb_dirty = 0;
+}
+
+void GPUvBlank(int is_vblank, int lcf)
+{
+  int interlace = gpu.state.allow_interlace
+    && gpu.status.interlace && gpu.status.dheight;
+  // interlace doesn't look nice on progressive displays,
+  // so we have this "auto" mode here for games that don't read vram
+  if (gpu.state.allow_interlace == 2
+      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
+  {
+    interlace = 0;
+  }
+  if (interlace || interlace != gpu.state.old_interlace) {
+    gpu.state.old_interlace = interlace;
+
+    if (gpu.cmd_len > 0)
+      flush_cmd_buffer();
+    renderer_flush_queues();
+    renderer_set_interlace(interlace, !lcf);
+  }
+}
+
+#include "../../frontend/plugin_lib.h"
+
+void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
+{
+  gpu.frameskip.set = cbs->frameskip;
+  gpu.frameskip.advice = &cbs->fskip_advice;
+  gpu.frameskip.active = 0;
+  gpu.frameskip.frame_ready = 1;
+  gpu.state.hcnt = cbs->gpu_hcnt;
+  gpu.state.frame_count = cbs->gpu_frame_count;
+  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
+
+  if (cbs->pl_vout_set_raw_vram)
+    cbs->pl_vout_set_raw_vram(gpu.vram);
+  renderer_set_config(cbs);
+  vout_set_config(cbs);
+}
+
 // vim:shiftwidth=2:expandtab