X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?p=pcsx_rearmed.git;a=blobdiff_plain;f=plugins%2Fgpu_neon%2Fgpu.c;h=c275d8342f82e588d495327b7cb35f57a8206f06;hp=c43fd3119e5e8768c3d1e29f0c4e04386c6d7549;hb=a80ae4a0353fce94df700ec84222d3c56c3d813a;hpb=8dd855cd6cdab41d1e38637a020681137eceb457

diff --git a/plugins/gpu_neon/gpu.c b/plugins/gpu_neon/gpu.c
index c43fd311..c275d834 100644
--- a/plugins/gpu_neon/gpu.c
+++ b/plugins/gpu_neon/gpu.c
@@ -16,23 +16,24 @@
 #define unlikely(x) __builtin_expect((x), 0)
 #define noinline __attribute__((noinline))
 
-//#define log_io printf
+#define gpu_log(fmt, ...) \
+  printf("%d:%03d: " fmt, gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)
+
+//#define log_io gpu_log
 #define log_io(...)
-#define log_anomaly printf
+//#define log_anomaly gpu_log
+#define log_anomaly(...)
 
 struct psx_gpu gpu __attribute__((aligned(64)));
 
-long GPUinit(void)
+static noinline void do_reset(void)
 {
-  int ret = vout_init();
+  memset(gpu.regs, 0, sizeof(gpu.regs));
   gpu.status.reg = 0x14802000;
-  gpu.lcf_hc = &gpu.zero;
-  return ret;
-}
-
-long GPUshutdown(void)
-{
-  return vout_finish();
+  gpu.gp0 = 0;
+  gpu.regs[3] = 1;
+  gpu.screen.hres = gpu.screen.w = 256;
+  gpu.screen.vres = gpu.screen.h = 240;
 }
 
 static noinline void update_width(void)
@@ -56,19 +57,72 @@ static noinline void update_height(void)
   gpu.screen.h = sh;
 }
 
+static noinline void decide_frameskip(void)
+{
+  gpu.frameskip.frame_ready = !gpu.frameskip.active;
+
+  if (!gpu.frameskip.active && (*gpu.frameskip.advice || gpu.frameskip.set == 1))
+    gpu.frameskip.active = 1;
+  else
+    gpu.frameskip.active = 0;
+}
+
+static noinline void get_gpu_info(uint32_t data)
+{
+  switch (data & 0x0f) {
+    case 0x02:
+    case 0x03:
+    case 0x04:
+    case 0x05:
+      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
+      break;
+    case 0x06:
+      gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
+      break;
+    case 0x07:
+      gpu.gp0 = 2;
+      break;
+    default:
+      gpu.gp0 = 0;
+      break;
+  }
+}
+
+long GPUinit(void)
+{
+  int ret;
+  ret  = vout_init();
+  ret |= renderer_init();
+
+  gpu.lcf_hc = &gpu.zero;
+  gpu.state.frame_count = 0;
+  gpu.state.hcnt = &gpu.zero;
+  do_reset();
+  return ret;
+}
+
+long GPUshutdown(void)
+{
+  return vout_finish();
+}
+
 void GPUwriteStatus(uint32_t data)
 {
   static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
   static const short vres[4] = { 240, 480, 256, 480 };
   uint32_t cmd = data >> 24;
 
-  if (cmd < ARRAY_SIZE(gpu.regs))
+  if (cmd < ARRAY_SIZE(gpu.regs)) {
+    if (cmd != 0 && cmd != 5 && gpu.regs[cmd] == data)
+      return;
     gpu.regs[cmd] = data;
+  }
+
+  gpu.state.fb_dirty = 1;
 
   switch (cmd) {
     case 0x00:
-      gpu.status.reg = 0x14802000;
-      gpu.status.blanking = 1;
+      do_reset();
       break;
     case 0x03:
       gpu.status.blanking = data & 1;
@@ -79,6 +133,8 @@ void GPUwriteStatus(uint32_t data)
     case 0x05:
       gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x3ff;
+      if (gpu.frameskip.set)
+        decide_frameskip();
       break;
     case 0x06:
       gpu.screen.x1 = data & 0xfff;
@@ -97,6 +153,10 @@ void GPUwriteStatus(uint32_t data)
       update_width();
       update_height();
       break;
+    default:
+      if ((cmd & 0xf0) == 0x10)
+        get_gpu_info(data);
+      break;
   }
 }
 
@@ -174,7 +234,7 @@ static int do_vram_io(uint32_t *data, int count, int is_read)
   gpu.dma.h = h;
   gpu.dma.offset = o;
 
-  return count_initial - (count + 1) / 2;
+  return count_initial - count / 2;
 }
 
 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
@@ -190,14 +250,17 @@ static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_re
 
   if (is_read)
     gpu.status.img = 1;
+  else
+    renderer_invalidate_caches(gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
 
-  //printf("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
-  //  gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
+  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
+    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
 }
 
 static int check_cmd(uint32_t *data, int count)
 {
   int len, cmd, start, pos;
+  int vram_dirty = 0;
 
   // process buffer
   for (start = pos = 0; pos < count; )
@@ -240,6 +303,10 @@ static int check_cmd(uint32_t *data, int count)
           gpu.status.reg |= (list[0] & 3) << 11;
           break;
       }
+      if (2 <= cmd && cmd < 0xc0)
+        vram_dirty = 1;
+      else if ((cmd & 0xf8) == 0xe0)
+        gpu.ex_regs[cmd & 7] = list[0];
 
       if (pos + len > count) {
         cmd = -1;
@@ -251,7 +318,8 @@ static int check_cmd(uint32_t *data, int count)
     }
 
     if (pos - start > 0) {
-      do_cmd_list(data + start, pos - start);
+      if (!gpu.frameskip.active)
+        do_cmd_list(data + start, pos - start);
       start = pos;
     }
 
@@ -260,11 +328,14 @@ static int check_cmd(uint32_t *data, int count)
       start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
       pos += len;
     }
-
-    if (cmd == -1)
+    else if (cmd == -1)
       break;
   }
 
+  if (gpu.frameskip.active)
+    renderer_sync_ecmds(gpu.ex_regs);
+  gpu.state.fb_dirty |= vram_dirty;
+
   return count - pos;
 }
 
@@ -301,20 +372,32 @@ void GPUwriteData(uint32_t data)
 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
 {
   uint32_t addr, *list;
+  uint32_t *llist_entry = NULL;
   int len, left, count;
+  long dma_words = 0;
 
   if (unlikely(gpu.cmd_len > 0))
     flush_cmd_buffer();
 
+  // ff7 sends it's main list twice, detect this
+  if (gpu.state.frame_count == gpu.state.last_list.frame &&
+      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
+      gpu.state.last_list.words > 1024)
+  {
+    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
+    *llist_entry |= 0x800000;
+  }
+
   log_io("gpu_dma_chain\n");
   addr = start_addr & 0xffffff;
   for (count = 0; addr != 0xffffff; count++)
   {
-    log_io(".chain %08x\n", addr);
-
     list = rambase + (addr & 0x1fffff) / 4;
     len = list[0] >> 24;
     addr = list[0] & 0xffffff;
+    dma_words += 1 + len;
+
+    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);
 
     // loop detection marker
     // (bit23 set causes DMA error on real machine, so
@@ -324,7 +407,7 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
     if (len) {
       left = check_cmd(list + 1, len);
       if (left)
-        log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, len);
+        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
     }
 
     if (addr & 0x800000)
@@ -338,8 +421,15 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
     addr = list[0] & 0x1fffff;
     list[0] &= ~0x800000;
   }
+  if (llist_entry)
+    *llist_entry &= ~0x800000;
+
+  gpu.state.last_list.frame = gpu.state.frame_count;
+  gpu.state.last_list.hcnt = *gpu.state.hcnt;
+  gpu.state.last_list.words = dma_words;
+  gpu.state.last_list.addr = start_addr;
 
-  return 0;
+  return dma_words;
 }
 
 void GPUreadDataMem(uint32_t *mem, int count)
@@ -355,17 +445,15 @@ void GPUreadDataMem(uint32_t *mem, int count)
 
 uint32_t GPUreadData(void)
 {
-  uint32_t v = 0;
-
   log_io("gpu_read\n");
 
   if (unlikely(gpu.cmd_len > 0))
     flush_cmd_buffer();
 
   if (gpu.dma.h)
-    do_vram_io(&v, 1, 1);
+    do_vram_io(&gpu.gp0, 1, 1);
 
-  return v;
+  return gpu.gp0;
 }
 
 uint32_t GPUreadStatus(void)
@@ -390,21 +478,28 @@ typedef struct GPUFREEZETAG
 
 long GPUfreeze(uint32_t type, GPUFreeze_t *freeze)
 {
+  int i;
+
   switch (type) {
     case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
+      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
+      renderer_invalidate_caches(0, 0, 1024, 512);
      memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
+      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status.reg = freeze->ulStatus;
-      GPUwriteStatus((5 << 24) | gpu.regs[5]);
-      GPUwriteStatus((7 << 24) | gpu.regs[7]);
-      GPUwriteStatus((8 << 24) | gpu.regs[8]);
+      for (i = 8; i > 0; i--) {
+        gpu.regs[i] ^= 1; // avoid reg change detection
+        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
+      }
+      renderer_sync_ecmds(gpu.ex_regs);
      break;
   }
 
@@ -423,6 +518,10 @@ void GPUvBlank(int val, uint32_t *hcnt)
     if (!val)
       gpu.lcf_hc = hcnt;
   }
+  if (!val)
+    gpu.state.frame_count++;
+
+  gpu.state.hcnt = hcnt;
 }
 
 // vim:shiftwidth=2:expandtab