//#define log_io gpu_log
#define log_io(...)
-#define log_anomaly gpu_log
-//#define log_anomaly(...)
+//#define log_anomaly gpu_log
+#define log_anomaly(...)
struct psx_gpu gpu __attribute__((aligned(64)));
static void decide_frameskip(void)
{
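  // the frame just finished is only displayable if it was not skipped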
  gpu.frameskip.frame_ready = !gpu.frameskip.active;

-  if (!gpu.frameskip.active && *gpu.frameskip.advice)
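+  // a fixed setting of 1 means "skip every other frame", so arm the skip
+  // whenever the previous frame was actually drawn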
+  if (!gpu.frameskip.active && (*gpu.frameskip.advice || gpu.frameskip.set == 1))
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;
long GPUinit(void)
{
-  int ret = vout_init();
-  do_reset();
+  int ret;
+  ret = vout_init();
+  ret |= renderer_init();
+
  gpu.lcf_hc = &gpu.zero;
  gpu.state.frame_count = 0;
  gpu.state.hcnt = &gpu.zero;
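+  // reset only after the renderer is up, so it can observe the cleared state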
+  do_reset();
  return ret;
}
  uint32_t cmd = data >> 24;
  if (cmd < ARRAY_SIZE(gpu.regs)) {
-    if (cmd != 0 && gpu.regs[cmd] == data)
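+    // reg 5 (display start) is rewritten every frame and drives the
+    // frameskip decision, so the duplicate-write check must not drop it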
+    if (cmd != 0 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x3ff;
-      if (gpu.frameskip.enabled)
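+      // any non-zero frameskip setting wants the per-frame decision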
+      if (gpu.frameskip.set)
        decide_frameskip();
      break;
    case 0x06:
  if (is_read)
    gpu.status.img = 1;
+  else
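+    // a cpu->vram write may overwrite pixels the renderer has cached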
+    renderer_invalidate_caches(gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
      start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
      pos += len;
    }
-
-    if (cmd == -1)
+    else if (cmd == -1)
      break;
  }
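+  // while frames are skipped the renderer processes no commands, so hand it
+  // the current e1-e6 state to keep it from going stale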
+  if (gpu.frameskip.active)
+    renderer_sync_ecmds(gpu.ex_regs);
  gpu.state.fb_dirty |= vram_dirty;
  return count - pos;
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
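+      // all 1024x512 of vram is about to be replaced; drop everything cached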
+      renderer_invalidate_caches(0, 0, 1024, 512);
      memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
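+      // registers are restored; bring the renderer's e-command state in line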
+      renderer_sync_ecmds(gpu.ex_regs);
      break;
  }