#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))
-//#define log_io printf
+// timestamped log helper: prefixes each message with the current frame count
+// and the horizontal line counter so events can be correlated with emu timing
+#define gpu_log(fmt, ...) \
+ printf("%d:%03d: " fmt, gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)
+
+// uncomment to route I/O tracing through gpu_log
+//#define log_io gpu_log
#define log_io(...)
-#define log_anomaly printf
+// anomaly logging is now compiled out by default (was an unconditional printf);
+// uncomment the gpu_log alias above it to re-enable
+//#define log_anomaly gpu_log
#define log_anomaly(...)
struct psx_gpu gpu __attribute__((aligned(64)));
-long GPUinit(void)
+// GP1(0x00)-style reset: return registers and screen state to power-on
+// defaults; factored out so both GPUinit() and the reset command share it
+static noinline void do_reset(void)
{
- int ret = vout_init();
+ memset(gpu.regs, 0, sizeof(gpu.regs));
gpu.status.reg = 0x14802000;
- gpu.status.blanking = 1;
+ // clear the GPUREAD latch as well
+ gpu.gp0 = 0;
gpu.regs[3] = 1;
- gpu.screen.hres = gpu.screen.w = 320;
+ // 256 wide is presumably the hardware default after reset -- TODO confirm
+ gpu.screen.hres = gpu.screen.w = 256;
gpu.screen.vres = gpu.screen.h = 240;
- gpu.lcf_hc = &gpu.zero;
- return ret;
-}
-
-long GPUshutdown(void)
-{
- return vout_finish();
}
static noinline void update_width(void)
{
gpu.frameskip.frame_ready = !gpu.frameskip.active;
+// NOTE(review): this hunk shows frameskip-decision logic under the
+// update_width() header -- unchanged context lines appear elided here,
+// so confirm the placement against the full file
- if (!gpu.frameskip.active && *gpu.frameskip.advice)
+// skip when the frontend advises it, or when frameskip.set == 1
+// (presumably the "skip every other frame" setting -- confirm)
+ if (!gpu.frameskip.active && (*gpu.frameskip.advice || gpu.frameskip.set == 1))
gpu.frameskip.active = 1;
else
gpu.frameskip.active = 0;
}
+// GP0(0x10) "get GPU info" query: latch the requested internal state into
+// gpu.gp0, where the next GPUreadData() call picks it up
+static noinline void get_gpu_info(uint32_t data)
+{
+ switch (data & 0x0f) {
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ case 0x05:
+ // shadowed E2..E5 state (20 valid bits); exact per-index meaning
+ // (texture window / draw area / draw offset) per hardware docs
+ gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
+ break;
+ case 0x06:
+ // NOTE(review): index 6 returns the E5 shadow again -- confirm
+ // against hardware documentation
+ gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
+ break;
+ case 0x07:
+ // GPU version identifier
+ gpu.gp0 = 2;
+ break;
+ default:
+ gpu.gp0 = 0;
+ break;
+ }
+}
+
+// plugin entry point: initialize video output and rasterizer, then apply a
+// power-on reset; returns the OR of the sub-init results (nonzero = failure)
+long GPUinit(void)
+{
+ int ret;
+ ret = vout_init();
+ ret |= renderer_init();
+
+ // point timing hooks at a harmless zero word until the frontend
+ // provides the real counters
+ gpu.lcf_hc = &gpu.zero;
+ gpu.state.frame_count = 0;
+ gpu.state.hcnt = &gpu.zero;
+ do_reset();
+ return ret;
+}
+
+// plugin teardown; NOTE(review): only vout is torn down -- confirm whether
+// renderer_init() needs a matching finish call
+long GPUshutdown(void)
+{
+ return vout_finish();
+}
+
void GPUwriteStatus(uint32_t data)
{
static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
uint32_t cmd = data >> 24;
if (cmd < ARRAY_SIZE(gpu.regs)) {
- if (cmd != 0 && gpu.regs[cmd] == data)
+ // cmd 5 (display start address) is no longer deduplicated --
+ // presumably so decide_frameskip() below runs every frame; confirm
+ if (cmd != 0 && cmd != 5 && gpu.regs[cmd] == data)
return;
gpu.regs[cmd] = data;
}
switch (cmd) {
case 0x00:
- gpu.status.reg = 0x14802000;
- gpu.status.blanking = 1;
+ do_reset();
break;
case 0x03:
gpu.status.blanking = data & 1;
+ // NOTE(review): no break visible before case 0x05 -- unchanged
+ // context lines appear elided in this hunk; confirm in full file
case 0x05:
gpu.screen.x = data & 0x3ff;
gpu.screen.y = (data >> 10) & 0x3ff;
- if (gpu.frameskip.enabled)
+ if (gpu.frameskip.set)
decide_frameskip();
break;
case 0x06:
update_width();
update_height();
break;
+ default:
+ // GP1(0x10..0x1f): info query, result latched for GPUreadData()
+ if ((cmd & 0xf0) == 0x10)
+ get_gpu_info(data);
+ break;
}
}
gpu.dma.h = h;
gpu.dma.offset = o;
- return count_initial - (count + 1) / 2;
+ // round down: an odd leftover halfword has not consumed a full word,
+ // so it must not be reported as progress (was off by one for odd counts)
+ return count_initial - count / 2;
}
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
if (is_read)
gpu.status.img = 1;
+ else
+ // CPU->VRAM write: drop any renderer caches covering the target rect
+ renderer_invalidate_caches(gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
- //printf("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
- // gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
+ log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
+ gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}
static int check_cmd(uint32_t *data, int count)
}
if (2 <= cmd && cmd < 0xc0)
vram_dirty = 1;
+ // shadow rendering-state commands E0..E7 so GPU-info queries and
+ // savestates can report them later
+ else if ((cmd & 0xf8) == 0xe0)
+ gpu.ex_regs[cmd & 7] = list[0];
if (pos + len > count) {
cmd = -1;
start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
pos += len;
}
-
- if (cmd == -1)
+ // NOTE(review): break is now tied to the branch above via else --
+ // surrounding context is elided here, confirm against full loop
+ else if (cmd == -1)
break;
}
+ // while frames are skipped the rasterizer never saw these e-cmds;
+ // replay them so its state stays consistent
+ if (gpu.frameskip.active)
+ renderer_sync_ecmds(gpu.ex_regs);
gpu.state.fb_dirty |= vram_dirty;
return count - pos;
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
uint32_t addr, *list;
+ uint32_t *llist_entry = NULL;
int len, left, count;
+ long dma_words = 0;
if (unlikely(gpu.cmd_len > 0))
flush_cmd_buffer();
+ // ff7 sends its main list twice, detect this
+ // heuristic: same frame, at most one hline apart, and a large previous
+ // list -- mark the old head with the loop-detection bit so the second
+ // walk terminates immediately; the mark is undone below
+ if (gpu.state.frame_count == gpu.state.last_list.frame &&
+ *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
+ gpu.state.last_list.words > 1024)
+ {
+ llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
+ *llist_entry |= 0x800000;
+ }
+
log_io("gpu_dma_chain\n");
addr = start_addr & 0xffffff;
for (count = 0; addr != 0xffffff; count++)
{
- log_io(".chain %08x\n", addr);
-
list = rambase + (addr & 0x1fffff) / 4;
len = list[0] >> 24;
addr = list[0] & 0xffffff;
+ // total packet header + payload words, returned to the caller
+ dma_words += 1 + len;
+
+ log_io(".chain %08x #%d\n", (list - rambase) * 4, len);
// loop detection marker
// (bit23 set causes DMA error on real machine, so
if (len) {
left = check_cmd(list + 1, len);
if (left)
- log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, len);
+ log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
}
if (addr & 0x800000)
addr = list[0] & 0x1fffff;
list[0] &= ~0x800000;
}
+ // undo the duplicate-list marker set before the walk
+ if (llist_entry)
+ *llist_entry &= ~0x800000;
+
+ // remember this list so the next call can detect a resend
+ gpu.state.last_list.frame = gpu.state.frame_count;
+ gpu.state.last_list.hcnt = *gpu.state.hcnt;
+ gpu.state.last_list.words = dma_words;
+ gpu.state.last_list.addr = start_addr;
- return 0;
+ // word count presumably lets the core approximate DMA timing -- confirm
+ return dma_words;
}
void GPUreadDataMem(uint32_t *mem, int count)
uint32_t GPUreadData(void)
{
- uint32_t v = 0;
-
log_io("gpu_read\n");
if (unlikely(gpu.cmd_len > 0))
flush_cmd_buffer();
if (gpu.dma.h)
+ // read directly into the GPUREAD latch; with no transfer active the
+ // previous latched value (e.g. a get_gpu_info result) is returned,
+ // instead of the old hardwired 0
- do_vram_io(&v, 1, 1);
+ do_vram_io(&gpu.gp0, 1, 1);
- return v;
+ return gpu.gp0;
}
uint32_t GPUreadStatus(void)
flush_cmd_buffer();
memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
+ // stash the E0..E7 shadows past the control regs (offset 0xe0 matches
+ // their command numbers); restored symmetrically on load below
+ memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
freeze->ulStatus = gpu.status.reg;
break;
case 0: // load
+ // whole-VRAM replace: drop every renderer cache first
+ renderer_invalidate_caches(0, 0, 1024, 512);
memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
+ memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
gpu.status.reg = freeze->ulStatus;
for (i = 8; i > 0; i--) {
gpu.regs[i] ^= 1; // avoid reg change detection
GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
}
+ // push the restored e-cmd state into the rasterizer
+ renderer_sync_ecmds(gpu.ex_regs);
break;
}
if (!val)
gpu.lcf_hc = hcnt;
}
+ // presumably val==0 marks vblank start: advance the frame counter used
+ // by frameskip and duplicate-list detection -- confirm caller semantics
+ if (!val)
+ gpu.state.frame_count++;
+
+ // keep a pointer to the emu's hline counter for gpu_log timestamps
+ // and for GPUdmaChain()'s resend heuristic
+ gpu.state.hcnt = hcnt;
}
// vim:shiftwidth=2:expandtab