#include "gpu.h"
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#ifdef __GNUC__
#define unlikely(x) __builtin_expect((x), 0)
+#define preload __builtin_prefetch
#define noinline __attribute__((noinline))
+#else
+#define unlikely(x) (x)
+#define preload(...)
+#define noinline
+#endif
#define gpu_log(fmt, ...) \
printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)
//#define log_anomaly gpu_log
#define log_anomaly(...)
-struct psx_gpu gpu __attribute__((aligned(2048)));
+struct psx_gpu gpu;
static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);
static noinline void update_height(void)
{
+ // TODO: emulate this properly..
int sh = gpu.screen.y2 - gpu.screen.y1;
if (gpu.status.dheight)
sh *= 2;
- if (sh <= 0)
+ if (sh <= 0 || sh > gpu.screen.vres)
sh = gpu.screen.vres;
  gpu.screen.h = sh;
}

static noinline void decide_frameskip(void)
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;
+
+ if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
+ int dummy;
+ do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
+ gpu.frameskip.pending_fill[0] = 0;
+ }
}
-static noinline void decide_frameskip_allow(uint32_t cmd_e3)
+static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
// no frameskip if it decides to draw to display area,
// but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x1ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
+ return gpu.frameskip.allow;
}
static noinline void get_gpu_info(uint32_t data)
}
}
+// double, for overdraw guard
+#define VRAM_SIZE (1024 * 512 * 2 * 2)
+
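+// gpu.mmap/gpu.munmap come from the frontend callbacks (see rearmed_set_cbs
+// below); gpu.mmap is expected to return NULL on failure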
+static int map_vram(void)
+{
+ gpu.vram = gpu.mmap(VRAM_SIZE);
+ if (gpu.vram != NULL) {
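+    // 4kb guard area in front; vram is a uint16_t pointer, hence 4096/2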
+ gpu.vram += 4096 / 2;
+ return 0;
+ }
+ else {
+ fprintf(stderr, "could not map vram, expect crashes\n");
+ return -1;
+ }
+}
+
long GPUinit(void)
{
  int ret;

  ret  = vout_init();
  ret |= renderer_init();
gpu.cmd_len = 0;
do_reset();
+ if (gpu.mmap != NULL) {
+ if (map_vram() != 0)
+ ret = -1;
+ }
return ret;
}
long GPUshutdown(void)
{
- return vout_finish();
+ long ret;
+
+ renderer_finish();
+ ret = vout_finish();
+ if (gpu.vram != NULL) {
+ gpu.vram -= 4096 / 2;
+ gpu.munmap(gpu.vram, VRAM_SIZE);
+ }
+ gpu.vram = NULL;
+
+ return ret;
}
void GPUwriteStatus(uint32_t data)
break;
case 0x05:
gpu.screen.x = data & 0x3ff;
- gpu.screen.y = (data >> 10) & 0x3ff;
+ gpu.screen.y = (data >> 10) & 0x1ff;
if (gpu.frameskip.set) {
decide_frameskip_allow(gpu.ex_regs[3]);
if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
update_width();
update_height();
+ renderer_notify_res_change();
break;
default:
if ((cmd & 0xf0) == 0x10)
get_gpu_info(data);
break;
}
+
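+// optional hook for ports that need to observe status writes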
+#ifdef GPUwriteStatus_ext
+ GPUwriteStatus_ext(data);
+#endif
}
const unsigned char cmd_lengths[256] =
gpu.dma_start.w, gpu.dma_start.h);
}
+static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
+{
+ int cmd = 0, pos = 0, len, dummy, v;
+ int skip = 1;
+
+ gpu.frameskip.pending_fill[0] = 0;
+
+ while (pos < count && skip) {
+ uint32_t *list = data + pos;
+ cmd = list[0] >> 24;
+ len = 1 + cmd_lengths[cmd];
+
+ switch (cmd) {
+ case 0x02:
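+      // vram fill: screen-sized fills are deferred via pending_fill and
+      // flushed once frameskip goes inactive (see decide_frameskip above)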
+ if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
+ // clearing something large, don't skip
+ do_cmd_list(list, 3, &dummy);
+ else
+ memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
+ break;
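+    // textured prims: update texpage bits (e1) from the texcoord word,
+    // which is list[4] for flat and list[5] for gouraud variants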
+ case 0x24 ... 0x27:
+ case 0x2c ... 0x2f:
+ case 0x34 ... 0x37:
+ case 0x3c ... 0x3f:
+ gpu.ex_regs[1] &= ~0x1ff;
+ gpu.ex_regs[1] |= list[4 + ((cmd >> 4) & 1)] & 0x1ff;
+ break;
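+    // mono poly-line: length is variable, scan for the 0x5xxx5xxx terminator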
+ case 0x48 ... 0x4F:
+ for (v = 3; pos + v < count; v++)
+ {
+ if ((list[v] & 0xf000f000) == 0x50005000)
+ break;
+ }
+ len += v - 3;
+ break;
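+    // shaded poly-line: color+vertex pairs, same terminator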
+ case 0x58 ... 0x5F:
+ for (v = 4; pos + v < count; v += 2)
+ {
+ if ((list[v] & 0xf000f000) == 0x50005000)
+ break;
+ }
+ len += v - 4;
+ break;
+ default:
+ if (cmd == 0xe3)
+ skip = decide_frameskip_allow(list[0]);
+ if ((cmd & 0xf8) == 0xe0)
+ gpu.ex_regs[cmd & 7] = list[0];
+ break;
+ }
+
+ if (pos + len > count) {
+ cmd = -1;
+ break; // incomplete cmd
+ }
+ if (0xa0 <= cmd && cmd <= 0xdf)
+ break; // image i/o
+
+ pos += len;
+ }
+
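+  // sync the renderer with any e1-e6 changes seen while skipping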
+ renderer_sync_ecmds(gpu.ex_regs);
+ *last_cmd = cmd;
+ return pos;
+}
+
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
- int len, cmd, start, pos;
+ int cmd, pos;
+ uint32_t old_e3 = gpu.ex_regs[3];
int vram_dirty = 0;
// process buffer
- for (start = pos = 0; pos < count; )
+ for (pos = 0; pos < count; )
{
- cmd = -1;
- len = 0;
-
- if (gpu.dma.h) {
+ if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
+ vram_dirty = 1;
pos += do_vram_io(data + pos, count - pos, 0);
if (pos == count)
break;
- start = pos;
}
- // do look-ahead pass to detect SR changes and VRAM i/o
- while (pos < count) {
- uint32_t *list = data + pos;
- cmd = list[0] >> 24;
- len = 1 + cmd_lengths[cmd];
-
- //printf(" %3d: %02x %d\n", pos, cmd, len);
- if ((cmd & 0xf4) == 0x24) {
- // flat textured prim
- gpu.ex_regs[1] &= ~0x1ff;
- gpu.ex_regs[1] |= list[4] & 0x1ff;
- }
- else if ((cmd & 0xf4) == 0x34) {
- // shaded textured prim
- gpu.ex_regs[1] &= ~0x1ff;
- gpu.ex_regs[1] |= list[5] & 0x1ff;
- }
- else if (cmd == 0xe3)
- decide_frameskip_allow(list[0]);
-
- if (2 <= cmd && cmd < 0xc0)
- vram_dirty = 1;
- else if ((cmd & 0xf8) == 0xe0)
- gpu.ex_regs[cmd & 7] = list[0];
-
- if (pos + len > count) {
- cmd = -1;
- break; // incomplete cmd
- }
- if (cmd == 0xa0 || cmd == 0xc0)
- break; // image i/o
- pos += len;
+ cmd = data[pos] >> 24;
+    if (0xa0 <= cmd && cmd <= 0xdf) {
+      if (unlikely(pos + 2 >= count)) {
+        // incomplete vram write/read cmd, can't consume yet
+        cmd = -1;
+        break;
+      }
+
+      // consume vram write/read cmd
+      start_vram_transfer(data[pos + 1], data[pos + 2], (cmd & 0xe0) == 0xc0);
+      pos += 3;
+      continue;
+    }
- if (pos - start > 0) {
- if (!gpu.frameskip.active || !gpu.frameskip.allow)
- do_cmd_list(data + start, pos - start);
- start = pos;
+ // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
+ if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
+ pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
+ else {
+ pos += do_cmd_list(data + pos, count - pos, &cmd);
+ vram_dirty = 1;
}
- if (cmd == 0xa0 || cmd == 0xc0) {
- // consume vram write/read cmd
- start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
- pos += len;
- }
- else if (cmd == -1)
+ if (cmd == -1)
+ // incomplete cmd
break;
}
  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;
- if (gpu.frameskip.active)
- renderer_sync_ecmds(gpu.ex_regs);
gpu.state.fb_dirty |= vram_dirty;
+ if (old_e3 != gpu.ex_regs[3])
+ decide_frameskip_allow(gpu.ex_regs[3]);
+
return count - pos;
}
int len, left, count;
long cpu_cycles = 0;
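+  // prefetch the first list entry ahead of the flush below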
+ preload(rambase + (start_addr & 0x1fffff) / 4);
+
if (unlikely(gpu.cmd_len > 0))
flush_cmd_buffer();
list = rambase + (addr & 0x1fffff) / 4;
len = list[0] >> 24;
addr = list[0] & 0xffffff;
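+    // prefetch the next list entry while this one is processed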
+ preload(rambase + (addr & 0x1fffff) / 4);
+
cpu_cycles += 10;
if (len > 0)
cpu_cycles += 5 + len;
case 1: // save
if (gpu.cmd_len > 0)
flush_cmd_buffer();
- memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
+ memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
freeze->ulStatus = gpu.status.reg;
break;
case 0: // load
- memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
+ memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
gpu.status.reg = freeze->ulStatus;
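+      // drop any command words queued before the state load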
+ gpu.cmd_len = 0;
for (i = 8; i > 0; i--) {
gpu.regs[i] ^= 1; // avoid reg change detection
GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
flush_cmd_buffer();
renderer_flush_queues();
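+  // when blanking starts, blank the output once; fb_dirty stays set so the
+  // picture is redrawn as soon as blanking ends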
- if (gpu.status.blanking || !gpu.state.fb_dirty)
+ if (gpu.status.blanking) {
+ if (!gpu.state.blanked) {
+ vout_blank();
+ gpu.state.blanked = 1;
+ gpu.state.fb_dirty = 1;
+ }
+ return;
+ }
+
+ if (!gpu.state.fb_dirty)
return;
if (gpu.frameskip.set) {
vout_update();
gpu.state.fb_dirty = 0;
+ gpu.state.blanked = 0;
}
void GPUvBlank(int is_vblank, int lcf)
gpu.state.hcnt = cbs->gpu_hcnt;
gpu.state.frame_count = cbs->gpu_frame_count;
gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
+ gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
+
+ gpu.mmap = cbs->mmap;
+ gpu.munmap = cbs->munmap;
+
+ // delayed vram mmap
+ if (gpu.vram == NULL)
+ map_vram();
if (cbs->pl_vout_set_raw_vram)
cbs->pl_vout_set_raw_vram(gpu.vram);