#include <stdlib.h> /* for calloc */
#include "gpu.h"
+#include "gpu_timing.h"
+#include "../../libpcsxcore/gpu.h" // meh
+#include "../../frontend/plugin_lib.h"
+#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
#ifdef __GNUC__
#define unlikely(x) __builtin_expect((x), 0)
#define preload __builtin_prefetch
struct psx_gpu gpu;
-static noinline int do_cmd_buffer(uint32_t *data, int count);
+static noinline int do_cmd_buffer(uint32_t *data, int count, int *cpu_cycles);
static void finish_vram_transfer(int is_read);
static noinline void do_cmd_reset(void)
{
+ int dummy = 0;
renderer_sync();
-
if (unlikely(gpu.cmd_len > 0))
- do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
+ do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy);
gpu.cmd_len = 0;
if (unlikely(gpu.dma.h > 0))
gpu.screen.hres = gpu.screen.w = 256;
gpu.screen.vres = gpu.screen.h = 240;
gpu.screen.x = gpu.screen.y = 0;
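+  // push the freshly reset e-cmd state (ex_regs) down to the renderer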
+ renderer_sync_ecmds(gpu.ex_regs);
renderer_notify_res_change();
}
int hres = hres_all[(gpu.status >> 16) & 7];
int pal = gpu.status & PSX_GPU_STATUS_PAL;
int sw = gpu.screen.x2 - gpu.screen.x1;
+ int type = gpu.state.screen_centering_type;
int x = 0, x_auto;
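+  // C_AUTO defers to the default supplied by the frontend callbacks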
+ if (type == C_AUTO)
+ type = gpu.state.screen_centering_type_default;
if (sw <= 0)
/* nothing displayed? */;
else {
x = (x + 1) & ~1; // blitter limitation
sw /= hdiv;
sw = (sw + 2) & ~3; // according to nocash
- switch (gpu.state.screen_centering_type) {
- case 1:
+ switch (type) {
+ case C_INGAME:
break;
- case 2:
+ case C_MANUAL:
x = gpu.state.screen_centering_x;
break;
default:
/* nothing displayed? */;
else {
switch (gpu.state.screen_centering_type) {
- case 1:
+ case C_INGAME:
break;
- case 2:
+ case C_BORDERLESS:
+ y = 0;
+ break;
+ case C_MANUAL:
y = gpu.state.screen_centering_y;
break;
default:
gpu.frameskip.active = 0;
if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
- int dummy;
- do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
+ int dummy = 0;
+ do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy, &dummy);
gpu.frameskip.pending_fill[0] = 0;
}
}
return gpu.frameskip.allow;
}
+static void flush_cmd_buffer(void);
+
static noinline void get_gpu_info(uint32_t data)
{
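+  // flush first so the values reported below reflect any still-queued commands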
+ if (unlikely(gpu.cmd_len > 0))
+ flush_cmd_buffer();
switch (data & 0x0f) {
case 0x02:
case 0x03:
}
}
-// double, for overdraw guard
-#define VRAM_SIZE ((1024 * 512 * 2 * 2) + 4096)
+#ifndef max
+#define max(a, b) (((a) > (b)) ? (a) : (b))
+#endif
// Minimum 16-byte VRAM alignment needed by gpu_unai's pixel-skipping
-// renderer/downscaler it uses in high res modes:
+// renderer/downscaler used in high-res modes:
#define VRAM_ALIGN 16
#endif
+// double, for overdraw guard + at least 1 page before
+#define VRAM_SIZE ((1024 * 512 * 2 * 2) + max(VRAM_ALIGN, 4096))
+
// vram ptr received from mmap/malloc/alloc (will deallocate using this)
static uint16_t *vram_ptr_orig = NULL;
-#ifdef GPULIB_USE_MMAP
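+// if the build didn't decide, default to mmap on Linux and plain calloc elsewhere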
+#ifndef GPULIB_USE_MMAP
+# ifdef __linux__
+# define GPULIB_USE_MMAP 1
+# else
+# define GPULIB_USE_MMAP 0
+# endif
+#endif
static int map_vram(void)
{
- gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE + (VRAM_ALIGN-1));
- if (gpu.vram != NULL) {
- // 4kb guard in front
+#if GPULIB_USE_MMAP
+ gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE);
+#else
+ gpu.vram = vram_ptr_orig = calloc(VRAM_SIZE, 1);
+#endif
+ if (gpu.vram != NULL && gpu.vram != (void *)(intptr_t)-1) {
+ // 4kb guard in front
gpu.vram += (4096 / 2);
- // Align
- gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
+ // Align
+ gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
return 0;
}
else {
+    fprintf(stderr, "could not allocate vram, expect crashes\n");
return -1;
}
}
-#else
-static int map_vram(void)
-{
- gpu.vram = vram_ptr_orig = (uint16_t*)calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1);
- if (gpu.vram != NULL) {
- // 4kb guard in front
- gpu.vram += (4096 / 2);
- // Align
- gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
- return 0;
- } else {
- fprintf(stderr, "could not allocate vram, expect crashes\n");
- return -1;
- }
-}
-
-static int allocate_vram(void)
-{
- gpu.vram = vram_ptr_orig = (uint16_t*)calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1);
- if (gpu.vram != NULL) {
- // 4kb guard in front
- gpu.vram += (4096 / 2);
- // Align
- gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
- return 0;
- } else {
- fprintf(stderr, "could not allocate vram, expect crashes\n");
- return -1;
- }
-}
-#endif
long GPUinit(void)
{
-#ifndef GPULIB_USE_MMAP
- if (gpu.vram == NULL) {
- if (allocate_vram() != 0) {
- printf("ERROR: could not allocate VRAM, exiting..\n");
- exit(1);
- }
- }
-#endif
-
- //extern uint32_t hSyncCount; // in psxcounters.cpp
- //extern uint32_t frame_counter; // in psxcounters.cpp
- //gpu.state.hcnt = &hSyncCount;
- //gpu.state.frame_count = &frame_counter;
-
int ret;
ret = vout_init();
ret |= renderer_init();
ret = vout_finish();
if (vram_ptr_orig != NULL) {
-#ifdef GPULIB_USE_MMAP
+#if GPULIB_USE_MMAP
gpu.munmap(vram_ptr_orig, VRAM_SIZE);
#else
free(vram_ptr_orig);
void GPUwriteStatus(uint32_t data)
{
uint32_t cmd = data >> 24;
+ int src_x, src_y;
if (cmd < ARRAY_SIZE(gpu.regs)) {
if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
break;
case 0x05:
- gpu.screen.src_x = data & 0x3ff;
- gpu.screen.src_y = (data >> 10) & 0x1ff;
- renderer_notify_scanout_x_change(gpu.screen.src_x, gpu.screen.hres);
- if (gpu.frameskip.set) {
- decide_frameskip_allow(gpu.ex_regs[3]);
- if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
- decide_frameskip();
- gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
+ src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff;
+ if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) {
+ gpu.screen.src_x = src_x;
+ gpu.screen.src_y = src_y;
+ renderer_notify_scanout_change(src_x, src_y);
+ if (gpu.frameskip.set) {
+ decide_frameskip_allow(gpu.ex_regs[3]);
+ if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
+ decide_frameskip();
+ gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
+ }
}
}
break;
3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
-static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
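+// like a pixel copy, but ORs the given mask bit into every written pixel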
+static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
+{
+ int i;
+ for (i = 0; i < l; i++)
+ dst[i] = src[i] | msb;
+}
+
+static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
+ int is_read, uint16_t msb)
{
uint16_t *vram = VRAM_MEM_XY(x, y);
- if (is_read)
+ if (unlikely(is_read))
memcpy(mem, vram, l * 2);
+ else if (unlikely(msb))
+ cpy_msb(vram, mem, l, msb);
else
memcpy(vram, mem, l * 2);
}
static int do_vram_io(uint32_t *data, int count, int is_read)
{
int count_initial = count;
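+  // mask bit to force on written pixels (GP0(E6h) bit 0 shifted to bit 15)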
+ uint16_t msb = gpu.ex_regs[6] << 15;
uint16_t *sdata = (uint16_t *)data;
int x = gpu.dma.x, y = gpu.dma.y;
int w = gpu.dma.w, h = gpu.dma.h;
if (count < l)
l = count;
- do_vram_line(x + o, y, sdata, l, is_read);
+ do_vram_line(x + o, y, sdata, l, is_read, msb);
if (o + l < w)
o += l;
for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
y &= 511;
- do_vram_line(x, y, sdata, w, is_read);
+ do_vram_line(x, y, sdata, w, is_read, msb);
}
if (h > 0) {
if (count > 0) {
y &= 511;
- do_vram_line(x, y, sdata, count, is_read);
+ do_vram_line(x, y, sdata, count, is_read, msb);
o = count;
count = 0;
}
log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
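+  // notify the core that a VRAM transfer is starting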
+ if (gpu.gpu_state_change)
+ gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);
}
static void finish_vram_transfer(int is_read)
{
if (is_read)
gpu.status &= ~PSX_GPU_STATUS_IMG;
- else
+ else {
+ gpu.state.fb_dirty = 1;
renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
gpu.dma_start.w, gpu.dma_start.h, 0);
+ }
+ if (gpu.gpu_state_change)
+ gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
+}
+
+static void do_vram_copy(const uint32_t *params, int *cpu_cycles)
+{
+ const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
+ const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
+ const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
+ const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
+ uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
+ uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
+ uint16_t msb = gpu.ex_regs[6] << 15;
+ uint16_t lbuf[128];
+ uint32_t x, y;
+
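+  // charge the copy's cycle cost even if it turns out to be a no-op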
+ *cpu_cycles += gput_copy(w, h);
+ if (sx == dx && sy == dy && msb == 0)
+ return;
+
+ renderer_flush_queues();
+
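+  // overlapping columns, X wrap-around or a forced mask bit: go through a small line buffer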
+ if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
+ {
+ for (y = 0; y < h; y++)
+ {
+ const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
+ uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
+ for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
+ {
+ uint32_t x1, w1 = w - x;
+ if (w1 > ARRAY_SIZE(lbuf))
+ w1 = ARRAY_SIZE(lbuf);
+ for (x1 = 0; x1 < w1; x1++)
+ lbuf[x1] = src[(sx + x + x1) & 0x3ff];
+ for (x1 = 0; x1 < w1; x1++)
+ dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
+ }
+ }
+ }
+ else
+ {
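+    // simple case: no problematic overlap, wrap or mask bit, copy whole rows directly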
+ uint32_t sy1 = sy, dy1 = dy;
+ for (y = 0; y < h; y++, sy1++, dy1++)
+ memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
+ }
+
+ renderer_update_caches(dx, dy, w, h, 0);
}
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
- int cmd = 0, pos = 0, len, dummy, v;
+ int cmd = 0, pos = 0, len, dummy = 0, v;
int skip = 1;
gpu.frameskip.pending_fill[0] = 0;
case 0x02:
if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
// clearing something large, don't skip
- do_cmd_list(list, 3, &dummy);
+ do_cmd_list(list, 3, &dummy, &dummy);
else
memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
break;
cmd = -1;
break; // incomplete cmd
}
- if (0xa0 <= cmd && cmd <= 0xdf)
+ if (0x80 <= cmd && cmd <= 0xdf)
break; // image i/o
pos += len;
return pos;
}
-static noinline int do_cmd_buffer(uint32_t *data, int count)
+static noinline int do_cmd_buffer(uint32_t *data, int count, int *cpu_cycles)
{
int cmd, pos;
uint32_t old_e3 = gpu.ex_regs[3];
pos += 3;
continue;
}
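+    // 0x80..0x9f: VRAM-to-VRAM copy (1 cmd word + 3 params), done by gpulib itself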
+ else if ((cmd & 0xe0) == 0x80) {
+ if (unlikely((pos+3) >= count)) {
+ cmd = -1; // incomplete cmd, can't consume yet
+ break;
+ }
+ do_vram_copy(data + pos + 1, cpu_cycles);
+ vram_dirty = 1;
+ pos += 4;
+ continue;
+ }
// 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
else {
- pos += do_cmd_list(data + pos, count - pos, &cmd);
+ pos += do_cmd_list(data + pos, count - pos, cpu_cycles, &cmd);
vram_dirty = 1;
}
return count - pos;
}
-static void flush_cmd_buffer(void)
+static noinline void flush_cmd_buffer(void)
{
- int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
+ int dummy = 0, left;
+ left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy);
if (left > 0)
memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
- gpu.cmd_len = left;
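+  // if any commands were consumed, report that drawing has started (unless a VRAM transfer is active)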
+ if (left != gpu.cmd_len) {
+ if (!gpu.dma.h && gpu.gpu_state_change)
+ gpu.gpu_state_change(PGS_PRIMITIVE_START);
+ gpu.cmd_len = left;
+ }
}
void GPUwriteDataMem(uint32_t *mem, int count)
{
- int left;
+ int dummy = 0, left;
log_io("gpu_dma_write %p %d\n", mem, count);
if (unlikely(gpu.cmd_len > 0))
flush_cmd_buffer();
- left = do_cmd_buffer(mem, count);
+ left = do_cmd_buffer(mem, count, &dummy);
if (left)
log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}
{
uint32_t addr, *list, ld_addr = 0;
int len, left, count;
- long cpu_cycles = 0;
+ int cpu_cycles = 0;
preload(rambase + (start_addr & 0x1fffff) / 4);
}
if (len) {
- left = do_cmd_buffer(list + 1, len);
+ left = do_cmd_buffer(list + 1, len, &cpu_cycles);
if (left) {
memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
gpu.cmd_len = left;
GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
}
renderer_sync_ecmds(gpu.ex_regs);
- renderer_update_caches(0, 0, 1024, 512, 1);
+ renderer_update_caches(0, 0, 1024, 512, 0);
break;
}
flush_cmd_buffer();
renderer_flush_queues();
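+  // with RAW_FB_DISPLAY the blanking/dirty-frame checks below are compiled out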
+#ifndef RAW_FB_DISPLAY
if (gpu.status & PSX_GPU_STATUS_BLANKING) {
if (!gpu.state.blanked) {
vout_blank();
if (!gpu.state.fb_dirty)
return;
+#endif
if (gpu.frameskip.set) {
if (!gpu.frameskip.frame_ready) {
}
}
-#include "../../frontend/plugin_lib.h"
+void GPUgetScreenInfo(int *y, int *base_hres)
+{
+ *y = gpu.screen.y;
+ *base_hres = gpu.screen.vres;
+ if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
+ *base_hres >>= 1;
+}
void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
gpu.state.frame_count = cbs->gpu_frame_count;
gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
+ gpu.state.screen_centering_type_default = cbs->screen_centering_type_default;
if (gpu.state.screen_centering_type != cbs->screen_centering_type
|| gpu.state.screen_centering_x != cbs->screen_centering_x
|| gpu.state.screen_centering_y != cbs->screen_centering_y) {
gpu.mmap = cbs->mmap;
gpu.munmap = cbs->munmap;
+ gpu.gpu_state_change = cbs->gpu_state_change;
// delayed vram mmap
if (gpu.vram == NULL)