X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=plugins%2Fgpulib%2Fgpu.c;h=e3943a251660ee9c5d4da62be3f151f87dd31fd5;hb=c296224f47ceebab4d6fbd071959bff294e80293;hp=40a6bd85a2852987ab38aab2b087d4249e1cfd50;hpb=3b7b00650a53bc493c7da7d69ad54e1a25111ebf;p=pcsx_rearmed.git diff --git a/plugins/gpulib/gpu.c b/plugins/gpulib/gpu.c index 40a6bd85..e3943a25 100644 --- a/plugins/gpulib/gpu.c +++ b/plugins/gpulib/gpu.c @@ -14,8 +14,13 @@ #include /* for calloc */ #include "gpu.h" +#include "gpu_timing.h" +#include "../../libpcsxcore/gpu.h" // meh +#include "../../frontend/plugin_lib.h" +#ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif #ifdef __GNUC__ #define unlikely(x) __builtin_expect((x), 0) #define preload __builtin_prefetch @@ -31,15 +36,15 @@ struct psx_gpu gpu; -static noinline int do_cmd_buffer(uint32_t *data, int count); +static noinline int do_cmd_buffer(uint32_t *data, int count, int *cpu_cycles); static void finish_vram_transfer(int is_read); static noinline void do_cmd_reset(void) { + int dummy = 0; renderer_sync(); - if (unlikely(gpu.cmd_len > 0)) - do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len); + do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy); gpu.cmd_len = 0; if (unlikely(gpu.dma.h > 0)) @@ -62,6 +67,7 @@ static noinline void do_reset(void) gpu.screen.hres = gpu.screen.w = 256; gpu.screen.vres = gpu.screen.h = 240; gpu.screen.x = gpu.screen.y = 0; + renderer_sync_ecmds(gpu.ex_regs); renderer_notify_res_change(); } @@ -73,7 +79,10 @@ static noinline void update_width(void) int hres = hres_all[(gpu.status >> 16) & 7]; int pal = gpu.status & PSX_GPU_STATUS_PAL; int sw = gpu.screen.x2 - gpu.screen.x1; + int type = gpu.state.screen_centering_type; int x = 0, x_auto; + if (type == C_AUTO) + type = gpu.state.screen_centering_type_default; if (sw <= 0) /* nothing displayed? */; else { @@ -82,10 +91,10 @@ static noinline void update_width(void) x = (x + 1) & ~1; // blitter limitation sw /= hdiv; sw = (sw + 2) & ~3; // according to nocash - switch (gpu.state.screen_centering_type) { - case 1: + switch (type) { + case C_INGAME: break; - case 2: + case C_MANUAL: x = gpu.state.screen_centering_x; break; default: @@ -127,9 +136,12 @@ static noinline void update_height(void) /* nothing displayed? */; else { switch (gpu.state.screen_centering_type) { - case 1: + case C_INGAME: break; - case 2: + case C_BORDERLESS: + y = 0; + break; + case C_MANUAL: y = gpu.state.screen_centering_y; break; default: @@ -169,8 +181,8 @@ static noinline void decide_frameskip(void) gpu.frameskip.active = 0; if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) { - int dummy; - do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy); + int dummy = 0; + do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy, &dummy); gpu.frameskip.pending_fill[0] = 0; } } @@ -187,8 +199,12 @@ static noinline int decide_frameskip_allow(uint32_t cmd_e3) return gpu.frameskip.allow; } +static void flush_cmd_buffer(void); + static noinline void get_gpu_info(uint32_t data) { + if (unlikely(gpu.cmd_len > 0)) + flush_cmd_buffer(); switch (data & 0x0f) { case 0x02: case 0x03: @@ -207,8 +223,9 @@ static noinline void get_gpu_info(uint32_t data) } } -// double, for overdraw guard -#define VRAM_SIZE ((1024 * 512 * 2 * 2) + 4096) +#ifndef max +#define max(a, b) (((a) > (b)) ? 
(a) : (b)) +#endif // Minimum 16-byte VRAM alignment needed by gpu_unai's pixel-skipping // renderer/downscaler it uses in high res modes: @@ -220,18 +237,31 @@ static noinline void get_gpu_info(uint32_t data) #define VRAM_ALIGN 16 #endif +// double, for overdraw guard + at least 1 page before +#define VRAM_SIZE ((1024 * 512 * 2 * 2) + max(VRAM_ALIGN, 4096)) + // vram ptr received from mmap/malloc/alloc (will deallocate using this) static uint16_t *vram_ptr_orig = NULL; -#ifdef GPULIB_USE_MMAP +#ifndef GPULIB_USE_MMAP +# ifdef __linux__ +# define GPULIB_USE_MMAP 1 +# else +# define GPULIB_USE_MMAP 0 +# endif +#endif static int map_vram(void) { - gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE + (VRAM_ALIGN-1)); - if (gpu.vram != NULL) { - // 4kb guard in front +#if GPULIB_USE_MMAP + gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE); +#else + gpu.vram = vram_ptr_orig = calloc(VRAM_SIZE, 1); +#endif + if (gpu.vram != NULL && gpu.vram != (void *)(intptr_t)-1) { + // 4kb guard in front gpu.vram += (4096 / 2); - // Align - gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1)); + // Align + gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1)); return 0; } else { @@ -239,54 +269,9 @@ static int map_vram(void) return -1; } } -#else -static int map_vram(void) -{ - gpu.vram = vram_ptr_orig = (uint16_t*)calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1); - if (gpu.vram != NULL) { - // 4kb guard in front - gpu.vram += (4096 / 2); - // Align - gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1)); - return 0; - } else { - fprintf(stderr, "could not allocate vram, expect crashes\n"); - return -1; - } -} - -static int allocate_vram(void) -{ - gpu.vram = vram_ptr_orig = (uint16_t*)calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1); - if (gpu.vram != NULL) { - // 4kb guard in front - gpu.vram += (4096 / 2); - // Align - gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1)); - return 0; - } else { - fprintf(stderr, "could not allocate vram, expect crashes\n"); - return -1; - } -} -#endif long GPUinit(void) { -#ifndef GPULIB_USE_MMAP - if (gpu.vram == NULL) { - if (allocate_vram() != 0) { - printf("ERROR: could not allocate VRAM, exiting..\n"); - exit(1); - } - } -#endif - - //extern uint32_t hSyncCount; // in psxcounters.cpp - //extern uint32_t frame_counter; // in psxcounters.cpp - //gpu.state.hcnt = &hSyncCount; - //gpu.state.frame_count = &frame_counter; - int ret; ret = vout_init(); ret |= renderer_init(); @@ -314,7 +299,7 @@ long GPUshutdown(void) ret = vout_finish(); if (vram_ptr_orig != NULL) { -#ifdef GPULIB_USE_MMAP +#if GPULIB_USE_MMAP gpu.munmap(vram_ptr_orig, VRAM_SIZE); #else free(vram_ptr_orig); @@ -328,6 +313,7 @@ long GPUshutdown(void) void GPUwriteStatus(uint32_t data) { uint32_t cmd = data >> 24; + int src_x, src_y; if (cmd < ARRAY_SIZE(gpu.regs)) { if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data) @@ -357,14 +343,17 @@ void GPUwriteStatus(uint32_t data) gpu.status |= PSX_GPU_STATUS_DMA(data & 3); break; case 0x05: - gpu.screen.src_x = data & 0x3ff; - gpu.screen.src_y = (data >> 10) & 0x1ff; - renderer_notify_scanout_x_change(gpu.screen.src_x, gpu.screen.hres); - if (gpu.frameskip.set) { - decide_frameskip_allow(gpu.ex_regs[3]); - if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) { - decide_frameskip(); - gpu.frameskip.last_flip_frame = *gpu.state.frame_count; + src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff; + if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) { + gpu.screen.src_x = 
src_x; + gpu.screen.src_y = src_y; + renderer_notify_scanout_change(src_x, src_y); + if (gpu.frameskip.set) { + decide_frameskip_allow(gpu.ex_regs[3]); + if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) { + decide_frameskip(); + gpu.frameskip.last_flip_frame = *gpu.state.frame_count; + } } } break; @@ -405,23 +394,33 @@ const unsigned char cmd_lengths[256] = 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, - 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80 + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0 + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0 + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)] -static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read) +static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb) +{ + int i; + for (i = 0; i < l; i++) + dst[i] = src[i] | msb; +} + +static inline void do_vram_line(int x, int y, uint16_t *mem, int l, + int is_read, uint16_t msb) { uint16_t *vram = VRAM_MEM_XY(x, y); - if (is_read) + if (unlikely(is_read)) memcpy(mem, vram, l * 2); + else if (unlikely(msb)) + cpy_msb(vram, mem, l, msb); else memcpy(vram, mem, l * 2); } @@ -429,6 +428,7 @@ static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read) static int do_vram_io(uint32_t *data, int count, int is_read) { int count_initial = count; + uint16_t msb = gpu.ex_regs[6] << 15; uint16_t *sdata = (uint16_t *)data; int x = gpu.dma.x, y = gpu.dma.y; int w = gpu.dma.w, h = gpu.dma.h; @@ -443,7 +443,7 @@ static int do_vram_io(uint32_t *data, int count, int is_read) if (count < l) l = count; - do_vram_line(x + o, y, sdata, l, is_read); + do_vram_line(x + o, y, sdata, l, is_read, msb); if (o + l < w) o += l; @@ -458,13 +458,13 @@ static int do_vram_io(uint32_t *data, int count, int is_read) for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) { y &= 511; - do_vram_line(x, y, sdata, w, is_read); + do_vram_line(x, y, sdata, w, is_read, msb); } if (h > 0) { if (count > 0) { y &= 511; - do_vram_line(x, y, sdata, count, is_read); + do_vram_line(x, y, sdata, count, is_read, msb); o = count; count = 0; } @@ -501,20 +501,72 @@ static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_re log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 
'r' : 'w', gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h); + if (gpu.gpu_state_change) + gpu.gpu_state_change(PGS_VRAM_TRANSFER_START); } static void finish_vram_transfer(int is_read) { if (is_read) gpu.status &= ~PSX_GPU_STATUS_IMG; - else + else { + gpu.state.fb_dirty = 1; renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y, gpu.dma_start.w, gpu.dma_start.h, 0); + } + if (gpu.gpu_state_change) + gpu.gpu_state_change(PGS_VRAM_TRANSFER_END); +} + +static void do_vram_copy(const uint32_t *params, int *cpu_cycles) +{ + const uint32_t sx = LE32TOH(params[0]) & 0x3FF; + const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF; + const uint32_t dx = LE32TOH(params[1]) & 0x3FF; + const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF; + uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1; + uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1; + uint16_t msb = gpu.ex_regs[6] << 15; + uint16_t lbuf[128]; + uint32_t x, y; + + *cpu_cycles += gput_copy(w, h); + if (sx == dx && sy == dy && msb == 0) + return; + + renderer_flush_queues(); + + if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb)) + { + for (y = 0; y < h; y++) + { + const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff); + uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff); + for (x = 0; x < w; x += ARRAY_SIZE(lbuf)) + { + uint32_t x1, w1 = w - x; + if (w1 > ARRAY_SIZE(lbuf)) + w1 = ARRAY_SIZE(lbuf); + for (x1 = 0; x1 < w1; x1++) + lbuf[x1] = src[(sx + x + x1) & 0x3ff]; + for (x1 = 0; x1 < w1; x1++) + dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb; + } + } + } + else + { + uint32_t sy1 = sy, dy1 = dy; + for (y = 0; y < h; y++, sy1++, dy1++) + memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2); + } + + renderer_update_caches(dx, dy, w, h, 0); } static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd) { - int cmd = 0, pos = 0, len, dummy, v; + int cmd = 0, pos = 0, len, dummy = 0, v; int skip = 1; gpu.frameskip.pending_fill[0] = 0; @@ -528,7 +580,7 @@ static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd) case 0x02: if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h) // clearing something large, don't skip - do_cmd_list(list, 3, &dummy); + do_cmd_list(list, 3, &dummy, &dummy); else memcpy(gpu.frameskip.pending_fill, list, 3 * 4); break; @@ -567,7 +619,7 @@ static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd) cmd = -1; break; // incomplete cmd } - if (0xa0 <= cmd && cmd <= 0xdf) + if (0x80 <= cmd && cmd <= 0xdf) break; // image i/o pos += len; @@ -578,7 +630,7 @@ static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd) return pos; } -static noinline int do_cmd_buffer(uint32_t *data, int count) +static noinline int do_cmd_buffer(uint32_t *data, int count, int *cpu_cycles) { int cmd, pos; uint32_t old_e3 = gpu.ex_regs[3]; @@ -607,12 +659,27 @@ static noinline int do_cmd_buffer(uint32_t *data, int count) pos += 3; continue; } + else if ((cmd & 0xe0) == 0x80) { + if (unlikely((pos+3) >= count)) { + cmd = -1; // incomplete cmd, can't consume yet + break; + } + do_vram_copy(data + pos + 1, cpu_cycles); + vram_dirty = 1; + pos += 4; + continue; + } + else if (cmd == 0x1f) { + log_anomaly("irq1?\n"); + pos++; + continue; + } // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0)) pos += do_cmd_list_skip(data + pos, count - pos, &cmd); else { - pos 
+= do_cmd_list(data + pos, count - pos, &cmd); + pos += do_cmd_list(data + pos, count - pos, cpu_cycles, &cmd); vram_dirty = 1; } @@ -633,24 +700,29 @@ static noinline int do_cmd_buffer(uint32_t *data, int count) return count - pos; } -static void flush_cmd_buffer(void) +static noinline void flush_cmd_buffer(void) { - int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len); + int dummy = 0, left; + left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy); if (left > 0) memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4); - gpu.cmd_len = left; + if (left != gpu.cmd_len) { + if (!gpu.dma.h && gpu.gpu_state_change) + gpu.gpu_state_change(PGS_PRIMITIVE_START); + gpu.cmd_len = left; + } } void GPUwriteDataMem(uint32_t *mem, int count) { - int left; + int dummy = 0, left; log_io("gpu_dma_write %p %d\n", mem, count); if (unlikely(gpu.cmd_len > 0)) flush_cmd_buffer(); - left = do_cmd_buffer(mem, count); + left = do_cmd_buffer(mem, count, &dummy); if (left) log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count); } @@ -667,7 +739,7 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr { uint32_t addr, *list, ld_addr = 0; int len, left, count; - long cpu_cycles = 0; + int cpu_cycles = 0; preload(rambase + (start_addr & 0x1fffff) / 4); @@ -687,9 +759,13 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr if (len > 0) cpu_cycles += 5 + len; - log_io(".chain %08lx #%d+%d\n", - (long)(list - rambase) * 4, len, gpu.cmd_len); + log_io(".chain %08lx #%d+%d %u\n", + (long)(list - rambase) * 4, len, gpu.cmd_len, cpu_cycles); if (unlikely(gpu.cmd_len > 0)) { + if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) { + log_anomaly("cmd_buffer overflow, likely garbage commands\n"); + gpu.cmd_len = 0; + } memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4); gpu.cmd_len += len; flush_cmd_buffer(); @@ -697,7 +773,7 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr } if (len) { - left = do_cmd_buffer(list + 1, len); + left = do_cmd_buffer(list + 1, len, &cpu_cycles); if (left) { memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4); gpu.cmd_len = left; @@ -818,7 +894,7 @@ long GPUfreeze(uint32_t type, struct GPUFreeze *freeze) GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1)); } renderer_sync_ecmds(gpu.ex_regs); - renderer_update_caches(0, 0, 1024, 512, 1); + renderer_update_caches(0, 0, 1024, 512, 0); break; } @@ -831,6 +907,7 @@ void GPUupdateLace(void) flush_cmd_buffer(); renderer_flush_queues(); +#ifndef RAW_FB_DISPLAY if (gpu.status & PSX_GPU_STATUS_BLANKING) { if (!gpu.state.blanked) { vout_blank(); @@ -844,6 +921,7 @@ void GPUupdateLace(void) if (!gpu.state.fb_dirty) return; +#endif if (gpu.frameskip.set) { if (!gpu.frameskip.frame_ready) { @@ -885,7 +963,13 @@ void GPUvBlank(int is_vblank, int lcf) } } -#include "../../frontend/plugin_lib.h" +void GPUgetScreenInfo(int *y, int *base_hres) +{ + *y = gpu.screen.y; + *base_hres = gpu.screen.vres; + if (gpu.status & PSX_GPU_STATUS_DHEIGHT) + *base_hres >>= 1; +} void GPUrearmedCallbacks(const struct rearmed_cbs *cbs) { @@ -899,6 +983,7 @@ void GPUrearmedCallbacks(const struct rearmed_cbs *cbs) gpu.state.frame_count = cbs->gpu_frame_count; gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace; gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable; + gpu.state.screen_centering_type_default = cbs->screen_centering_type_default; if (gpu.state.screen_centering_type != cbs->screen_centering_type || gpu.state.screen_centering_x != 
cbs->screen_centering_x || gpu.state.screen_centering_y != cbs->screen_centering_y) { @@ -911,6 +996,7 @@ void GPUrearmedCallbacks(const struct rearmed_cbs *cbs) gpu.mmap = cbs->mmap; gpu.munmap = cbs->munmap; + gpu.gpu_state_change = cbs->gpu_state_change; // delayed vram mmap if (gpu.vram == NULL)
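
Side note on the mask-bit plumbing added above (illustrative only, not part of the patch): the patch threads a forced mask bit -- msb = gpu.ex_regs[6] << 15, i.e. bit 0 of GP0(E6h) "set mask while drawing" shifted into bit 15 -- through do_vram_line(), do_vram_io() and do_vram_copy(), so CPU->VRAM writes OR 0x8000 into every halfword when that mode is enabled, and fall back to plain memcpy otherwise. A minimal standalone sketch of that behaviour follows; the *_demo names and the main() harness are invented for this example and do not exist in gpulib.

/* demo_mask_bit.c -- hypothetical standalone sketch, not from the patch */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same idea as the new cpy_msb() in gpu.c: OR a forced bit 15 into each
 * halfword while copying incoming data into VRAM. */
static void cpy_msb_demo(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
{
  int i;
  for (i = 0; i < l; i++)
    dst[i] = src[i] | msb;
}

int main(void)
{
  uint16_t vram_line[4];                      /* stand-in for one VRAM span   */
  const uint16_t src[4] = { 0x1234, 0x0000, 0x7fff, 0x8000 };
  uint32_t ex_reg_e6 = 1;                     /* GP0(E6h): bit0 = set mask    */
  uint16_t msb = (uint16_t)(ex_reg_e6 << 15); /* 0x8000 when enabled, else 0  */
  int i;

  if (msb)                                    /* slow path, as in do_vram_line() */
    cpy_msb_demo(vram_line, src, 4, msb);
  else                                        /* fast path: plain memcpy      */
    memcpy(vram_line, src, sizeof(src));

  for (i = 0; i < 4; i++)
    printf("%04x ", vram_line[i]);            /* prints: 9234 8000 ffff 8000  */
  printf("\n");
  return 0;
}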