 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 * This work is licensed under the terms of any of these licenses
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.

#include "../../libpcsxcore/gpu.h" // meh
#include "../../frontend/plugin_lib.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define preload __builtin_prefetch
#define noinline __attribute__((noinline))

//#define log_io gpu_log

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);
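
// Command-state reset (used by the GP1 reset commands): flush anything still
// queued in cmd_buffer and close out a VRAM transfer that is in flight.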
static noinline void do_cmd_reset(void)
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);

static noinline void do_reset(void)
  memset(gpu.regs, 0, sizeof(gpu.regs));
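  // gpu.ex_regs[] mirrors the GP0 0xE1..0xE6 attribute commands; seed each
  // slot with its own command number in the top byte so renderer_sync_ecmds()
  // always sees valid opcodes.  0x14802000 is the GPUSTAT value after reset
  // (display blanked, GPU ready to receive commands/DMA).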
  for (i = 0; i < ARRAY_SIZE(gpu.ex_regs); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status = 0x14802000;

  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
  gpu.screen.x = gpu.screen.y = 0;
  renderer_sync_ecmds(gpu.ex_regs);
  renderer_notify_res_change();
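
// Derive the displayed width from the GP1(0x08) horizontal resolution bits
// in GPUSTAT 16-18: hres_all[] is the nominal pixel width, hdivs[] the
// matching dot clock divider (10/8/5/4 for 256/320/512/640, 7 for 368).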
static noinline void update_width(void)
  static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
  static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
  uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
  int hres = hres_all[(gpu.status >> 16) & 7];
  int pal = gpu.status & PSX_GPU_STATUS_PAL;
  int sw = gpu.screen.x2 - gpu.screen.x1;

    /* nothing displayed? */;

    int s = pal ? 656 : 608; // or 600? pal is just a guess
    x = (gpu.screen.x1 - s) / hdiv;
    x = (x + 1) & ~1;   // blitter limitation
    sw = (sw + 2) & ~3; // according to nocash
    switch (gpu.state.screen_centering_type) {
      x = gpu.state.screen_centering_x;
      // correct if slightly miscentered
      x_auto = (hres - sw) / 2 & ~3;
      if ((uint32_t)x_auto <= 8u && abs(x) < 24)

  // .x range check is done in vout_update()
  // reduce the unpleasant right border that a few games have
  if (gpu.state.screen_centering_type == 0
      && x <= 4 && hres - (x + sw) >= 4)

  gpu.screen.hres = hres;
  gpu.state.dims_changed = 1;
  //printf("xx %d %d -> %2d, %d / %d\n",
  //  gpu.screen.x1, gpu.screen.x2, x, sw, hres);
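
// Same idea vertically: convert the GP1(0x07) display range to a visible
// line count, offset y1 by the first visible scanline (16 NTSC, 39 PAL per
// the Spyro note), and double everything when the 480/512-line
// double-height mode is in effect.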
static noinline void update_height(void)
  int pal = gpu.status & PSX_GPU_STATUS_PAL;
  int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
  int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
  int sh = gpu.screen.y2 - gpu.screen.y1;

  if (pal && (sh > 240 || gpu.screen.vres == 256))

    y *= 2, sh *= 2, vres *= 2, center_tol *= 2;

    /* nothing displayed? */;

    switch (gpu.state.screen_centering_type) {
      y = gpu.state.screen_centering_y;
      // correct if slightly miscentered
      if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)

  gpu.screen.vres = vres;
  gpu.state.dims_changed = 1;
  //printf("yy %d %d -> %d, %d / %d\n",
  //  gpu.screen.y1, gpu.screen.y2, y, sh, vres);
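
// Runs when the display address changes (a "flip"): count skipped frames
// against the configured limit and decide whether the next frame is drawn.
// A pending screen-clearing fill is replayed once skipping stops so the
// next rendered frame starts from sane VRAM contents.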
static noinline void decide_frameskip(void)
  if (gpu.frameskip.active)

    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;

    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
    (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;

static void flush_cmd_buffer(void);
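
// GP1(0x10) "get GPU info": returns internal state through GPUREAD (gp0).
// Indices 2-4 select the texture window / drawing area registers (20
// significant bits), index 5 the drawing offset (22 bits).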
static noinline void get_gpu_info(uint32_t data)
  if (unlikely(gpu.cmd_len > 0))

  switch (data & 0x0f) {
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;

// double, for overdraw guard
#define VRAM_SIZE (1024 * 512 * 2 * 2)

static int map_vram(void)
  gpu.vram = gpu.mmap(VRAM_SIZE);
  if (gpu.vram != NULL) {
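    // step past the start of the mapping, presumably to leave a small guard
    // area in front of VRAM; GPUshutdown() undoes this offset before unmapping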
    gpu.vram += 4096 / 2;

    fprintf(stderr, "could not map vram, expect crashes\n");

  ret |= renderer_init();

  memset(&gpu.state, 0, sizeof(gpu.state));
  memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;

  if (gpu.mmap != NULL) {

long GPUshutdown(void)
  if (gpu.vram != NULL) {
    gpu.vram -= 4096 / 2;
    gpu.munmap(gpu.vram, VRAM_SIZE);
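
// GP1 control port.  The command number is in the top byte; most commands
// are ignored when rewritten with an unchanged value (the early-out below),
// except the resets (0x00/0x01) and display start (0x05).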
void GPUwriteStatus(uint32_t data)
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)

    gpu.regs[cmd] = data;

  gpu.state.fb_dirty = 1;

      gpu.status |= PSX_GPU_STATUS_BLANKING;
      gpu.state.dims_changed = 1; // for hud clearing

      gpu.status &= ~PSX_GPU_STATUS_BLANKING;

      gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
      gpu.status |= PSX_GPU_STATUS_DMA(data & 3);

      src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff;
      if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) {
        gpu.screen.src_x = src_x;
        gpu.screen.src_y = src_y;
        renderer_notify_scanout_change(src_x, src_y);
        if (gpu.frameskip.set) {
          decide_frameskip_allow(gpu.ex_regs[3]);
          if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
            gpu.frameskip.last_flip_frame = *gpu.state.frame_count;

      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;

      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;

      gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
      renderer_notify_res_change();

      if ((cmd & 0xf0) == 0x10)

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
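
// Number of parameter words following each GP0 command word (the command
// word itself is not counted; callers add 1).  Poly-lines (0x48+/0x58+) are
// variable-length and get special handling where this table is used.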
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
  for (i = 0; i < l; i++)
    dst[i] = src[i] | msb;

static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
    int is_read, uint16_t msb)
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (unlikely(is_read))
    memcpy(mem, vram, l * 2);
  else if (unlikely(msb))
    cpy_msb(vram, mem, l, msb);
  else
    memcpy(vram, mem, l * 2);
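
// Feed an active CPU<->VRAM image transfer, one VRAM line at a time: finish
// a partially transferred row first (gpu.dma.offset), then whole rows, then
// a trailing partial row.  msb is 0x8000 when GP0(0xE6) forces the mask bit
// on written pixels.  Returns the number of 32-bit words consumed.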
static int do_vram_io(uint32_t *data, int count, int is_read)
  int count_initial = count;
  uint16_t msb = gpu.ex_regs[6] << 15;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;

  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;

    do_vram_line(x + o, y, sdata, l, is_read, msb);

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    do_vram_line(x, y, sdata, w, is_read, msb);

      do_vram_line(x, y, sdata, count, is_read, msb);

    finish_vram_transfer(is_read);

  return count_initial - count / 2;
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
    gpu.status |= PSX_GPU_STATUS_IMG;
    // XXX: wrong for width 1
    gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
  if (gpu.gpu_state_change)
    gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);

static void finish_vram_transfer(int is_read)
    gpu.status &= ~PSX_GPU_STATUS_IMG;

    gpu.state.fb_dirty = 1;
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
      gpu.dma_start.w, gpu.dma_start.h, 0);

  if (gpu.gpu_state_change)
    gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
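
// GP0(0x80) VRAM-to-VRAM rectangle copy.  The slow path goes through a small
// line buffer when the rectangles overlap horizontally, wrap around the
// 1024-pixel VRAM width, or the mask bit must be set; otherwise each line is
// a plain memcpy.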
static void do_vram_copy(const uint32_t *params)
  const uint32_t sx =  LE32TOH(params[0]) & 0x3FF;
  const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
  const uint32_t dx =  LE32TOH(params[1]) & 0x3FF;
  const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
  uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
  uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
  uint16_t msb = gpu.ex_regs[6] << 15;

  if (sx == dx && sy == dy && msb == 0)

  renderer_flush_queues();

  if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
    for (y = 0; y < h; y++)
      const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
      uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
      for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
        uint32_t x1, w1 = w - x;
        if (w1 > ARRAY_SIZE(lbuf))
          w1 = ARRAY_SIZE(lbuf);
        for (x1 = 0; x1 < w1; x1++)
          lbuf[x1] = src[(sx + x + x1) & 0x3ff];
        for (x1 = 0; x1 < w1; x1++)
          dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;

    uint32_t sy1 = sy, dy1 = dy;
    for (y = 0; y < h; y++, sy1++, dy1++)
      memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);

  renderer_update_caches(dx, dy, w, h, 0);
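
// Scan a command list without rendering (frameskip).  Large screen-clearing
// fills are still executed, small ones are remembered in pending_fill for
// later; texture page and other 0xEx attribute state keeps being tracked,
// and variable-length poly-lines are measured so the stream stays in sync.
// Returns words consumed; *last_cmd is the last command seen (-1 if it was
// incomplete).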
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
  int cmd = 0, pos = 0, len, dummy, v;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = LE32TOH(list[0]) >> 24;
    len = 1 + cmd_lengths[cmd];

        if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
          // clearing something large, don't skip
          do_cmd_list(list, 3, &dummy);
          memcpy(gpu.frameskip.pending_fill, list, 3 * 4);

        gpu.ex_regs[1] &= ~0x1ff;
        gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;

        for (v = 3; pos + v < count; v++)
          if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))

        for (v = 4; pos + v < count; v += 2)
          if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))

          skip = decide_frameskip_allow(LE32TOH(list[0]));
        if ((cmd & 0xf8) == 0xe0)
          gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);

    if (pos + len > count) {
      break; // incomplete cmd

    if (0x80 <= cmd && cmd <= 0xdf)

  renderer_sync_ecmds(gpu.ex_regs);
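
// Main GP0 dispatcher.  Incoming data first feeds an in-progress VRAM write;
// image load/store commands (0xa0-0xdf) and VRAM copies (0x80-0x9f) are
// handled here, everything else goes to the renderer via do_cmd_list() (or
// to the skip path while frameskipping).  Returns how many words were left
// unprocessed because a command was incomplete.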
static noinline int do_cmd_buffer(uint32_t *data, int count)
  uint32_t old_e3 = gpu.ex_regs[3];

  for (pos = 0; pos < count; )
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      pos += do_vram_io(data + pos, count - pos, 0);

    cmd = LE32TOH(data[pos]) >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      if (unlikely((pos+2) >= count)) {
        // incomplete vram write/read cmd, can't consume yet

      // consume vram write/read cmd
      start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);

    else if ((cmd & 0xe0) == 0x80) {
      if (unlikely((pos+3) >= count)) {
        cmd = -1; // incomplete cmd, can't consume yet

      do_vram_copy(data + pos + 1);

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);

      pos += do_cmd_list(data + pos, count - pos, &cmd);

  gpu.status &= ~0x1fff;
  gpu.status |= gpu.ex_regs[1] & 0x7ff;
  gpu.status |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

static noinline void flush_cmd_buffer(void)
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);

    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  if (left != gpu.cmd_len) {
    if (!gpu.dma.h && gpu.gpu_state_change)
      gpu.gpu_state_change(PGS_PRIMITIVE_START);
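
// GP0 data port entry points: single-word writes accumulate in cmd_buffer
// and are flushed when it fills or when GPU state is read; block/DMA writes
// are processed immediately.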
void GPUwriteDataMem(uint32_t *mem, int count)
  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))

  left = do_cmd_buffer(mem, count);
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);

void GPUwriteData(uint32_t data)
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
  uint32_t addr, *list, ld_addr = 0;
  int len, left, count;

  preload(rambase + (start_addr & 0x1fffff) / 4);

  if (unlikely(gpu.cmd_len > 0))

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; (addr & 0x800000) == 0; count++)
    list = rambase + (addr & 0x1fffff) / 4;
    len = LE32TOH(list[0]) >> 24;
    addr = LE32TOH(list[0]) & 0xffffff;
    preload(rambase + (addr & 0x1fffff) / 4);

    cpu_cycles += 5 + len;

    log_io(".chain %08lx #%d+%d\n",
      (long)(list - rambase) * 4, len, gpu.cmd_len);
    if (unlikely(gpu.cmd_len > 0)) {
      if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
        log_anomaly("cmd_buffer overflow, likely garbage commands\n");

      memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);

      left = do_cmd_buffer(list + 1, len);
        memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
        log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);

      *progress_addr = addr;
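
    // Some lists apparently link back on themselves; after LD_THRESHOLD
    // nodes start marking visited entries with bit 23 so that revisiting a
    // marked entry terminates the walk (the loop condition checks that bit).
    // The markers are stripped again once the walk is done.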
#define LD_THRESHOLD (8*1024)
    if (count >= LD_THRESHOLD) {
      if (count == LD_THRESHOLD) {

      // loop detection marker
      // (bit23 set causes DMA error on real machine, so
      // unlikely to be ever set by the game)
      list[0] |= HTOLE32(0x800000);

    // remove loop detection markers
    count -= LD_THRESHOLD + 2;
    addr = ld_addr & 0x1fffff;
    while (count-- > 0) {
      list = rambase + addr / 4;
      addr = LE32TOH(list[0]) & 0x1fffff;
      list[0] &= HTOLE32(~0x800000);

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;
void GPUreadDataMem(uint32_t *mem, int count)
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))

    do_vram_io(mem, count, 1);

uint32_t GPUreadData(void)
  if (unlikely(gpu.cmd_len > 0))

    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);

uint32_t GPUreadStatus(void)
  if (unlikely(gpu.cmd_len > 0))

  log_io("gpu_read_status %08x\n", ret);
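
// Savestate block exchanged with the emulator core: GPU status, the control
// register history and a full copy of VRAM.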
  uint32_t ulFreezeVersion;           // should be always 1 for now (set by main emu)
  uint32_t ulStatus;                  // current gpu status
  uint32_t ulControl[256];            // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
      memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status;

      memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status = freeze->ulStatus;
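      // replay the control registers so derived state (display mode, ranges,
      // screen position) is rebuilt; regs[] is toggled first so the
      // "unchanged value" early-out in GPUwriteStatus() does not skip them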
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512, 0);
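
// Per-frame hook from the core: flush the renderer and decide whether to
// present, skipping when the display is blanked, nothing is dirty, or
// frameskip wants this frame dropped.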
void GPUupdateLace(void)
  renderer_flush_queues();

#ifndef RAW_FB_DISPLAY
  if (gpu.status & PSX_GPU_STATUS_BLANKING) {
    if (!gpu.state.blanked) {
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;

  if (!gpu.state.fb_dirty)

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)

      gpu.frameskip.active = 0;
    gpu.frameskip.frame_ready = 0;

  if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
    renderer_update_caches(0, 0, 1024, 512, 1);
  gpu.state.enhancement_was_active = gpu.state.enhancement_active;
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
void GPUvBlank(int is_vblank, int lcf)
  int interlace = gpu.state.allow_interlace
    && (gpu.status & PSX_GPU_STATUS_INTERLACE)
    && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)

  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);

void GPUgetScreenInfo(int *y, int *base_hres)
  *base_hres = gpu.screen.vres;
  if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
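
// Runtime configuration pushed by the frontend: frameskip level, screen
// centering, interlace handling and the memory-mapping callbacks.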
void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
  if (gpu.state.screen_centering_type != cbs->screen_centering_type
      || gpu.state.screen_centering_x != cbs->screen_centering_x
      || gpu.state.screen_centering_y != cbs->screen_centering_y) {
    gpu.state.screen_centering_type = cbs->screen_centering_type;
    gpu.state.screen_centering_x = cbs->screen_centering_x;
    gpu.state.screen_centering_y = cbs->screen_centering_y;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;
  gpu.gpu_state_change = cbs->gpu_state_change;

  if (gpu.vram == NULL)

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);

// vim:shiftwidth=2:expandtab