/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */
15 #include "../../libpcsxcore/gpu.h" // meh
17 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
19 #define unlikely(x) __builtin_expect((x), 0)
20 #define preload __builtin_prefetch
21 #define noinline __attribute__((noinline))
28 //#define log_io gpu_log
33 static noinline int do_cmd_buffer(uint32_t *data, int count);
34 static void finish_vram_transfer(int is_read);
36 static noinline void do_cmd_reset(void)
38 if (unlikely(gpu.cmd_len > 0))
39 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
42 if (unlikely(gpu.dma.h > 0))
43 finish_vram_transfer(gpu.dma_start.is_read);
47 static noinline void do_reset(void)
53 memset(gpu.regs, 0, sizeof(gpu.regs));
54 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
55 gpu.ex_regs[i] = (0xe0 + i) << 24;
56 gpu.status = 0x14802000;
59 gpu.screen.hres = gpu.screen.w = 256;
60 gpu.screen.vres = gpu.screen.h = 240;
61 gpu.screen.x = gpu.screen.y = 0;
62 renderer_sync_ecmds(gpu.ex_regs);
63 renderer_notify_res_change();
66 static noinline void update_width(void)
68 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
69 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
70 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
71 int hres = hres_all[(gpu.status >> 16) & 7];
72 int pal = gpu.status & PSX_GPU_STATUS_PAL;
73 int sw = gpu.screen.x2 - gpu.screen.x1;
76 /* nothing displayed? */;
78 int s = pal ? 656 : 608; // or 600? pal is just a guess
79 x = (gpu.screen.x1 - s) / hdiv;
80 x = (x + 1) & ~1; // blitter limitation
82 sw = (sw + 2) & ~3; // according to nocash
83 switch (gpu.state.screen_centering_type) {
87 x = gpu.state.screen_centering_x;
90 // correct if slightly miscentered
91 x_auto = (hres - sw) / 2 & ~3;
92 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
97 // .x range check is done in vout_update()
99 // reduce the unpleasant right border that a few games have
100 if (gpu.state.screen_centering_type == 0
101 && x <= 4 && hres - (x + sw) >= 4)
105 gpu.screen.hres = hres;
106 gpu.state.dims_changed = 1;
107 //printf("xx %d %d -> %2d, %d / %d\n",
108 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
111 static noinline void update_height(void)
113 int pal = gpu.status & PSX_GPU_STATUS_PAL;
114 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
115 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
116 int sh = gpu.screen.y2 - gpu.screen.y1;
120 if (pal && (sh > 240 || gpu.screen.vres == 256))
123 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
125 /* nothing displayed? */;
127 switch (gpu.state.screen_centering_type) {
131 y = gpu.state.screen_centering_y;
134 // correct if slightly miscentered
135 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
143 gpu.screen.vres = vres;
144 gpu.state.dims_changed = 1;
145 //printf("yy %d %d -> %d, %d / %d\n",
146 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
149 static noinline void decide_frameskip(void)
151 if (gpu.frameskip.active)
154 gpu.frameskip.cnt = 0;
155 gpu.frameskip.frame_ready = 1;
158 if (!gpu.frameskip.active && *gpu.frameskip.advice)
159 gpu.frameskip.active = 1;
160 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
161 gpu.frameskip.active = 1;
163 gpu.frameskip.active = 0;
165 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
167 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
168 gpu.frameskip.pending_fill[0] = 0;
172 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
174 // no frameskip if it decides to draw to display area,
175 // but not for interlace since it'll most likely always do that
176 uint32_t x = cmd_e3 & 0x3ff;
177 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
178 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
179 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
180 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
181 return gpu.frameskip.allow;
184 static void flush_cmd_buffer(void);
186 static noinline void get_gpu_info(uint32_t data)
188 if (unlikely(gpu.cmd_len > 0))
190 switch (data & 0x0f) {
194 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
197 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
208 // double, for overdraw guard
209 #define VRAM_SIZE (1024 * 512 * 2 * 2)
211 static int map_vram(void)
213 gpu.vram = gpu.mmap(VRAM_SIZE);
214 if (gpu.vram != NULL) {
215 gpu.vram += 4096 / 2;
219 fprintf(stderr, "could not map vram, expect crashes\n");
228 ret |= renderer_init();
230 memset(&gpu.state, 0, sizeof(gpu.state));
231 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
233 gpu.state.frame_count = &gpu.zero;
234 gpu.state.hcnt = &gpu.zero;
238 if (gpu.mmap != NULL) {
245 long GPUshutdown(void)
251 if (gpu.vram != NULL) {
252 gpu.vram -= 4096 / 2;
253 gpu.munmap(gpu.vram, VRAM_SIZE);
260 void GPUwriteStatus(uint32_t data)
262 uint32_t cmd = data >> 24;
265 if (cmd < ARRAY_SIZE(gpu.regs)) {
266 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
268 gpu.regs[cmd] = data;
271 gpu.state.fb_dirty = 1;
282 gpu.status |= PSX_GPU_STATUS_BLANKING;
283 gpu.state.dims_changed = 1; // for hud clearing
286 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
289 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
290 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
293 src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff;
294 if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) {
295 gpu.screen.src_x = src_x;
296 gpu.screen.src_y = src_y;
297 renderer_notify_scanout_change(src_x, src_y);
298 if (gpu.frameskip.set) {
299 decide_frameskip_allow(gpu.ex_regs[3]);
300 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
302 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
308 gpu.screen.x1 = data & 0xfff;
309 gpu.screen.x2 = (data >> 12) & 0xfff;
313 gpu.screen.y1 = data & 0x3ff;
314 gpu.screen.y2 = (data >> 10) & 0x3ff;
318 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
321 renderer_notify_res_change();
324 if ((cmd & 0xf0) == 0x10)
329 #ifdef GPUwriteStatus_ext
330 GPUwriteStatus_ext(data);
// Number of EXTRA words (beyond the command word itself) each GP0
// command consumes; 0 for single-word and variable-length commands.
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

// Copy l 16bpp pixels while OR-ing in the mask bit (bit 15), used when
// the "set mask while drawing" attribute (E6h bit 0) is active.
static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
{
  int i;
  for (i = 0; i < l; i++)
    dst[i] = src[i] | msb;
}
// Transfer one row of l pixels between VRAM at (x,y) and host memory.
// is_read: VRAM -> mem; otherwise mem -> VRAM, applying the mask bit
// via cpy_msb() when msb is nonzero.
static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
    int is_read, uint16_t msb)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (unlikely(is_read))
    memcpy(mem, vram, l * 2);
  else if (unlikely(msb))
    cpy_msb(vram, mem, l, msb);
  else
    memcpy(vram, mem, l * 2);
}
375 static int do_vram_io(uint32_t *data, int count, int is_read)
377 int count_initial = count;
378 uint16_t msb = gpu.ex_regs[6] << 15;
379 uint16_t *sdata = (uint16_t *)data;
380 int x = gpu.dma.x, y = gpu.dma.y;
381 int w = gpu.dma.w, h = gpu.dma.h;
382 int o = gpu.dma.offset;
384 count *= 2; // operate in 16bpp pixels
386 if (gpu.dma.offset) {
387 l = w - gpu.dma.offset;
391 do_vram_line(x + o, y, sdata, l, is_read, msb);
404 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
406 do_vram_line(x, y, sdata, w, is_read, msb);
412 do_vram_line(x, y, sdata, count, is_read, msb);
418 finish_vram_transfer(is_read);
423 return count_initial - count / 2;
426 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
429 log_anomaly("start_vram_transfer while old unfinished\n");
431 gpu.dma.x = pos_word & 0x3ff;
432 gpu.dma.y = (pos_word >> 16) & 0x1ff;
433 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
434 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
436 gpu.dma.is_read = is_read;
437 gpu.dma_start = gpu.dma;
439 renderer_flush_queues();
441 gpu.status |= PSX_GPU_STATUS_IMG;
442 // XXX: wrong for width 1
443 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
444 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
447 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
448 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
449 if (gpu.gpu_state_change)
450 gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);
453 static void finish_vram_transfer(int is_read)
456 gpu.status &= ~PSX_GPU_STATUS_IMG;
458 gpu.state.fb_dirty = 1;
459 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
460 gpu.dma_start.w, gpu.dma_start.h, 0);
462 if (gpu.gpu_state_change)
463 gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
466 static void do_vram_copy(const uint32_t *params)
468 const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
469 const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
470 const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
471 const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
472 uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
473 uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
474 uint16_t msb = gpu.ex_regs[6] << 15;
478 if (sx == dx && sy == dy && msb == 0)
481 renderer_flush_queues();
483 if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
485 for (y = 0; y < h; y++)
487 const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
488 uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
489 for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
491 uint32_t x1, w1 = w - x;
492 if (w1 > ARRAY_SIZE(lbuf))
493 w1 = ARRAY_SIZE(lbuf);
494 for (x1 = 0; x1 < w1; x1++)
495 lbuf[x1] = src[(sx + x + x1) & 0x3ff];
496 for (x1 = 0; x1 < w1; x1++)
497 dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
503 uint32_t sy1 = sy, dy1 = dy;
504 for (y = 0; y < h; y++, sy1++, dy1++)
505 memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
508 renderer_update_caches(dx, dy, w, h, 0);
511 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
513 int cmd = 0, pos = 0, len, dummy, v;
516 gpu.frameskip.pending_fill[0] = 0;
518 while (pos < count && skip) {
519 uint32_t *list = data + pos;
520 cmd = LE32TOH(list[0]) >> 24;
521 len = 1 + cmd_lengths[cmd];
525 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
526 // clearing something large, don't skip
527 do_cmd_list(list, 3, &dummy);
529 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
535 gpu.ex_regs[1] &= ~0x1ff;
536 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
539 for (v = 3; pos + v < count; v++)
541 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
547 for (v = 4; pos + v < count; v += 2)
549 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
556 skip = decide_frameskip_allow(LE32TOH(list[0]));
557 if ((cmd & 0xf8) == 0xe0)
558 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
562 if (pos + len > count) {
564 break; // incomplete cmd
566 if (0x80 <= cmd && cmd <= 0xdf)
572 renderer_sync_ecmds(gpu.ex_regs);
577 static noinline int do_cmd_buffer(uint32_t *data, int count)
580 uint32_t old_e3 = gpu.ex_regs[3];
584 for (pos = 0; pos < count; )
586 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
588 pos += do_vram_io(data + pos, count - pos, 0);
593 cmd = LE32TOH(data[pos]) >> 24;
594 if (0xa0 <= cmd && cmd <= 0xdf) {
595 if (unlikely((pos+2) >= count)) {
596 // incomplete vram write/read cmd, can't consume yet
601 // consume vram write/read cmd
602 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
606 else if ((cmd & 0xe0) == 0x80) {
607 if (unlikely((pos+3) >= count)) {
608 cmd = -1; // incomplete cmd, can't consume yet
611 do_vram_copy(data + pos + 1);
617 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
618 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
619 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
621 pos += do_cmd_list(data + pos, count - pos, &cmd);
630 gpu.status &= ~0x1fff;
631 gpu.status |= gpu.ex_regs[1] & 0x7ff;
632 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
634 gpu.state.fb_dirty |= vram_dirty;
636 if (old_e3 != gpu.ex_regs[3])
637 decide_frameskip_allow(gpu.ex_regs[3]);
642 static noinline void flush_cmd_buffer(void)
644 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
646 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
647 if (left != gpu.cmd_len) {
648 if (!gpu.dma.h && gpu.gpu_state_change)
649 gpu.gpu_state_change(PGS_PRIMITIVE_START);
654 void GPUwriteDataMem(uint32_t *mem, int count)
658 log_io("gpu_dma_write %p %d\n", mem, count);
660 if (unlikely(gpu.cmd_len > 0))
663 left = do_cmd_buffer(mem, count);
665 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
668 void GPUwriteData(uint32_t data)
670 log_io("gpu_write %08x\n", data);
671 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
672 if (gpu.cmd_len >= CMD_BUFFER_LEN)
676 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
678 uint32_t addr, *list, ld_addr = 0;
679 int len, left, count;
682 preload(rambase + (start_addr & 0x1fffff) / 4);
684 if (unlikely(gpu.cmd_len > 0))
687 log_io("gpu_dma_chain\n");
688 addr = start_addr & 0xffffff;
689 for (count = 0; (addr & 0x800000) == 0; count++)
691 list = rambase + (addr & 0x1fffff) / 4;
692 len = LE32TOH(list[0]) >> 24;
693 addr = LE32TOH(list[0]) & 0xffffff;
694 preload(rambase + (addr & 0x1fffff) / 4);
698 cpu_cycles += 5 + len;
700 log_io(".chain %08lx #%d+%d\n",
701 (long)(list - rambase) * 4, len, gpu.cmd_len);
702 if (unlikely(gpu.cmd_len > 0)) {
703 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
704 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
707 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
714 left = do_cmd_buffer(list + 1, len);
716 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
718 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
723 *progress_addr = addr;
726 #define LD_THRESHOLD (8*1024)
727 if (count >= LD_THRESHOLD) {
728 if (count == LD_THRESHOLD) {
733 // loop detection marker
734 // (bit23 set causes DMA error on real machine, so
735 // unlikely to be ever set by the game)
736 list[0] |= HTOLE32(0x800000);
741 // remove loop detection markers
742 count -= LD_THRESHOLD + 2;
743 addr = ld_addr & 0x1fffff;
744 while (count-- > 0) {
745 list = rambase + addr / 4;
746 addr = LE32TOH(list[0]) & 0x1fffff;
747 list[0] &= HTOLE32(~0x800000);
751 gpu.state.last_list.frame = *gpu.state.frame_count;
752 gpu.state.last_list.hcnt = *gpu.state.hcnt;
753 gpu.state.last_list.cycles = cpu_cycles;
754 gpu.state.last_list.addr = start_addr;
759 void GPUreadDataMem(uint32_t *mem, int count)
761 log_io("gpu_dma_read %p %d\n", mem, count);
763 if (unlikely(gpu.cmd_len > 0))
767 do_vram_io(mem, count, 1);
770 uint32_t GPUreadData(void)
774 if (unlikely(gpu.cmd_len > 0))
780 do_vram_io(&ret, 1, 1);
784 log_io("gpu_read %08x\n", ret);
788 uint32_t GPUreadStatus(void)
792 if (unlikely(gpu.cmd_len > 0))
796 log_io("gpu_read_status %08x\n", ret);
// Savestate layout shared with the main emulator (PSEmu Pro plugin ABI).
struct GPUFreeze
{
  uint32_t ulFreezeVersion;      // should be always 1 for now (set by main emu)
  uint32_t ulStatus;             // current gpu status
  uint32_t ulControl[256];       // latest control register values
  unsigned char psxVRam[1024*1024*2];    // current VRam image (full 2 MB for ZN)
};
808 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
816 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
817 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
818 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
819 freeze->ulStatus = gpu.status;
822 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
823 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
824 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
825 gpu.status = freeze->ulStatus;
827 for (i = 8; i > 0; i--) {
828 gpu.regs[i] ^= 1; // avoid reg change detection
829 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
831 renderer_sync_ecmds(gpu.ex_regs);
832 renderer_update_caches(0, 0, 1024, 512, 0);
839 void GPUupdateLace(void)
843 renderer_flush_queues();
845 #ifndef RAW_FB_DISPLAY
846 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
847 if (!gpu.state.blanked) {
849 gpu.state.blanked = 1;
850 gpu.state.fb_dirty = 1;
855 if (!gpu.state.fb_dirty)
859 if (gpu.frameskip.set) {
860 if (!gpu.frameskip.frame_ready) {
861 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
863 gpu.frameskip.active = 0;
865 gpu.frameskip.frame_ready = 0;
869 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
870 renderer_update_caches(0, 0, 1024, 512, 1);
871 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
872 gpu.state.fb_dirty = 0;
873 gpu.state.blanked = 0;
876 void GPUvBlank(int is_vblank, int lcf)
878 int interlace = gpu.state.allow_interlace
879 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
880 && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
881 // interlace doesn't look nice on progressive displays,
882 // so we have this "auto" mode here for games that don't read vram
883 if (gpu.state.allow_interlace == 2
884 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
888 if (interlace || interlace != gpu.state.old_interlace) {
889 gpu.state.old_interlace = interlace;
893 renderer_flush_queues();
894 renderer_set_interlace(interlace, !lcf);
898 void GPUgetScreenInfo(int *y, int *base_hres)
901 *base_hres = gpu.screen.vres;
902 if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
906 #include "../../frontend/plugin_lib.h"
908 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
910 gpu.frameskip.set = cbs->frameskip;
911 gpu.frameskip.advice = &cbs->fskip_advice;
912 gpu.frameskip.active = 0;
913 gpu.frameskip.frame_ready = 1;
914 gpu.state.hcnt = cbs->gpu_hcnt;
915 gpu.state.frame_count = cbs->gpu_frame_count;
916 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
917 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
918 if (gpu.state.screen_centering_type != cbs->screen_centering_type
919 || gpu.state.screen_centering_x != cbs->screen_centering_x
920 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
921 gpu.state.screen_centering_type = cbs->screen_centering_type;
922 gpu.state.screen_centering_x = cbs->screen_centering_x;
923 gpu.state.screen_centering_y = cbs->screen_centering_y;
928 gpu.mmap = cbs->mmap;
929 gpu.munmap = cbs->munmap;
930 gpu.gpu_state_change = cbs->gpu_state_change;
933 if (gpu.vram == NULL)
936 if (cbs->pl_vout_set_raw_vram)
937 cbs->pl_vout_set_raw_vram(gpu.vram);
938 renderer_set_config(cbs);
939 vout_set_config(cbs);
942 // vim:shiftwidth=2:expandtab