2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
15 #include "../../libpcsxcore/gpu.h" // meh
// Element count of a true array; not valid on pointers (they decay).
17 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
// GCC/Clang hints: branch prediction, cache prefetch, inline suppression.
19 #define unlikely(x) __builtin_expect((x), 0)
20 #define preload __builtin_prefetch
21 #define noinline __attribute__((noinline))
// Uncomment to route I/O tracing through gpu_log.
28 //#define log_io gpu_log
33 static noinline int do_cmd_buffer(uint32_t *data, int count);
34 static void finish_vram_transfer(int is_read);
// Drain any buffered GP0 command words and close an unfinished VRAM
// transfer, so no stale state survives a GPU reset.
36 static noinline void do_cmd_reset(void)
38 if (unlikely(gpu.cmd_len > 0))
// flush whatever is still queued in the command buffer
39 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
42 if (unlikely(gpu.dma.h > 0))
// terminate an in-flight VRAM read/write transfer cleanly
43 finish_vram_transfer(gpu.dma_start.is_read);
// Reset GPU registers and screen geometry to power-on defaults, then
// resync the renderer (draw-env commands + resolution notification).
47 static noinline void do_reset(void)
53 memset(gpu.regs, 0, sizeof(gpu.regs));
// ex_regs shadow the last E0..E7 draw-environment commands; seed each
// with its own opcode in the top byte and zeroed parameters
54 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
55 gpu.ex_regs[i] = (0xe0 + i) << 24;
// power-on status word
56 gpu.status = 0x14802000;
// default display mode: 256x240 at origin
59 gpu.screen.hres = gpu.screen.w = 256;
60 gpu.screen.vres = gpu.screen.h = 240;
61 gpu.screen.x = gpu.screen.y = 0;
62 renderer_sync_ecmds(gpu.ex_regs);
63 renderer_notify_res_change();
// Recompute horizontal resolution, visible width and x offset from the
// status word (hres select, status bits 16-18) and display range x1/x2.
66 static noinline void update_width(void)
// hres table and matching GPU-clock dividers, both indexed by the
// same 3 status bits
68 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
69 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
70 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
71 int hres = hres_all[(gpu.status >> 16) & 7];
72 int pal = gpu.status & PSX_GPU_STATUS_PAL;
73 int sw = gpu.screen.x2 - gpu.screen.x1;
76 /* nothing displayed? */;
// x1 relative to the nominal display start, converted to pixels
78 int s = pal ? 656 : 608; // or 600? pal is just a guess
79 x = (gpu.screen.x1 - s) / hdiv;
80 x = (x + 1) & ~1; // blitter limitation
82 sw = (sw + 2) & ~3; // according to nocash
83 switch (gpu.state.screen_centering_type) {
// manual centering: take the user-provided x
87 x = gpu.state.screen_centering_x;
90 // correct if slightly miscentered
91 x_auto = (hres - sw) / 2 & ~3;
92 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
97 // .x range check is done in vout_update()
99 // reduce the unpleasant right border that a few games have
100 if (gpu.state.screen_centering_type == 0
101 && x <= 4 && hres - (x + sw) >= 4)
105 gpu.screen.hres = hres;
106 gpu.state.dims_changed = 1;
107 //printf("xx %d %d -> %2d, %d / %d\n",
108 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
// Recompute vertical resolution, visible height and y offset from the
// status word (PAL, double-height) and display range y1/y2.
111 static noinline void update_height(void)
113 int pal = gpu.status & PSX_GPU_STATUS_PAL;
114 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
115 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
116 int sh = gpu.screen.y2 - gpu.screen.y1;
// PAL screens taller than 240 lines use the 256-line mode
120 if (pal && (sh > 240 || gpu.screen.vres == 256))
// double-height (interlace) doubles the vertical quantities
123 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
125 /* nothing displayed? */;
127 switch (gpu.state.screen_centering_type) {
// manual centering: take the user-provided y
131 y = gpu.state.screen_centering_y;
134 // correct if slightly miscentered
135 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
143 gpu.screen.vres = vres;
144 gpu.state.dims_changed = 1;
145 //printf("yy %d %d -> %d, %d / %d\n",
146 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
// Decide whether the next frame should be skipped, from the configured
// skip count and/or external advice supplied by the frontend.
149 static noinline void decide_frameskip(void)
151 if (gpu.frameskip.active)
// previous frame was skipped, so this one gets rendered
154 gpu.frameskip.cnt = 0;
155 gpu.frameskip.frame_ready = 1;
// external advice (frontend pointer) can force skipping on
158 if (!gpu.frameskip.active && *gpu.frameskip.advice)
159 gpu.frameskip.active = 1;
160 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
161 gpu.frameskip.active = 1;
163 gpu.frameskip.active = 0;
// a fill deferred while skipping must still run once we render again,
// or stale framebuffer contents would show through
165 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
167 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
168 gpu.frameskip.pending_fill[0] = 0;
// Frameskip is only allowed when the draw area origin (from a GP0 E3
// command word) lies outside the displayed region; interlace mode
// always allows it.
172 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
174 // no frameskip if it decides to draw to display area,
175 // but not for interlace since it'll most likely always do that
176 uint32_t x = cmd_e3 & 0x3ff;
177 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
// the unsigned subtract-and-compare doubles as a two-sided range check
178 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
179 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
180 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
181 return gpu.frameskip.allow;
184 static void flush_cmd_buffer(void);
// GP1(0x10) "get GPU info": latch the requested value into gpu.gp0 so
// a following GPUREAD returns it.
186 static noinline void get_gpu_info(uint32_t data)
188 if (unlikely(gpu.cmd_len > 0))
190 switch (data & 0x0f) {
// shadow copies of draw-environment regs (masked to their valid bits)
194 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
197 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
208 // double, for overdraw guard
209 #define VRAM_SIZE (1024 * 512 * 2 * 2)
// Map the VRAM buffer via the frontend-provided mmap callback and
// offset the pointer past a guard area. Prints a warning on failure.
211 static int map_vram(void)
213 gpu.vram = gpu.mmap(VRAM_SIZE)
214 if (gpu.vram != NULL) {
// advance 4096 bytes (pointer is uint16_t*, hence /2) for a guard
// area in front of the addressable VRAM
215 gpu.vram += 4096 / 2;
219 fprintf(stderr, "could not map vram, expect crashes\n");
// NOTE(review): fragment of GPUinit (function header not visible here) —
// initializes the renderer, zeroes runtime state, and points the
// counter pointers at a harmless zero until real callbacks arrive.
228 ret |= renderer_init();
230 memset(&gpu.state, 0, sizeof(gpu.state));
231 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
// safe defaults until GPUrearmedCallbacks() provides real counters
233 gpu.state.frame_count = &gpu.zero;
234 gpu.state.hcnt = &gpu.zero;
238 if (gpu.mmap != NULL) {
// Plugin shutdown: release the VRAM mapping.
245 long GPUshutdown(void)
251 if (gpu.vram != NULL) {
// undo the guard-area offset applied in map_vram() before unmapping
252 gpu.vram -= 4096 / 2;
253 gpu.munmap(gpu.vram, VRAM_SIZE);
// GP1 control port write: dispatch on the command byte (data >> 24).
260 void GPUwriteStatus(uint32_t data)
262 uint32_t cmd = data >> 24;
264 if (cmd < ARRAY_SIZE(gpu.regs)) {
// skip redundant writes, except cmds 0/1 and 5 (display start)
265 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
267 gpu.regs[cmd] = data;
270 gpu.state.fb_dirty = 1;
// display enable/disable (blanking)
281 gpu.status |= PSX_GPU_STATUS_BLANKING;
282 gpu.state.dims_changed = 1; // for hud clearing
285 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction bits
288 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
289 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// display start address in VRAM
292 gpu.screen.src_x = data & 0x3ff;
293 gpu.screen.src_y = (data >> 10) & 0x1ff;
294 renderer_notify_scanout_x_change(gpu.screen.src_x, gpu.screen.hres);
295 if (gpu.frameskip.set) {
296 decide_frameskip_allow(gpu.ex_regs[3]);
// treat a display-start change as a frame flip; re-decide frameskip
// at most once per emulated frame
297 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
299 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// horizontal display range
304 gpu.screen.x1 = data & 0xfff;
305 gpu.screen.x2 = (data >> 12) & 0xfff;
// vertical display range
309 gpu.screen.y1 = data & 0x3ff;
310 gpu.screen.y2 = (data >> 10) & 0x3ff;
// display mode: rebuild status bits 16-22 from the mode word
314 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
317 renderer_notify_res_change();
// GP1(0x10-0x1f): info query
320 if ((cmd & 0xf0) == 0x10)
325 #ifdef GPUwriteStatus_ext
326 GPUwriteStatus_ext(data);
// Extra parameter words for each GP0 command byte; a packet occupies
// 1 + cmd_lengths[cmd] words (inline "// 20" style markers give the
// command byte of the row's first entry in hex).
330 const unsigned char cmd_lengths[256] =
332 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
333 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
334 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
335 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
336 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
337 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
338 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
339 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
340 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
341 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
342 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
343 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
344 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
345 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
346 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
347 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
// Address of the 16bpp pixel at (x, y) in the 1024-halfword-wide VRAM.
350 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
// Copy l halfwords while OR-ing msb into each one (forced mask bit).
352 static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
355 for (i = 0; i < l; i++)
356 dst[i] = src[i] | msb;
// Transfer l halfwords of one scanline between VRAM and mem.
// is_read: VRAM -> mem; otherwise mem -> VRAM, forcing msb if nonzero.
359 static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
360 int is_read, uint16_t msb)
362 uint16_t *vram = VRAM_MEM_XY(x, y);
363 if (unlikely(is_read))
364 memcpy(mem, vram, l * 2);
365 else if (unlikely(msb))
// slow path: each halfword needs the mask bit OR-ed in
366 cpy_msb(vram, mem, l, msb);
368 memcpy(vram, mem, l * 2);
// Feed `count` words to an ongoing VRAM read/write transfer, resuming
// from the saved gpu.dma state (x/y/w/h/offset). Returns the number of
// 32-bit words actually consumed.
371 static int do_vram_io(uint32_t *data, int count, int is_read)
373 int count_initial = count;
// GP0 E6 bit 0 shifted up to the halfword mask bit (bit 15)
374 uint16_t msb = gpu.ex_regs[6] << 15;
375 uint16_t *sdata = (uint16_t *)data;
376 int x = gpu.dma.x, y = gpu.dma.y;
377 int w = gpu.dma.w, h = gpu.dma.h;
378 int o = gpu.dma.offset;
380 count *= 2; // operate in 16bpp pixels
// first finish a line left partially transferred by a previous call
382 if (gpu.dma.offset) {
383 l = w - gpu.dma.offset;
387 do_vram_line(x + o, y, sdata, l, is_read, msb);
// then whole lines
400 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
402 do_vram_line(x, y, sdata, w, is_read, msb);
// trailing partial line; its remainder resumes on the next call
408 do_vram_line(x, y, sdata, count, is_read, msb);
414 finish_vram_transfer(is_read);
419 return count_initial - count / 2;
// Begin a VRAM image transfer (write or read): decode the position and
// size words and prime gpu.dma for do_vram_io().
422 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
425 log_anomaly("start_vram_transfer while old unfinished\n");
427 gpu.dma.x = pos_word & 0x3ff;
428 gpu.dma.y = (pos_word >> 16) & 0x1ff;
// size 0 means maximum (1024/512), hence the -1 / mask / +1 dance
429 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
430 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
432 gpu.dma.is_read = is_read;
433 gpu.dma_start = gpu.dma;
435 renderer_flush_queues();
// for reads: raise the "ready to send image" bit and preload the
// first word for GPUREAD
437 gpu.status |= PSX_GPU_STATUS_IMG;
438 // XXX: wrong for width 1
439 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
440 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
443 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
444 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
445 if (gpu.gpu_state_change)
446 gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);
// Close a VRAM transfer: clear the image-ready bit on reads; on writes
// mark the framebuffer dirty and refresh the renderer's caches for the
// written rectangle.
449 static void finish_vram_transfer(int is_read)
452 gpu.status &= ~PSX_GPU_STATUS_IMG;
454 gpu.state.fb_dirty = 1;
455 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
456 gpu.dma_start.w, gpu.dma_start.h, 0);
458 if (gpu.gpu_state_change)
459 gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
// VRAM-to-VRAM rectangle copy (GP0 0x80 family) with 1024x512
// wraparound and optional forced mask bit.
462 static void do_vram_copy(const uint32_t *params)
464 const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
465 const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
466 const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
467 const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
// size 0 means maximum (1024/512)
468 uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
469 uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
470 uint16_t msb = gpu.ex_regs[6] << 15;
// copying a rectangle onto itself without the mask bit is a no-op
474 if (sx == dx && sy == dy && msb == 0)
477 renderer_flush_queues();
// slow path: overlapping rows, horizontal wraparound, or mask-bit
// forcing — stage each chunk of a row through lbuf
479 if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
481 for (y = 0; y < h; y++)
483 const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
484 uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
485 for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
487 uint32_t x1, w1 = w - x;
488 if (w1 > ARRAY_SIZE(lbuf))
489 w1 = ARRAY_SIZE(lbuf);
490 for (x1 = 0; x1 < w1; x1++)
491 lbuf[x1] = src[(sx + x + x1) & 0x3ff];
492 for (x1 = 0; x1 < w1; x1++)
493 dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
// fast path: rows neither overlap nor wrap horizontally — one memcpy
// per row (only y still wraps)
499 uint32_t sy1 = sy, dy1 = dy;
500 for (y = 0; y < h; y++, sy1++, dy1++)
501 memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
504 renderer_update_caches(dx, dy, w, h, 0);
// Walk a GP0 command list while frameskipping: drop draw commands but
// keep state current (E0-E7 regs, texture page, deferred fills) and
// stop as soon as skipping becomes disallowed. Returns words consumed;
// *last_cmd receives the last command byte seen.
507 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
509 int cmd = 0, pos = 0, len, dummy, v;
512 gpu.frameskip.pending_fill[0] = 0;
514 while (pos < count && skip) {
515 uint32_t *list = data + pos;
516 cmd = LE32TOH(list[0]) >> 24;
517 len = 1 + cmd_lengths[cmd];
// fill: execute immediately if larger than the screen, otherwise
// remember it (pending_fill) to run once rendering resumes
521 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
522 // clearing something large, don't skip
523 do_cmd_list(list, 3, &dummy);
525 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// keep the texture-page bits in ex_regs[1] in sync even when the
// textured primitive itself is skipped
531 gpu.ex_regs[1] &= ~0x1ff;
532 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// poly-line: scan forward for the 0x5xxx5xxx terminator word
535 for (v = 3; pos + v < count; v++)
537 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// shaded poly-line: terminator can only appear on every other word
543 for (v = 4; pos + v < count; v += 2)
545 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// draw-area change may forbid further skipping
552 skip = decide_frameskip_allow(LE32TOH(list[0]));
553 if ((cmd & 0xf8) == 0xe0)
554 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
558 if (pos + len > count) {
560 break; // incomplete cmd
// VRAM access commands (0x80-0xdf) cannot be handled here
562 if (0x80 <= cmd && cmd <= 0xdf)
568 renderer_sync_ecmds(gpu.ex_regs);
// Main GP0 dispatcher: route incoming words to VRAM I/O, VRAM copy,
// transfer setup, or the renderer (optionally filtered by frameskip).
// Returns the number of words left unconsumed (incomplete command).
573 static noinline int do_cmd_buffer(uint32_t *data, int count)
576 uint32_t old_e3 = gpu.ex_regs[3];
580 for (pos = 0; pos < count; )
// an active VRAM write transfer consumes the stream directly
582 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
584 pos += do_vram_io(data + pos, count - pos, 0);
589 cmd = LE32TOH(data[pos]) >> 24;
// 0xa0-0xdf: image load/store setup (3-word packet)
590 if (0xa0 <= cmd && cmd <= 0xdf) {
591 if (unlikely((pos+2) >= count)) {
592 // incomplete vram write/read cmd, can't consume yet
597 // consume vram write/read cmd
598 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
// 0x80-0x9f: vram-to-vram copy (4-word packet)
602 else if ((cmd & 0xe0) == 0x80) {
603 if (unlikely((pos+3) >= count)) {
604 cmd = -1; // incomplete cmd, can't consume yet
607 do_vram_copy(data + pos + 1);
613 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
614 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
615 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
617 pos += do_cmd_list(data + pos, count - pos, &cmd);
// mirror draw-env / mask-setting bits from ex_regs into the status word
626 gpu.status &= ~0x1fff;
627 gpu.status |= gpu.ex_regs[1] & 0x7ff;
628 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
630 gpu.state.fb_dirty |= vram_dirty;
// draw-area change may flip the frameskip decision for next time
632 if (old_e3 != gpu.ex_regs[3])
633 decide_frameskip_allow(gpu.ex_regs[3]);
// Run the buffered command words; any unconsumed tail is moved to the
// start of the buffer to be completed by the next write.
638 static noinline void flush_cmd_buffer(void)
640 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
642 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
643 if (left != gpu.cmd_len) {
644 if (!gpu.dma.h && gpu.gpu_state_change)
645 gpu.gpu_state_change(PGS_PRIMITIVE_START);
// Bulk GP0 write (DMA block mode): process `count` words from mem;
// words that cannot form a complete command are discarded with a log.
650 void GPUwriteDataMem(uint32_t *mem, int count)
654 log_io("gpu_dma_write %p %d\n", mem, count);
656 if (unlikely(gpu.cmd_len > 0))
659 left = do_cmd_buffer(mem, count);
661 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
// Single-word GP0 write: buffer it (little-endian) and flush when the
// command buffer fills up.
664 void GPUwriteData(uint32_t data)
666 log_io("gpu_write %08x\n", data);
667 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
668 if (gpu.cmd_len >= CMD_BUFFER_LEN)
// Execute a GP0 linked-list DMA chain starting at start_addr in RAM.
// Infinite self-referencing lists are caught by temporarily marking
// visited nodes with bit 23 (and unmarking them afterwards). Returns
// an emulated cycle count; *progress_addr reports how far we got.
672 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
674 uint32_t addr, *list, ld_addr = 0;
675 int len, left, count;
678 preload(rambase + (start_addr & 0x1fffff) / 4);
680 if (unlikely(gpu.cmd_len > 0))
683 log_io("gpu_dma_chain\n");
// bit 23 set in the address terminates the list (0xffffff marker)
684 addr = start_addr & 0xffffff;
685 for (count = 0; (addr & 0x800000) == 0; count++)
687 list = rambase + (addr & 0x1fffff) / 4;
// node header: next address in the low 24 bits, payload word count
// in the top byte
688 len = LE32TOH(list[0]) >> 24;
689 addr = LE32TOH(list[0]) & 0xffffff;
690 preload(rambase + (addr & 0x1fffff) / 4);
694 cpu_cycles += 5 + len;
696 log_io(".chain %08lx #%d+%d\n",
697 (long)(list - rambase) * 4, len, gpu.cmd_len);
// if earlier words are still pending, append this node's payload to
// the command buffer so ordering is preserved
698 if (unlikely(gpu.cmd_len > 0)) {
699 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
700 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
703 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
710 left = do_cmd_buffer(list + 1, len);
// stash the unconsumed tail for the next call
712 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
714 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
719 *progress_addr = addr;
// loop detection: after LD_THRESHOLD nodes, start tagging headers
722 #define LD_THRESHOLD (8*1024)
723 if (count >= LD_THRESHOLD) {
724 if (count == LD_THRESHOLD) {
729 // loop detection marker
730 // (bit23 set causes DMA error on real machine, so
731 // unlikely to be ever set by the game)
732 list[0] |= HTOLE32(0x800000);
737 // remove loop detection markers
738 count -= LD_THRESHOLD + 2;
739 addr = ld_addr & 0x1fffff;
740 while (count-- > 0) {
741 list = rambase + addr / 4;
742 addr = LE32TOH(list[0]) & 0x1fffff;
743 list[0] &= HTOLE32(~0x800000);
// remember this list for display/debug purposes
747 gpu.state.last_list.frame = *gpu.state.frame_count;
748 gpu.state.last_list.hcnt = *gpu.state.hcnt;
749 gpu.state.last_list.cycles = cpu_cycles;
750 gpu.state.last_list.addr = start_addr;
// Bulk GPUREAD (DMA): service an active VRAM->CPU transfer into mem.
755 void GPUreadDataMem(uint32_t *mem, int count)
757 log_io("gpu_dma_read %p %d\n", mem, count);
759 if (unlikely(gpu.cmd_len > 0))
763 do_vram_io(mem, count, 1);
// Single-word GPUREAD: returns the next VRAM transfer word (or the
// value latched in gpu.gp0 by get_gpu_info).
766 uint32_t GPUreadData(void)
770 if (unlikely(gpu.cmd_len > 0))
776 do_vram_io(&ret, 1, 1);
780 log_io("gpu_read %08x\n", ret);
// Read the GPUSTAT register (after flushing buffered commands so the
// status bits reflect all writes so far).
784 uint32_t GPUreadStatus(void)
788 if (unlikely(gpu.cmd_len > 0))
792 log_io("gpu_read_status %08x\n", ret);
// NOTE(review): fields of struct GPUFreeze (savestate layout — do not
// reorder or resize without breaking savestate compatibility).
798 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
799 uint32_t ulStatus; // current gpu status
800 uint32_t ulControl[256]; // latest control register values
801 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
// Savestate support: copy VRAM/registers/status out to the freeze
// struct, or restore them and replay GP1 writes so all derived state
// (screen geometry etc.) is rebuilt.
804 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
// save path
812 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
813 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
814 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
815 freeze->ulStatus = gpu.status;
// load path
818 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
819 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
820 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
821 gpu.status = freeze->ulStatus;
// replay GP1 regs 8..1; the XOR trick defeats the redundant-write
// filter in GPUwriteStatus so every handler actually runs
823 for (i = 8; i > 0; i--) {
824 gpu.regs[i] ^= 1; // avoid reg change detection
825 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
827 renderer_sync_ecmds(gpu.ex_regs);
828 renderer_update_caches(0, 0, 1024, 512, 1);
// Per-vblank output step: flush renderer queues and flip the frame to
// the frontend unless blanked, unchanged, or skipped.
835 void GPUupdateLace(void)
839 renderer_flush_queues();
841 #ifndef RAW_FB_DISPLAY
// blanked display: flip once so the screen clears, then stay idle
842 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
843 if (!gpu.state.blanked) {
845 gpu.state.blanked = 1;
846 gpu.state.fb_dirty = 1;
// nothing rendered since the last flip -> nothing to present
851 if (!gpu.state.fb_dirty)
// frameskip: if no frame became ready, force rendering back on after
// several frames so the display cannot stall indefinitely
855 if (gpu.frameskip.set) {
856 if (!gpu.frameskip.frame_ready) {
857 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
859 gpu.frameskip.active = 0;
861 gpu.frameskip.frame_ready = 0;
// enhancement toggled on -> renderer caches are stale, rebuild all
865 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
866 renderer_update_caches(0, 0, 1024, 512, 1);
867 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
868 gpu.state.fb_dirty = 0;
869 gpu.state.blanked = 0;
// Vblank callback: decide whether interlaced rendering should be used
// and pass the current field (lcf) on to the renderer.
872 void GPUvBlank(int is_vblank, int lcf)
874 int interlace = gpu.state.allow_interlace
875 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
876 && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
877 // interlace doesn't look nice on progressive displays,
878 // so we have this "auto" mode here for games that don't read vram
879 if (gpu.state.allow_interlace == 2
880 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// (re)configure the renderer when interlace is on or just changed
884 if (interlace || interlace != gpu.state.old_interlace) {
885 gpu.state.old_interlace = interlace;
889 renderer_flush_queues();
890 renderer_set_interlace(interlace, !lcf);
// Report display position/resolution to the frontend. Note that
// base_hres is assigned the *vertical* resolution here — presumably
// intentional in this API, but the naming is misleading; it appears to
// be halved when double-height interlace is active (DHEIGHT check).
894 void GPUgetScreenInfo(int *y, int *base_hres)
897 *base_hres = gpu.screen.vres;
898 if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
902 #include "../../frontend/plugin_lib.h"
// Accept frontend configuration and callbacks (ReARMed-specific API):
// frameskip settings, timing counters, interlace/enhancement options,
// screen centering, memory-map hooks and output configuration.
904 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
906 gpu.frameskip.set = cbs->frameskip;
907 gpu.frameskip.advice = &cbs->fskip_advice;
908 gpu.frameskip.active = 0;
909 gpu.frameskip.frame_ready = 1;
910 gpu.state.hcnt = cbs->gpu_hcnt;
911 gpu.state.frame_count = cbs->gpu_frame_count;
912 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
913 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
// only recompute screen geometry when centering settings changed
914 if (gpu.state.screen_centering_type != cbs->screen_centering_type
915 || gpu.state.screen_centering_x != cbs->screen_centering_x
916 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
917 gpu.state.screen_centering_type = cbs->screen_centering_type;
918 gpu.state.screen_centering_x = cbs->screen_centering_x;
919 gpu.state.screen_centering_y = cbs->screen_centering_y;
924 gpu.mmap = cbs->mmap;
925 gpu.munmap = cbs->munmap;
926 gpu.gpu_state_change = cbs->gpu_state_change;
// VRAM is mapped lazily, once the frontend has provided mmap
929 if (gpu.vram == NULL)
932 if (cbs->pl_vout_set_raw_vram)
933 cbs->pl_vout_set_raw_vram(gpu.vram);
934 renderer_set_config(cbs);
935 vout_set_config(cbs);
938 // vim:shiftwidth=2:expandtab