2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
// Element count of a true array (do not use on pointers).
16 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
// Branch-prediction and prefetch hints (GCC/Clang builtins).
18 #define unlikely(x) __builtin_expect((x), 0)
19 #define preload __builtin_prefetch
20 #define noinline __attribute__((noinline))
// Uncomment to route the log_io tracing calls below through gpu_log.
27 //#define log_io gpu_log
// Forward declarations: GP0 command-stream processor and the routine
// that closes out an in-flight VRAM read/write transfer.
32 static noinline int do_cmd_buffer(uint32_t *data, int count);
33 static void finish_vram_transfer(int is_read);
// Drain any buffered-but-unprocessed commands and finish a pending
// VRAM transfer before command state is reset.
// NOTE(review): lines are elided in this view; body shown is partial.
35 static noinline void do_cmd_reset(void)
// Flush commands still queued in gpu.cmd_buffer.
37 if (unlikely(gpu.cmd_len > 0))
38 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
// dma.h > 0 means a VRAM transfer is still in progress — close it out.
41 if (unlikely(gpu.dma.h > 0))
42 finish_vram_transfer(gpu.dma_start.is_read);
// Full GPU reset: clear control registers, reseed the GP0 0xE1..0xE6
// shadow registers, restore the power-on status word and default
// screen mode, then resync the renderer.
46 static noinline void do_reset(void)
52 memset(gpu.regs, 0, sizeof(gpu.regs));
// ex_regs[i] mirrors GP0 command (0xe0 + i); seed each with its own
// command byte in bits 31..24 and a zero payload.
53 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
54 gpu.ex_regs[i] = (0xe0 + i) << 24;
// Power-on GPUSTAT value (magic constant from hardware behavior).
55 gpu.status = 0x14802000;
// Default display mode: 256x240 window at origin.
58 gpu.screen.hres = gpu.screen.w = 256;
59 gpu.screen.vres = gpu.screen.h = 240;
60 gpu.screen.x = gpu.screen.y = 0;
// Push the reset ex_regs and new resolution to the renderer backend.
61 renderer_sync_ecmds(gpu.ex_regs);
62 renderer_notify_res_change();
// Recompute horizontal resolution, visible width and x offset from the
// current GPUSTAT video-mode bits and the x1/x2 display range, applying
// the configured screen-centering policy.
// NOTE(review): lines are elided in this view; body shown is partial.
65 static noinline void update_width(void)
// hres and the matching horizontal clock divider, indexed by
// GPUSTAT bits 18..16 (368-wide modes repeat for the odd encodings).
67 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
68 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
69 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
70 int hres = hres_all[(gpu.status >> 16) & 7];
71 int pal = gpu.status & PSX_GPU_STATUS_PAL;
72 int sw = gpu.screen.x2 - gpu.screen.x1;
75 /* nothing displayed? */;
// Convert the x1 display-range clock value to pixels; s is the
// nominal left-border clock offset (PAL value is a guess, see below).
77 int s = pal ? 656 : 608; // or 600? pal is just a guess
78 x = (gpu.screen.x1 - s) / hdiv;
79 x = (x + 1) & ~1; // blitter limitation
81 sw = (sw + 2) & ~3; // according to nocash
82 switch (gpu.state.screen_centering_type) {
// Manual centering: take the user-supplied x.
86 x = gpu.state.screen_centering_x;
89 // correct if slightly miscentered
90 x_auto = (hres - sw) / 2 & ~3;
91 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
96 // .x range check is done in vout_update()
98 // reduce the unpleasant right border that a few games have
99 if (gpu.state.screen_centering_type == 0
100 && x <= 4 && hres - (x + sw) >= 4)
// Publish the new geometry and flag the change for the video-out layer.
104 gpu.screen.hres = hres;
105 gpu.state.dims_changed = 1;
106 //printf("xx %d %d -> %2d, %d / %d\n",
107 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
// Recompute vertical resolution, visible height and y offset from the
// y1/y2 display range, doubling everything for PAL high-vres modes.
// NOTE(review): lines are elided in this view; body shown is partial.
110 static noinline void update_height(void)
112 int pal = gpu.status & PSX_GPU_STATUS_PAL;
113 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
// Subtract the vertical blank offset (39 lines for PAL, found via
// Spyro; 16 for NTSC).
114 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
115 int sh = gpu.screen.y2 - gpu.screen.y1;
// PAL games using >240 visible lines (or already in 256-line vres)
// get everything doubled to the 512-line space.
119 if (pal && (sh > 240 || gpu.screen.vres == 256))
122 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
124 /* nothing displayed? */;
126 switch (gpu.state.screen_centering_type) {
// Manual centering: take the user-supplied y.
130 y = gpu.state.screen_centering_y;
133 // correct if slightly miscentered
134 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
// Publish the new geometry and flag the change for the video-out layer.
142 gpu.screen.vres = vres;
143 gpu.state.dims_changed = 1;
144 //printf("yy %d %d -> %d, %d / %d\n",
145 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
// Decide whether the next frame should be skipped, based on the
// configured skip count and external frameskip advice.
// NOTE(review): lines are elided in this view; body shown is partial.
148 static noinline void decide_frameskip(void)
150 if (gpu.frameskip.active)
153 gpu.frameskip.cnt = 0;
154 gpu.frameskip.frame_ready = 1;
// Priority: external advice first, then the fixed 1-in-N setting.
157 if (!gpu.frameskip.active && *gpu.frameskip.advice)
158 gpu.frameskip.active = 1;
159 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
160 gpu.frameskip.active = 1;
162 gpu.frameskip.active = 0;
// A screen-clearing fill that was deferred while skipping must still be
// executed once we stop skipping, or stale pixels would remain.
164 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
166 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
167 gpu.frameskip.pending_fill[0] = 0;
// Given a GP0 0xE3 (draw-area top-left) word, decide if frameskip is
// allowed: skipping is unsafe when the game draws inside the currently
// displayed region, except under interlace (which always does that).
171 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
173 // no frameskip if it decides to draw to display area,
174 // but not for interlace since it'll most likely always do that
175 uint32_t x = cmd_e3 & 0x3ff;
176 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
// Unsigned-subtract trick: (x - src_x) >= w is true both when x is
// left of the display area (wraps huge) and when it is right of it.
177 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
178 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
179 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
180 return gpu.frameskip.allow;
// Forward declaration; defined below, used by the register/data handlers.
183 static void flush_cmd_buffer(void);
// Handle GP1 0x10 "get GPU info": latch the requested internal value
// into gpu.gp0 for the following GPUREAD.
// NOTE(review): lines are elided in this view; body shown is partial.
185 static noinline void get_gpu_info(uint32_t data)
// Buffered commands may still modify ex_regs — flush first.
187 if (unlikely(gpu.cmd_len > 0))
189 switch (data & 0x0f) {
// Texture window / draw area registers, 20-bit payload.
193 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
// Draw offset register, 22-bit payload.
196 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
// 1024x512 16bpp VRAM, doubled as an overdraw guard area.
207 // double, for overdraw guard
208 #define VRAM_SIZE (1024 * 512 * 2 * 2)
// Map the emulated VRAM through the host-provided mmap callback.
// Offsets the base by 2048 bytes (4096/2 uint16 units), presumably as
// an underflow guard — TODO confirm against the matching unmap below.
// NOTE(review): lines are elided in this view; body shown is partial.
210 static int map_vram(void)
212 gpu.vram = gpu.mmap(VRAM_SIZE);
213 if (gpu.vram != NULL) {
214 gpu.vram += 4096 / 2;
// Non-fatal: callers continue with vram == NULL.
218 fprintf(stderr, "could not map vram, expect crashes\n");
// NOTE(review): the enclosing function's signature is not visible in
// this view — presumably GPUinit; confirm against the full file.
// Initializes the renderer, zeroes plugin state, and points the
// frame/hcnt counters at a local dummy until callbacks are registered.
227 ret |= renderer_init();
229 memset(&gpu.state, 0, sizeof(gpu.state));
230 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
// Default to a harmless internal zero counter; GPUrearmedCallbacks
// replaces these with the emulator's real counters.
232 gpu.state.frame_count = &gpu.zero;
233 gpu.state.hcnt = &gpu.zero;
237 if (gpu.mmap != NULL) {
// Plugin shutdown: unmap VRAM (undoing the +4096/2 offset applied in
// map_vram before handing the pointer back to munmap).
// NOTE(review): lines are elided in this view; body shown is partial.
244 long GPUshutdown(void)
250 if (gpu.vram != NULL) {
251 gpu.vram -= 4096 / 2;
252 gpu.munmap(gpu.vram, VRAM_SIZE);
// GP1 control-register write. Dispatches on the command byte
// (bits 31..24): reset, display enable, DMA mode, display start/range,
// video mode, and the 0x1x info queries.
// NOTE(review): lines are elided in this view; body shown is partial.
259 void GPUwriteStatus(uint32_t data)
261 uint32_t cmd = data >> 24;
263 if (cmd < ARRAY_SIZE(gpu.regs)) {
// Skip redundant writes, except cmds 0,1 (reset/flush) and 5
// (display start, which affects frameskip bookkeeping below).
264 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
266 gpu.regs[cmd] = data;
269 gpu.state.fb_dirty = 1;
// Display disable: blank and force a redraw so the hud is cleared.
280 gpu.status |= PSX_GPU_STATUS_BLANKING;
281 gpu.state.dims_changed = 1; // for hud clearing
284 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction (2-bit field).
287 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
288 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// Display start address in VRAM (scanout origin).
291 gpu.screen.src_x = data & 0x3ff;
292 gpu.screen.src_y = (data >> 10) & 0x1ff;
293 renderer_notify_scanout_x_change(gpu.screen.src_x, gpu.screen.hres);
// A scanout change is treated as a "flip" for frameskip pacing.
294 if (gpu.frameskip.set) {
295 decide_frameskip_allow(gpu.ex_regs[3]);
296 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
298 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// Horizontal display range (12-bit clock values).
303 gpu.screen.x1 = data & 0xfff;
304 gpu.screen.x2 = (data >> 12) & 0xfff;
// Vertical display range (10-bit line values).
308 gpu.screen.y1 = data & 0x3ff;
309 gpu.screen.y2 = (data >> 10) & 0x3ff;
// Display mode: map GP1 payload bits into GPUSTAT bits 22..16.
313 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
316 renderer_notify_res_change();
// GP1 0x1x: GPU info queries.
319 if ((cmd & 0xf0) == 0x10)
// Optional platform hook for extra control-write processing.
324 #ifdef GPUwriteStatus_ext
325 GPUwriteStatus_ext(data);
// Number of additional parameter words for each GP0 command byte
// (total packet length = 1 + cmd_lengths[cmd]). Variable-length
// commands (poly-lines 0x48+, vram xfers 0xa0/0xc0) list their fixed
// prefix here and are special-cased by the parsers.
329 const unsigned char cmd_lengths[256] =
331 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
332 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
333 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
334 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
335 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
336 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
337 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
338 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
339 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
340 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
341 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
342 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
343 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
344 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
345 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
346 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
// Pointer to the 16bpp pixel at (x, y) in the 1024-wide VRAM.
349 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
// Copy l halfwords from src to dst, ORing the given mask-bit value
// (msb, either 0 or 0x8000) into every pixel as it is copied.
static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
{
  const uint16_t *end = src + l;

  while (src < end)
    *dst++ = *src++ | msb;
}
// Transfer one horizontal line of l pixels between VRAM at (x, y) and
// the caller's buffer: VRAM -> mem for reads, mem -> VRAM for writes,
// using the slower OR-copy only when the mask bit must be forced.
358 static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
359 int is_read, uint16_t msb)
361 uint16_t *vram = VRAM_MEM_XY(x, y);
362 if (unlikely(is_read))
363 memcpy(mem, vram, l * 2);
364 else if (unlikely(msb))
365 cpy_msb(vram, mem, l, msb);
367 memcpy(vram, mem, l * 2);
// Stream `count` 32-bit words into or out of the active VRAM transfer
// rectangle (gpu.dma), line by line, resuming mid-line via dma.offset.
// Returns the number of words actually consumed.
// NOTE(review): lines are elided in this view; body shown is partial.
370 static int do_vram_io(uint32_t *data, int count, int is_read)
372 int count_initial = count;
// ex_regs[6] bit 0 = "force mask bit" -> OR 0x8000 into written pixels.
373 uint16_t msb = gpu.ex_regs[6] << 15;
374 uint16_t *sdata = (uint16_t *)data;
375 int x = gpu.dma.x, y = gpu.dma.y;
376 int w = gpu.dma.w, h = gpu.dma.h;
377 int o = gpu.dma.offset;
379 count *= 2; // operate in 16bpp pixels
// Finish a partially-transferred line from the previous call first.
381 if (gpu.dma.offset) {
382 l = w - gpu.dma.offset;
386 do_vram_line(x + o, y, sdata, l, is_read, msb);
// Whole lines.
399 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
401 do_vram_line(x, y, sdata, w, is_read, msb);
// Trailing partial line; its offset is remembered for the next call.
407 do_vram_line(x, y, sdata, count, is_read, msb);
// Rectangle fully transferred.
413 finish_vram_transfer(is_read);
// Convert remaining pixels back to consumed 32-bit words.
418 return count_initial - count / 2;
// Begin a GP0 0xa0/0xc0 VRAM write/read: decode the position and size
// words into the gpu.dma rectangle and prime state for do_vram_io.
// NOTE(review): lines are elided in this view; body shown is partial.
421 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
424 log_anomaly("start_vram_transfer while old unfinished\n");
// Position wraps at 1024x512; size of 0 means the full 1024/512.
426 gpu.dma.x = pos_word & 0x3ff;
427 gpu.dma.y = (pos_word >> 16) & 0x1ff;
428 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
429 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
431 gpu.dma.is_read = is_read;
// Snapshot the full rectangle; dma itself is consumed incrementally.
432 gpu.dma_start = gpu.dma;
434 renderer_flush_queues();
// Reads: raise the "ready to send image" bit and pre-latch the first
// word for GPUREAD.
436 gpu.status |= PSX_GPU_STATUS_IMG;
437 // XXX: wrong for width 1
438 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
// Remember when vram was last read; used by the auto-interlace mode.
439 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
442 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
443 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
// Close out the current VRAM transfer: clear the IMG status bit after
// a read, or mark the written rectangle dirty for the renderer after
// a write. NOTE(review): lines are elided in this view.
446 static void finish_vram_transfer(int is_read)
449 gpu.status &= ~PSX_GPU_STATUS_IMG;
451 gpu.state.fb_dirty = 1;
452 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
453 gpu.dma_start.w, gpu.dma_start.h, 0);
// GP0 0x80 VRAM-to-VRAM rectangle copy with x/y wrap-around, honoring
// the force-mask-bit setting.
// NOTE(review): lines are elided in this view; body shown is partial.
457 static void do_vram_copy(const uint32_t *params)
459 const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
460 const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
461 const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
462 const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
// Size of 0 wraps to the full 1024/512, as on hardware.
463 uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
464 uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
465 uint16_t msb = gpu.ex_regs[6] << 15;
// Same-place copy with no mask forcing is a no-op.
469 if (sx == dx && sy == dy && msb == 0)
472 renderer_flush_queues();
// Slow path: overlapping ranges, x wrap-around, or mask forcing —
// stage each chunk through lbuf so src is fully read before dst is
// written.
474 if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
476 for (y = 0; y < h; y++)
478 const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
479 uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
480 for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
482 uint32_t x1, w1 = w - x;
483 if (w1 > ARRAY_SIZE(lbuf))
484 w1 = ARRAY_SIZE(lbuf);
485 for (x1 = 0; x1 < w1; x1++)
486 lbuf[x1] = src[(sx + x + x1) & 0x3ff];
487 for (x1 = 0; x1 < w1; x1++)
488 dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
// Fast path: straight per-line memcpy (only y needs wrapping).
494 uint32_t sy1 = sy, dy1 = dy;
495 for (y = 0; y < h; y++, sy1++, dy1++)
496 memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
// Invalidate renderer caches over the destination rectangle.
499 renderer_update_caches(dx, dy, w, h, 0);
// Frameskip variant of the command-list walker: parses the stream
// without drawing, but still tracks the 0xE* state commands, texture
// page from textured quads, and defers screen fills. Stops and falls
// back to real drawing when skipping becomes disallowed.
// NOTE(review): lines are elided in this view; body shown is partial.
502 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
504 int cmd = 0, pos = 0, len, dummy, v;
507 gpu.frameskip.pending_fill[0] = 0;
509 while (pos < count && skip) {
510 uint32_t *list = data + pos;
511 cmd = LE32TOH(list[0]) >> 24;
512 len = 1 + cmd_lengths[cmd];
// Fill command: large fills are executed anyway (likely a screen
// clear); small ones are remembered in pending_fill for later.
516 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
517 // clearing something large, don't skip
518 do_cmd_list(list, 3, &dummy);
520 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// Textured polygon: keep the texture-page bits of ex_regs[1] current
// even while skipping, so later state is consistent.
526 gpu.ex_regs[1] &= ~0x1ff;
527 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// Poly-line: scan to the 0x5xxx5xxx terminator word.
530 for (v = 3; pos + v < count; v++)
532 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// Shaded poly-line: terminator appears on every other word.
538 for (v = 4; pos + v < count; v += 2)
540 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// 0xE3 (draw area) can re-enable/disable skipping mid-list.
547 skip = decide_frameskip_allow(LE32TOH(list[0]));
548 if ((cmd & 0xf8) == 0xe0)
549 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
553 if (pos + len > count) {
555 break; // incomplete cmd
// 0x80..0xdf (vram copy/transfer) must not be skipped - bail out.
557 if (0x80 <= cmd && cmd <= 0xdf)
// Push the tracked 0xE* state to the renderer.
563 renderer_sync_ecmds(gpu.ex_regs);
// Top-level GP0 command processor: routes words to the active VRAM
// transfer, decodes vram read/write/copy packets, and hands everything
// else to do_cmd_list (or do_cmd_list_skip while frameskipping).
// Returns the number of words NOT consumed (incomplete trailing cmd).
// NOTE(review): lines are elided in this view; body shown is partial.
568 static noinline int do_cmd_buffer(uint32_t *data, int count)
571 uint32_t old_e3 = gpu.ex_regs[3];
575 for (pos = 0; pos < count; )
// An active VRAM write consumes raw pixel data first.
577 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
579 pos += do_vram_io(data + pos, count - pos, 0);
584 cmd = LE32TOH(data[pos]) >> 24;
// 0xa0/0xc0: vram write/read setup needs pos+size words present.
585 if (0xa0 <= cmd && cmd <= 0xdf) {
586 if (unlikely((pos+2) >= count)) {
587 // incomplete vram write/read cmd, can't consume yet
592 // consume vram write/read cmd
593 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
// 0x80: vram-to-vram copy takes 3 parameter words.
597 else if ((cmd & 0xe0) == 0x80) {
598 if (unlikely((pos+3) >= count)) {
599 cmd = -1; // incomplete cmd, can't consume yet
602 do_vram_copy(data + pos + 1);
608 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
609 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
610 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
612 pos += do_cmd_list(data + pos, count - pos, &cmd);
// Refresh the GPUSTAT bits mirrored from ex_regs (texpage, mask).
621 gpu.status &= ~0x1fff;
622 gpu.status |= gpu.ex_regs[1] & 0x7ff;
623 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
625 gpu.state.fb_dirty |= vram_dirty;
// Re-evaluate frameskip if the draw area (0xE3) changed.
627 if (old_e3 != gpu.ex_regs[3])
628 decide_frameskip_allow(gpu.ex_regs[3]);
// Process buffered command words; any incomplete trailing command is
// moved to the front of the buffer to await more data.
633 static void flush_cmd_buffer(void)
635 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
637 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
// Block DMA write to GP0: flush the deferred buffer first, then run
// the incoming words directly; leftovers are an anomaly (discarded).
// NOTE(review): lines are elided in this view; body shown is partial.
641 void GPUwriteDataMem(uint32_t *mem, int count)
645 log_io("gpu_dma_write %p %d\n", mem, count);
647 if (unlikely(gpu.cmd_len > 0))
650 left = do_cmd_buffer(mem, count);
652 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
// Single-word GP0 write: buffered (little-endian) and flushed when the
// buffer fills up.
655 void GPUwriteData(uint32_t data)
657 log_io("gpu_write %08x\n", data);
658 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
659 if (gpu.cmd_len >= CMD_BUFFER_LEN)
// Walk a linked-list DMA chain in emulated RAM: each node is a header
// word (len:8 | next_addr:24) followed by `len` GP0 words. Detects
// infinite loops by temporarily tagging visited headers with bit 23
// (invalid on real hardware). Returns accumulated cpu cycles.
// NOTE(review): lines are elided in this view; body shown is partial.
663 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
665 uint32_t addr, *list, ld_addr = 0;
666 int len, left, count;
669 preload(rambase + (start_addr & 0x1fffff) / 4);
671 if (unlikely(gpu.cmd_len > 0))
674 log_io("gpu_dma_chain\n");
// bit 23 set terminates the walk (end marker / loop marker).
675 addr = start_addr & 0xffffff;
676 for (count = 0; (addr & 0x800000) == 0; count++)
678 list = rambase + (addr & 0x1fffff) / 4;
679 len = LE32TOH(list[0]) >> 24;
680 addr = LE32TOH(list[0]) & 0xffffff;
// Prefetch the next node while processing this one.
681 preload(rambase + (addr & 0x1fffff) / 4);
685 cpu_cycles += 5 + len;
687 log_io(".chain %08lx #%d+%d\n",
688 (long)(list - rambase) * 4, len, gpu.cmd_len);
// If leftovers are pending, append to the buffer instead of running
// directly, so command order is preserved.
689 if (unlikely(gpu.cmd_len > 0)) {
690 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
691 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
694 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
701 left = do_cmd_buffer(list + 1, len);
703 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
705 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
710 *progress_addr = addr;
// Loop detection: after LD_THRESHOLD nodes, start marking headers.
713 #define LD_THRESHOLD (8*1024)
714 if (count >= LD_THRESHOLD) {
715 if (count == LD_THRESHOLD) {
720 // loop detection marker
721 // (bit23 set causes DMA error on real machine, so
722 // unlikely to be ever set by the game)
723 list[0] |= HTOLE32(0x800000);
// After the walk: strip the markers we planted so RAM is unmodified.
728 // remove loop detection markers
729 count -= LD_THRESHOLD + 2;
730 addr = ld_addr & 0x1fffff;
731 while (count-- > 0) {
732 list = rambase + addr / 4;
733 addr = LE32TOH(list[0]) & 0x1fffff;
734 list[0] &= HTOLE32(~0x800000);
// Record this list for the frame-timing heuristics.
738 gpu.state.last_list.frame = *gpu.state.frame_count;
739 gpu.state.last_list.hcnt = *gpu.state.hcnt;
740 gpu.state.last_list.cycles = cpu_cycles;
741 gpu.state.last_list.addr = start_addr;
746 void GPUreadDataMem(uint32_t *mem, int count)
748 log_io("gpu_dma_read %p %d\n", mem, count);
750 if (unlikely(gpu.cmd_len > 0))
754 do_vram_io(mem, count, 1);
// Single-word GPUREAD: returns the next VRAM-transfer word, or the
// value latched by get_gpu_info when no transfer is active.
// NOTE(review): lines are elided in this view; body shown is partial.
757 uint32_t GPUreadData(void)
761 if (unlikely(gpu.cmd_len > 0))
767 do_vram_io(&ret, 1, 1);
771 log_io("gpu_read %08x\n", ret);
// GPUSTAT read: pending commands are flushed first so status bits
// mirrored from ex_regs are up to date.
775 uint32_t GPUreadStatus(void)
779 if (unlikely(gpu.cmd_len > 0))
783 log_io("gpu_read_status %08x\n", ret);
// Savestate layout (struct header is outside this view — presumably
// struct GPUFreeze; fields must match other plugins' freeze format).
789 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
790 uint32_t ulStatus; // current gpu status
791 uint32_t ulControl[256]; // latest control register values
792 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
// Save (type 1) or restore (type 0) plugin state. On restore, replays
// control registers 8..1 through GPUwriteStatus so derived state
// (screen geometry, DMA mode) is rebuilt; ex_regs are mirrored at
// ulControl[0xe0..]. NOTE(review): lines are elided in this view.
795 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
// Save path: only the first 512KB-row VRAM half (1024x512x2 bytes).
803 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2)
804 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
805 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
806 freeze->ulStatus = gpu.status;
// Restore path.
809 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
810 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
811 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
812 gpu.status = freeze->ulStatus;
814 for (i = 8; i > 0; i--) {
// Toggle bit 0 so GPUwriteStatus's redundant-write check never
// short-circuits the replayed register write.
815 gpu.regs[i] ^= 1; // avoid reg change detection
816 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
817 renderer_sync_ecmds(gpu.ex_regs);
819 renderer_update_caches(0, 0, 1024, 512, 1);
// Per-vblank display update: handles blanking, skips the vout flip
// when nothing changed, applies frameskip pacing, and refreshes
// renderer caches when the enhancement mode toggles on.
// NOTE(review): lines are elided in this view; body shown is partial.
826 void GPUupdateLace(void)
830 renderer_flush_queues();
832 #ifndef RAW_FB_DISPLAY
// Display blanked: show the blank frame once, then do nothing.
833 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
834 if (!gpu.state.blanked) {
836 gpu.state.blanked = 1;
837 gpu.state.fb_dirty = 1;
// Nothing drawn since the last flip — skip the vout update.
842 if (!gpu.state.fb_dirty)
// Frameskip pacing: don't stay skipped for more than ~9 frames
// without a flip, to avoid a frozen picture.
846 if (gpu.frameskip.set) {
847 if (!gpu.frameskip.frame_ready) {
848 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
850 gpu.frameskip.active = 0;
852 gpu.frameskip.frame_ready = 0;
// Enhancement (hi-res render) just turned on: caches are stale.
856 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
857 renderer_update_caches(0, 0, 1024, 512, 1);
858 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
859 gpu.state.fb_dirty = 0;
860 gpu.state.blanked = 0;
// VBlank notification: decides whether interlaced rendering should be
// used and passes the current field (lcf) to the renderer.
// NOTE(review): lines are elided in this view; body shown is partial.
863 void GPUvBlank(int is_vblank, int lcf)
865 int interlace = gpu.state.allow_interlace
866 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
867 && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
868 // interlace doesn't look nice on progressive displays,
869 // so we have this "auto" mode here for games that don't read vram
// allow_interlace == 2 means "auto": only interlace if the game read
// VRAM recently (within the last frame).
870 if (gpu.state.allow_interlace == 2
871 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// Re-sync the renderer when interlace is on or just toggled.
875 if (interlace || interlace != gpu.state.old_interlace) {
876 gpu.state.old_interlace = interlace;
880 renderer_flush_queues();
881 renderer_set_interlace(interlace, !lcf);
// Report current screen position/resolution to the frontend.
// NOTE(review): *base_hres is assigned gpu.screen.vres here — looks
// intentional in this codebase (halved below for double-height), but
// verify against the full file before relying on it.
885 void GPUgetScreenInfo(int *y, int *base_hres)
888 *base_hres = gpu.screen.vres;
889 if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
893 #include "../../frontend/plugin_lib.h"
// Install frontend callbacks and runtime configuration: frameskip
// settings, frame/hcnt counter pointers, interlace/enhancement flags,
// centering options, and the VRAM mmap/munmap hooks.
// NOTE(review): lines are elided in this view; body shown is partial.
895 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
897 gpu.frameskip.set = cbs->frameskip;
898 gpu.frameskip.advice = &cbs->fskip_advice;
899 gpu.frameskip.active = 0;
900 gpu.frameskip.frame_ready = 1;
// Replace the dummy counters installed at init with the emu's own.
901 gpu.state.hcnt = cbs->gpu_hcnt;
902 gpu.state.frame_count = cbs->gpu_frame_count;
903 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
904 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
// Only recompute geometry when a centering option actually changed.
905 if (gpu.state.screen_centering_type != cbs->screen_centering_type
906 || gpu.state.screen_centering_x != cbs->screen_centering_x
907 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
908 gpu.state.screen_centering_type = cbs->screen_centering_type;
909 gpu.state.screen_centering_x = cbs->screen_centering_x;
910 gpu.state.screen_centering_y = cbs->screen_centering_y;
915 gpu.mmap = cbs->mmap;
916 gpu.munmap = cbs->munmap;
// VRAM may not be mapped yet on the first callback invocation.
919 if (gpu.vram == NULL)
// Let the frontend display raw VRAM (debug view) if supported.
922 if (cbs->pl_vout_set_raw_vram)
923 cbs->pl_vout_set_raw_vram(gpu.vram);
// Forward config to the renderer backend and video-out layer.
924 renderer_set_config(cbs);
925 vout_set_config(cbs);
928 // vim:shiftwidth=2:expandtab