2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
14 #include <stdlib.h> /* for calloc */
18 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
20 #define unlikely(x) __builtin_expect((x), 0)
21 #define preload __builtin_prefetch
22 #define noinline __attribute__((noinline))
29 //#define log_io gpu_log
34 static noinline int do_cmd_buffer(uint32_t *data, int count);
35 static void finish_vram_transfer(int is_read);
37 static noinline void do_cmd_reset(void)
// Flush any buffered GP0 command words and finalize a pending VRAM
// transfer so command/DMA state is clean before a reset takes effect.
41 if (unlikely(gpu.cmd_len > 0))
42 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
// NOTE(review): dma_start appears to track the transfer being aborted
// here — confirm it is the active one when dma.h > 0.
45 if (unlikely(gpu.dma.h > 0))
46 finish_vram_transfer(gpu.dma_start.is_read);
50 static noinline void do_reset(void)
// Full GPU reset (GP1(0x00)): clear control registers, reinitialize the
// e1..e7 shadow registers, and restore power-on status/screen defaults.
56 memset(gpu.regs, 0, sizeof(gpu.regs));
// Seed each ex_regs slot with its own command byte: (0xe0 + i) << 24.
57 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
58 gpu.ex_regs[i] = (0xe0 + i) << 24;
// Power-on status value.
59 gpu.status = 0x14802000;
// Default visible area: 256x240 at origin.
62 gpu.screen.hres = gpu.screen.w = 256;
63 gpu.screen.vres = gpu.screen.h = 240;
64 gpu.screen.x = gpu.screen.y = 0;
// Let the renderer pick up the reset ex_regs and the new resolution.
65 renderer_sync_ecmds(gpu.ex_regs);
66 renderer_notify_res_change();
69 static noinline void update_width(void)
// Recompute horizontal display resolution and x offset from the GPU
// status mode bits and the programmed display range (x1..x2).
71 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
72 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
// Status bits 16-18 select horizontal resolution / dot clock divider.
73 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
74 int hres = hres_all[(gpu.status >> 16) & 7];
75 int pal = gpu.status & PSX_GPU_STATUS_PAL;
76 int sw = gpu.screen.x2 - gpu.screen.x1;
79 /* nothing displayed? */;
// Convert x1 (in GPU clock ticks) to a pixel offset from the nominal
// screen start for the current mode.
81 int s = pal ? 656 : 608; // or 600? pal is just a guess
82 x = (gpu.screen.x1 - s) / hdiv;
83 x = (x + 1) & ~1; // blitter limitation
85 sw = (sw + 2) & ~3; // according to nocash
86 switch (gpu.state.screen_centering_type) {
// Manual centering: use the user-provided x offset.
90 x = gpu.state.screen_centering_x;
93 // correct if slightly miscentered
94 x_auto = (hres - sw) / 2 & ~3;
95 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
100 // .x range check is done in vout_update()
102 // reduce the unpleasant right border that a few games have
103 if (gpu.state.screen_centering_type == 0
104 && x <= 4 && hres - (x + sw) >= 4)
108 gpu.screen.hres = hres;
109 gpu.state.dims_changed = 1;
110 //printf("xx %d %d -> %2d, %d / %d\n",
111 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
114 static noinline void update_height(void)
// Recompute vertical display resolution and y offset from the display
// range (y1..y2), PAL/NTSC and interlace (double height) settings.
116 int pal = gpu.status & PSX_GPU_STATUS_PAL;
117 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
118 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
119 int sh = gpu.screen.y2 - gpu.screen.y1;
// Tall PAL modes use a 256-line base instead of 240.
123 if (pal && (sh > 240 || gpu.screen.vres == 256))
// Interlaced double-height mode doubles everything, tolerance included.
126 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
128 /* nothing displayed? */;
130 switch (gpu.state.screen_centering_type) {
// Manual centering: use the user-provided y offset.
134 y = gpu.state.screen_centering_y;
137 // correct if slightly miscentered
138 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
146 gpu.screen.vres = vres;
147 gpu.state.dims_changed = 1;
148 //printf("yy %d %d -> %d, %d / %d\n",
149 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
152 static noinline void decide_frameskip(void)
// Decide whether the next frame is skipped, based on the forced flag,
// frontend advice, or the configured skip interval.
154 *gpu.frameskip.dirty = 1;
// Count consecutive skipped frames; a rendered frame resets the count.
156 if (gpu.frameskip.active)
159 gpu.frameskip.cnt = 0;
160 gpu.frameskip.frame_ready = 1;
163 if (*gpu.frameskip.force)
164 gpu.frameskip.active = 1;
165 else if (!gpu.frameskip.active && *gpu.frameskip.advice)
166 gpu.frameskip.active = 1;
167 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
168 gpu.frameskip.active = 1;
170 gpu.frameskip.active = 0;
// A fill command deferred while skipping must still run once skipping
// stops, or stale VRAM contents would be displayed.
172 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
174 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
175 gpu.frameskip.pending_fill[0] = 0;
179 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
181 // no frameskip if it decides to draw to display area,
182 // but not for interlace since it'll most likely always do that
183 uint32_t x = cmd_e3 & 0x3ff;
184 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
185 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
186 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
187 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
188 return gpu.frameskip.allow;
191 static void flush_cmd_buffer(void);
193 static noinline void get_gpu_info(uint32_t data)
195 if (unlikely(gpu.cmd_len > 0))
197 switch (data & 0x0f) {
201 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
204 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
215 // double, for overdraw guard
216 #define VRAM_SIZE ((1024 * 512 * 2 * 2) + 4096)
218 // Minimum 16-byte VRAM alignment needed by gpu_unai's pixel-skipping
219 // renderer/downscaler that it uses in high-res modes:
221 // On GCW platform (MIPS), align to 8192 bytes (1 TLB entry) to reduce # of
222 // fills. (Will change this value if it ever gets large page support)
223 #define VRAM_ALIGN 8192
225 #define VRAM_ALIGN 16
228 // vram ptr received from mmap/malloc/alloc (will deallocate using this)
229 static uint16_t *vram_ptr_orig = NULL;
231 #ifdef GPULIB_USE_MMAP
232 static int map_vram(void)
// Obtain the VRAM buffer via the frontend-provided mmap callback, with
// a 4kb guard region in front and VRAM_ALIGN alignment for renderers.
234 gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE + (VRAM_ALIGN-1));
235 if (gpu.vram != NULL) {
236 // 4kb guard in front
237 gpu.vram += (4096 / 2);
// Round up to the next VRAM_ALIGN boundary; vram_ptr_orig keeps the
// unadjusted pointer for the later munmap.
239 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
243 fprintf(stderr, "could not map vram, expect crashes\n");
248 static int map_vram(void)
// Fallback map_vram() (no mmap support): plain zeroed heap buffer with
// the same guard/alignment layout as the mmap variant.
250 gpu.vram = vram_ptr_orig = (uint16_t*)calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1);
251 if (gpu.vram != NULL) {
252 // 4kb guard in front
253 gpu.vram += (4096 / 2);
// Round up to the next VRAM_ALIGN boundary.
255 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
258 fprintf(stderr, "could not allocate vram, expect crashes\n");
263 static int allocate_vram(void)
// Heap-allocate VRAM with guard and alignment.
// NOTE(review): this duplicates the calloc map_vram() variant above —
// consider sharing one implementation.
265 gpu.vram = vram_ptr_orig = (uint16_t*)calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1);
266 if (gpu.vram != NULL) {
267 // 4kb guard in front
268 gpu.vram += (4096 / 2);
// Round up to the next VRAM_ALIGN boundary.
270 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
273 fprintf(stderr, "could not allocate vram, expect crashes\n");
281 #ifndef GPULIB_USE_MMAP
// Without mmap support, VRAM can be allocated eagerly at init time.
282 if (gpu.vram == NULL) {
283 if (allocate_vram() != 0) {
284 printf("ERROR: could not allocate VRAM, exiting..\n");
290 //extern uint32_t hSyncCount; // in psxcounters.cpp
291 //extern uint32_t frame_counter; // in psxcounters.cpp
292 //gpu.state.hcnt = &hSyncCount;
293 //gpu.state.frame_count = &frame_counter;
297 ret |= renderer_init();
299 memset(&gpu.state, 0, sizeof(gpu.state));
300 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
// Point the counters at a dummy zero until the frontend supplies real
// ones via GPUrearmedCallbacks().
302 gpu.state.frame_count = &gpu.zero;
303 gpu.state.hcnt = &gpu.zero;
307 /*if (gpu.mmap != NULL) {
314 long GPUshutdown(void)
// Release the VRAM buffer with the deallocator matching how it was
// obtained (munmap vs free) and clear the pointers.
321 if (vram_ptr_orig != NULL) {
322 #ifdef GPULIB_USE_MMAP
// NOTE(review): the mapping was VRAM_SIZE + (VRAM_ALIGN-1) bytes but
// only VRAM_SIZE is passed to munmap — confirm the callback tolerates
// the size mismatch.
323 gpu.munmap(vram_ptr_orig, VRAM_SIZE);
328 vram_ptr_orig = gpu.vram = NULL;
333 void GPUwriteStatus(uint32_t data)
// Handle a GP1 control-port write (display control commands).
335 uint32_t cmd = data >> 24;
// Skip writes that don't change the latched value, except commands
// 0, 1 and 5 which have side effects every time.
337 if (cmd < ARRAY_SIZE(gpu.regs)) {
338 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
340 gpu.regs[cmd] = data;
343 gpu.state.fb_dirty = 1;
// Display enable/disable (blanking).
354 gpu.status |= PSX_GPU_STATUS_BLANKING;
355 gpu.state.dims_changed = 1; // for hud clearing
358 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction select.
361 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
362 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// Display start address in VRAM (scanout origin).
365 gpu.screen.src_x = data & 0x3ff;
366 gpu.screen.src_y = (data >> 10) & 0x1ff;
367 renderer_notify_scanout_x_change(gpu.screen.src_x, gpu.screen.hres);
368 if (gpu.frameskip.set) {
369 decide_frameskip_allow(gpu.ex_regs[3]);
370 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
372 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// Horizontal display range.
377 gpu.screen.x1 = data & 0xfff;
378 gpu.screen.x2 = (data >> 12) & 0xfff;
// Vertical display range.
382 gpu.screen.y1 = data & 0x3ff;
383 gpu.screen.y2 = (data >> 10) & 0x3ff;
// Display mode: bits 0-5 go to status bits 17-22, bit 6 to bit 16.
387 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
390 renderer_notify_res_change();
// GP1(0x10..0x1f): GPU info request.
393 if ((cmd & 0xf0) == 0x10)
398 #ifdef GPUwriteStatus_ext
399 GPUwriteStatus_ext(data);
403 const unsigned char cmd_lengths[256] =
// Number of additional 32-bit parameter words following each GP0
// command word (the command word itself is not counted; variable-length
// commands such as line strips get special handling in the parsers).
405 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
406 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
407 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
408 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
409 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
410 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
411 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
412 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
413 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
414 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
415 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
416 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
417 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
418 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
419 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
420 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
423 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  // Move one horizontal run of l 16-bit pixels between VRAM at (x, y)
  // and mem: VRAM -> mem when is_read, mem -> VRAM otherwise.
  uint16_t *row = VRAM_MEM_XY(x, y);
  uint16_t *dst = is_read ? mem : row;
  uint16_t *src = is_read ? row : mem;

  memcpy(dst, src, l * 2);
}
434 static int do_vram_io(uint32_t *data, int count, int is_read)
// Transfer pixel data for the active VRAM read/write, resuming from the
// position saved in gpu.dma. Returns the number of 32-bit words used.
436 int count_initial = count;
437 uint16_t *sdata = (uint16_t *)data;
438 int x = gpu.dma.x, y = gpu.dma.y;
439 int w = gpu.dma.w, h = gpu.dma.h;
440 int o = gpu.dma.offset;
442 count *= 2; // operate in 16bpp pixels
// First complete a line that was left partially transferred.
446 if (gpu.dma.offset) {
447 l = w - gpu.dma.offset;
451 do_vram_line(x + o, y, sdata, l, is_read);
// Then whole lines while enough data remains.
464 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
466 do_vram_line(x, y, sdata, w, is_read);
// Leftover partial line; the offset is remembered for the next call.
472 do_vram_line(x, y, sdata, count, is_read);
478 finish_vram_transfer(is_read);
// count was doubled to pixels above; convert back to words.
483 return count_initial - count / 2;
486 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
// Begin a GP0(0xa0/0xc0) VRAM write/read: decode position and size and
// initialize the gpu.dma state consumed by do_vram_io().
489 log_anomaly("start_vram_transfer while old unfinished\n")
491 gpu.dma.x = pos_word & 0x3ff;
492 gpu.dma.y = (pos_word >> 16) & 0x1ff;
// Size 0 decodes to the maximum (1024 / 512) via the -1/+1 wrap.
493 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
494 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
496 gpu.dma.is_read = is_read;
497 gpu.dma_start = gpu.dma;
499 renderer_flush_queues();
// For reads: raise the transfer-busy bit, pre-latch the first word and
// record the frame for the vram-read heuristic in GPUvBlank().
501 gpu.status |= PSX_GPU_STATUS_IMG;
502 // XXX: wrong for width 1
503 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
504 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
507 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
508 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
511 static void finish_vram_transfer(int is_read)
// Complete the active VRAM transfer: reads clear the busy flag, writes
// invalidate renderer caches over the touched rectangle.
514 gpu.status &= ~PSX_GPU_STATUS_IMG;
516 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
517 gpu.dma_start.w, gpu.dma_start.h, 0);
520 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
// Parse GP0 commands while frameskip is active: execute only what must
// not be lost (large fills, shadow register updates), defer small
// fills, and skip the rest. Returns words consumed.
522 int cmd = 0, pos = 0, len, dummy, v;
525 gpu.frameskip.pending_fill[0] = 0;
527 while (pos < count && skip) {
528 uint32_t *list = data + pos;
529 cmd = LE32TOH(list[0]) >> 24;
530 len = 1 + cmd_lengths[cmd];
// Fill rectangle: compare its size against the displayed area.
534 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
535 // clearing something large, don't skip
536 do_cmd_list(list, 3, &dummy);
// Small fill: defer until skipping ends (see decide_frameskip()).
538 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// Keep the e1 texpage bits in sync from textured primitives.
544 gpu.ex_regs[1] &= ~0x1ff;
545 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// Line strip: scan for the 0x5xxx5xxx terminator to find its length.
548 for (v = 3; pos + v < count; v++)
550 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// Shaded line strip: color+vertex pairs, terminator on every 2nd word.
556 for (v = 4; pos + v < count; v += 2)
558 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// e3 (draw area) changes can flip whether skipping stays allowed.
565 skip = decide_frameskip_allow(LE32TOH(list[0]));
566 if ((cmd & 0xf8) == 0xe0)
567 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
571 if (pos + len > count) {
573 break; // incomplete cmd
575 if (0xa0 <= cmd && cmd <= 0xdf)
581 renderer_sync_ecmds(gpu.ex_regs);
586 static noinline int do_cmd_buffer(uint32_t *data, int count)
// Main GP0 word processor: feeds active VRAM transfers, starts new
// ones, and dispatches drawing lists (skipping variant when frameskip
// is on). Returns the number of words left unprocessed.
589 uint32_t old_e3 = gpu.ex_regs[3];
593 for (pos = 0; pos < count; )
// An active VRAM write consumes incoming data directly.
595 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
597 pos += do_vram_io(data + pos, count - pos, 0);
602 cmd = LE32TOH(data[pos]) >> 24;
// 0xa0..0xdf start a VRAM write/read; position and size words must
// both be present before the command can be consumed.
603 if (0xa0 <= cmd && cmd <= 0xdf) {
604 if (unlikely((pos+2) >= count)) {
605 // incomplete vram write/read cmd, can't consume yet
610 // consume vram write/read cmd
611 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
616 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
617 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
618 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
620 pos += do_cmd_list(data + pos, count - pos, &cmd);
// Mirror e1/e6 shadow-register bits into the status register.
629 gpu.status &= ~0x1fff;
630 gpu.status |= gpu.ex_regs[1] & 0x7ff;
631 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
633 gpu.state.fb_dirty |= vram_dirty;
// A draw-area (e3) change may flip the frameskip decision.
635 if (old_e3 != gpu.ex_regs[3])
636 decide_frameskip_allow(gpu.ex_regs[3]);
641 static void flush_cmd_buffer(void)
// Process buffered GP0 words; any unconsumed tail (e.g. an incomplete
// command) is moved to the front of the buffer for next time.
643 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
645 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
649 void GPUwriteDataMem(uint32_t *mem, int count)
// CPU -> GPU block write: run count words through the command
// processor after flushing anything already buffered.
653 log_io("gpu_dma_write %p %d\n", mem, count);
655 if (unlikely(gpu.cmd_len > 0))
658 left = do_cmd_buffer(mem, count);
// Words left over from this entry point cannot be buffered; they are
// dropped with a log message.
660 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
663 void GPUwriteData(uint32_t data)
// Single-word GP0 write: accumulate into cmd_buffer (stored in GPU
// little-endian order) and flush when the buffer fills up.
665 log_io("gpu_write %08x\n", data);
666 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
667 if (gpu.cmd_len >= CMD_BUFFER_LEN)
671 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
// Walk a linked list of GP0 packets in emulated RAM (DMA channel 2
// chain mode) and execute them. Returns an estimated cycle cost.
673 uint32_t addr, *list, ld_addr = 0;
674 int len, left, count;
677 preload(rambase + (start_addr & 0x1fffff) / 4);
679 if (unlikely(gpu.cmd_len > 0))
682 log_io("gpu_dma_chain\n");
// Bit 23 set in the link address marks the list terminator.
683 addr = start_addr & 0xffffff;
684 for (count = 0; (addr & 0x800000) == 0; count++)
686 list = rambase + (addr & 0x1fffff) / 4;
// Header word: payload length in the top byte, next address below.
687 len = LE32TOH(list[0]) >> 24;
688 addr = LE32TOH(list[0]) & 0xffffff;
689 preload(rambase + (addr & 0x1fffff) / 4);
693 cpu_cycles += 5 + len;
695 log_io(".chain %08lx #%d+%d\n",
696 (long)(list - rambase) * 4, len, gpu.cmd_len);
// If earlier data is still buffered, append instead of executing out
// of order; overflow means garbage input and terminates the walk.
697 if (unlikely(gpu.cmd_len > 0)) {
698 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
699 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
702 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
709 left = do_cmd_buffer(list + 1, len);
// Any unprocessed tail is saved in cmd_buffer for later.
711 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
713 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
718 *progress_addr = addr;
// Self-linking lists would hang this loop: after LD_THRESHOLD nodes,
// mark visited entries with bit 23 so a revisit is detected, then
// strip the markers again below.
721 #define LD_THRESHOLD (8*1024)
722 if (count >= LD_THRESHOLD) {
723 if (count == LD_THRESHOLD) {
728 // loop detection marker
729 // (bit23 set causes DMA error on real machine, so
730 // unlikely to be ever set by the game)
731 list[0] |= HTOLE32(0x800000);
736 // remove loop detection markers
737 count -= LD_THRESHOLD + 2;
738 addr = ld_addr & 0x1fffff;
739 while (count-- > 0) {
740 list = rambase + addr / 4;
741 addr = LE32TOH(list[0]) & 0x1fffff;
742 list[0] &= HTOLE32(~0x800000);
// Record when/where the last list ran (used by frontend heuristics).
746 gpu.state.last_list.frame = *gpu.state.frame_count;
747 gpu.state.last_list.hcnt = *gpu.state.hcnt;
748 gpu.state.last_list.cycles = cpu_cycles;
749 gpu.state.last_list.addr = start_addr;
754 void GPUreadDataMem(uint32_t *mem, int count)
// VRAM -> CPU block read: service the active read transfer into mem.
756 log_io("gpu_dma_read %p %d\n", mem, count);
758 if (unlikely(gpu.cmd_len > 0))
762 do_vram_io(mem, count, 1);
765 uint32_t GPUreadData(void)
// Read one word from the GPUREAD port: either the next word of an
// active VRAM read, or the value latched by get_gpu_info().
769 if (unlikely(gpu.cmd_len > 0))
775 do_vram_io(&ret, 1, 1);
779 log_io("gpu_read %08x\n", ret);
783 uint32_t GPUreadStatus(void)
// Return GPUSTAT; buffered commands are flushed first so the status
// bits (busy/IMG/ex-reg mirrors) are current.
787 if (unlikely(gpu.cmd_len > 0))
791 log_io("gpu_read_status %08x\n", ret);
797 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
798 uint32_t ulStatus; // current gpu status
799 uint32_t ulControl[256]; // latest control register values
800 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
803 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
// Save or restore the complete GPU state for savestates.
// Save path: copy out VRAM, control registers and status.
813 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
814 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
// ex_regs are stashed at offset 0xe0 of ulControl, matching their
// GP0 command numbers (0xe0..).
815 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
816 freeze->ulStatus = gpu.status;
// Restore path: copy everything back, then replay control writes.
820 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
821 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
822 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
823 gpu.status = freeze->ulStatus;
// Replay regs 8..1; toggling a bit first defeats GPUwriteStatus()'s
// no-change filter so every write actually takes effect.
825 for (i = 8; i > 0; i--) {
826 gpu.regs[i] ^= 1; // avoid reg change detection
827 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
829 renderer_sync_ecmds(gpu.ex_regs);
830 renderer_update_caches(0, 0, 1024, 512, 1);
837 void GPUupdateLace(void)
// Per-vblank display update: handle blanking, frameskip bookkeeping,
// and hand a dirty framebuffer to the video-out layer.
841 renderer_flush_queues();
843 #ifndef RAW_FB_DISPLAY
// While blanked, present a blank screen once and then stop updating.
844 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
845 if (!gpu.state.blanked) {
847 gpu.state.blanked = 1;
848 gpu.state.fb_dirty = 1;
853 renderer_notify_update_lace(0);
855 if (!gpu.state.fb_dirty)
// Frameskip accounting: if no frame was produced, keep skipping only
// for a few frames past the last flip to avoid a stuck display.
859 if (gpu.frameskip.set) {
860 if (!gpu.frameskip.frame_ready) {
861 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
863 gpu.frameskip.active = 0;
865 gpu.frameskip.frame_ready = 0;
// Turning the enhancement renderer on invalidates its caches.
869 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
870 renderer_update_caches(0, 0, 1024, 512, 1);
871 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
872 gpu.state.fb_dirty = 0;
873 gpu.state.blanked = 0;
874 renderer_notify_update_lace(1);
877 void GPUvBlank(int is_vblank, int lcf)
// Per-field notification: decide whether interlaced rendering is in
// effect and tell the renderer which field (lcf) comes next.
879 int interlace = gpu.state.allow_interlace
880 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
881 && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
882 // interlace doesn't look nice on progressive displays,
883 // so we have this "auto" mode here for games that don't read vram
884 if (gpu.state.allow_interlace == 2
885 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// Flush on any interlace state change so queued primitives are drawn
// with the settings that were current when they were issued.
889 if (interlace || interlace != gpu.state.old_interlace) {
890 gpu.state.old_interlace = interlace;
894 renderer_flush_queues();
895 renderer_set_interlace(interlace, !lcf);
899 #include "../../frontend/plugin_lib.h"
901 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
// Accept updated frontend callbacks and settings (called on startup
// and whenever the configuration changes).
903 gpu.frameskip.set = cbs->frameskip;
904 gpu.frameskip.advice = &cbs->fskip_advice;
905 gpu.frameskip.force = &cbs->fskip_force;
906 gpu.frameskip.dirty = (void *)&cbs->fskip_dirty;
907 gpu.frameskip.active = 0;
908 gpu.frameskip.frame_ready = 1;
// Real counters replace the &gpu.zero placeholders set at init.
909 gpu.state.hcnt = cbs->gpu_hcnt;
910 gpu.state.frame_count = cbs->gpu_frame_count;
911 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
912 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
// Re-derive display geometry only when centering settings changed.
913 if (gpu.state.screen_centering_type != cbs->screen_centering_type
914 || gpu.state.screen_centering_x != cbs->screen_centering_x
915 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
916 gpu.state.screen_centering_type = cbs->screen_centering_type;
917 gpu.state.screen_centering_x = cbs->screen_centering_x;
918 gpu.state.screen_centering_y = cbs->screen_centering_y;
923 gpu.mmap = cbs->mmap;
924 gpu.munmap = cbs->munmap;
// Map VRAM lazily once the mmap callback becomes available.
927 if (gpu.vram == NULL)
930 if (cbs->pl_vout_set_raw_vram)
931 cbs->pl_vout_set_raw_vram(gpu.vram);
932 renderer_set_config(cbs);
933 vout_set_config(cbs);
936 // vim:shiftwidth=2:expandtab