2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
14 #include <stdlib.h> /* for calloc */
18 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
// GCC/Clang builtins: branch-prediction hint, cache prefetch hint, and
// an attribute preventing the compiler from inlining cold helpers.
20 #define unlikely(x) __builtin_expect((x), 0)
21 #define preload __builtin_prefetch
22 #define noinline __attribute__((noinline))
// Uncomment to route I/O-level tracing through gpu_log.
29 //#define log_io gpu_log
// Forward declarations: command-buffer processing and VRAM DMA completion
// are mutually referenced by the reset/IO paths below.
34 static noinline int do_cmd_buffer(uint32_t *data, int count);
35 static void finish_vram_transfer(int is_read);
// Drain pending work before a GP1 reset: flush any buffered GP0 words and
// close out an in-flight VRAM transfer so state is consistent.
// (NOTE(review): this excerpt elides some lines of the body.)
37 static noinline void do_cmd_reset(void)
// flush any commands still sitting in the deferred command buffer
41 if (unlikely(gpu.cmd_len > 0))
42 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
// finish a VRAM read/write DMA that was interrupted mid-transfer
45 if (unlikely(gpu.dma.h > 0))
46 finish_vram_transfer(gpu.dma_start.is_read);
// Full GPU reset: clear control registers, re-seed the E0..E7 shadow
// registers with their command numbers, and restore power-on status and
// a 256x240 display.
50 static noinline void do_reset(void)
56 memset(gpu.regs, 0, sizeof(gpu.regs));
// ex_regs[i] shadows GP0 command 0xEi; store the command byte in bits 31-24
57 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
58 gpu.ex_regs[i] = (0xe0 + i) << 24;
// power-on GPUSTAT value (display disabled, idle/ready bits set)
59 gpu.status = 0x14802000;
62 gpu.screen.hres = gpu.screen.w = 256;
63 gpu.screen.vres = gpu.screen.h = 240;
64 gpu.screen.x = gpu.screen.y = 0;
65 renderer_notify_res_change();
// Recompute horizontal resolution, display width and x offset from the
// GPUSTAT hres bits and the GP1 display-range registers, applying the
// selected screen-centering policy. (Excerpt elides some lines.)
68 static noinline void update_width(void)
// hres and dot-clock divider indexed by GPUSTAT bits 16-18
70 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
71 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
72 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
73 int hres = hres_all[(gpu.status >> 16) & 7];
74 int pal = gpu.status & PSX_GPU_STATUS_PAL;
// display width in dot-clock ticks: x2 - x1 from GP1(06h)
75 int sw = gpu.screen.x2 - gpu.screen.x1;
78 /* nothing displayed? */;
// s = nominal leftmost visible dot-clock position for NTSC/PAL
80 int s = pal ? 656 : 608; // or 600? pal is just a guess
81 x = (gpu.screen.x1 - s) / hdiv;
82 x = (x + 1) & ~1; // blitter limitation
84 sw = (sw + 2) & ~3; // according to nocash
85 switch (gpu.state.screen_centering_type) {
// manual centering: take the user-supplied x
89 x = gpu.state.screen_centering_x;
92 // correct if slightly miscentered
93 x_auto = (hres - sw) / 2 & ~3;
94 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
99 // .x range check is done in vout_update()
101 // reduce the unpleasant right border that a few games have
102 if (gpu.state.screen_centering_type == 0
103 && x <= 4 && hres - (x + sw) >= 4)
107 gpu.screen.hres = hres;
108 gpu.state.dims_changed = 1;
109 //printf("xx %d %d -> %2d, %d / %d\n",
110 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
// Recompute vertical resolution, display height and y offset from GPUSTAT
// and the GP1 vertical display range, doubling everything for PAL 256-line
// or interlaced modes. (Excerpt elides some lines.)
113 static noinline void update_height(void)
115 int pal = gpu.status & PSX_GPU_STATUS_PAL;
116 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
// subtract the first visible scanline (PAL starts lower; 39 for spyro)
117 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
118 int sh = gpu.screen.y2 - gpu.screen.y1;
// tall PAL display (or an already-doubled vres): switch to 256/512 lines
122 if (pal && (sh > 240 || gpu.screen.vres == 256))
125 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
127 /* nothing displayed? */;
129 switch (gpu.state.screen_centering_type) {
// manual centering: take the user-supplied y
133 y = gpu.state.screen_centering_y;
136 // correct if slightly miscentered
137 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
145 gpu.screen.vres = vres;
146 gpu.state.dims_changed = 1;
147 //printf("yy %d %d -> %d, %d / %d\n",
148 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
// Decide whether the next frame should be skipped, based on the forced
// setting, frontend advice, and the fixed skip ratio; replays a deferred
// fill command once skipping stops. (Excerpt elides some lines.)
151 static noinline void decide_frameskip(void)
153 *gpu.frameskip.dirty = 1;
155 if (gpu.frameskip.active)
// the previous frame was rendered; reset the skip counter
158 gpu.frameskip.cnt = 0;
159 gpu.frameskip.frame_ready = 1;
// priority: forced skip > frontend advice > configured skip ratio
162 if (*gpu.frameskip.force)
163 gpu.frameskip.active = 1;
164 else if (!gpu.frameskip.active && *gpu.frameskip.advice)
165 gpu.frameskip.active = 1;
166 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
167 gpu.frameskip.active = 1;
169 gpu.frameskip.active = 0;
// a fill (0x02) that was postponed while skipping must run now that
// we will actually draw this frame
171 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
173 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
174 gpu.frameskip.pending_fill[0] = 0;
// Given a GP0 E3 (drawing area top-left) word, decide whether frameskip
// is allowed: skipping is unsafe if the game draws into the currently
// displayed region (unless interlaced, which always does).
178 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
180 // no frameskip if it decides to draw to display area,
181 // but not for interlace since it'll most likely always do that
182 uint32_t x = cmd_e3 & 0x3ff;
183 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
// unsigned subtraction doubles as a "before or past the window" range check
184 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
185 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
186 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
187 return gpu.frameskip.allow;
// GP1(10h) "get GPU info": latch the requested internal value into gp0
// for the next GPUREAD. (Excerpt elides most cases of the switch.)
190 static noinline void get_gpu_info(uint32_t data)
192 switch (data & 0x0f) {
// draw area / offset style values live in the E-register shadows
196 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
199 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
210 // double, for overdraw guard
// 1024x512 16bpp VRAM, doubled, plus a 4 KB guard page in front
211 #define VRAM_SIZE ((1024 * 512 * 2 * 2) + 4096)
213 // Minimum 16-byte VRAM alignment needed by gpu_unai's pixel-skipping
214 // renderer/downscaler it uses in high res modes:
216 // On GCW platform (MIPS), align to 8192 bytes (1 TLB entry) to reduce # of
217 // fills. (Will change this value if it ever gets large page support)
218 #define VRAM_ALIGN 8192
220 #define VRAM_ALIGN 16
223 // vram ptr received from mmap/malloc/alloc (will deallocate using this)
224 static uint16_t *vram_ptr_orig = NULL;
226 #ifdef GPULIB_USE_MMAP
// Map VRAM via the frontend-supplied mmap callback; keep the raw pointer
// for later unmap and hand out an aligned pointer past the guard area.
227 static int map_vram(void)
// over-allocate so the pointer can be rounded up to VRAM_ALIGN
229 gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE + (VRAM_ALIGN-1));
230 if (gpu.vram != NULL) {
231 // 4kb guard in front
232 gpu.vram += (4096 / 2);
234 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
238 fprintf(stderr, "could not map vram, expect crashes\n");
// Non-mmap variant: allocate zeroed VRAM with calloc, then apply the same
// guard offset and alignment round-up as the mmap path.
243 static int map_vram(void)
245 gpu.vram = vram_ptr_orig = (uint16_t*)calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1);
246 if (gpu.vram != NULL) {
247 // 4kb guard in front
248 gpu.vram += (4096 / 2);
250 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
253 fprintf(stderr, "could not allocate vram, expect crashes\n");
// Fallback heap allocation of VRAM (used when mapping is unavailable);
// mirrors map_vram: zeroed buffer, 4 KB front guard, VRAM_ALIGN round-up.
258 static int allocate_vram(void)
260 gpu.vram = vram_ptr_orig = (uint16_t*)calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1);
261 if (gpu.vram != NULL) {
262 // 4kb guard in front
263 gpu.vram += (4096 / 2);
265 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
268 fprintf(stderr, "could not allocate vram, expect crashes\n");
// NOTE(review): this is the interior of the plugin init function; its
// opening line is not visible in this excerpt.
276 #ifndef GPULIB_USE_MMAP
// without mmap support, grab VRAM from the heap up front
277 if (gpu.vram == NULL) {
278 if (allocate_vram() != 0) {
279 printf("ERROR: could not allocate VRAM, exiting..\n");
285 //extern uint32_t hSyncCount; // in psxcounters.cpp
286 //extern uint32_t frame_counter; // in psxcounters.cpp
287 //gpu.state.hcnt = &hSyncCount;
288 //gpu.state.frame_count = &frame_counter;
292 ret |= renderer_init();
294 memset(&gpu.state, 0, sizeof(gpu.state));
295 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
// point the counters at a harmless zero until the frontend registers
// real ones via GPUrearmedCallbacks
297 gpu.state.frame_count = &gpu.zero;
298 gpu.state.hcnt = &gpu.zero;
302 /*if (gpu.mmap != NULL) {
// Plugin teardown: release VRAM through the same mechanism that
// allocated it (munmap vs free), then clear both pointers.
309 long GPUshutdown(void)
316 if (vram_ptr_orig != NULL) {
317 #ifdef GPULIB_USE_MMAP
// unmap using the original (unaligned) pointer, not the adjusted gpu.vram
318 gpu.munmap(vram_ptr_orig, VRAM_SIZE);
323 vram_ptr_orig = gpu.vram = NULL;
// GP1 control-port write: dispatch on the command byte to update display
// enable, DMA direction, display start/range, and video mode.
// (Excerpt elides the case labels and some lines.)
328 void GPUwriteStatus(uint32_t data)
330 uint32_t cmd = data >> 24;
331 if (cmd < ARRAY_SIZE(gpu.regs)) {
// skip redundant writes, except cmds 0/1 (reset) and 5 (display start)
333 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
335 gpu.regs[cmd] = data;
338 gpu.state.fb_dirty = 1;
// display disable
349 gpu.status |= PSX_GPU_STATUS_BLANKING;
350 gpu.state.dims_changed = 1; // for hud clearing
353 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction (bits 29-30 of GPUSTAT)
356 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
357 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// display start address (scanout origin in VRAM)
360 gpu.screen.src_x = data & 0x3ff;
361 gpu.screen.src_y = (data >> 10) & 0x1ff;
362 renderer_notify_scanout_x_change(gpu.screen.src_x, gpu.screen.hres);
363 if (gpu.frameskip.set) {
364 decide_frameskip_allow(gpu.ex_regs[3]);
// a scanout change is treated as a flip for frameskip pacing
365 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
367 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// horizontal display range
372 gpu.screen.x1 = data & 0xfff;
373 gpu.screen.x2 = (data >> 12) & 0xfff;
// vertical display range
377 gpu.screen.y1 = data & 0x3ff;
378 gpu.screen.y2 = (data >> 10) & 0x3ff;
// display mode: repack GP1(08h) bits into GPUSTAT bits 16-22
382 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
385 renderer_notify_res_change();
388 if ((cmd & 0xf0) == 0x10)
393 #ifdef GPUwriteStatus_ext
394 GPUwriteStatus_ext(data);
// Number of EXTRA words (beyond the command word itself) for each GP0
// command byte; 0 marks no-ops/unknowns and variable-length commands
// handled specially (polylines 0x48+, VRAM transfers 0xa0/0xc0).
398 const unsigned char cmd_lengths[256] =
400 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
401 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
402 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
403 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
404 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
405 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
406 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
407 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
408 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
409 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
410 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
411 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
412 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
413 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
414 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
415 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
// Address of pixel (x, y) in the 1024-halfword-pitch VRAM.
418 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
// Copy one scanline of l 16-bit pixels between VRAM and a host buffer;
// direction selected by is_read (VRAM->mem when reading).
420 static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
422 uint16_t *vram = VRAM_MEM_XY(x, y);
424 memcpy(mem, vram, l * 2);
426 memcpy(vram, mem, l * 2);
// Stream `count` 32-bit words of an in-progress VRAM transfer, resuming
// from gpu.dma state (partial-line offset, remaining rows). Returns the
// number of 32-bit words consumed. (Excerpt elides some lines.)
429 static int do_vram_io(uint32_t *data, int count, int is_read)
431 int count_initial = count;
432 uint16_t *sdata = (uint16_t *)data;
433 int x = gpu.dma.x, y = gpu.dma.y;
434 int w = gpu.dma.w, h = gpu.dma.h;
435 int o = gpu.dma.offset;
437 count *= 2; // operate in 16bpp pixels
// first, finish a line that was left partially transferred last call
441 if (gpu.dma.offset) {
442 l = w - gpu.dma.offset;
446 do_vram_line(x + o, y, sdata, l, is_read);
// bulk path: whole lines while enough data remains
459 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
461 do_vram_line(x, y, sdata, w, is_read);
// trailing partial line; its offset is remembered for the next call
467 do_vram_line(x, y, sdata, count, is_read);
473 finish_vram_transfer(is_read);
// convert back from 16-bit pixel units to 32-bit words consumed
478 return count_initial - count / 2;
// Begin a GP0 0xA0/0xC0 VRAM write/read: decode and wrap the rectangle,
// snapshot the parameters in dma_start, and for reads pre-latch the first
// word into gp0 and raise the IMG status bit. (Excerpt elides some lines.)
481 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
484 log_anomaly("start_vram_transfer while old unfinished\n");
486 gpu.dma.x = pos_word & 0x3ff;
487 gpu.dma.y = (pos_word >> 16) & 0x1ff;
// size 0 means the maximum (1024/512), per hardware masking behavior
488 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
489 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
491 gpu.dma.is_read = is_read;
492 gpu.dma_start = gpu.dma;
// renderer must not have stale drawing queued over the transfer area
494 renderer_flush_queues();
496 gpu.status |= PSX_GPU_STATUS_IMG;
497 // XXX: wrong for width 1
498 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
499 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
502 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
503 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
// Conclude a VRAM transfer: clear the IMG flag for reads; for writes,
// tell the renderer which rectangle of VRAM was modified.
506 static void finish_vram_transfer(int is_read)
509 gpu.status &= ~PSX_GPU_STATUS_IMG;
511 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
512 gpu.dma_start.w, gpu.dma_start.h, 0);
// Walk a GP0 command list while frameskip is active: discard drawing
// commands, but still track E-register state, defer/execute fills, and
// step over variable-length polylines. Returns words consumed; *last_cmd
// reports the last command seen. (Excerpt elides some lines.)
515 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
517 int cmd = 0, pos = 0, len, dummy, v;
520 gpu.frameskip.pending_fill[0] = 0;
522 while (pos < count && skip) {
523 uint32_t *list = data + pos;
524 cmd = LE32TOH(list[0]) >> 24;
525 len = 1 + cmd_lengths[cmd];
// fill (0x02): never skip a large clear, else defer it until a
// rendered frame (see decide_frameskip)
529 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
530 // clearing something large, don't skip
531 do_cmd_list(list, 3, &dummy);
533 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// textured prims update the texpage bits of ex_regs[1]
539 gpu.ex_regs[1] &= ~0x1ff;
540 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// polyline: scan for the 0x5000,5000 terminator word
543 for (v = 3; pos + v < count; v++)
545 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// shaded polyline: terminator appears on every other word
551 for (v = 4; pos + v < count; v += 2)
553 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// E3 (draw area) can re-enable drawing into the display -> stop skipping
560 skip = decide_frameskip_allow(LE32TOH(list[0]));
561 if ((cmd & 0xf8) == 0xe0)
562 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
566 if (pos + len > count) {
568 break; // incomplete cmd
// VRAM transfer commands cannot be skipped; bail out to the caller
570 if (0xa0 <= cmd && cmd <= 0xdf)
// keep the renderer's E-register state in sync after skipping
576 renderer_sync_ecmds(gpu.ex_regs);
// Core GP0 word processor: routes data into an active VRAM transfer,
// starts new transfers, and otherwise hands spans to do_cmd_list (or the
// skipping variant). Returns words left unconsumed (incomplete command).
// (Excerpt elides some lines.)
581 static noinline int do_cmd_buffer(uint32_t *data, int count)
584 uint32_t old_e3 = gpu.ex_regs[3];
588 for (pos = 0; pos < count; )
// an in-progress VRAM write consumes raw data first
590 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
592 pos += do_vram_io(data + pos, count - pos, 0);
597 cmd = LE32TOH(data[pos]) >> 24;
598 if (0xa0 <= cmd && cmd <= 0xdf) {
599 if (unlikely((pos+2) >= count)) {
600 // incomplete vram write/read cmd, can't consume yet
// consume vram write/read cmd
606 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
611 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
612 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
613 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
615 pos += do_cmd_list(data + pos, count - pos, &cmd);
// mirror texture/mask state from ex_regs into GPUSTAT low bits
624 gpu.status &= ~0x1fff;
625 gpu.status |= gpu.ex_regs[1] & 0x7ff;
626 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
628 gpu.state.fb_dirty |= vram_dirty;
// draw-area change may alter whether frameskip is safe
630 if (old_e3 != gpu.ex_regs[3])
631 decide_frameskip_allow(gpu.ex_regs[3]);
// Process the deferred command buffer; any unconsumed tail (incomplete
// command) is moved to the front for the next call.
636 static void flush_cmd_buffer(void)
638 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
640 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
// Plugin entry: bulk GP0 data write (DMA block). Flushes any buffered
// words first so ordering is preserved; leftovers here are discarded.
644 void GPUwriteDataMem(uint32_t *mem, int count)
648 log_io("gpu_dma_write %p %d\n", mem, count);
650 if (unlikely(gpu.cmd_len > 0))
653 left = do_cmd_buffer(mem, count);
655 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
// Plugin entry: single GP0 word write. Words are buffered (stored LE)
// and flushed when the buffer fills.
658 void GPUwriteData(uint32_t data)
660 log_io("gpu_write %08x\n", data);
661 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
662 if (gpu.cmd_len >= CMD_BUFFER_LEN)
// Plugin entry: walk a linked-list DMA chain in PSX RAM, feeding each
// packet's payload to do_cmd_buffer. Detects infinite list loops by
// temporarily marking visited headers with bit 23. Returns cpu cycle
// cost. (Excerpt elides some lines.)
666 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
668 uint32_t addr, *list, ld_addr = 0;
669 int len, left, count;
672 preload(rambase + (start_addr & 0x1fffff) / 4);
674 if (unlikely(gpu.cmd_len > 0))
677 log_io("gpu_dma_chain\n");
// list terminator has bit 23 set (0xffffff end marker)
678 addr = start_addr & 0xffffff;
679 for (count = 0; (addr & 0x800000) == 0; count++)
681 list = rambase + (addr & 0x1fffff) / 4;
// header word: payload length in bits 31-24, next-link in bits 23-0
682 len = LE32TOH(list[0]) >> 24;
683 addr = LE32TOH(list[0]) & 0xffffff;
684 preload(rambase + (addr & 0x1fffff) / 4);
688 cpu_cycles += 5 + len;
690 log_io(".chain %08lx #%d+%d\n",
691 (long)(list - rambase) * 4, len, gpu.cmd_len);
// if leftovers are buffered, append this packet and flush together
692 if (unlikely(gpu.cmd_len > 0)) {
693 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
694 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
697 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
704 left = do_cmd_buffer(list + 1, len);
// stash the unconsumed tail for the next write/chain call
706 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
708 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
713 *progress_addr = addr;
716 #define LD_THRESHOLD (8*1024)
717 if (count >= LD_THRESHOLD) {
718 if (count == LD_THRESHOLD) {
723 // loop detection marker
724 // (bit23 set causes DMA error on real machine, so
725 // unlikely to be ever set by the game)
726 list[0] |= HTOLE32(0x800000);
731 // remove loop detection markers
732 count -= LD_THRESHOLD + 2;
733 addr = ld_addr & 0x1fffff;
734 while (count-- > 0) {
735 list = rambase + addr / 4;
736 addr = LE32TOH(list[0]) & 0x1fffff;
737 list[0] &= HTOLE32(~0x800000);
// bookkeeping for "is the GPU busy" heuristics elsewhere
741 gpu.state.last_list.frame = *gpu.state.frame_count;
742 gpu.state.last_list.hcnt = *gpu.state.hcnt;
743 gpu.state.last_list.cycles = cpu_cycles;
744 gpu.state.last_list.addr = start_addr;
// Plugin entry: bulk GPUREAD (VRAM->CPU DMA). Flushes buffered commands
// first, then streams the active read transfer.
749 void GPUreadDataMem(uint32_t *mem, int count)
751 log_io("gpu_dma_read %p %d\n", mem, count);
753 if (unlikely(gpu.cmd_len > 0))
757 do_vram_io(mem, count, 1);
// Plugin entry: single-word GPUREAD; pulls one word from the active VRAM
// read transfer (or returns the latched gp0 value — elided here).
760 uint32_t GPUreadData(void)
764 if (unlikely(gpu.cmd_len > 0))
770 do_vram_io(&ret, 1, 1);
774 log_io("gpu_read %08x\n", ret);
// Plugin entry: read GPUSTAT; buffered commands are flushed first so
// status bits reflect all writes so far.
778 uint32_t GPUreadStatus(void)
782 if (unlikely(gpu.cmd_len > 0))
786 log_io("gpu_read_status %08x\n", ret);
// Savestate layout shared with other PSX GPU plugins (PSEmu Pro style).
792 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
793 uint32_t ulStatus; // current gpu status
794 uint32_t ulControl[256]; // latest control register values
795 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
// Save/restore GPU state to/from the common freeze structure; on restore,
// control writes are replayed and renderer caches invalidated.
// (Excerpt elides the type dispatch and some lines.)
798 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
// save: VRAM image, control regs (ex_regs stored at slot 0xe0), status
808 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2)
809 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
810 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
811 freeze->ulStatus = gpu.status;
// load: restore the same fields...
815 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
816 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
817 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
818 gpu.status = freeze->ulStatus;
// ...then replay control writes 8..1 so derived state (screen dims etc.)
// is rebuilt; toggling bit 0 defeats the duplicate-write check
820 for (i = 8; i > 0; i--) {
821 gpu.regs[i] ^= 1; // avoid reg change detection
822 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
824 renderer_sync_ecmds(gpu.ex_regs);
825 renderer_update_caches(0, 0, 1024, 512, 1);
// Plugin entry called once per vsync: flush rendering, handle blanking,
// apply frameskip pacing, and present the frame if dirty.
// (Excerpt elides some lines.)
832 void GPUupdateLace(void)
836 renderer_flush_queues();
838 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
// remember we showed a blank frame so the next unblank forces redraw
839 if (!gpu.state.blanked) {
841 gpu.state.blanked = 1;
842 gpu.state.fb_dirty = 1;
847 renderer_notify_update_lace(0);
// nothing changed since last present -> skip the flip entirely
849 if (!gpu.state.fb_dirty)
852 if (gpu.frameskip.set) {
853 if (!gpu.frameskip.frame_ready) {
// still allow a flip if no real flip happened for ~9 frames
854 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
856 gpu.frameskip.active = 0;
858 gpu.frameskip.frame_ready = 0;
// enhancement (hi-res) toggle requires a full cache refresh
862 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
863 renderer_update_caches(0, 0, 1024, 512, 1);
864 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
865 gpu.state.fb_dirty = 0;
866 gpu.state.blanked = 0;
867 renderer_notify_update_lace(1);
// Plugin entry at each vblank edge: decide whether interlaced rendering
// should be active and inform the renderer of the current field (lcf).
// (Excerpt elides some lines.)
870 void GPUvBlank(int is_vblank, int lcf)
872 int interlace = gpu.state.allow_interlace
873 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
874 && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
875 // interlace doesn't look nice on progressive displays,
876 // so we have this "auto" mode here for games that don't read vram
877 if (gpu.state.allow_interlace == 2
878 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// flush and reconfigure when interlace mode (or its on/off state) changes
882 if (interlace || interlace != gpu.state.old_interlace) {
883 gpu.state.old_interlace = interlace;
887 renderer_flush_queues();
888 renderer_set_interlace(interlace, !lcf);
892 #include "../../frontend/plugin_lib.h"
// Frontend configuration hook: wire up frameskip controls, counters,
// centering options, and memory callbacks from the rearmed_cbs struct,
// then propagate config to renderer and vout. (Excerpt elides some lines.)
894 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
896 gpu.frameskip.set = cbs->frameskip;
897 gpu.frameskip.advice = &cbs->fskip_advice;
898 gpu.frameskip.force = &cbs->fskip_force;
899 gpu.frameskip.dirty = (void *)&cbs->fskip_dirty;
900 gpu.frameskip.active = 0;
901 gpu.frameskip.frame_ready = 1;
902 gpu.state.hcnt = cbs->gpu_hcnt;
903 gpu.state.frame_count = cbs->gpu_frame_count;
904 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
905 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
// re-derive screen geometry only when a centering option actually changed
906 if (gpu.state.screen_centering_type != cbs->screen_centering_type
907 || gpu.state.screen_centering_x != cbs->screen_centering_x
908 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
909 gpu.state.screen_centering_type = cbs->screen_centering_type;
910 gpu.state.screen_centering_x = cbs->screen_centering_x;
911 gpu.state.screen_centering_y = cbs->screen_centering_y;
// memory callbacks used by map_vram/GPUshutdown
916 gpu.mmap = cbs->mmap;
917 gpu.munmap = cbs->munmap;
// nothing more to configure until VRAM has been mapped
920 if (gpu.vram == NULL)
923 if (cbs->pl_vout_set_raw_vram)
924 cbs->pl_vout_set_raw_vram(gpu.vram);
925 renderer_set_config(cbs);
926 vout_set_config(cbs);
929 // vim:shiftwidth=2:expandtab