2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
14 #include <stdlib.h> /* for calloc */
18 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
20 #define unlikely(x) __builtin_expect((x), 0)
21 #define preload __builtin_prefetch
22 #define noinline __attribute__((noinline))
29 //#define log_io gpu_log
34 static noinline int do_cmd_buffer(uint32_t *data, int count);
35 static void finish_vram_transfer(int is_read);
37 static noinline void do_cmd_reset(void)
// Flush command state before a reset: process any GP0 words still
// buffered, then force-complete an in-flight VRAM transfer so status
// and renderer caches don't go stale.
41 if (unlikely(gpu.cmd_len > 0))
42 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
45 if (unlikely(gpu.dma.h > 0))
46 finish_vram_transfer(gpu.dma_start.is_read);
50 static noinline void do_reset(void)
// Full GPU reset: clear the control register shadow, reinitialize the
// 0xE0..0xE7 draw-environment registers, restore the reset status word
// and default screen mode, then resync the renderer.
56 memset(gpu.regs, 0, sizeof(gpu.regs));
// ex_regs[i] shadows GP0 command 0xEi; keep the command byte in bits 24-31
57 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
58 gpu.ex_regs[i] = (0xe0 + i) << 24;
// presumably the hardware's power-on/reset status value — TODO confirm bits
59 gpu.status = 0x14802000;
62 gpu.screen.hres = gpu.screen.w = 256;
63 gpu.screen.vres = gpu.screen.h = 240;
64 gpu.screen.x = gpu.screen.y = 0;
65 renderer_sync_ecmds(gpu.ex_regs);
66 renderer_notify_res_change();
69 static noinline void update_width(void)
// Recompute horizontal output resolution and blit offset from the
// status register's video-mode bits (17-18 + 16) and the programmed
// display range x1..x2.  Several statements are elided in this view.
71 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
72 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
// the same 3 status bits index both the dot clock divider and the hres
73 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
74 int hres = hres_all[(gpu.status >> 16) & 7];
75 int pal = gpu.status & PSX_GPU_STATUS_PAL;
// display width in dot clock ticks (presumably) — converted via hdiv below
76 int sw = gpu.screen.x2 - gpu.screen.x1;
79 /* nothing displayed? */;
// translate x1 from dot clock ticks to pixels relative to nominal start
81 int s = pal ? 656 : 608; // or 600? pal is just a guess
82 x = (gpu.screen.x1 - s) / hdiv;
83 x = (x + 1) & ~1; // blitter limitation
85 sw = (sw + 2) & ~3; // according to nocash
86 switch (gpu.state.screen_centering_type) {
// manual centering: take the user-configured x offset as-is
90 x = gpu.state.screen_centering_x;
93 // correct if slightly miscentered
94 x_auto = (hres - sw) / 2 & ~3;
95 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
100 // .x range check is done in vout_update()
102 // reduce the unpleasant right border that a few games have
103 if (gpu.state.screen_centering_type == 0
104 && x <= 4 && hres - (x + sw) >= 4)
108 gpu.screen.hres = hres;
109 gpu.state.dims_changed = 1;
110 //printf("xx %d %d -> %2d, %d / %d\n",
111 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
114 static noinline void update_height(void)
// Recompute vertical output resolution/offset from PAL/NTSC mode,
// double-height (interlace) flag and the display range y1..y2.
116 int pal = gpu.status & PSX_GPU_STATUS_PAL;
117 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
118 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
119 int sh = gpu.screen.y2 - gpu.screen.y1;
// treat as PAL-height output if the range demands it or vres already is 256
123 if (pal && (sh > 240 || gpu.screen.vres == 256))
// double everything for interlaced (double-height) modes
126 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
128 /* nothing displayed? */;
130 switch (gpu.state.screen_centering_type) {
// manual centering: take the user-configured y offset as-is
134 y = gpu.state.screen_centering_y;
137 // correct if slightly miscentered
138 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
146 gpu.screen.vres = vres;
147 gpu.state.dims_changed = 1;
148 //printf("yy %d %d -> %d, %d / %d\n",
149 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
152 static noinline void decide_frameskip(void)
// Per-flip frameskip decision: update the active flag and, when we stop
// skipping, replay a fill command that was deferred while skipping.
154 *gpu.frameskip.dirty = 1;
156 if (gpu.frameskip.active)
159 gpu.frameskip.cnt = 0;
160 gpu.frameskip.frame_ready = 1;
// priority: forced skip > frontend advice > fixed 1-in-N setting
163 if (*gpu.frameskip.force)
164 gpu.frameskip.active = 1;
165 else if (!gpu.frameskip.active && *gpu.frameskip.advice)
166 gpu.frameskip.active = 1;
167 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
168 gpu.frameskip.active = 1;
170 gpu.frameskip.active = 0;
// a fill deferred by do_cmd_list_skip() must still reach the renderer
172 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
174 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
175 gpu.frameskip.pending_fill[0] = 0;
179 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
181 // no frameskip if it decides to draw to display area,
182 // but not for interlace since it'll most likely always do that
183 uint32_t x = cmd_e3 & 0x3ff;
184 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
185 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
186 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
187 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
188 return gpu.frameskip.allow;
191 static void flush_cmd_buffer(void);
193 static noinline void get_gpu_info(uint32_t data)
// GP1(0x10) "get GPU info": latch the requested internal value into
// gpu.gp0 so a following GPUREAD returns it.  Buffered commands are
// flushed first so the e-registers are current (flush call elided here).
195 if (unlikely(gpu.cmd_len > 0))
197 switch (data & 0x0f) {
// presumably the texture-window / draw-area register cases — only 20
// significant bits are architecturally defined for these
201 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
// drawing offset (0xE5 shadow): 22 significant bits
204 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
215 // double, for overdraw guard
216 #define VRAM_SIZE ((1024 * 512 * 2 * 2) + 4096)
218 // Minimum 16-byte VRAM alignment needed by gpu_unai's pixel-skipping
219 // renderer/downscaler it uses in high res modes:
221 // On GCW platform (MIPS), align to 8192 bytes (1 TLB entry) to reduce # of
222 // fills. (Will change this value if it ever gets large page support)
// NOTE(review): the two VRAM_ALIGN values below are per-platform
// alternatives selected by preprocessor conditionals not visible here.
223 #define VRAM_ALIGN 8192
225 #define VRAM_ALIGN 16
228 // vram ptr received from mmap/malloc/alloc (will deallocate using this)
229 static uint16_t *vram_ptr_orig = NULL;
// default GPULIB_USE_MMAP per platform when not set by the build
231 #ifndef GPULIB_USE_MMAP
233 # define GPULIB_USE_MMAP 1
235 # define GPULIB_USE_MMAP 0
238 static int map_vram(void)
// Allocate VRAM (plus alignment slack) and align the working pointer.
// mmap and calloc are alternative allocation paths (GPULIB_USE_MMAP).
241 gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE + (VRAM_ALIGN-1));
243 gpu.vram = vram_ptr_orig = calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1);
// both NULL and MAP_FAILED-style (void*)-1 indicate allocation failure
245 if (gpu.vram != NULL && gpu.vram != (void *)(intptr_t)-1) {
246 // 4kb guard in front
247 gpu.vram += (4096 / 2);
// round up to VRAM_ALIGN; pointer arithmetic above is in halfwords
249 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
253 fprintf(stderr, "could not map vram, expect crashes\n");
// (fragment of the plugin init function; its head is not visible here)
262 ret |= renderer_init();
264 memset(&gpu.state, 0, sizeof(gpu.state));
265 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
// point the counter hooks at a harmless dummy until the frontend
// installs real ones via GPUrearmedCallbacks()
267 gpu.state.frame_count = &gpu.zero;
268 gpu.state.hcnt = &gpu.zero;
272 /*if (gpu.mmap != NULL) {
279 long GPUshutdown(void)
// Release VRAM through the same mechanism that allocated it
// (munmap path shown; the non-mmap free path is elided here).
286 if (vram_ptr_orig != NULL) {
288 gpu.munmap(vram_ptr_orig, VRAM_SIZE);
// clear both pointers so a later re-init starts clean
293 vram_ptr_orig = gpu.vram = NULL;
298 void GPUwriteStatus(uint32_t data)
// GP1 control-port write.  The command is in the top byte; individual
// command handlers below belong to a switch whose case labels are
// elided in this view.
300 uint32_t cmd = data >> 24;
// skip redundant writes, except for cmds 0/1 (reset/FIFO flush) and 5
// (display start), which have side effects even with unchanged data
302 if (cmd < ARRAY_SIZE(gpu.regs)) {
303 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
305 gpu.regs[cmd] = data;
308 gpu.state.fb_dirty = 1;
// display disable: blank and force a redraw (for hud clearing)
319 gpu.status |= PSX_GPU_STATUS_BLANKING;
320 gpu.state.dims_changed = 1; // for hud clearing
// display enable
323 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction / data request mode
326 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
327 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// start of display area (scanout origin in VRAM)
330 gpu.screen.src_x = data & 0x3ff;
331 gpu.screen.src_y = (data >> 10) & 0x1ff;
332 renderer_notify_scanout_x_change(gpu.screen.src_x, gpu.screen.hres);
333 if (gpu.frameskip.set) {
334 decide_frameskip_allow(gpu.ex_regs[3]);
// a scanout change is treated as a "flip"; re-decide skipping once per frame
335 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
337 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// horizontal display range
342 gpu.screen.x1 = data & 0xfff;
343 gpu.screen.x2 = (data >> 12) & 0xfff;
// vertical display range
347 gpu.screen.y1 = data & 0x3ff;
348 gpu.screen.y2 = (data >> 10) & 0x3ff;
// display mode: map GP1(08) bits into status bits 16-22
352 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
355 renderer_notify_res_change();
// 0x10-0x1f: GPU info request
358 if ((cmd & 0xf0) == 0x10)
363 #ifdef GPUwriteStatus_ext
364 GPUwriteStatus_ext(data);
// Number of extra 32-bit parameter words following each GP0 command
// byte (table index = command).  Zero marks commands with no parameters
// or ones handled specially elsewhere.
368 const unsigned char cmd_lengths[256] =
370 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
371 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
372 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
373 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
374 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
375 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
376 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
377 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
378 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
379 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
380 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
381 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
382 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
383 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
384 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
385 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
388 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
// Copy l halfwords from src to dst, OR-ing the given mask bit (msb)
// into every pixel written.
static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
{
  const uint16_t *src_end = src + l;
  while (src < src_end)
    *dst++ = *src++ | msb;
}
397 static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
398 int is_read, uint16_t msb)
// Transfer one line of l pixels between VRAM at (x,y) and mem.
// Reads copy VRAM->mem; writes copy mem->VRAM, via the slower
// cpy_msb() path when the mask bit must be OR-ed in.
400 uint16_t *vram = VRAM_MEM_XY(x, y);
401 if (unlikely(is_read))
402 memcpy(mem, vram, l * 2);
403 else if (unlikely(msb))
404 cpy_msb(vram, mem, l, msb);
406 memcpy(vram, mem, l * 2);
409 static int do_vram_io(uint32_t *data, int count, int is_read)
// Stream pixel data for an active VRAM transfer between the 32-bit word
// buffer and VRAM.  Returns the number of words consumed/produced.
411 int count_initial = count;
// mask-set bit derived from the 0xE6 shadow register — presumably the
// "set mask while drawing" bit; TODO confirm against GP0(E6) docs
412 uint16_t msb = gpu.ex_regs[6] << 15;
413 uint16_t *sdata = (uint16_t *)data;
414 int x = gpu.dma.x, y = gpu.dma.y;
415 int w = gpu.dma.w, h = gpu.dma.h;
416 int o = gpu.dma.offset;
418 count *= 2; // operate in 16bpp pixels
// finish a previously started partial line first
422 if (gpu.dma.offset) {
423 l = w - gpu.dma.offset;
427 do_vram_line(x + o, y, sdata, l, is_read, msb);
// whole lines
440 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
442 do_vram_line(x, y, sdata, w, is_read, msb);
// trailing partial line; the remaining offset bookkeeping is elided here
448 do_vram_line(x, y, sdata, count, is_read, msb);
// transfer fully consumed: finalize and clear IMG/dirty state
454 finish_vram_transfer(is_read);
459 return count_initial - count / 2;
462 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
// Begin a GP0 VRAM read (0xC0) or write (0xA0) transfer.  Decodes the
// position/size words, snapshots the transfer params, and for reads
// pre-latches the first word for GPUREAD.
465 log_anomaly("start_vram_transfer while old unfinished\n");
467 gpu.dma.x = pos_word & 0x3ff;
468 gpu.dma.y = (pos_word >> 16) & 0x1ff;
// size 0 means maximum (1024/512): the -1/&/+1 dance maps 0 -> full extent
469 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
470 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
472 gpu.dma.is_read = is_read;
// keep the initial rectangle; gpu.dma itself mutates as the transfer runs
473 gpu.dma_start = gpu.dma;
// make sure queued draws hit VRAM before we read/overwrite it
475 renderer_flush_queues();
477 gpu.status |= PSX_GPU_STATUS_IMG;
478 // XXX: wrong for width 1
479 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
480 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
483 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
484 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
487 static void finish_vram_transfer(int is_read)
// Complete the active transfer (branch structure partly elided):
// clears the IMG status bit; a finished CPU->VRAM write additionally
// dirties the framebuffer and invalidates renderer caches over the
// written rectangle.
490 gpu.status &= ~PSX_GPU_STATUS_IMG;
492 gpu.state.fb_dirty = 1;
493 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
494 gpu.dma_start.w, gpu.dma_start.h, 0);
498 static void do_vram_copy(const uint32_t *params)
// VRAM->VRAM rectangle copy.  params hold src pos, dst pos and size;
// coordinates wrap at the 1024x512 VRAM boundaries.
500 const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
501 const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
502 const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
503 const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
// size 0 encodes the maximum extent, as in start_vram_transfer()
504 uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
505 uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
506 uint16_t msb = gpu.ex_regs[6] << 15;
// no-op copy onto itself with no mask bit to set
510 if (sx == dx && sy == dy && msb == 0)
513 renderer_flush_queues();
// slow buffered path: overlapping ranges, horizontal wrap, or mask bit
515 if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
517 for (y = 0; y < h; y++)
519 const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
520 uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
// copy in lbuf-sized chunks so per-pixel wrap masking stays cheap
521 for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
523 uint32_t x1, w1 = w - x;
524 if (w1 > ARRAY_SIZE(lbuf))
525 w1 = ARRAY_SIZE(lbuf);
526 for (x1 = 0; x1 < w1; x1++)
527 lbuf[x1] = src[(sx + x + x1) & 0x3ff];
528 for (x1 = 0; x1 < w1; x1++)
529 dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
// fast path: straight memcpy per line (only vertical wrap possible)
535 uint32_t sy1 = sy, dy1 = dy;
536 for (y = 0; y < h; y++, sy1++, dy1++)
537 memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
540 renderer_update_caches(dx, dy, w, h, 0);
543 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
// Walk a GP0 command list while frameskip is active: draw commands are
// consumed without rendering, but state-affecting commands are still
// tracked.  Returns words consumed; several case labels are elided.
545 int cmd = 0, pos = 0, len, dummy, v;
548 gpu.frameskip.pending_fill[0] = 0;
550 while (pos < count && skip) {
551 uint32_t *list = data + pos;
552 cmd = LE32TOH(list[0]) >> 24;
553 len = 1 + cmd_lengths[cmd];
// fill command: large fills are drawn anyway, small ones deferred
// into pending_fill for decide_frameskip() to replay later
557 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
558 // clearing something large, don't skip
559 do_cmd_list(list, 3, &dummy)
561 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// keep the texture-page bits of e1 in sync even while skipping
567 gpu.ex_regs[1] &= ~0x1ff;
568 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// poly-line variants: scan for the 0x50005000 terminator word
571 for (v = 3; pos + v < count; v++)
573 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// shaded poly-line: terminator appears on every second word
579 for (v = 4; pos + v < count; v += 2)
581 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// 0xE3 (draw area) may re-enable drawing to the display -> re-decide
588 skip = decide_frameskip_allow(LE32TOH(list[0]));
589 if ((cmd & 0xf8) == 0xe0)
590 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
594 if (pos + len > count) {
596 break; // incomplete cmd
// 0x80-0xdf: VRAM transfer commands, handled by the caller
598 if (0x80 <= cmd && cmd <= 0xdf)
604 renderer_sync_ecmds(gpu.ex_regs);
609 static noinline int do_cmd_buffer(uint32_t *data, int count)
// Main GP0 dispatch: routes buffered words to VRAM I/O, VRAM copy,
// the skip path, or the renderer.  Returns the number of words NOT
// consumed (i.e. an incomplete trailing command).
612 uint32_t old_e3 = gpu.ex_regs[3];
616 for (pos = 0; pos < count; )
// an active CPU->VRAM write swallows the data stream first
618 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
620 pos += do_vram_io(data + pos, count - pos, 0);
625 cmd = LE32TOH(data[pos]) >> 24;
// 0xA0-0xDF: image load/store — needs cmd + 2 parameter words
626 if (0xa0 <= cmd && cmd <= 0xdf) {
627 if (unlikely((pos+2) >= count)) {
628 // incomplete vram write/read cmd, can't consume yet
633 // consume vram write/read cmd
634 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
// 0x80-0x9F: VRAM->VRAM copy — needs cmd + 3 parameter words
638 else if ((cmd & 0xe0) == 0x80) {
639 if (unlikely((pos+3) >= count)) {
640 cmd = -1; // incomplete cmd, can't consume yet
643 do_vram_copy(data + pos + 1);
649 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
650 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
651 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
653 pos += do_cmd_list(data + pos, count - pos, &cmd);
// mirror renderer-relevant e-register bits into the status word
662 gpu.status &= ~0x1fff;
663 gpu.status |= gpu.ex_regs[1] & 0x7ff;
664 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
666 gpu.state.fb_dirty |= vram_dirty;
// e3 (draw area start) changed -> recheck whether skipping is allowed
668 if (old_e3 != gpu.ex_regs[3])
669 decide_frameskip_allow(gpu.ex_regs[3]);
674 static void flush_cmd_buffer(void)
// Process buffered GP0 words; any incomplete trailing command is moved
// to the start of the buffer to await the rest of its words.
676 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
678 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
682 void GPUwriteDataMem(uint32_t *mem, int count)
// Bulk GP0 write (DMA block): flush pending buffered words first, then
// feed the block directly; leftovers here are discarded (anomaly).
686 log_io("gpu_dma_write %p %d\n", mem, count);
688 if (unlikely(gpu.cmd_len > 0))
691 left = do_cmd_buffer(mem, count);
693 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
696 void GPUwriteData(uint32_t data)
// Single-word GP0 write: buffer it (stored little-endian) and flush
// when the buffer fills up.
698 log_io("gpu_write %08x\n", data);
699 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
700 if (gpu.cmd_len >= CMD_BUFFER_LEN)
704 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
// Walk a linked-list DMA chain in emulated RAM.  Each node is a header
// word (len in top byte, next address in low 24 bits) followed by len
// GP0 words.  Returns approximate cycle cost; detects infinite loops by
// temporarily marking visited nodes.
706 uint32_t addr, *list, ld_addr = 0;
707 int len, left, count;
710 preload(rambase + (start_addr & 0x1fffff) / 4);
712 if (unlikely(gpu.cmd_len > 0))
715 log_io("gpu_dma_chain\n");
716 addr = start_addr & 0xffffff;
// bit 23 set = end-of-chain marker (also our loop-detection mark)
717 for (count = 0; (addr & 0x800000) == 0; count++)
719 list = rambase + (addr & 0x1fffff) / 4;
720 len = LE32TOH(list[0]) >> 24;
721 addr = LE32TOH(list[0]) & 0xffffff;
722 preload(rambase + (addr & 0x1fffff) / 4);
// rough cycle estimate: fixed header cost plus one per data word
726 cpu_cycles += 5 + len;
728 log_io(".chain %08lx #%d+%d\n",
729 (long)(list - rambase) * 4, len, gpu.cmd_len);
// leftover words from a previous call must be completed first, so
// append to the command buffer instead of processing in place
730 if (unlikely(gpu.cmd_len > 0)) {
731 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
732 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
735 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
// common case: process the node's payload directly from RAM
742 left = do_cmd_buffer(list + 1, len);
744 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
746 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
751 *progress_addr = addr;
753 #define LD_THRESHOLD (8*1024)
// suspiciously long chain: start marking nodes to catch cycles
755 if (count >= LD_THRESHOLD) {
756 if (count == LD_THRESHOLD) {
761 // loop detection marker
762 // (bit23 set causes DMA error on real machine, so
763 // unlikely to be ever set by the game)
764 list[0] |= HTOLE32(0x800000);
769 // remove loop detection markers
770 count -= LD_THRESHOLD + 2;
771 addr = ld_addr & 0x1fffff;
772 while (count-- > 0) {
773 list = rambase + addr / 4;
774 addr = LE32TOH(list[0]) & 0x1fffff;
775 list[0] &= HTOLE32(~0x800000);
// record list stats for frontend heuristics
779 gpu.state.last_list.frame = *gpu.state.frame_count;
780 gpu.state.last_list.hcnt = *gpu.state.hcnt;
781 gpu.state.last_list.cycles = cpu_cycles;
782 gpu.state.last_list.addr = start_addr;
787 void GPUreadDataMem(uint32_t *mem, int count)
// Bulk GPUREAD (DMA): flush pending commands, then stream words out of
// the active VRAM->CPU transfer.
789 log_io("gpu_dma_read %p %d\n", mem, count);
791 if (unlikely(gpu.cmd_len > 0))
795 do_vram_io(mem, count, 1);
798 uint32_t GPUreadData(void)
// Single-word GPUREAD: pops one word from an active VRAM->CPU transfer
// (otherwise presumably returns the latched gp0 value; those lines are
// not visible here).
802 if (unlikely(gpu.cmd_len > 0))
808 do_vram_io(&ret, 1, 1);
812 log_io("gpu_read %08x\n", ret);
816 uint32_t GPUreadStatus(void)
// Status port read; buffered commands are flushed first so the status
// bits reflect all writes made so far.
820 if (unlikely(gpu.cmd_len > 0))
824 log_io("gpu_read_status %08x\n", ret);
// savestate layout fields (the struct declaration head is not visible
// in this view; presumably this is the PSEmu Pro GPUFreeze structure)
830 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
831 uint32_t ulStatus; // current gpu status
832 uint32_t ulControl[256]; // latest control register values
833 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
836 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
// Savestate save/load.  Save path copies VRAM, control regs (ex_regs
// stored at ulControl[0xe0..]) and status out; load path restores them
// and replays the control registers to rebuild derived state.
846 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
847 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
848 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
849 freeze->ulStatus = gpu.status;
// --- load path ---
853 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
854 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
855 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
856 gpu.status = freeze->ulStatus;
// replay regs 8..1; the xor defeats the same-value skip in GPUwriteStatus()
858 for (i = 8; i > 0; i--) {
859 gpu.regs[i] ^= 1; // avoid reg change detection
860 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
862 renderer_sync_ecmds(gpu.ex_regs);
863 renderer_update_caches(0, 0, 1024, 512, 1);
870 void GPUupdateLace(void)
// Per-vsync hook: flush renderer, handle blanking, apply frameskip
// timing, and push a frame to the video out when dirty.
874 renderer_flush_queues();
876 #ifndef RAW_FB_DISPLAY
// display disabled: show the blanked state once, then do nothing
877 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
878 if (!gpu.state.blanked) {
880 gpu.state.blanked = 1;
881 gpu.state.fb_dirty = 1;
886 renderer_notify_update_lace(0);
888 if (!gpu.state.fb_dirty)
892 if (gpu.frameskip.set) {
893 if (!gpu.frameskip.frame_ready) {
// keep skipping only while flips are recent; otherwise force display
894 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
896 gpu.frameskip.active = 0;
898 gpu.frameskip.frame_ready = 0;
// enhancement (high-res) toggling invalidates everything once
902 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
903 renderer_update_caches(0, 0, 1024, 512, 1);
904 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
905 gpu.state.fb_dirty = 0;
906 gpu.state.blanked = 0;
907 renderer_notify_update_lace(1);
910 void GPUvBlank(int is_vblank, int lcf)
// Vblank hook: decide whether interlaced rendering should be used and
// tell the renderer which field (lcf) is current.
912 int interlace = gpu.state.allow_interlace
913 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
914 && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
915 // interlace doesn't look nice on progressive displays,
916 // so we have this "auto" mode here for games that don't read vram
917 if (gpu.state.allow_interlace == 2
918 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// notify the renderer on every interlaced frame and on any on/off change
922 if (interlace || interlace != gpu.state.old_interlace) {
923 gpu.state.old_interlace = interlace;
927 renderer_flush_queues();
928 renderer_set_interlace(interlace, !lcf);
932 void GPUgetScreenInfo(int *y, int *base_hres)
// Report screen position/resolution to the frontend; the value is
// halved for double-height (interlace) mode — the adjustment statement
// itself is not visible in this view.
935 *base_hres = gpu.screen.vres;
936 if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
940 #include "../../frontend/plugin_lib.h"
942 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
// Frontend configuration hook: wire up frameskip controls, counters,
// memory mappers and display settings supplied by the emulator core.
944 gpu.frameskip.set = cbs->frameskip;
945 gpu.frameskip.advice = &cbs->fskip_advice;
946 gpu.frameskip.force = &cbs->fskip_force;
947 gpu.frameskip.dirty = (void *)&cbs->fskip_dirty;
948 gpu.frameskip.active = 0;
949 gpu.frameskip.frame_ready = 1;
950 gpu.state.hcnt = cbs->gpu_hcnt;
951 gpu.state.frame_count = cbs->gpu_frame_count;
952 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
953 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
// re-derive screen geometry only when centering settings change
954 if (gpu.state.screen_centering_type != cbs->screen_centering_type
955 || gpu.state.screen_centering_x != cbs->screen_centering_x
956 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
957 gpu.state.screen_centering_type = cbs->screen_centering_type;
958 gpu.state.screen_centering_x = cbs->screen_centering_x;
959 gpu.state.screen_centering_y = cbs->screen_centering_y;
964 gpu.mmap = cbs->mmap;
965 gpu.munmap = cbs->munmap;
// allocate VRAM lazily, once the mappers are available
968 if (gpu.vram == NULL)
971 if (cbs->pl_vout_set_raw_vram)
972 cbs->pl_vout_set_raw_vram(gpu.vram);
973 renderer_set_config(cbs);
974 vout_set_config(cbs);
977 // vim:shiftwidth=2:expandtab