2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
15 #include "gpu_timing.h"
16 #include "../../libpcsxcore/gpu.h" // meh
17 #include "../../frontend/plugin_lib.h"
20 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
23 #define unlikely(x) __builtin_expect((x), 0)
24 #define preload __builtin_prefetch
25 #define noinline __attribute__((noinline))
32 //#define log_io gpu_log
37 static noinline int do_cmd_buffer(uint32_t *data, int count, int *cpu_cycles);
38 static void finish_vram_transfer(int is_read);
// Drain pending GP0 work before a reset: flush any buffered command
// words and close a still-open VRAM transfer (in its original
// read/write direction) so no half-finished state survives.
40 static noinline void do_cmd_reset(void)
43 if (unlikely(gpu.cmd_len > 0))
// flush buffered command words first so they are not lost
44 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy);
47 if (unlikely(gpu.dma.h > 0))
// a VRAM image transfer is still open - terminate it cleanly
48 finish_vram_transfer(gpu.dma_start.is_read);
// Full GPU reset: clear the GP1 register shadows, re-seed the
// e0..e7 shadow registers with their own opcodes, restore the
// power-on status word and default 256x240 screen, then let the
// renderer resync its command state and resolution.
52 static noinline void do_reset(void)
58 memset(gpu.regs, 0, sizeof(gpu.regs));
// ex_regs[i] shadows GP0 command 0xe0+i; seed each with its opcode
59 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
60 gpu.ex_regs[i] = (0xe0 + i) << 24;
// power-on GPU status value
61 gpu.status = 0x14802000;
64 gpu.screen.hres = gpu.screen.w = 256;
65 gpu.screen.vres = gpu.screen.h = 240;
66 gpu.screen.x = gpu.screen.y = 0;
67 renderer_sync_ecmds(gpu.ex_regs);
68 renderer_notify_res_change();
// Recompute the horizontal output resolution and x offset from the
// display-mode bits in the status word (bits 16-18) and the
// programmed x1/x2 display range, applying the configured screen
// centering policy (auto / manual / default).
71 static noinline void update_width(void)
// hres and dot-clock divider, both indexed by status bits 16-18
73 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
74 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
75 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
76 int hres = hres_all[(gpu.status >> 16) & 7];
77 int pal = gpu.status & PSX_GPU_STATUS_PAL;
// sw = displayed width in dot-clock ticks (x2 - x1)
78 int sw = gpu.screen.x2 - gpu.screen.x1;
79 int type = gpu.state.screen_centering_type;
82 type = gpu.state.screen_centering_type_default;
84 /* nothing displayed? */;
// s = nominal x1 value of a centered picture, in dot-clock units
86 int s = pal ? 656 : 608; // or 600? pal is just a guess
87 x = (gpu.screen.x1 - s) / hdiv;
88 x = (x + 1) & ~1; // blitter limitation
90 sw = (sw + 2) & ~3; // according to nocash
// manual centering: take the user-supplied x offset as-is
95 x = gpu.state.screen_centering_x;
98 // correct if slightly miscentered
99 x_auto = (hres - sw) / 2 & ~3;
100 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
105 // .x range check is done in vout_update()
107 // reduce the unpleasant right border that a few games have
108 if (gpu.state.screen_centering_type == 0
109 && x <= 4 && hres - (x + sw) >= 4)
113 gpu.screen.hres = hres;
114 gpu.state.dims_changed = 1;
115 //printf("xx %d %d -> %2d, %d / %d\n",
116 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
// Recompute the vertical output resolution and y offset from the
// y1/y2 display range; PAL "high" modes are handled by doubling the
// derived values, and slight miscentering is auto-corrected within
// a tolerance.
119 static noinline void update_height(void)
121 int pal = gpu.status & PSX_GPU_STATUS_PAL;
122 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
// subtract the nominal first visible scanline (PAL vs NTSC)
123 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
124 int sh = gpu.screen.y2 - gpu.screen.y1;
128 if (pal && (sh > 240 || gpu.screen.vres == 256))
// PAL high mode: scale offset, height, vres and centering tolerance
131 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
133 /* nothing displayed? */;
135 switch (gpu.state.screen_centering_type) {
// manual centering: user-supplied y offset
142 y = gpu.state.screen_centering_y;
145 // correct if slightly miscentered
146 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
154 gpu.screen.vres = vres;
155 gpu.state.dims_changed = 1;
156 //printf("yy %d %d -> %d, %d / %d\n",
157 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
// Decide whether the upcoming frame should be skipped, based on the
// frontend's live advice pointer and the configured skip pattern.
// If a screen-fill command was deferred while skipping, replay it
// once skipping ends so the framebuffer is not left stale.
160 static noinline void decide_frameskip(void)
162 if (gpu.frameskip.active)
165 gpu.frameskip.cnt = 0;
166 gpu.frameskip.frame_ready = 1;
// frontend advice takes priority; otherwise honor the skip count
169 if (!gpu.frameskip.active && *gpu.frameskip.advice)
170 gpu.frameskip.active = 1;
171 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
172 gpu.frameskip.active = 1;
174 gpu.frameskip.active = 0;
176 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
// replay the fill (3 words) that was deferred while skipping
178 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy, &dummy);
179 gpu.frameskip.pending_fill[0] = 0;
// Determine whether skipping is safe given the current drawing area.
// cmd_e3 is the GP0 0xe3 (drawing area top-left) command word.
// Returns (and stores) nonzero when skipping is allowed.
183 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
185 // no frameskip if it decides to draw to display area,
186 // but not for interlace since it'll most likely always do that
187 uint32_t x = cmd_e3 & 0x3ff;
188 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
// unsigned subtract-and-compare also rejects x < src_x / y < src_y,
// so this tests "draw area origin outside the displayed rectangle"
189 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
190 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
191 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
192 return gpu.frameskip.allow;
195 static void flush_cmd_buffer(void);
// GP1 0x10 "get GPU info": latch the requested value into gpu.gp0
// so the next GPUREAD returns it.
197 static noinline void get_gpu_info(uint32_t data)
// buffered commands may still modify ex_regs - flush them first
199 if (unlikely(gpu.cmd_len > 0))
201 switch (data & 0x0f) {
// a shadowed e-register, 20 significant bits
205 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
// e5 (drawing offset), 22 significant bits
208 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
219 // double, for overdraw guard
220 #define VRAM_SIZE (1024 * 512 * 2 * 2)
// Map the VRAM backing store through the frontend-provided mmap
// hook; on failure only logs (emulation continues, unsafely).
222 static int map_vram(void)
224 gpu.vram = gpu.mmap(VRAM_SIZE);
225 if (gpu.vram != NULL) {
// keep 4096 bytes (2048 pixels) of guard space before the buffer
226 gpu.vram += 4096 / 2;
230 fprintf(stderr, "could not map vram, expect crashes\n");
// NOTE(review): function header is not part of this excerpt -
// presumably this is the tail of GPUinit(). It brings up the
// renderer, zeroes dynamic state, and maps VRAM if a hook exists.
239 ret |= renderer_init();
241 memset(&gpu.state, 0, sizeof(gpu.state));
242 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
// until callbacks are registered, counter pointers target gpu.zero
// so dereferences elsewhere stay safe
244 gpu.state.frame_count = &gpu.zero;
245 gpu.state.hcnt = &gpu.zero;
249 if (gpu.mmap != NULL) {
// Plugin teardown: release the VRAM mapping, first undoing the
// guard-space offset added in map_vram() so the original pointer
// is passed back to munmap.
256 long GPUshutdown(void)
262 if (gpu.vram != NULL) {
263 gpu.vram -= 4096 / 2;
264 gpu.munmap(gpu.vram, VRAM_SIZE);
// GP1 (control) register write: dispatch on the command byte in the
// top 8 bits. Duplicate writes are ignored for most commands to
// avoid redundant work; display-affecting commands notify the
// renderer / recompute screen geometry.
271 void GPUwriteStatus(uint32_t data)
273 uint32_t cmd = data >> 24;
276 if (cmd < ARRAY_SIZE(gpu.regs)) {
// dedupe, except commands 0/1/5 which must always take effect
277 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
279 gpu.regs[cmd] = data;
282 gpu.state.fb_dirty = 1;
// display blanking on
293 gpu.status |= PSX_GPU_STATUS_BLANKING;
294 gpu.state.dims_changed = 1; // for hud clearing
// display blanking off
297 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction / data request mode (2 bits)
300 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
301 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// start of display area (scanout origin) in VRAM
304 src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff;
305 if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) {
306 gpu.screen.src_x = src_x;
307 gpu.screen.src_y = src_y;
308 renderer_notify_scanout_change(src_x, src_y);
309 if (gpu.frameskip.set) {
// scanout change may alter what is safe to skip
310 decide_frameskip_allow(gpu.ex_regs[3]);
311 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
313 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// horizontal display range (x1/x2, 12 bits each)
319 gpu.screen.x1 = data & 0xfff;
320 gpu.screen.x2 = (data >> 12) & 0xfff;
// vertical display range (y1/y2, 10 bits each)
324 gpu.screen.y1 = data & 0x3ff;
325 gpu.screen.y2 = (data >> 10) & 0x3ff;
// display mode: bits 0-5 -> status bits 17-22, bit 6 -> bit 16
329 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
332 renderer_notify_res_change();
// GP1 0x10..0x1f: GPU info request
335 if ((cmd & 0xf0) == 0x10)
340 #ifdef GPUwriteStatus_ext
341 GPUwriteStatus_ext(data);
// Extra parameter words per GP0 command (beyond the command word
// itself), indexed by command byte. Zero entries are commands with
// no parameters or with variable/special length handled elsewhere
// (e.g. poly-lines, vram i/o, the 0xe0 state commands).
345 const unsigned char cmd_lengths[256] =
347 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
348 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
349 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
350 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
351 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
352 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
353 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
354 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
355 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
356 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
357 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
358 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
359 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
360 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
361 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
362 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
365 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
// Copy l halfwords from src to dst, OR-ing the caller-supplied msb
// value into every pixel (callers pass the mask bit, bit 15,
// derived from the e6 shadow register).
367 static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
370 for (i = 0; i < l; i++)
371 dst[i] = src[i] | msb;
// Transfer one horizontal span of l pixels at VRAM (x, y).
// is_read: copy VRAM -> mem; otherwise mem -> VRAM, going through
// cpy_msb() when the mask bit must be set on every written pixel.
374 static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
375 int is_read, uint16_t msb)
377 uint16_t *vram = VRAM_MEM_XY(x, y);
378 if (unlikely(is_read))
379 memcpy(mem, vram, l * 2);
380 else if (unlikely(msb))
381 cpy_msb(vram, mem, l, msb);
383 memcpy(vram, mem, l * 2);
// Stream up to `count` 32-bit words between `data` and the active
// VRAM transfer rectangle (gpu.dma), resuming mid-row via
// gpu.dma.offset when a previous call stopped partway. Finishes the
// transfer when the rectangle is exhausted. Returns the number of
// 32-bit words actually consumed.
386 static int do_vram_io(uint32_t *data, int count, int is_read)
388 int count_initial = count;
// mask-set flag from the e6 shadow register, as pixel bit 15
389 uint16_t msb = gpu.ex_regs[6] << 15;
390 uint16_t *sdata = (uint16_t *)data;
391 int x = gpu.dma.x, y = gpu.dma.y;
392 int w = gpu.dma.w, h = gpu.dma.h;
393 int o = gpu.dma.offset;
395 count *= 2; // operate in 16bpp pixels
// finish the partially-transferred row first
397 if (gpu.dma.offset) {
398 l = w - gpu.dma.offset;
402 do_vram_line(x + o, y, sdata, l, is_read, msb);
// whole rows
415 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
417 do_vram_line(x, y, sdata, w, is_read, msb);
// trailing partial row; its offset carries into the next call
423 do_vram_line(x, y, sdata, count, is_read, msb);
429 finish_vram_transfer(is_read);
// count is in pixels here; convert back to words consumed
434 return count_initial - count / 2;
// Begin a GP0 image transfer (0xa0 write / 0xc0 read): latch the
// rectangle into gpu.dma and prime per-direction state. Size fields
// of 0 encode the maximum (1024 wide / 512 high).
437 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
440 log_anomaly("start_vram_transfer while old unfinished\n");
442 gpu.dma.x = pos_word & 0x3ff;
443 gpu.dma.y = (pos_word >> 16) & 0x1ff;
// (size - 1) & mask + 1 maps 0 to the maximum dimension
444 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
445 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
447 gpu.dma.is_read = is_read;
// remember the full rectangle for finish_vram_transfer()
448 gpu.dma_start = gpu.dma;
450 renderer_flush_queues();
// read path: flag data availability and pre-latch the first word
452 gpu.status |= PSX_GPU_STATUS_IMG;
453 // XXX: wrong for width 1
454 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
455 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
458 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
459 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
460 if (gpu.gpu_state_change)
461 gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);
// Close the active VRAM transfer: drop the IMG status flag and, for
// writes, mark the framebuffer dirty and invalidate renderer caches
// over the written rectangle recorded in gpu.dma_start.
464 static void finish_vram_transfer(int is_read)
467 gpu.status &= ~PSX_GPU_STATUS_IMG;
469 gpu.state.fb_dirty = 1;
470 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
471 gpu.dma_start.w, gpu.dma_start.h, 0);
473 if (gpu.gpu_state_change)
474 gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
// GP0 0x80 VRAM-to-VRAM copy. Coordinates wrap at 1024x512. A slow
// per-pixel path staged through a line buffer handles overlapping
// rectangles, horizontal wrap-around and mask-bit setting; all
// other cases use straight per-row memcpy.
477 static void do_vram_copy(const uint32_t *params, int *cpu_cycles)
479 const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
480 const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
481 const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
482 const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
// size 0 encodes the maximum, as with image transfers
483 uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
484 uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
485 uint16_t msb = gpu.ex_regs[6] << 15;
489 *cpu_cycles += gput_copy(w, h);
// copying a rect onto itself with no mask bit to set is a no-op
490 if (sx == dx && sy == dy && msb == 0)
493 renderer_flush_queues();
// slow path: forward overlap, possible x wrap, or mask bit needed
495 if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
497 for (y = 0; y < h; y++)
499 const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
500 uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
501 for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
503 uint32_t x1, w1 = w - x;
504 if (w1 > ARRAY_SIZE(lbuf))
505 w1 = ARRAY_SIZE(lbuf);
// stage through lbuf so overlapping src/dst rows copy correctly
506 for (x1 = 0; x1 < w1; x1++)
507 lbuf[x1] = src[(sx + x + x1) & 0x3ff];
508 for (x1 = 0; x1 < w1; x1++)
509 dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
// fast path: non-overlapping, non-wrapping, no mask - row memcpy
515 uint32_t sy1 = sy, dy1 = dy;
516 for (y = 0; y < h; y++, sy1++, dy1++)
517 memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
520 renderer_update_caches(dx, dy, w, h, 0);
// Walk a GP0 command list while frameskip is active, consuming
// commands without rendering. Large screen fills are still executed
// (deferring small ones), texture-page and e-register state is kept
// in sync, and VRAM i/o commands bail out to the caller. Returns
// words consumed; *last_cmd receives the last command byte seen.
523 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
525 int cmd = 0, pos = 0, len, dummy = 0, v;
528 gpu.frameskip.pending_fill[0] = 0;
530 while (pos < count && skip) {
531 uint32_t *list = data + pos;
532 cmd = LE32TOH(list[0]) >> 24;
533 len = 1 + cmd_lengths[cmd];
// fill: run it if it covers (more than) the screen, else defer it
537 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
538 // clearing something large, don't skip
539 do_cmd_list(list, 3, &dummy, &dummy);
541 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// textured prims: keep the texpage bits of e1 current while skipping
547 gpu.ex_regs[1] &= ~0x1ff;
548 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// poly-line: scan for the 0x5000xxxx/0xx5000 terminator word
551 for (v = 3; pos + v < count; v++)
553 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// shaded poly-line: color+vertex pairs, same terminator
559 for (v = 4; pos + v < count; v += 2)
561 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// e3 (drawing area) can change whether skipping stays allowed
568 skip = decide_frameskip_allow(LE32TOH(list[0]));
569 if ((cmd & 0xf8) == 0xe0)
570 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
574 if (pos + len > count) {
576 break; // incomplete cmd
// vram i/o commands are handled by the caller, stop here
578 if (0x80 <= cmd && cmd <= 0xdf)
584 renderer_sync_ecmds(gpu.ex_regs);
// Core GP0 command consumer. Alternates between feeding an active
// VRAM upload, consuming image-transfer / vram-copy / irq commands
// inline, and handing drawing commands to the renderer (or to the
// skip walker when frameskipping). Returns the number of words that
// could not yet be consumed (incomplete trailing command).
589 static noinline int do_cmd_buffer(uint32_t *data, int count, int *cpu_cycles)
// remember e3 so a drawing-area change can be detected at the end
592 uint32_t old_e3 = gpu.ex_regs[3];
596 for (pos = 0; pos < count; )
598 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
// an image upload is in progress - route the data into VRAM
600 pos += do_vram_io(data + pos, count - pos, 0);
605 cmd = LE32TOH(data[pos]) >> 24;
606 if (0xa0 <= cmd && cmd <= 0xdf) {
607 if (unlikely((pos+2) >= count)) {
608 // incomplete vram write/read cmd, can't consume yet
613 // consume vram write/read cmd
614 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0)
618 else if ((cmd & 0xe0) == 0x80) {
619 if (unlikely((pos+3) >= count)) {
620 cmd = -1; // incomplete cmd, can't consume yet
623 do_vram_copy(data + pos + 1, cpu_cycles);
628 else if (cmd == 0x1f) {
629 log_anomaly("irq1?\n");
634 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
635 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
636 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
638 pos += do_cmd_list(data + pos, count - pos, cpu_cycles, &cmd);
// rebuild the low status bits from the e1/e6 shadow registers
647 gpu.status &= ~0x1fff;
648 gpu.status |= gpu.ex_regs[1] & 0x7ff;
649 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
651 gpu.state.fb_dirty |= vram_dirty;
// drawing-area change may alter frameskip eligibility
653 if (old_e3 != gpu.ex_regs[3])
654 decide_frameskip_allow(gpu.ex_regs[3]);
// Push buffered GP0 words through do_cmd_buffer(); any unconsumed
// tail (incomplete command) is moved to the start of the buffer to
// be completed by the next write.
659 static noinline void flush_cmd_buffer(void)
662 left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy);
664 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
665 if (left != gpu.cmd_len) {
// something was consumed and no upload is active: primitives started
666 if (!gpu.dma.h && gpu.gpu_state_change)
667 gpu.gpu_state_change(PGS_PRIMITIVE_START);
// Block (DMA-style) GP0 write entry point: feed `count` words
// straight to the command consumer; leftover words are discarded
// with an anomaly log.
672 void GPUwriteDataMem(uint32_t *mem, int count)
676 log_io("gpu_dma_write %p %d\n", mem, count);
// flush earlier single-word writes first to preserve ordering
678 if (unlikely(gpu.cmd_len > 0))
681 left = do_cmd_buffer(mem, count, &dummy);
683 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
// Single-word GP0 write: buffer it (stored little-endian) and flush
// once the buffer is full.
686 void GPUwriteData(uint32_t data)
688 log_io("gpu_write %08x\n", data);
689 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
690 if (gpu.cmd_len >= CMD_BUFFER_LEN)
// Walk a GP0 DMA linked list starting at start_addr. Each node is a
// header word (next address in the low 24 bits, payload word count
// in the top byte) followed by that many command words. Very long
// chains get loop detection: visited headers are temporarily marked
// with bit 23 (which would cause a DMA error on real hardware, so
// games never set it) and the marks are removed afterwards.
694 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
696 uint32_t addr, *list, ld_addr = 0;
697 int len, left, count;
700 preload(rambase + (start_addr & 0x1fffff) / 4);
// flush buffered single-word writes first, for ordering
702 if (unlikely(gpu.cmd_len > 0))
705 log_io("gpu_dma_chain\n");
706 addr = start_addr & 0xffffff;
// bit 23 set in the address terminates the chain (0xffffff marker)
707 for (count = 0; (addr & 0x800000) == 0; count++)
709 list = rambase + (addr & 0x1fffff) / 4;
710 len = LE32TOH(list[0]) >> 24;
711 addr = LE32TOH(list[0]) & 0xffffff;
712 preload(rambase + (addr & 0x1fffff) / 4);
// rough per-node DMA cost estimate
716 cpu_cycles += 5 + len;
718 log_io(".chain %08lx #%d+%d %u\n",
719 (long)(list - rambase) * 4, len, gpu.cmd_len, cpu_cycles);
// leftover words from a previous call must be consumed in order:
// append this node's payload to the command buffer instead
720 if (unlikely(gpu.cmd_len > 0)) {
721 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
722 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
725 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
732 left = do_cmd_buffer(list + 1, len, &cpu_cycles);
// stash unconsumed words for the next call
734 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
736 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
741 *progress_addr = addr;
744 #define LD_THRESHOLD (8*1024)
745 if (count >= LD_THRESHOLD) {
746 if (count == LD_THRESHOLD) {
751 // loop detection marker
752 // (bit23 set causes DMA error on real machine, so
753 // unlikely to be ever set by the game)
754 list[0] |= HTOLE32(0x800000);
759 // remove loop detection markers
760 count -= LD_THRESHOLD + 2;
761 addr = ld_addr & 0x1fffff;
762 while (count-- > 0) {
763 list = rambase + addr / 4;
764 addr = LE32TOH(list[0]) & 0x1fffff;
765 list[0] &= HTOLE32(~0x800000);
// record stats about the processed list for vsync-time decisions
769 gpu.state.last_list.frame = *gpu.state.frame_count;
770 gpu.state.last_list.hcnt = *gpu.state.hcnt;
771 gpu.state.last_list.cycles = cpu_cycles;
772 gpu.state.last_list.addr = start_addr;
// Block (DMA-style) read: stream `count` words out of the active
// VRAM-to-CPU transfer into `mem`.
777 void GPUreadDataMem(uint32_t *mem, int count)
779 log_io("gpu_dma_read %p %d\n", mem, count);
// flush pending GP0 writes first to preserve ordering
781 if (unlikely(gpu.cmd_len > 0))
785 do_vram_io(mem, count, 1);
// Single-word GPUREAD: advance the active VRAM-to-CPU transfer by
// one word and return it (return path not fully in this excerpt).
788 uint32_t GPUreadData(void)
792 if (unlikely(gpu.cmd_len > 0))
798 do_vram_io(&ret, 1, 1);
802 log_io("gpu_read %08x\n", ret);
// Read the GPU status register; pending buffered commands are
// flushed first so status bits derived from e-regs are current.
806 uint32_t GPUreadStatus(void)
810 if (unlikely(gpu.cmd_len > 0))
814 log_io("gpu_read_status %08x\n", ret);
// NOTE(review): struct header not in this excerpt - these are the
// savestate fields used by GPUfreeze() below.
820 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
821 uint32_t ulStatus; // current gpu status
822 uint32_t ulControl[256]; // latest control register values
823 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
// Save/restore GPU state to/from the freeze structure (the type
// dispatch is not fully visible in this excerpt). On load, control
// registers are replayed through GPUwriteStatus() so all derived
// state (screen geometry, status bits) is rebuilt.
826 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
// save: snapshot VRAM, GP1 regs, and ex_regs (stored from 0xe0 up)
834 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
835 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
836 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
837 freeze->ulStatus = gpu.status;
// load: restore the raw state...
840 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
841 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
842 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
843 gpu.status = freeze->ulStatus;
// ...then replay the control writes; the xor defeats the
// duplicate-write check in GPUwriteStatus() so each reg is applied
845 for (i = 8; i > 0; i--) {
846 gpu.regs[i] ^= 1; // avoid reg change detection
847 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
849 renderer_sync_ecmds(gpu.ex_regs);
850 renderer_update_caches(0, 0, 1024, 512, 0);
// Per-vblank output step: flush queued rendering and decide whether
// a frame should be presented, honoring blanking, framebuffer dirty
// tracking, frameskip state and enhancement-mode transitions.
857 void GPUupdateLace(void)
861 renderer_flush_queues();
863 #ifndef RAW_FB_DISPLAY
// blanked screen: present it once (as blanked), then stop updating
864 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
865 if (!gpu.state.blanked) {
867 gpu.state.blanked = 1;
868 gpu.state.fb_dirty = 1;
// nothing changed since the last flip - nothing to present
873 if (!gpu.state.fb_dirty)
877 if (gpu.frameskip.set) {
878 if (!gpu.frameskip.frame_ready) {
// keep skipping only briefly; force a flip after ~9 frames
879 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
881 gpu.frameskip.active = 0;
883 gpu.frameskip.frame_ready = 0;
// enhancement just toggled on: renderer caches are stale, redo all
887 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
888 renderer_update_caches(0, 0, 1024, 512, 1);
889 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
890 gpu.state.fb_dirty = 0;
891 gpu.state.blanked = 0;
// Vblank notification: decide whether interlaced rendering should
// be active (settings + status bits) and pass field parity (lcf)
// to the renderer when in or entering interlace.
894 void GPUvBlank(int is_vblank, int lcf)
896 int interlace = gpu.state.allow_interlace
897 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
898 && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
899 // interlace doesn't look nice on progressive displays,
900 // so we have this "auto" mode here for games that don't read vram
901 if (gpu.state.allow_interlace == 2
902 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
906 if (interlace || interlace != gpu.state.old_interlace) {
907 gpu.state.old_interlace = interlace;
911 renderer_flush_queues();
// lcf = current field flag; inverted for the renderer's convention
912 renderer_set_interlace(interlace, !lcf);
// Report display info to the frontend via out-parameters: *y gets
// the display y position (set outside this excerpt), *base_hres the
// base horizontal resolution.
916 void GPUgetScreenInfo(int *y, int *base_hres)
// BUG FIX: base_hres is the base *horizontal* resolution, but it was
// read from gpu.screen.vres (vertical). Use hres, the value that
// update_width() maintains, so DHEIGHT adjustment below operates on
// the horizontal resolution as intended.
919 *base_hres = gpu.screen.hres;
920 if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
// Accept or refresh frontend callbacks and user settings. Frameskip
// state is reset, counter pointers are retargeted at the frontend's
// live counters, and screen geometry is re-derived only when the
// centering configuration actually changed.
924 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
926 gpu.frameskip.set = cbs->frameskip;
927 gpu.frameskip.advice = &cbs->fskip_advice;
928 gpu.frameskip.active = 0;
929 gpu.frameskip.frame_ready = 1;
930 gpu.state.hcnt = cbs->gpu_hcnt;
931 gpu.state.frame_count = cbs->gpu_frame_count;
932 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
933 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
934 gpu.state.screen_centering_type_default = cbs->screen_centering_type_default;
// only recompute geometry on an actual centering settings change
935 if (gpu.state.screen_centering_type != cbs->screen_centering_type
936 || gpu.state.screen_centering_x != cbs->screen_centering_x
937 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
938 gpu.state.screen_centering_type = cbs->screen_centering_type;
939 gpu.state.screen_centering_x = cbs->screen_centering_x;
940 gpu.state.screen_centering_y = cbs->screen_centering_y;
// memory-management and state-change hooks from the frontend
945 gpu.mmap = cbs->mmap;
946 gpu.munmap = cbs->munmap;
947 gpu.gpu_state_change = cbs->gpu_state_change;
// map VRAM lazily, once an mmap hook is available
950 if (gpu.vram == NULL)
953 if (cbs->pl_vout_set_raw_vram)
954 cbs->pl_vout_set_raw_vram(gpu.vram);
955 renderer_set_config(cbs);
956 vout_set_config(cbs);
959 // vim:shiftwidth=2:expandtab