2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
15 #include "gpu_timing.h"
16 #include "../../libpcsxcore/gpu.h" // meh
17 #include "../../frontend/plugin_lib.h"
20 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
23 #define unlikely(x) __builtin_expect((x), 0)
24 #define preload __builtin_prefetch
25 #define noinline __attribute__((noinline))
32 //#define log_io gpu_log
37 static noinline int do_cmd_buffer(uint32_t *data, int count,
38 int *cycles_sum, int *cycles_last);
39 static void finish_vram_transfer(int is_read);
// Abort-safe cleanup before a GP1 command reset: drain any buffered GP0
// commands and close an in-progress VRAM transfer so no stale DMA state
// survives the reset.
// NOTE(review): listing is fragmentary — some body lines are elided here.
41 static noinline void do_cmd_reset(void)
44   if (unlikely(gpu.cmd_len > 0))
45     do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy, &dummy);
48   if (unlikely(gpu.dma.h > 0))
// finish with the direction the transfer was started with
49     finish_vram_transfer(gpu.dma_start.is_read);
// Full GPU reset: clears the GP1 register mirror, re-seeds the GP0 0xEx
// state registers with their command numbers in the top byte, restores the
// power-on status word and a default 256x240 screen, then tells the
// renderer about the new ecmd state and resolution.
53 static noinline void do_reset(void)
59   memset(gpu.regs, 0, sizeof(gpu.regs));
// ex_regs[i] mirrors GP0 command 0xe0+i; keep the command id in bits 24-31
60   for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
61     gpu.ex_regs[i] = (0xe0 + i) << 24;
// 0x14802000: power-on GPUSTAT value (display disabled, idle bits set)
62   gpu.status = 0x14802000;
65   gpu.screen.hres = gpu.screen.w = 256;
66   gpu.screen.vres = gpu.screen.h = 240;
67   gpu.screen.x = gpu.screen.y = 0;
68   renderer_sync_ecmds(gpu.ex_regs);
69   renderer_notify_res_change();
// Recompute the horizontal display window (gpu.screen.x/w/hres) from the
// GP1 display registers and the configured centering mode. hres and the
// dotclock divider are both indexed by GPUSTAT bits 16-18.
// NOTE(review): listing is fragmentary — switch labels/else branches are
// elided; comments below describe only what is visible.
72 static noinline void update_width(void)
74   static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
75   static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
76   uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
77   int hres = hres_all[(gpu.status >> 16) & 7];
78   int pal = gpu.status & PSX_GPU_STATUS_PAL;
// sw = displayed width in dotclock ticks (x2 - x1), before division
79   int sw = gpu.screen.x2 - gpu.screen.x1;
80   int type = gpu.state.screen_centering_type;
83     type = gpu.state.screen_centering_type_default;
85     /* nothing displayed? */;
// s = nominal left border start in dotclocks (PAL value is a guess)
87     int s = pal ? 656 : 608; // or 600? pal is just a guess
88     x = (gpu.screen.x1 - s) / hdiv;
89     x = (x + 1) & ~1; // blitter limitation
91     sw = (sw + 2) & ~3; // according to nocash
93     if (gpu.state.show_overscan == 2) // widescreen hack
95     if (gpu.state.show_overscan && sw >= hres)
// manual centering: x offset comes straight from user config
101     x = gpu.state.screen_centering_x;
104   // correct if slightly miscentered
105   x_auto = (hres - sw) / 2 & ~3;
106   if ((uint32_t)x_auto <= 8u && abs(x) < 24)
111   // .x range check is done in vout_update()
113   // reduce the unpleasant right border that a few games have
114   if (gpu.state.screen_centering_type == 0
115       && x <= 4 && hres - (x + sw) >= 4)
119   gpu.screen.hres = hres;
120   gpu.state.dims_changed = 1;
121   //printf("xx %d %d (%d) -> %2d, %d / %d\n", gpu.screen.x1,
122   //  gpu.screen.x2, gpu.screen.x2 - gpu.screen.x1, x, sw, hres);
// Recompute the vertical display window (gpu.screen.y/h/vres) from the
// GP1 y1/y2 registers. PAL gets a larger top-border offset; doubled-height
// (480i-style) output doubles y, height, vres and the centering tolerance.
// NOTE(review): listing is fragmentary — switch cases are elided.
125 static noinline void update_height(void)
127   int pal = gpu.status & PSX_GPU_STATUS_PAL;
128   int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
// subtract the nominal first visible scanline (39 for spyro is empirical)
129   int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
130   int sh = gpu.screen.y2 - gpu.screen.y1;
134   if (pal && (sh > 240 || gpu.screen.vres == 256))
// doubled-height mode: scale all vertical quantities together
137     y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
139     /* nothing displayed? */;
141   switch (gpu.state.screen_centering_type) {
148     y = gpu.state.screen_centering_y;
151   // correct if slightly miscentered
152   if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
160   gpu.screen.vres = vres;
161   gpu.state.dims_changed = 1;
162   //printf("yy %d %d -> %d, %d / %d\n",
163   //  gpu.screen.y1, gpu.screen.y2, y, sh, vres);
// Decide whether the next frame should be skipped, based on the frontend's
// advice pointer and the configured skip ratio. When a skipped frame ends,
// replay any fill (0x02) command that was deferred while skipping, so VRAM
// stays consistent.
166 static noinline void decide_frameskip(void)
168   if (gpu.frameskip.active)
171     gpu.frameskip.cnt = 0;
172     gpu.frameskip.frame_ready = 1;
// frontend advice (e.g. slow host) forces skipping on
175   if (!gpu.frameskip.active && *gpu.frameskip.advice)
176     gpu.frameskip.active = 1;
// fixed ratio: skip until cnt reaches the configured 'set' count
177   else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
178     gpu.frameskip.active = 1;
180     gpu.frameskip.active = 0;
182   if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
// execute the deferred fill now that we are rendering again
184     do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy, &dummy, &dummy);
185     gpu.frameskip.pending_fill[0] = 0;
// Decide if frameskip is allowed given the current draw area (GP0 0xe3).
// Skipping is unsafe when the game draws into the displayed region, since
// the skipped rendering would be visible; interlace is exempted because it
// always draws to the display area by design.
// Returns the new gpu.frameskip.allow value (nonzero = may skip).
189 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
191   // no frameskip if it decides to draw to display area,
192   // but not for interlace since it'll most likely always do that
193   uint32_t x = cmd_e3 & 0x3ff;
194   uint32_t y = (cmd_e3 >> 10) & 0x3ff;
// unsigned subtraction doubles as a "left of src or beyond width" test
195   gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
196     (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
197     (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
198   return gpu.frameskip.allow;
201 static void flush_cmd_buffer(void);
// GP1 0x10 "get GPU info": latch the requested internal register value
// into gpu.gp0 for the next GPUREAD. Flushes pending commands first so the
// ex_regs mirror is current.
// NOTE(review): listing is fragmentary — other case labels are elided.
203 static noinline void get_gpu_info(uint32_t data)
205   if (unlikely(gpu.cmd_len > 0))
207   switch (data & 0x0f) {
// draw area / offset registers (0xe3-0xe7 mirror), 20-bit payload
211     gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
// draw offset (0xe5), 22-bit payload
214     gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
225 // double, for overdraw guard
226 #define VRAM_SIZE (1024 * 512 * 2 * 2)
// Map the (over-allocated) VRAM buffer via the frontend-provided mmap
// callback. On success the pointer is advanced by 4096/2 halfwords,
// presumably to leave guard space before the buffer — TODO confirm against
// the matching "gpu.vram -= 4096 / 2" in GPUshutdown.
// NOTE(review): listing is fragmentary — return statements are elided.
228 static int map_vram(void)
230   gpu.vram = gpu.mmap(VRAM_SIZE);
231   if (gpu.vram != NULL) {
232     gpu.vram += 4096 / 2;
236   fprintf(stderr, "could not map vram, expect crashes\n");
245 ret |= renderer_init();
247 memset(&gpu.state, 0, sizeof(gpu.state));
248 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
250 gpu.state.frame_count = &gpu.zero;
251 gpu.state.hcnt = &gpu.zero;
255 if (gpu.mmap != NULL) {
// Plugin shutdown: undo the map_vram() pointer adjustment and unmap VRAM.
// NOTE(review): listing is fragmentary — renderer teardown/return elided.
262 long GPUshutdown(void)
268   if (gpu.vram != NULL) {
// rewind the guard offset applied in map_vram() before unmapping
269     gpu.vram -= 4096 / 2;
270     gpu.munmap(gpu.vram, VRAM_SIZE);
// GP1 control-port write. Dispatches on the command byte (data >> 24):
// display enable/disable, DMA direction, display start/range, and display
// mode; duplicate writes to most registers are filtered out early.
// NOTE(review): listing is fragmentary — the switch/case labels and some
// branches are elided; comments mark the visible handlers.
277 void GPUwriteStatus(uint32_t data)
279   uint32_t cmd = data >> 24;
282   if (cmd < ARRAY_SIZE(gpu.regs)) {
// ignore repeat writes, except cmds 0/1 (reset/clear fifo) and 5 (disp addr)
283     if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
285     gpu.regs[cmd] = data;
288   gpu.state.fb_dirty = 1;
// display disable: blank and force a redraw so the hud area is cleared
299     gpu.status |= PSX_GPU_STATUS_BLANKING;
300     gpu.state.dims_changed = 1; // for hud clearing
303     gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction (GP1 0x04): 2-bit field in the status word
306     gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
307     gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// display start address (GP1 0x05): scanout origin in VRAM
310     src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff;
311     if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) {
312       gpu.screen.src_x = src_x;
313       gpu.screen.src_y = src_y;
314       renderer_notify_scanout_change(src_x, src_y);
315       if (gpu.frameskip.set) {
// scanout change may invalidate the skip decision made for the old area
316         decide_frameskip_allow(gpu.ex_regs[3]);
317         if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
319           gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// horizontal display range (GP1 0x06)
325     gpu.screen.x1 = data & 0xfff;
326     gpu.screen.x2 = (data >> 12) & 0xfff;
// vertical display range (GP1 0x07)
330     gpu.screen.y1 = data & 0x3ff;
331     gpu.screen.y2 = (data >> 10) & 0x3ff;
// display mode (GP1 0x08): pack mode bits into GPUSTAT bits 16-22
335     gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
338     renderer_notify_res_change();
341     if ((cmd & 0xf0) == 0x10)
// optional hook for frontends that track GP1 writes themselves
346 #ifdef GPUwriteStatus_ext
347   GPUwriteStatus_ext(data);
// Number of EXTRA parameter words for each GP0 command byte (total packet
// length = 1 + cmd_lengths[cmd]). Zero entries are either no-parameter
// commands or variable-length ones handled specially (poly-lines, VRAM
// transfers 0xa0-0xdf, env commands 0xe0+).
351 const unsigned char cmd_lengths[256] =
353   0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
354   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
355   3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
356   5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
357   2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
358   3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
359   2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
360   1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
361   3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
362   3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
363   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
364   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
365   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
366   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
367   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
368   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
371 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
// Copy l halfwords from src to dst, OR-ing each with msb — used to apply
// the mask-bit (bit 15) setting during VRAM writes when ex_regs[6] asks
// for it.
373 static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
376   for (i = 0; i < l; i++)
377     dst[i] = src[i] | msb;
// Transfer one scanline of l pixels between VRAM at (x,y) and mem.
// is_read: VRAM -> mem; otherwise mem -> VRAM, with the slower cpy_msb
// path only when the mask bit must be forced on.
380 static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
381     int is_read, uint16_t msb)
383   uint16_t *vram = VRAM_MEM_XY(x, y);
384   if (unlikely(is_read))
385     memcpy(mem, vram, l * 2);
386   else if (unlikely(msb))
387     cpy_msb(vram, mem, l, msb);
// common case: plain write, 2 bytes per pixel
389     memcpy(vram, mem, l * 2);
// Stream `count` 32-bit words of an active VRAM transfer (gpu.dma), in
// either direction. Handles a partial first line (gpu.dma.offset), then
// whole lines, then a trailing partial line. Returns how many input words
// were consumed.
// NOTE(review): listing is fragmentary — offset/height bookkeeping between
// the visible lines is elided.
392 static int do_vram_io(uint32_t *data, int count, int is_read)
394   int count_initial = count;
// ex_regs[6] bit 0 = "set mask bit on write"; shifted into pixel bit 15
395   uint16_t msb = gpu.ex_regs[6] << 15;
396   uint16_t *sdata = (uint16_t *)data;
397   int x = gpu.dma.x, y = gpu.dma.y;
398   int w = gpu.dma.w, h = gpu.dma.h;
399   int o = gpu.dma.offset;
401   count *= 2; // operate in 16bpp pixels
// finish a line that was only partially transferred by the previous call
403   if (gpu.dma.offset) {
404     l = w - gpu.dma.offset;
408     do_vram_line(x + o, y, sdata, l, is_read, msb);
// bulk: full lines while both data and remaining height allow
421   for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
423     do_vram_line(x, y, sdata, w, is_read, msb);
// trailing partial line; remainder becomes the next call's offset
429     do_vram_line(x, y, sdata, count, is_read, msb);
435     finish_vram_transfer(is_read);
440   return count_initial - count / 2;
// Begin a VRAM image transfer (GP0 0xa0 write / 0xc0 read): decode the
// position/size words into gpu.dma, snapshot it in gpu.dma_start, and for
// reads prime gpu.gp0 with the first pixels and set the IMG status bit.
443 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
446     log_anomaly("start_vram_transfer while old unfinished\n");
448   gpu.dma.x = pos_word & 0x3ff;
449   gpu.dma.y = (pos_word >> 16) & 0x1ff;
// 0 encodes max size: ((v - 1) & mask) + 1 maps 0 -> 1024 (or 512)
450   gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
451   gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
453   gpu.dma.is_read = is_read;
454   gpu.dma_start = gpu.dma;
// make sure queued draws land in VRAM before we read/overwrite it
456   renderer_flush_queues();
458     gpu.status |= PSX_GPU_STATUS_IMG;
459     // XXX: wrong for width 1
460     gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
461     gpu.state.last_vram_read_frame = *gpu.state.frame_count;
464   log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
465     gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
466   if (gpu.gpu_state_change)
467     gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);
// Close the current VRAM transfer: clear the IMG flag for reads; for
// writes mark the framebuffer dirty and invalidate the renderer's caches
// over the rectangle recorded at transfer start.
470 static void finish_vram_transfer(int is_read)
472     gpu.status &= ~PSX_GPU_STATUS_IMG;
475     gpu.state.fb_dirty = 1;
476     renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
477         gpu.dma_start.w, gpu.dma_start.h, 0);
479   if (gpu.gpu_state_change)
480     gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
// GP0 0x80 VRAM-to-VRAM rectangle copy. Fast path uses per-line memcpy;
// the buffered path handles overlapping rows, horizontal wrap past 1024,
// and mask-bit forcing by staging each chunk in lbuf. Coordinates wrap at
// VRAM edges (x & 0x3ff, y & 0x1ff). Adds gput_copy() timing to cpu_cycles.
483 static void do_vram_copy(const uint32_t *params, int *cpu_cycles)
485   const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
486   const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
487   const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
488   const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
// 0 encodes max size, same convention as start_vram_transfer()
489   uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
490   uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
491   uint16_t msb = gpu.ex_regs[6] << 15;
495   *cpu_cycles += gput_copy(w, h);
// copy onto itself with no mask change is a no-op
496   if (sx == dx && sy == dy && msb == 0)
499   renderer_flush_queues();
// slow path: overlap, horizontal wrap, or mask bit must be applied
501   if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
503     for (y = 0; y < h; y++)
505       const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
506       uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
507       for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
509         uint32_t x1, w1 = w - x;
510         if (w1 > ARRAY_SIZE(lbuf))
511           w1 = ARRAY_SIZE(lbuf);
// stage first so src and dst may overlap safely
512         for (x1 = 0; x1 < w1; x1++)
513           lbuf[x1] = src[(sx + x + x1) & 0x3ff];
514         for (x1 = 0; x1 < w1; x1++)
515           dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
// fast path: non-overlapping, in-bounds rows
521     uint32_t sy1 = sy, dy1 = dy;
522     for (y = 0; y < h; y++, sy1++, dy1++)
523       memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
526   renderer_update_caches(dx, dy, w, h, 0);
// Walk a GP0 command list WITHOUT drawing (frameskip active): track the
// state-affecting commands (fills, texture page, 0xe* env regs), find
// poly-line terminators, and re-evaluate whether skipping is still allowed
// when the draw area (0xe3) changes. Returns words consumed; *last_cmd
// receives the last command seen.
// NOTE(review): listing is fragmentary — some case labels/returns elided.
529 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
531   int cmd = 0, pos = 0, len, dummy = 0, v;
534   gpu.frameskip.pending_fill[0] = 0;
536   while (pos < count && skip) {
537     uint32_t *list = data + pos;
538     cmd = LE32TOH(list[0]) >> 24;
539     len = 1 + cmd_lengths[cmd];
540     if (pos + len > count) {
542       break; // incomplete cmd
// fill (0x02): large fills are executed even while skipping; small ones
// are deferred into pending_fill for decide_frameskip()
547       if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
548         // clearing something large, don't skip
549         do_cmd_list(list, 3, &dummy, &dummy, &dummy);
551         memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// textured primitive: keep the texpage bits of ex_regs[1] up to date
557       gpu.ex_regs[1] &= ~0x1ff;
558       gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// poly-line: scan for the 0x5xxx5xxx terminator word
561       for (v = 3; pos + v < count; v++)
563         if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// shaded poly-line: terminator appears on every other word
569       for (v = 4; pos + v < count; v += 2)
571         if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// 0xe3 draw-area change can flip the skip decision mid-list
578       skip = decide_frameskip_allow(LE32TOH(list[0]));
579       if ((cmd & 0xf8) == 0xe0)
580         gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
// image i/o commands cannot be skipped — bail to the normal path
583     if (0x80 <= cmd && cmd <= 0xdf)
589   renderer_sync_ecmds(gpu.ex_regs);
// Main GP0 command dispatcher: consumes `data` word-by-word, routing to
// VRAM transfers (0xa0-0xdf), VRAM copies (0x80-0x9f... masked 0xe0==0x80),
// and the renderer's draw-list processing (with a frameskip variant).
// Returns words left unconsumed (incomplete trailing command). Accumulates
// command timing into cycles_sum/cycles_last.
// NOTE(review): listing is fragmentary — loop exits/returns are elided.
594 static noinline int do_cmd_buffer(uint32_t *data, int count,
595     int *cycles_sum, int *cycles_last)
598   uint32_t old_e3 = gpu.ex_regs[3];
602   for (pos = 0; pos < count; )
// an active VRAM write eats data before command decoding
604     if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
606       pos += do_vram_io(data + pos, count - pos, 0);
611     cmd = LE32TOH(data[pos]) >> 24;
612     if (0xa0 <= cmd && cmd <= 0xdf) {
613       if (unlikely((pos+2) >= count)) {
614         // incomplete vram write/read cmd, can't consume yet
619       // consume vram write/read cmd
620       start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
// VRAM-to-VRAM copy needs 3 parameter words
624     else if ((cmd & 0xe0) == 0x80) {
625       if (unlikely((pos+3) >= count)) {
626         cmd = -1; // incomplete cmd, can't consume yet
629       *cycles_sum += *cycles_last;
631       do_vram_copy(data + pos + 1, cycles_last);
636     else if (cmd == 0x1f) {
637       log_anomaly("irq1?\n");
642     // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
643     if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
644       pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
646       pos += do_cmd_list(data + pos, count - pos, cycles_sum, cycles_last, &cmd);
// refresh the GPUSTAT bits mirrored from ex_regs (texpage, mask bits)
655   gpu.status &= ~0x1fff;
656   gpu.status |= gpu.ex_regs[1] & 0x7ff;
657   gpu.status |= (gpu.ex_regs[6] & 3) << 11;
659   gpu.state.fb_dirty |= vram_dirty;
// draw area changed: re-check whether frameskip is still safe
661   if (old_e3 != gpu.ex_regs[3])
662     decide_frameskip_allow(gpu.ex_regs[3]);
// Flush the per-word accumulation buffer (filled by GPUwriteData) through
// do_cmd_buffer(); any incomplete trailing command is moved to the front
// for the next flush.
667 static noinline void flush_cmd_buffer(void)
670   left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy, &dummy);
672     memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
673   if (left != gpu.cmd_len) {
// something was consumed and no VRAM dma pending -> a primitive started
674     if (!gpu.dma.h && gpu.gpu_state_change)
675       gpu.gpu_state_change(PGS_PRIMITIVE_START);
// Bulk GP0 data write (DMA block mode): flush any buffered words first,
// then feed the block straight to do_cmd_buffer. Leftover words are
// discarded with a logged anomaly (block writes must be self-contained).
680 void GPUwriteDataMem(uint32_t *mem, int count)
684   log_io("gpu_dma_write %p %d\n", mem, count);
686   if (unlikely(gpu.cmd_len > 0))
689   left = do_cmd_buffer(mem, count, &dummy, &dummy);
691     log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
// Single-word GP0 write: accumulate into cmd_buffer (stored little-endian)
// and flush when the buffer is full.
694 void GPUwriteData(uint32_t data)
696   log_io("gpu_write %08x\n", data);
697   gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
698   if (gpu.cmd_len >= CMD_BUFFER_LEN)
// Process a GP0 linked-list DMA chain starting at start_addr in PSX RAM.
// Each node: header word = (next_addr | len<<24), followed by len command
// words. Accumulates approximate cpu cycle cost, prefetches the next node,
// buffers words left over by incomplete commands, and detects infinite
// loops with a Brent-style check (ld_addr / doubling ld_count).
// Returns the cycle sum; *cycles_last_cmd gets the cost of the last cmd.
// NOTE(review): listing is fragmentary — some branches/updates elided.
702 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr,
703     uint32_t *progress_addr, int32_t *cycles_last_cmd)
705   uint32_t addr, *list, ld_addr;
706   int len, left, count, ld_count = 32;
707   int cpu_cycles_sum = 0;
708   int cpu_cycles_last = 0;
710   preload(rambase + (start_addr & 0x1fffff) / 4);
712   if (unlikely(gpu.cmd_len > 0))
715   log_io("gpu_dma_chain\n");
// bit 23 set in the address terminates the chain
716   addr = ld_addr = start_addr & 0xffffff;
717   for (count = 0; (addr & 0x800000) == 0; count++)
719     list = rambase + (addr & 0x1fffff) / 4;
720     len = LE32TOH(list[0]) >> 24;
721     addr = LE32TOH(list[0]) & 0xffffff;
722     preload(rambase + (addr & 0x1fffff) / 4);
// rough DMA timing: 10 cycles per empty node, 5+len otherwise
724       cpu_cycles_sum += 10;
726       cpu_cycles_sum += 5 + len;
728     log_io(".chain %08lx #%d+%d %u+%u\n",
729       (long)(list - rambase) * 4, len, gpu.cmd_len, cpu_cycles_sum, cpu_cycles_last);
// previous node left an incomplete command: append and retry as one buffer
730     if (unlikely(gpu.cmd_len > 0)) {
731       if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
732         log_anomaly("cmd_buffer overflow, likely garbage commands\n");
735       memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
742       left = do_cmd_buffer(list + 1, len, &cpu_cycles_sum, &cpu_cycles_last);
// stash unconsumed tail for the next node
744         memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
746         log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
751       *progress_addr = addr;
// loop detection: revisiting the remembered address means a cycle
754     if (addr == ld_addr) {
755       log_anomaly("GPUdmaChain: loop @ %08x, cnt=%u\n", addr, count);
// periodically move the remembered address forward (doubling interval)
758     if (count == ld_count) {
764   //printf(" -> %d %d\n", cpu_cycles_sum, cpu_cycles_last);
765   gpu.state.last_list.frame = *gpu.state.frame_count;
766   gpu.state.last_list.hcnt = *gpu.state.hcnt;
767   gpu.state.last_list.cycles = cpu_cycles_sum + cpu_cycles_last;
768   gpu.state.last_list.addr = start_addr;
770   *cycles_last_cmd = cpu_cycles_last;
771   return cpu_cycles_sum;
// Bulk GPUREAD (DMA): flush pending commands, then stream `count` words of
// the active VRAM read transfer into mem.
774 void GPUreadDataMem(uint32_t *mem, int count)
776   log_io("gpu_dma_read %p %d\n", mem, count);
778   if (unlikely(gpu.cmd_len > 0))
782     do_vram_io(mem, count, 1);
// Single-word GPUREAD: returns the next word of an active VRAM read, or
// the latched gpu.gp0 value (e.g. from a GP1 "get info" request).
// NOTE(review): listing is fragmentary — byte-order fixup/return elided.
785 uint32_t GPUreadData(void)
789   if (unlikely(gpu.cmd_len > 0))
795     do_vram_io(&ret, 1, 1);
799   log_io("gpu_read %08x\n", ret);
// GPUSTAT read: flush any pending commands first so the mirrored status
// bits are current, then return gpu.status (return elided in this listing).
803 uint32_t GPUreadStatus(void)
807   if (unlikely(gpu.cmd_len > 0))
811   log_io("gpu_read_status %08x\n", ret);
817 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
818 uint32_t ulStatus; // current gpu status
819 uint32_t ulControl[256]; // latest control register values
820 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
// Savestate support: type selects save (copy VRAM/regs/status out) vs load
// (copy them back in). On load, GP1 registers 1-8 are replayed through
// GPUwriteStatus; the xor-by-1 trick defeats the duplicate-write filter so
// every register is actually reapplied.
// NOTE(review): listing is fragmentary — the type switch/returns elided.
823 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
831     memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
832     memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
// ex_regs are stashed in the 0xe0+ slots of ulControl
833     memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
834     freeze->ulStatus = gpu.status;
837     memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
838     memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
839     memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
840     gpu.status = freeze->ulStatus;
842     for (i = 8; i > 0; i--) {
// temporarily flip a bit so GPUwriteStatus doesn't early-out on "no change"
843       gpu.regs[i] ^= 1; // avoid reg change detection
844       GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
846     renderer_sync_ecmds(gpu.ex_regs);
// VRAM was replaced wholesale — invalidate everything
847     renderer_update_caches(0, 0, 1024, 512, 0);
// Per-vsync update: flush the renderer, handle blanking, apply frameskip
// pacing, refresh enhancement-mode caches, and clear the dirty flags once
// the frame has been presented.
// NOTE(review): listing is fragmentary — vout calls/returns are elided.
854 void GPUupdateLace(void)
858   renderer_flush_queues();
860 #ifndef RAW_FB_DISPLAY
861   if (gpu.status & PSX_GPU_STATUS_BLANKING) {
// present one frame of blanking so the screen actually goes dark
862     if (!gpu.state.blanked) {
864       gpu.state.blanked = 1;
865       gpu.state.fb_dirty = 1;
870   if (!gpu.state.fb_dirty)
874   if (gpu.frameskip.set) {
875     if (!gpu.frameskip.frame_ready) {
// give up skipping if no frame was flipped for ~9 vsyncs (stuck game)
876       if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
878       gpu.frameskip.active = 0;
880     gpu.frameskip.frame_ready = 0;
// enhancement toggled on: its caches are stale, rebuild them all
884   if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
885     renderer_update_caches(0, 0, 1024, 512, 1);
886   gpu.state.enhancement_was_active = gpu.state.enhancement_active;
887   gpu.state.fb_dirty = 0;
888   gpu.state.blanked = 0;
// vblank notification: decide whether interlaced rendering should be used
// (user setting AND both interlace status bits), with an "auto" mode that
// drops interlace for games that haven't read VRAM recently, then push the
// field (lcf) to the renderer.
891 void GPUvBlank(int is_vblank, int lcf)
893   int interlace = gpu.state.allow_interlace
894     && (gpu.status & PSX_GPU_STATUS_INTERLACE)
895     && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
896   // interlace doesn't look nice on progressive displays,
897   // so we have this "auto" mode here for games that don't read vram
898   if (gpu.state.allow_interlace == 2
899       && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// flush on any interlace state change so fields don't mix in the queues
903   if (interlace || interlace != gpu.state.old_interlace) {
904     gpu.state.old_interlace = interlace;
908     renderer_flush_queues();
909     renderer_set_interlace(interlace, !lcf);
// Report the current display position and base HORIZONTAL resolution to
// the frontend.
// Fix: *base_hres was assigned gpu.screen.vres (the vertical resolution);
// the parameter name and the DHEIGHT handling both refer to the horizontal
// resolution, which lives in gpu.screen.hres (set by update_width()).
// NOTE(review): listing is fragmentary — the DHEIGHT branch body is elided.
913 void GPUgetScreenInfo(int *y, int *base_hres)
916   *base_hres = gpu.screen.hres;
917   if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
// Accept the frontend's callback/config structure: frameskip settings,
// shared counters, interlace/enhancement options, centering config (with
// change detection), memory-map callbacks, and the raw-VRAM hook; finally
// forwards the config to the renderer and video-out layers.
// NOTE(review): listing is fragmentary — a few lines between visible
// assignments are elided.
921 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
923   gpu.frameskip.set = cbs->frameskip;
924   gpu.frameskip.advice = &cbs->fskip_advice;
925   gpu.frameskip.active = 0;
926   gpu.frameskip.frame_ready = 1;
// shared counters owned by the emu core; gpu code only reads through these
927   gpu.state.hcnt = (uint32_t *)cbs->gpu_hcnt;
928   gpu.state.frame_count = (uint32_t *)cbs->gpu_frame_count;
929   gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
930   gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
931   gpu.state.screen_centering_type_default = cbs->screen_centering_type_default;
// only touch centering state (and trigger recompute) when something changed
932   if (gpu.state.screen_centering_type != cbs->screen_centering_type
933       || gpu.state.screen_centering_x != cbs->screen_centering_x
934       || gpu.state.screen_centering_y != cbs->screen_centering_y
935       || gpu.state.show_overscan != cbs->show_overscan) {
936     gpu.state.screen_centering_type = cbs->screen_centering_type;
937     gpu.state.screen_centering_x = cbs->screen_centering_x;
938     gpu.state.screen_centering_y = cbs->screen_centering_y;
939     gpu.state.show_overscan = cbs->show_overscan;
944   gpu.mmap = cbs->mmap;
945   gpu.munmap = cbs->munmap;
946   gpu.gpu_state_change = cbs->gpu_state_change;
// VRAM may not be mapped yet if callbacks arrived before GPUinit's mmap
949   if (gpu.vram == NULL)
952   if (cbs->pl_vout_set_raw_vram)
953     cbs->pl_vout_set_raw_vram(gpu.vram);
954   renderer_set_config(cbs);
955   vout_set_config(cbs);
958 // vim:shiftwidth=2:expandtab