2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
15 #include "gpu_timing.h"
16 #include "../../libpcsxcore/gpu.h" // meh
17 #include "../../frontend/plugin_lib.h"
20 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
23 #define unlikely(x) __builtin_expect((x), 0)
24 #define preload __builtin_prefetch
25 #define noinline __attribute__((noinline))
32 //#define log_io gpu_log
37 static noinline int do_cmd_buffer(uint32_t *data, int count,
38 int *cycles_sum, int *cycles_last);
39 static void finish_vram_transfer(int is_read);
// Drain any buffered GP0 words and close an unfinished VRAM transfer so a
// command reset leaves no half-consumed state behind.
// NOTE(review): this excerpt is line-sampled; parts of the body are elided.
41 static noinline void do_cmd_reset(void)
44   if (unlikely(gpu.cmd_len > 0))
45     do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy, &dummy);
// a pending DMA rectangle (dma.h > 0) must be terminated explicitly
48   if (unlikely(gpu.dma.h > 0))
49     finish_vram_transfer(gpu.dma_start.is_read);
// Full GPU reset: zero the GP1 register cache, reseed the 0xE1..0xE6 shadow
// registers, restore the power-on status word and a default 256x240 screen,
// then resync the renderer.
// NOTE(review): excerpt is line-sampled; some statements are elided.
53 static noinline void do_reset(void)
59   memset(gpu.regs, 0, sizeof(gpu.regs));
// ex_regs[i] shadows GP0 command 0xe0+i; keep that command byte in bits 24-31
60   for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
61     gpu.ex_regs[i] = (0xe0 + i) << 24;
// power-on GPUSTAT value (bit 23 set -> display disabled)
62   gpu.status = 0x14802000;
63   gpu.screen.hres = gpu.screen.w = 256;
// (content line numbers retained from the source excerpt)
66   gpu.screen.vres = gpu.screen.h = 240;
67   gpu.screen.x = gpu.screen.y = 0;
68   renderer_sync_ecmds(gpu.ex_regs);
69   renderer_notify_res_change();
// Derive the displayed horizontal resolution and blit x/width from the GP1
// display-mode bits (status bits 16-18) and the x1/x2 display range, applying
// the configured centering mode and overscan options.
// NOTE(review): excerpt is line-sampled; several branches are elided.
72 static noinline void update_width(void)
// hres and pixel-clock divider are both indexed by status bits 16-18
74   static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
75   static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
76   uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
77   int hres = hres_all[(gpu.status >> 16) & 7];
78   int pal = gpu.status & PSX_GPU_STATUS_PAL;
79   int sw = gpu.screen.x2 - gpu.screen.x1;
80   int type = gpu.state.screen_centering_type;
83     type = gpu.state.screen_centering_type_default;
85     /* nothing displayed? */;
// hardware-accurate start offset in gpu clocks; converted to pixels via hdiv
87     int s = pal ? 656 : 608; // or 600? pal is just a guess
88     x = (gpu.screen.x1 - s) / hdiv;
89     x = (x + 1) & ~1; // blitter limitation
91     sw = (sw + 2) & ~3; // according to nocash
93     if (gpu.state.show_overscan == 2) // widescreen hack
95     if (gpu.state.show_overscan && sw >= hres)
// manual centering: use the user-supplied x offset
101     x = gpu.state.screen_centering_x;
104   // correct if slightly miscentered
105   x_auto = (hres - sw) / 2 & ~3;
106   if ((uint32_t)x_auto <= 8u && abs(x) < 24)
111   // .x range check is done in vout_update()
113   // reduce the unpleasant right border that a few games have
114   if (gpu.state.screen_centering_type == 0
115       && x <= 4 && hres - (x + sw) >= 4)
119   gpu.screen.hres = hres;
120   gpu.state.dims_changed = 1;
121   //printf("xx %d %d (%d) -> %2d, %d / %d\n", gpu.screen.x1,
122   //  gpu.screen.x2, gpu.screen.x2 - gpu.screen.x1, x, sw, hres);
// Derive the displayed vertical resolution and blit y/height from the y1/y2
// display range, doubling for PAL 256-line / tall-interlace modes.
// NOTE(review): excerpt is line-sampled; some branches are elided.
125 static noinline void update_height(void)
127   int pal = gpu.status & PSX_GPU_STATUS_PAL;
128   int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
// baseline scanline offset differs between PAL and NTSC
129   int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
130   int sh = gpu.screen.y2 - gpu.screen.y1;
134   if (pal && (sh > 240 || gpu.screen.vres == 256))
// double everything for the high-vertical-resolution case
137     y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
139     /* nothing displayed? */;
141   switch (gpu.state.screen_centering_type) {
148     y = gpu.state.screen_centering_y;
151   // correct if slightly miscentered
152   if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
160   gpu.screen.vres = vres;
161   gpu.state.dims_changed = 1;
162   //printf("yy %d %d -> %d, %d / %d\n",
163   //  gpu.screen.y1, gpu.screen.y2, y, sh, vres);
// Decide whether the upcoming frame should be skipped, based on the frontend
// advice pointer and the configured skip ratio; flushes a deferred fill
// command when skipping ends.
// NOTE(review): excerpt is line-sampled; parts of the body are elided.
166 static noinline void decide_frameskip(void)
168   if (gpu.frameskip.active)
171     gpu.frameskip.cnt = 0;
172     gpu.frameskip.frame_ready = 1;
// frontend advice (auto-skip) takes priority over the fixed N-of-M setting
175   if (!gpu.frameskip.active && *gpu.frameskip.advice)
176     gpu.frameskip.active = 1;
177   else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
178     gpu.frameskip.active = 1;
180     gpu.frameskip.active = 0;
// replay a fill that was cached while skipping so vram stays consistent
182   if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
184     do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy, &dummy, &dummy);
185     gpu.frameskip.pending_fill[0] = 0;
// Given a GP0(E3) draw-area word, decide whether frameskip is permitted:
// skipping is disallowed when drawing targets the visible display area
// (since dropped draws would be visible), except in interlace mode.
189 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
191   // no frameskip if it decides to draw to display area,
192   // but not for interlace since it'll most likely always do that
193   uint32_t x = cmd_e3 & 0x3ff;
194   uint32_t y = (cmd_e3 >> 10) & 0x3ff;
// unsigned subtraction doubles as a "within [src, src+size)" range test
195   gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
196     (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
197     (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
198   return gpu.frameskip.allow;
201 static void flush_cmd_buffer(void);
// Handle the GP1(10h) "get GPU info" query: flush pending commands first,
// then answer via gpu.gp0 from the shadowed draw-state registers.
// NOTE(review): excerpt is line-sampled; other switch cases are elided.
203 static noinline void get_gpu_info(uint32_t data)
205   if (unlikely(gpu.cmd_len > 0))
207   switch (data & 0x0f) {
211       gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
214       gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
225 // double, for overdraw guard
226 #define VRAM_SIZE (1024 * 512 * 2 * 2)
// Map the (double-sized, see VRAM_SIZE) vram buffer through the frontend
// mmap callback. On success the pointer is advanced by 4096 bytes
// (4096/2 uint16 entries) — presumably to leave guard space for negative
// overdraw; TODO confirm against the full file.
228 static int map_vram(void)
230   gpu.vram = gpu.mmap(VRAM_SIZE);
231   if (gpu.vram != NULL) {
232     gpu.vram += 4096 / 2;
// failure is survivable but rendering will misbehave
236   fprintf(stderr, "could not map vram, expect crashes\n");
// NOTE(review): the enclosing function's opening is elided in this excerpt;
// these lines belong to the plugin init path (presumably GPUinit) — confirm
// against the full file before relying on this grouping.
245   ret |= renderer_init();
247   memset(&gpu.state, 0, sizeof(gpu.state));
248   memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
// point counters at a harmless zero until the frontend registers real ones
// (see GPUrearmedCallbacks, which replaces them)
250   gpu.state.frame_count = &gpu.zero;
251   gpu.state.hcnt = &gpu.zero;
255   if (gpu.mmap != NULL) {
// Plugin shutdown: undo the +4096/2 guard offset applied in map_vram before
// handing the region back to munmap.
// NOTE(review): excerpt is line-sampled; parts of the body are elided.
262 long GPUshutdown(void)
268   if (gpu.vram != NULL) {
269     gpu.vram -= 4096 / 2;
270     gpu.munmap(gpu.vram, VRAM_SIZE);
// GP1 control-port write: dispatch on the command byte (data >> 24).
// Caches register values to skip redundant writes (commands 0, 1 and 5 are
// always processed). NOTE(review): excerpt is line-sampled; the switch
// skeleton and several cases are elided.
277 void GPUwriteStatus(uint32_t data)
279   uint32_t cmd = data >> 24;
282   if (cmd < ARRAY_SIZE(gpu.regs)) {
// redundant-write suppression; 0x00/0x01/0x05 must never be short-circuited
283     if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
285     gpu.regs[cmd] = data;
288   gpu.state.fb_dirty = 1;
// display enable/disable (blanking)
299       gpu.status |= PSX_GPU_STATUS_BLANKING;
300       gpu.state.dims_changed = 1; // for hud clearing
303       gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction bits
306       gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
307       gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// display start address (scanout origin in vram)
310       src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff;
311       if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) {
312         gpu.screen.src_x = src_x;
313         gpu.screen.src_y = src_y;
314         renderer_notify_scanout_change(src_x, src_y);
// a scanout change is treated as a "flip" for frameskip bookkeeping
315         if (gpu.frameskip.set) {
316           decide_frameskip_allow(gpu.ex_regs[3]);
317           if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
319             gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// horizontal display range
325       gpu.screen.x1 = data & 0xfff;
326       gpu.screen.x2 = (data >> 12) & 0xfff;
// vertical display range
330       gpu.screen.y1 = data & 0x3ff;
331       gpu.screen.y2 = (data >> 10) & 0x3ff;
// display mode: repack GP1(08) bits into the GPUSTAT layout (bits 16-22)
335       gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
338       renderer_notify_res_change();
341       if ((cmd & 0xf0) == 0x10)
346 #ifdef GPUwriteStatus_ext
347   GPUwriteStatus_ext(data);
351 const unsigned char cmd_lengths[256] =
353 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
354 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
355 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
356 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
357 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
358 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
359 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
360 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
361 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
362 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
363 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
364 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
365 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
366 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
367 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
368 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
// Address of the 16bpp pixel at (x, y) in the 1024-halfword-wide vram.
371 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
373 // this isn't very useful so should be rare
// Copy l halfwords applying GP0(E6) mask settings; presumably r6 bit 0 =
// force mask bit on write, bit 1 = don't overwrite masked pixels — TODO
// confirm against nocash spec. NOTE(review): excerpt elides the branch split.
374 static void cpy_mask(uint16_t *dst, const uint16_t *src, int l, uint32_t r6)
378     for (i = 0; i < l; i++)
379       dst[i] = src[i] | 0x8000;
// msb is nonzero only when r6 bit 0 survives the 16-bit truncation
382     uint16_t msb = r6 << 15;
383     for (i = 0; i < l; i++) {
// mask is all-ones when dst already has its mask bit set -> keep dst pixel
384       uint16_t mask = (int16_t)dst[i] >> 15;
385       dst[i] = (dst[i] & mask) | ((src[i] | msb) & ~mask);
// Transfer one scanline of l halfwords between vram and mem: read copies
// vram out, masked writes go through cpy_mask, plain writes use memcpy.
390 static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
391     int is_read, uint32_t r6)
393   uint16_t *vram = VRAM_MEM_XY(x, y);
394   if (unlikely(is_read))
395     memcpy(mem, vram, l * 2);
396   else if (unlikely(r6))
397     cpy_mask(vram, mem, l, r6);
// common case: unmasked write
399     memcpy(vram, mem, l * 2);
// Stream `count` 32-bit words of an active vram transfer rectangle:
// finishes a partial leading line (dma.offset), then whole lines, then a
// trailing partial line. Returns the number of 32-bit words consumed.
// NOTE(review): excerpt is line-sampled; state-update lines are elided.
402 static int do_vram_io(uint32_t *data, int count, int is_read)
404   int count_initial = count;
// GP0(E6) mask bits affect writes (see do_vram_line)
405   uint32_t r6 = gpu.ex_regs[6] & 3;
406   uint16_t *sdata = (uint16_t *)data;
407   int x = gpu.dma.x, y = gpu.dma.y;
408   int w = gpu.dma.w, h = gpu.dma.h;
409   int o = gpu.dma.offset;
411   count *= 2; // operate in 16bpp pixels
// resume a line that was left half-done by a previous call
413   if (gpu.dma.offset) {
414     l = w - gpu.dma.offset;
418     do_vram_line(x + o, y, sdata, l, is_read, r6);
// bulk: full lines while data lasts
431   for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
433     do_vram_line(x, y, sdata, w, is_read, r6);
// leftover partial line; offset records how far we got
439     do_vram_line(x, y, sdata, count, is_read, r6);
445     finish_vram_transfer(is_read);
// convert pixels back to 32-bit words consumed
450   return count_initial - count / 2;
// Latch a new vram read/write rectangle from a GP0(A0/C0) command.
// Coordinates wrap within 1024x512; sizes decode 0 as the maximum.
// NOTE(review): excerpt is line-sampled; parts of the body are elided.
453 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
456     log_anomaly("start_vram_transfer while old unfinished\n");
458   gpu.dma.x = pos_word & 0x3ff;
459   gpu.dma.y = (pos_word >> 16) & 0x1ff;
// ((n - 1) & mask) + 1 maps 0 to the full dimension
460   gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
461   gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
463   gpu.dma.is_read = is_read;
// keep a copy of the initial rectangle for finish_vram_transfer()
464   gpu.dma_start = gpu.dma;
466   renderer_flush_queues();
468     gpu.status |= PSX_GPU_STATUS_IMG;
469     // XXX: wrong for width 1
// preload GPUREAD with the first word; remember the frame for the
// auto-interlace "game reads vram" heuristic (see GPUvBlank)
470     gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
471     gpu.state.last_vram_read_frame = *gpu.state.frame_count;
474   log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
475     gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
476   if (gpu.gpu_state_change)
477     gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);
// Terminate the current vram transfer: clear the IMG status bit; for writes,
// mark the framebuffer dirty and invalidate renderer caches over the
// affected rectangle. NOTE(review): excerpt elides the read/write branch.
480 static void finish_vram_transfer(int is_read)
483     gpu.status &= ~PSX_GPU_STATUS_IMG;
485     gpu.state.fb_dirty = 1;
486     renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
487                            gpu.dma_start.w, gpu.dma_start.h, 0);
489   if (gpu.gpu_state_change)
490     gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
// GP0(80h) vram-to-vram rectangle copy. Takes the slow buffered path when
// the rows can overlap, wrap around the 1024-pixel width, or the mask bit
// must be forced; otherwise copies rows with memcpy.
// NOTE(review): excerpt is line-sampled; some lines are elided.
493 static void do_vram_copy(const uint32_t *params, int *cpu_cycles)
495   const uint32_t sx =  LE32TOH(params[0]) & 0x3FF;
496   const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
497   const uint32_t dx =  LE32TOH(params[1]) & 0x3FF;
498   const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
// 0 decodes as the maximum dimension
499   uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
500   uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
// GP0(E6) bit 0 -> force the mask bit on copied pixels
501   uint16_t msb = gpu.ex_regs[6] << 15;
505   *cpu_cycles += gput_copy(w, h);
// degenerate copy onto itself with no mask change: nothing to do
506   if (sx == dx && sy == dy && msb == 0)
509   renderer_flush_queues();
511   if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
513     for (y = 0; y < h; y++)
// row base pointers; y wraps within 512 lines
515       const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
516       uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
// copy via a bounce buffer in lbuf-sized chunks so overlap is safe
517       for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
519         uint32_t x1, w1 = w - x;
520         if (w1 > ARRAY_SIZE(lbuf))
521           w1 = ARRAY_SIZE(lbuf);
522         for (x1 = 0; x1 < w1; x1++)
523           lbuf[x1] = src[(sx + x + x1) & 0x3ff];
524         for (x1 = 0; x1 < w1; x1++)
525           dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
// fast path: rows cannot overlap or wrap, plain memcpy per row
531     uint32_t sy1 = sy, dy1 = dy;
532     for (y = 0; y < h; y++, sy1++, dy1++)
533       memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
536   renderer_update_caches(dx, dy, w, h, 0);
// Walk a GP0 command stream while a frame is being skipped: consume draw
// packets without rendering, but still track state that must stay correct
// (fill commands, texture-page bits, draw-area register 0xE3).
// NOTE(review): excerpt is line-sampled; several cases are elided — the
// packet-type dispatch below is only partially visible.
539 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
541   int cmd = 0, pos = 0, len, dummy = 0, v;
544   gpu.frameskip.pending_fill[0] = 0;
546   while (pos < count && skip) {
547     uint32_t *list = data + pos;
548     cmd = LE32TOH(list[0]) >> 24;
// packet size = command word + its parameters (see cmd_lengths)
549     len = 1 + cmd_lengths[cmd];
550     if (pos + len > count) {
552       break; // incomplete cmd
// fill command: large fills are executed even while skipping, small ones
// are cached in pending_fill for replay when skipping ends
557         if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
558           // clearing something large, don't skip
559           do_cmd_list(list, 3, &dummy, &dummy, &dummy);
561           memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// keep the texpage bits of ex_regs[1] in sync even for skipped draws
567         gpu.ex_regs[1] &= ~0x1ff;
568         gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// variable-length poly-line: scan for the 0x5xxx5xxx terminator word
571         for (v = 3; pos + v < count; v++)
573           if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
579         for (v = 4; pos + v < count; v += 2)
581           if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// 0xE3 (draw area) may change whether skipping stays allowed
588         skip = decide_frameskip_allow(LE32TOH(list[0]));
589         if ((cmd & 0xf8) == 0xe0)
590           gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
// vram transfer commands cannot be skipped here; hand back to the caller
593     if (0x80 <= cmd && cmd <= 0xdf)
599   renderer_sync_ecmds(gpu.ex_regs);
// Main GP0 dispatcher: routes words to an active vram transfer, recognizes
// vram read/write (0xA0-0xDF) and vram-copy (0x80-0x9F) packets, and passes
// everything else to the renderer (or the skip path while frameskipping).
// Returns the number of words NOT consumed (incomplete trailing packet).
// NOTE(review): excerpt is line-sampled; loop exits and returns are elided.
604 static noinline int do_cmd_buffer(uint32_t *data, int count,
605     int *cycles_sum, int *cycles_last)
// remember draw-area reg to re-evaluate frameskip permission afterwards
608   uint32_t old_e3 = gpu.ex_regs[3];
612   for (pos = 0; pos < count; )
// an active vram write eats raw data words first
614     if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
616       pos += do_vram_io(data + pos, count - pos, 0);
621     cmd = LE32TOH(data[pos]) >> 24;
622     if (0xa0 <= cmd && cmd <= 0xdf) {
623       if (unlikely((pos+2) >= count)) {
624         // incomplete vram write/read cmd, can't consume yet
629       // consume vram write/read cmd
630       start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
634     else if ((cmd & 0xe0) == 0x80) {
635       if (unlikely((pos+3) >= count)) {
636         cmd = -1; // incomplete cmd, can't consume yet
639       *cycles_sum += *cycles_last;
641       do_vram_copy(data + pos + 1, cycles_last);
646     else if (cmd == 0x1f) {
647       log_anomaly("irq1?\n");
652     // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
653     if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
654       pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
656       pos += do_cmd_list(data + pos, count - pos, cycles_sum, cycles_last, &cmd);
// refresh GPUSTAT low bits from shadow registers (texpage + mask bits)
665   gpu.status &= ~0x1fff;
666   gpu.status |= gpu.ex_regs[1] & 0x7ff;
667   gpu.status |= (gpu.ex_regs[6] & 3) << 11;
669   gpu.state.fb_dirty |= vram_dirty;
671   if (old_e3 != gpu.ex_regs[3])
672     decide_frameskip_allow(gpu.ex_regs[3]);
// Process the accumulated cmd_buffer; any incomplete trailing packet is
// moved to the front to await more data.
// NOTE(review): excerpt is line-sampled; parts of the body are elided.
677 static noinline void flush_cmd_buffer(void)
680   left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len, &dummy, &dummy);
682     memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
683   if (left != gpu.cmd_len) {
// something was consumed and no vram transfer started -> a primitive began
684     if (!gpu.dma.h && gpu.gpu_state_change)
685       gpu.gpu_state_change(PGS_PRIMITIVE_START);
// Bulk GP0 write entry point (DMA block mode): flush buffered words first,
// then feed the block directly; leftover words are discarded with a log.
690 void GPUwriteDataMem(uint32_t *mem, int count)
694   log_io("gpu_dma_write %p %d\n", mem, count);
696   if (unlikely(gpu.cmd_len > 0))
699   left = do_cmd_buffer(mem, count, &dummy, &dummy);
701     log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
// Single-word GP0 write: buffer the word (stored little-endian) and flush
// when the buffer fills.
704 void GPUwriteData(uint32_t data)
706   log_io("gpu_write %08x\n", data);
707   gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
708   if (gpu.cmd_len >= CMD_BUFFER_LEN)
// Walk a GPU DMA linked list in emulated RAM: each node is a header word
// (len << 24 | next_addr) followed by len GP0 words. Accumulates a cycle
// estimate, detects list loops, and buffers incomplete packets between
// nodes. Returns the cycle sum; end-of-list is addr bit 23 set.
// NOTE(review): excerpt is line-sampled; several lines are elided.
712 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr,
713     uint32_t *progress_addr, int32_t *cycles_last_cmd)
715   uint32_t addr, *list, ld_addr;
716   int len, left, count, ld_count = 32;
717   int cpu_cycles_sum = 0;
718   int cpu_cycles_last = 0;
720   preload(rambase + (start_addr & 0x1fffff) / 4);
722   if (unlikely(gpu.cmd_len > 0))
725   log_io("gpu_dma_chain\n");
726   addr = ld_addr = start_addr & 0xffffff;
727   for (count = 0; (addr & 0x800000) == 0; count++)
729     list = rambase + (addr & 0x1fffff) / 4;
730     len = LE32TOH(list[0]) >> 24;
731     addr = LE32TOH(list[0]) & 0xffffff;
// prefetch the next node while this one is processed
732     preload(rambase + (addr & 0x1fffff) / 4);
734     cpu_cycles_sum += 10;
736       cpu_cycles_sum += 5 + len;
738     log_io(".chain %08lx #%d+%d %u+%u\n",
739       (long)(list - rambase) * 4, len, gpu.cmd_len, cpu_cycles_sum, cpu_cycles_last);
// carry-over path: append to cmd_buffer if a partial packet is pending
740     if (unlikely(gpu.cmd_len > 0)) {
741       if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
742         log_anomaly("cmd_buffer overflow, likely garbage commands\n");
745       memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
752     left = do_cmd_buffer(list + 1, len, &cpu_cycles_sum, &cpu_cycles_last);
754       memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
756       log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
761       *progress_addr = addr;
// loop detection: ld_addr trails the walk and is re-anchored when count
// reaches ld_count (Brent-style; the re-anchor lines are elided here)
764     if (addr == ld_addr) {
765       log_anomaly("GPUdmaChain: loop @ %08x, cnt=%u\n", addr, count);
768     if (count == ld_count) {
774   //printf(" -> %d %d\n", cpu_cycles_sum, cpu_cycles_last);
// record list stats for the frontend's busy-GPU heuristics
775   gpu.state.last_list.frame = *gpu.state.frame_count;
776   gpu.state.last_list.hcnt = *gpu.state.hcnt;
777   gpu.state.last_list.cycles = cpu_cycles_sum + cpu_cycles_last;
778   gpu.state.last_list.addr = start_addr;
780   *cycles_last_cmd = cpu_cycles_last;
781   return cpu_cycles_sum;
// Bulk GPUREAD (DMA): flush pending commands, then stream vram out through
// do_vram_io in read mode.
784 void GPUreadDataMem(uint32_t *mem, int count)
786   log_io("gpu_dma_read %p %d\n", mem, count);
788   if (unlikely(gpu.cmd_len > 0))
792     do_vram_io(mem, count, 1);
// Single-word GPUREAD: flush pending commands, fetch one word from the
// active vram read transfer. NOTE(review): return path elided in excerpt.
795 uint32_t GPUreadData(void)
799   if (unlikely(gpu.cmd_len > 0))
805     do_vram_io(&ret, 1, 1);
809   log_io("gpu_read %08x\n", ret);
// GPUSTAT read: flush pending commands so status bits are up to date.
// NOTE(review): the actual status composition is elided in this excerpt.
813 uint32_t GPUreadStatus(void)
817   if (unlikely(gpu.cmd_len > 0))
821   log_io("gpu_read_status %08x\n", ret);
// NOTE(review): fields of the savestate struct (presumably struct GPUFreeze;
// its opening is elided in this excerpt).
827   uint32_t ulFreezeVersion;      // should be always 1 for now (set by main emu)
828   uint32_t ulStatus;             // current gpu status
829   uint32_t ulControl[256];       // latest control register values
830   unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
// Savestate entry point: save or restore vram, the GP1 register cache, the
// 0xE1-0xE6 shadows (stored at ulControl[0xe0..]) and GPUSTAT.
// NOTE(review): excerpt is line-sampled; the save/load switch is elided.
833 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
841     memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
842     memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
843     memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
844     freeze->ulStatus = gpu.status;
847     memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
848     memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
849     memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
850     gpu.status = freeze->ulStatus;
// replay GP1 regs 8..1; the XOR trick defeats the redundant-write
// suppression in GPUwriteStatus so every register is reapplied
852     for (i = 8; i > 0; i--) {
853       gpu.regs[i] ^= 1; // avoid reg change detection
854       GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
856     renderer_sync_ecmds(gpu.ex_regs);
857     renderer_update_caches(0, 0, 1024, 512, 0);
// Per-frame vout update: handle blanking, skip the update when nothing is
// dirty, apply frameskip gating, and refresh enhancement caches on toggle.
// NOTE(review): excerpt is line-sampled; the vout_update call is elided.
864 void GPUupdateLace(void)
868   renderer_flush_queues();
870 #ifndef RAW_FB_DISPLAY
871   if (gpu.status & PSX_GPU_STATUS_BLANKING) {
// first blanked frame still needs one update (to clear the screen)
872     if (!gpu.state.blanked) {
874       gpu.state.blanked = 1;
875       gpu.state.fb_dirty = 1;
880   if (!gpu.state.fb_dirty)
884   if (gpu.frameskip.set) {
// don't present a skipped frame unless the game stopped flipping for a while
885     if (!gpu.frameskip.frame_ready) {
886       if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
888       gpu.frameskip.active = 0;
890     gpu.frameskip.frame_ready = 0;
// enhancement just turned on: renderer caches must be rebuilt from scratch
894   if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
895     renderer_update_caches(0, 0, 1024, 512, 1);
896   gpu.state.enhancement_was_active = gpu.state.enhancement_active;
897   gpu.state.fb_dirty = 0;
898   gpu.state.blanked = 0;
// VBlank hook: decide whether the renderer should draw interlaced this
// field. "auto" mode (allow_interlace == 2) disables interlacing when the
// game hasn't read vram recently (progressive displays look better then).
// NOTE(review): excerpt is line-sampled; some lines are elided.
901 void GPUvBlank(int is_vblank, int lcf)
903   int interlace = gpu.state.allow_interlace
904     && (gpu.status & PSX_GPU_STATUS_INTERLACE)
905     && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
906   // interlace doesn't look nice on progressive displays,
907   // so we have this "auto" mode here for games that don't read vram
908   if (gpu.state.allow_interlace == 2
909       && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// flush + reconfigure on any interlace state or field change
913   if (interlace || interlace != gpu.state.old_interlace) {
914     gpu.state.old_interlace = interlace;
918     renderer_flush_queues();
919     renderer_set_interlace(interlace, !lcf);
// Report screen geometry to the frontend.
// NOTE(review): *base_hres is filled from screen.vres (vertical) despite the
// parameter name — verify against the full file whether this is intentional.
923 void GPUgetScreenInfo(int *y, int *base_hres)
926   *base_hres = gpu.screen.vres;
927   if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
// Accept frontend callbacks and settings (frameskip, counters, memory
// mapping, display options); re-derives screen geometry when centering or
// overscan settings changed. NOTE(review): excerpt elides some lines.
931 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
933   gpu.frameskip.set = cbs->frameskip;
934   gpu.frameskip.advice = &cbs->fskip_advice;
935   gpu.frameskip.active = 0;
936   gpu.frameskip.frame_ready = 1;
// replace the &gpu.zero placeholders set up during init
937   gpu.state.hcnt = (uint32_t *)cbs->gpu_hcnt;
938   gpu.state.frame_count = (uint32_t *)cbs->gpu_frame_count;
939   gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
940   gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
941   gpu.state.screen_centering_type_default = cbs->screen_centering_type_default;
// only recompute geometry when a relevant setting actually changed
942   if (gpu.state.screen_centering_type != cbs->screen_centering_type
943       || gpu.state.screen_centering_x != cbs->screen_centering_x
944       || gpu.state.screen_centering_y != cbs->screen_centering_y
945       || gpu.state.show_overscan != cbs->show_overscan) {
946     gpu.state.screen_centering_type = cbs->screen_centering_type;
947     gpu.state.screen_centering_x = cbs->screen_centering_x;
948     gpu.state.screen_centering_y = cbs->screen_centering_y;
949     gpu.state.show_overscan = cbs->show_overscan;
954   gpu.mmap = cbs->mmap;
955   gpu.munmap = cbs->munmap;
956   gpu.gpu_state_change = cbs->gpu_state_change;
// late vram mapping if init ran before the mmap callback was available
959   if (gpu.vram == NULL)
962   if (cbs->pl_vout_set_raw_vram)
963     cbs->pl_vout_set_raw_vram(gpu.vram);
964   renderer_set_config(cbs);
965   vout_set_config(cbs);
968 // vim:shiftwidth=2:expandtab