2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
14 #include <stdlib.h> /* for calloc */
17 #include "../../libpcsxcore/gpu.h" // meh
18 #include "../../frontend/plugin_lib.h"
21 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
24 #define unlikely(x) __builtin_expect((x), 0)
25 #define preload __builtin_prefetch
26 #define noinline __attribute__((noinline))
33 //#define log_io gpu_log
38 static noinline int do_cmd_buffer(uint32_t *data, int count);
39 static void finish_vram_transfer(int is_read);
41 static noinline void do_cmd_reset(void)
// Abort pending command state: drain any buffered GP0 words and wind
// down an in-flight VRAM transfer so no half-finished DMA survives.
45 if (unlikely(gpu.cmd_len > 0))
46 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
// If a VRAM DMA was active, complete its bookkeeping (direction taken
// from the transfer's start snapshot).
49 if (unlikely(gpu.dma.h > 0))
50 finish_vram_transfer(gpu.dma_start.is_read);
54 static noinline void do_reset(void)
// Full GPU reset: clear the control-register mirror, re-seed the
// extended (0xEx) command registers with their own command bytes, and
// restore the power-on status word and default display geometry.
60 memset(gpu.regs, 0, sizeof(gpu.regs));
61 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
62 gpu.ex_regs[i] = (0xe0 + i) << 24; // command byte in bits 24-31, zero payload
63 gpu.status = 0x14802000; // reset value of the GPU status register
// Default 256x240 display window at origin.
66 gpu.screen.hres = gpu.screen.w = 256;
67 gpu.screen.vres = gpu.screen.h = 240;
68 gpu.screen.x = gpu.screen.y = 0;
69 renderer_sync_ecmds(gpu.ex_regs); // push the reset e-cmd state to the renderer
70 renderer_notify_res_change();
73 static noinline void update_width(void)
// Recompute horizontal resolution and display x/width from the
// dot-clock bits of the status register and the x1/x2 display range.
// hres_all/hdivs are indexed by status bits 16-18 (hres select).
75 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
76 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
77 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
78 int hres = hres_all[(gpu.status >> 16) & 7];
79 int pal = gpu.status & PSX_GPU_STATUS_PAL;
80 int sw = gpu.screen.x2 - gpu.screen.x1; // display width in dot-clock units
81 int type = gpu.state.screen_centering_type;
// Centering type 0 means "use the configured default".
84 type = gpu.state.screen_centering_type_default;
86 /* nothing displayed? */;
// Hardware-accurate centering: convert x1 from dot clocks to pixels
// relative to the nominal left border for the video standard.
88 int s = pal ? 656 : 608; // or 600? pal is just a guess
89 x = (gpu.screen.x1 - s) / hdiv;
90 x = (x + 1) & ~1; // blitter limitation
92 sw = (sw + 2) & ~3; // according to nocash
// Manual centering: x comes straight from user configuration.
97 x = gpu.state.screen_centering_x;
100 // correct if slightly miscentered
101 x_auto = (hres - sw) / 2 & ~3;
102 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
107 // .x range check is done in vout_update()
109 // reduce the unpleasant right border that a few games have
110 if (gpu.state.screen_centering_type == 0
111 && x <= 4 && hres - (x + sw) >= 4)
115 gpu.screen.hres = hres;
116 gpu.state.dims_changed = 1;
117 //printf("xx %d %d -> %2d, %d / %d\n",
118 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
121 static noinline void update_height(void)
// Recompute vertical resolution and display y/height from the y1/y2
// display range, PAL/NTSC flag and interlace double-height bit.
123 int pal = gpu.status & PSX_GPU_STATUS_PAL;
124 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
125 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
126 int sh = gpu.screen.y2 - gpu.screen.y1;
// Tall PAL modes use the 256-line base resolution.
130 if (pal && (sh > 240 || gpu.screen.vres == 256))
// Interlace: everything doubles (lines counted per field otherwise).
133 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
135 /* nothing displayed? */;
137 switch (gpu.state.screen_centering_type) {
// Manual centering: y comes from user configuration.
144 y = gpu.state.screen_centering_y;
147 // correct if slightly miscentered
148 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
156 gpu.screen.vres = vres;
157 gpu.state.dims_changed = 1;
158 //printf("yy %d %d -> %d, %d / %d\n",
159 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
162 static noinline void decide_frameskip(void)
// Decide whether the next frame should be skipped, based on the forced
// flag, the frontend's advice, and the configured skip ratio.
164 *gpu.frameskip.dirty = 1;
// A frame that was just skipped resets the counter; a drawn frame is
// flagged ready for the vout side.
166 if (gpu.frameskip.active)
169 gpu.frameskip.cnt = 0;
170 gpu.frameskip.frame_ready = 1;
// Priority: forced skip > frontend advice > configured skip count.
173 if (*gpu.frameskip.force)
174 gpu.frameskip.active = 1;
175 else if (!gpu.frameskip.active && *gpu.frameskip.advice)
176 gpu.frameskip.active = 1;
177 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
178 gpu.frameskip.active = 1;
180 gpu.frameskip.active = 0;
// A fill (clear) command deferred during a skipped frame must still be
// executed once we stop skipping, or stale pixels would remain.
182 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
184 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
185 gpu.frameskip.pending_fill[0] = 0;
// Given the current E3 (draw area top-left) command word, decide if
// skipping is permissible: drawing inside the scanout region must not
// be skipped, except under interlace where that is the normal case.
// Returns the new gpu.frameskip.allow value.
189 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
191 // no frameskip if it decides to draw to display area,
192 // but not for interlace since it'll most likely always do that
193 uint32_t x = cmd_e3 & 0x3ff;
194 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
// Unsigned subtract-and-compare doubles as a range check (handles
// x < src_x by wrapping to a huge value).
195 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
196 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
197 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
198 return gpu.frameskip.allow;
201 static void flush_cmd_buffer(void);
// GP1(10h) "get GPU info": latch the requested internal register value
// into gpu.gp0 for the next GPUREAD. Flushes buffered commands first so
// the e-regs are up to date.
203 static noinline void get_gpu_info(uint32_t data)
205 if (unlikely(gpu.cmd_len > 0))
207 switch (data & 0x0f) {
// Draw area / offset registers (E2-E5 family), masked to 20 bits.
211 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
// Draw offset (E5), masked to 22 bits.
214 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
226 #define max(a, b) (((a) > (b)) ? (a) : (b))
229 // Minimum 16-byte VRAM alignment needed by gpu_unai's pixel-skipping
230 // renderer/downscaler it uses in high res modes:
232 // On GCW platform (MIPS), align to 8192 bytes (1 TLB entry) to reduce # of
233 // fills. (Will change this value if it ever gets large page support)
234 #define VRAM_ALIGN 8192
236 #define VRAM_ALIGN 16
239 // double, for overdraw guard + at least 1 page before
240 #define VRAM_SIZE ((1024 * 512 * 2 * 2) + max(VRAM_ALIGN, 4096))
242 // vram ptr received from mmap/malloc/alloc (will deallocate using this)
243 static uint16_t *vram_ptr_orig = NULL;
245 #ifndef GPULIB_USE_MMAP
247 # define GPULIB_USE_MMAP 1
249 # define GPULIB_USE_MMAP 0
// Allocate the VRAM backing store (via the frontend's mmap callback or
// plain calloc), leave a 4kb guard region in front and align the usable
// pointer to VRAM_ALIGN. Keeps the raw pointer in vram_ptr_orig for
// later deallocation.
252 static int map_vram(void)
255 gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE);
257 gpu.vram = vram_ptr_orig = calloc(VRAM_SIZE, 1);
// mmap failure may be NULL or MAP_FAILED (-1), check both.
259 if (gpu.vram != NULL && gpu.vram != (void *)(intptr_t)-1) {
260 // 4kb guard in front
261 gpu.vram += (4096 / 2); // uint16_t pointer: advance by half the byte count
263 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
267 fprintf(stderr, "could not map vram, expect crashes\n");
// NOTE(review): interior of the plugin init function; its signature is
// outside this view. Initializes renderer and per-session state.
276 ret |= renderer_init();
278 memset(&gpu.state, 0, sizeof(gpu.state));
279 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
// Point the counters at a harmless zero word until the frontend
// provides real ones via GPUrearmedCallbacks.
281 gpu.state.frame_count = &gpu.zero;
282 gpu.state.hcnt = &gpu.zero;
286 /*if (gpu.mmap != NULL) {
// Plugin shutdown: release the VRAM allocation made by map_vram()
// through the same channel it was obtained (munmap callback or free).
293 long GPUshutdown(void)
300 if (vram_ptr_orig != NULL) {
302 gpu.munmap(vram_ptr_orig, VRAM_SIZE);
// Clear both pointers so a double shutdown is harmless.
307 vram_ptr_orig = gpu.vram = NULL;
// GP1 control-port write: dispatch on the command byte (bits 24-31).
// Mirrors the written value in gpu.regs and updates status/display
// state accordingly.
312 void GPUwriteStatus(uint32_t data)
314 uint32_t cmd = data >> 24;
// Skip redundant writes (except reset/irq-style cmds 0,1 and 5 which
// must always take effect).
317 if (cmd < ARRAY_SIZE(gpu.regs)) {
318 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
320 gpu.regs[cmd] = data;
323 gpu.state.fb_dirty = 1;
// Display blanking on: remember dims changed so the hud is cleared.
334 gpu.status |= PSX_GPU_STATUS_BLANKING;
335 gpu.state.dims_changed = 1; // for hud clearing
338 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// DMA direction select (2 bits).
341 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
342 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// Display start address (scanout origin) in VRAM.
345 src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff;
346 if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) {
347 gpu.screen.src_x = src_x;
348 gpu.screen.src_y = src_y;
349 renderer_notify_scanout_change(src_x, src_y);
// A scanout change is treated as a "flip": re-evaluate frameskip, at
// most once per emulated frame.
350 if (gpu.frameskip.set) {
351 decide_frameskip_allow(gpu.ex_regs[3]);
352 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
354 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// Horizontal display range (in dot clocks).
360 gpu.screen.x1 = data & 0xfff;
361 gpu.screen.x2 = (data >> 12) & 0xfff;
// Vertical display range (in scanlines).
365 gpu.screen.y1 = data & 0x3ff;
366 gpu.screen.y2 = (data >> 10) & 0x3ff;
// Display mode: repack the mode bits into status bits 16-22.
370 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
373 renderer_notify_res_change();
// GP1(10h..1Fh): info query.
376 if ((cmd & 0xf0) == 0x10)
381 #ifdef GPUwriteStatus_ext
382 GPUwriteStatus_ext(data);
// Number of additional parameter words for each GP0 command byte
// (the command word itself is not counted; 0 for no-op/unused slots).
386 const unsigned char cmd_lengths[256] =
388 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
389 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
390 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
391 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
392 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
393 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
394 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
395 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
396 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
397 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
398 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
399 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
400 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
401 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
402 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
403 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
406 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
// Copy l pixels from src to dst, ORing the mask-bit pattern (msb, bit
// 15 set or clear) into every written pixel.
408 static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
411 for (i = 0; i < l; i++)
412 dst[i] = src[i] | msb;
// Transfer one horizontal run of l pixels between VRAM at (x,y) and
// mem: VRAM->mem when is_read, mem->VRAM otherwise. A nonzero msb
// forces the mask bit on written pixels via the slower cpy_msb path.
415 static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
416 int is_read, uint16_t msb)
418 uint16_t *vram = VRAM_MEM_XY(x, y);
419 if (unlikely(is_read))
420 memcpy(mem, vram, l * 2);
421 else if (unlikely(msb))
422 cpy_msb(vram, mem, l, msb);
424 memcpy(vram, mem, l * 2);
// Pump up to count 32-bit words of an ongoing VRAM image transfer
// (read or write) through the current gpu.dma rectangle, resuming at
// gpu.dma.offset mid-line if needed. Returns the number of 32-bit
// words consumed.
427 static int do_vram_io(uint32_t *data, int count, int is_read)
429 int count_initial = count;
430 uint16_t msb = gpu.ex_regs[6] << 15; // mask-set bit from GP0(E6h)
431 uint16_t *sdata = (uint16_t *)data;
432 int x = gpu.dma.x, y = gpu.dma.y;
433 int w = gpu.dma.w, h = gpu.dma.h;
434 int o = gpu.dma.offset;
436 count *= 2; // operate in 16bpp pixels
// Finish a partially-transferred line first.
440 if (gpu.dma.offset) {
441 l = w - gpu.dma.offset;
445 do_vram_line(x + o, y, sdata, l, is_read, msb);
// Whole lines.
458 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
460 do_vram_line(x, y, sdata, w, is_read, msb);
// Trailing partial line; its progress is kept in gpu.dma.offset.
466 do_vram_line(x, y, sdata, count, is_read, msb);
// Rectangle exhausted: close out the transfer.
472 finish_vram_transfer(is_read);
477 return count_initial - count / 2;
// Begin a GP0 VRAM image transfer (A0h write / C0h read): decode the
// destination rectangle from pos/size words, snapshot it in
// gpu.dma_start, and for reads pre-latch the first word into gp0.
480 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
483 log_anomaly("start_vram_transfer while old unfinished\n");
485 gpu.dma.x = pos_word & 0x3ff;
486 gpu.dma.y = (pos_word >> 16) & 0x1ff;
// Size 0 decodes as the maximum (1024x512), per hardware behavior.
487 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
488 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
490 gpu.dma.is_read = is_read;
491 gpu.dma_start = gpu.dma;
// Make sure queued draws land in VRAM before we read/overwrite it.
493 renderer_flush_queues();
495 gpu.status |= PSX_GPU_STATUS_IMG;
496 // XXX: wrong for width 1
497 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
498 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
501 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
502 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
503 if (gpu.gpu_state_change)
504 gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);
// Close out a VRAM transfer: clear the IMG status bit; for writes also
// mark the framebuffer dirty and invalidate renderer caches over the
// rectangle recorded at transfer start.
507 static void finish_vram_transfer(int is_read)
510 gpu.status &= ~PSX_GPU_STATUS_IMG;
512 gpu.state.fb_dirty = 1;
513 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
514 gpu.dma_start.w, gpu.dma_start.h, 0);
516 if (gpu.gpu_state_change)
517 gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
// GP0(80h) VRAM-to-VRAM copy. params[0]=src pos, params[1]=dst pos,
// params[2]=size; coordinates wrap at the 1024x512 VRAM boundary.
520 static void do_vram_copy(const uint32_t *params)
522 const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
523 const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
524 const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
525 const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
// Size 0 decodes as maximum, same as image transfers.
526 uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
527 uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
528 uint16_t msb = gpu.ex_regs[6] << 15; // mask-set bit from GP0(E6h)
// Copy onto itself with no mask bit to set is a no-op.
532 if (sx == dx && sy == dy && msb == 0)
535 renderer_flush_queues();
// Slow path: overlapping rows, x-wraparound, or mask bit needed --
// go pixel-by-pixel through a bounce buffer so overlap is safe.
537 if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
539 for (y = 0; y < h; y++)
541 const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
542 uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
543 for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
545 uint32_t x1, w1 = w - x;
546 if (w1 > ARRAY_SIZE(lbuf))
547 w1 = ARRAY_SIZE(lbuf);
548 for (x1 = 0; x1 < w1; x1++)
549 lbuf[x1] = src[(sx + x + x1) & 0x3ff];
550 for (x1 = 0; x1 < w1; x1++)
551 dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
// Fast path: straight per-row memcpy (rows wrap vertically only).
557 uint32_t sy1 = sy, dy1 = dy;
558 for (y = 0; y < h; y++, sy1++, dy1++)
559 memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
562 renderer_update_caches(dx, dy, w, h, 0);
// Scan a GP0 command list while frameskip is active: execute only what
// must not be lost (large fills, e-reg updates), defer small fills,
// and stop skipping as soon as a command forbids it. Returns words
// consumed; *last_cmd receives the last command byte seen.
565 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
567 int cmd = 0, pos = 0, len, dummy, v;
570 gpu.frameskip.pending_fill[0] = 0;
572 while (pos < count && skip) {
573 uint32_t *list = data + pos;
574 cmd = LE32TOH(list[0]) >> 24;
575 len = 1 + cmd_lengths[cmd];
// Fill command: a fill larger than the visible screen is probably a
// whole-VRAM clear and must run now; smaller ones are deferred to
// decide_frameskip() in case skipping ends.
579 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
580 // clearing something large, don't skip
581 do_cmd_list(list, 3, &dummy)
583 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// Textured prim: keep the texture-page bits of E1 in sync even while
// skipping, since later commands depend on them.
589 gpu.ex_regs[1] &= ~0x1ff;
590 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// Poly-line variants: consume vertices until the 0x5000xxxx terminator.
593 for (v = 3; pos + v < count; v++)
595 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
601 for (v = 4; pos + v < count; v += 2)
603 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// E3 (draw area) changes whether skipping stays allowed; mirror any
// e-command into ex_regs.
610 skip = decide_frameskip_allow(LE32TOH(list[0]));
611 if ((cmd & 0xf8) == 0xe0)
612 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
616 if (pos + len > count) {
618 break; // incomplete cmd
// 80h-DFh: VRAM transfer commands, let the caller handle them.
620 if (0x80 <= cmd && cmd <= 0xdf)
626 renderer_sync_ecmds(gpu.ex_regs);
// Main GP0 command pump: route incoming words to VRAM I/O, VRAM
// transfer setup, VRAM copy, or the renderer's command-list parser
// (skipping variant while frameskip is active). Returns the number of
// words left unconsumed (incomplete trailing command).
631 static noinline int do_cmd_buffer(uint32_t *data, int count)
634 uint32_t old_e3 = gpu.ex_regs[3];
638 for (pos = 0; pos < count; )
// An active VRAM write consumes raw image data first.
640 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
642 pos += do_vram_io(data + pos, count - pos, 0);
647 cmd = LE32TOH(data[pos]) >> 24;
// A0h-DFh: VRAM image write/read setup (3 words).
648 if (0xa0 <= cmd && cmd <= 0xdf) {
649 if (unlikely((pos+2) >= count)) {
650 // incomplete vram write/read cmd, can't consume yet
655 // consume vram write/read cmd
656 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
// 80h-9Fh: VRAM-to-VRAM copy (4 words).
660 else if ((cmd & 0xe0) == 0x80) {
661 if (unlikely((pos+3) >= count)) {
662 cmd = -1; // incomplete cmd, can't consume yet
665 do_vram_copy(data + pos + 1);
671 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
672 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
673 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
675 pos += do_cmd_list(data + pos, count - pos, &cmd);
// Re-derive the GPUSTAT bits mirrored from E1 (texpage) and E6 (mask).
684 gpu.status &= ~0x1fff;
685 gpu.status |= gpu.ex_regs[1] & 0x7ff;
686 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
688 gpu.state.fb_dirty |= vram_dirty;
// Draw-area change may alter whether frameskip stays allowed.
690 if (old_e3 != gpu.ex_regs[3])
691 decide_frameskip_allow(gpu.ex_regs[3]);
// Drain the accumulated GPUwriteData buffer; any unconsumed tail of an
// incomplete command is moved to the front for the next call.
696 static noinline void flush_cmd_buffer(void)
698 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
700 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
// Something was consumed: notify the frontend that drawing started
// (unless a VRAM transfer is what consumed it).
701 if (left != gpu.cmd_len) {
702 if (!gpu.dma.h && gpu.gpu_state_change)
703 gpu.gpu_state_change(PGS_PRIMITIVE_START);
// Block DMA write to the GP0 port: flush any buffered single-word
// writes first, then feed the whole block; leftovers are an anomaly
// (unlike the buffered path, they are dropped here).
708 void GPUwriteDataMem(uint32_t *mem, int count)
712 log_io("gpu_dma_write %p %d\n", mem, count);
714 if (unlikely(gpu.cmd_len > 0))
717 left = do_cmd_buffer(mem, count);
719 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
// Single-word GP0 write: buffer it (stored little-endian) and flush
// when the buffer fills.
722 void GPUwriteData(uint32_t data)
724 log_io("gpu_write %08x\n", data);
725 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
726 if (gpu.cmd_len >= CMD_BUFFER_LEN)
// Walk a GPU DMA linked list in PSX RAM: each node is a header word
// (payload length in bits 24-31, next address in bits 0-23, bit 23 set
// terminates) followed by GP0 words. Detects infinite loops by
// temporarily tagging visited nodes with bit 23. Returns a cycle cost
// estimate; *progress_addr reports where the walk stopped.
730 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
732 uint32_t addr, *list, ld_addr = 0;
733 int len, left, count;
736 preload(rambase + (start_addr & 0x1fffff) / 4);
738 if (unlikely(gpu.cmd_len > 0))
741 log_io("gpu_dma_chain\n");
742 addr = start_addr & 0xffffff;
743 for (count = 0; (addr & 0x800000) == 0; count++)
745 list = rambase + (addr & 0x1fffff) / 4;
746 len = LE32TOH(list[0]) >> 24;
747 addr = LE32TOH(list[0]) & 0xffffff;
// Prefetch the next node while this one is processed.
748 preload(rambase + (addr & 0x1fffff) / 4);
752 cpu_cycles += 5 + len;
754 log_io(".chain %08lx #%d+%d\n",
755 (long)(list - rambase) * 4, len, gpu.cmd_len);
// If earlier words are still buffered, append to the buffer to keep
// command ordering; overflow means garbage input, drop the buffer.
756 if (unlikely(gpu.cmd_len > 0)) {
757 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
758 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
761 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
768 left = do_cmd_buffer(list + 1, len);
// Incomplete trailing command: stash it for the next write.
770 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
772 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
777 *progress_addr = addr;
780 #define LD_THRESHOLD (8*1024)
781 if (count >= LD_THRESHOLD) {
782 if (count == LD_THRESHOLD) {
787 // loop detection marker
788 // (bit23 set causes DMA error on real machine, so
789 // unlikely to be ever set by the game)
790 list[0] |= HTOLE32(0x800000);
795 // remove loop detection markers
796 count -= LD_THRESHOLD + 2;
797 addr = ld_addr & 0x1fffff;
798 while (count-- > 0) {
799 list = rambase + addr / 4;
800 addr = LE32TOH(list[0]) & 0x1fffff;
801 list[0] &= HTOLE32(~0x800000);
// Record list stats for the frontend's lag-detection heuristics.
805 gpu.state.last_list.frame = *gpu.state.frame_count;
806 gpu.state.last_list.hcnt = *gpu.state.hcnt;
807 gpu.state.last_list.cycles = cpu_cycles;
808 gpu.state.last_list.addr = start_addr;
// Block DMA read from the GPUREAD port: flushes buffered commands,
// then streams VRAM data out through do_vram_io.
813 void GPUreadDataMem(uint32_t *mem, int count)
815 log_io("gpu_dma_read %p %d\n", mem, count);
817 if (unlikely(gpu.cmd_len > 0))
821 do_vram_io(mem, count, 1);
// Single-word GPUREAD: flush buffered commands, pull one word of the
// active VRAM read (if any), and return it.
824 uint32_t GPUreadData(void)
828 if (unlikely(gpu.cmd_len > 0))
834 do_vram_io(&ret, 1, 1);
838 log_io("gpu_read %08x\n", ret);
// GPUSTAT read: flush buffered commands first so the status bits
// reflect all writes made so far.
842 uint32_t GPUreadStatus(void)
846 if (unlikely(gpu.cmd_len > 0))
850 log_io("gpu_read_status %08x\n", ret);
856 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
857 uint32_t ulStatus; // current gpu status
858 uint32_t ulControl[256]; // latest control register values
859 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
// Savestate support: type selects save vs load of VRAM, control regs
// (gpu.regs / ex_regs mapped at ulControl[0xe0..]) and the status word.
862 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
// Save path: snapshot VRAM and register mirrors into the freeze block.
872 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
873 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
874 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
875 freeze->ulStatus = gpu.status;
// Load path: restore state, then replay control regs 8..1 through
// GPUwriteStatus so derived state (screen dims etc.) is rebuilt.
879 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
880 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
881 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
882 gpu.status = freeze->ulStatus;
884 for (i = 8; i > 0; i--) {
885 gpu.regs[i] ^= 1; // avoid reg change detection
886 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
888 renderer_sync_ecmds(gpu.ex_regs);
889 renderer_update_caches(0, 0, 1024, 512, 0); // whole VRAM changed
// Per-vsync hook: flush pending draws, handle blanking, apply the
// frameskip decision, and kick the video-out update when the
// framebuffer is dirty.
896 void GPUupdateLace(void)
900 renderer_flush_queues();
902 #ifndef RAW_FB_DISPLAY
// Blanked display: show the blank once, then do nothing until
// blanking ends.
903 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
904 if (!gpu.state.blanked) {
906 gpu.state.blanked = 1;
907 gpu.state.fb_dirty = 1;
912 renderer_notify_update_lace(0);
914 if (!gpu.state.fb_dirty)
// Frameskip: if no frame became ready, wait up to ~8 frames since the
// last flip before forcing display anyway.
918 if (gpu.frameskip.set) {
919 if (!gpu.frameskip.frame_ready) {
920 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
922 gpu.frameskip.active = 0;
924 gpu.frameskip.frame_ready = 0;
// Enhancement (hi-res render) toggled on: renderer caches are stale.
928 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
929 renderer_update_caches(0, 0, 1024, 512, 1);
930 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
931 gpu.state.fb_dirty = 0;
932 gpu.state.blanked = 0;
933 renderer_notify_update_lace(1);
// Vblank hook: decide whether interlaced rendering should be enabled
// and pass the current field (lcf) to the renderer.
936 void GPUvBlank(int is_vblank, int lcf)
938 int interlace = gpu.state.allow_interlace
939 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
940 && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
941 // interlace doesn't look nice on progressive displays,
942 // so we have this "auto" mode here for games that don't read vram
943 if (gpu.state.allow_interlace == 2
944 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// Update the renderer on every interlaced field, and once on the
// transition back to progressive.
948 if (interlace || interlace != gpu.state.old_interlace) {
949 gpu.state.old_interlace = interlace;
953 renderer_flush_queues();
954 renderer_set_interlace(interlace, !lcf);
// Report display position/base resolution to the frontend.
958 void GPUgetScreenInfo(int *y, int *base_hres)
// NOTE(review): the out-parameter is named base_hres but is assigned
// gpu.screen.vres here; upstream gpulib assigns gpu.screen.hres.
// Looks like a typo (vres vs hres) - verify against callers before
// relying on this value.
961 *base_hres = gpu.screen.vres;
962 if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
// Accept the frontend's callback/config block: wire up frameskip
// sources, counters, centering config, memory-map callbacks, and
// propagate the config to the renderer and video-out layers.
966 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
968 gpu.frameskip.set = cbs->frameskip;
969 gpu.frameskip.advice = &cbs->fskip_advice;
970 gpu.frameskip.force = &cbs->fskip_force;
971 gpu.frameskip.dirty = (void *)&cbs->fskip_dirty; // cast drops const for a writable flag
972 gpu.frameskip.active = 0;
973 gpu.frameskip.frame_ready = 1;
974 gpu.state.hcnt = cbs->gpu_hcnt;
975 gpu.state.frame_count = cbs->gpu_frame_count;
976 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
977 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
978 gpu.state.screen_centering_type_default = cbs->screen_centering_type_default;
// Only recompute screen geometry when a centering setting changed.
979 if (gpu.state.screen_centering_type != cbs->screen_centering_type
980 || gpu.state.screen_centering_x != cbs->screen_centering_x
981 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
982 gpu.state.screen_centering_type = cbs->screen_centering_type;
983 gpu.state.screen_centering_x = cbs->screen_centering_x;
984 gpu.state.screen_centering_y = cbs->screen_centering_y;
989 gpu.mmap = cbs->mmap;
990 gpu.munmap = cbs->munmap;
991 gpu.gpu_state_change = cbs->gpu_state_change;
// VRAM may not have been mappable until the mmap callback arrived.
994 if (gpu.vram == NULL)
997 if (cbs->pl_vout_set_raw_vram)
998 cbs->pl_vout_set_raw_vram(gpu.vram);
999 renderer_set_config(cbs);
1000 vout_set_config(cbs);
1003 // vim:shiftwidth=2:expandtab