2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
4 * This work is licensed under the terms of any of these licenses
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
14 #include <stdlib.h> /* for calloc */
17 #include "../../libpcsxcore/gpu.h" // meh
18 #include "../../frontend/plugin_lib.h"
21 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
24 #define unlikely(x) __builtin_expect((x), 0)
25 #define preload __builtin_prefetch
26 #define noinline __attribute__((noinline))
33 //#define log_io gpu_log
// Forward declarations: the GP0 command-word processor and the routine
// that closes out a VRAM DMA transfer (both defined later in this file).
38 static noinline int do_cmd_buffer(uint32_t *data, int count);
39 static void finish_vram_transfer(int is_read);

// Flush command-processing state before a GPU reset: drain any words
// still sitting in the command buffer, then finish a VRAM transfer that
// is still mid-flight.
// NOTE(review): this listing is elided (source line numbers skip);
// presumably gpu.cmd_len and gpu.dma.h are zeroed in the missing lines
// -- confirm against the full source.
41 static noinline void do_cmd_reset(void)
45 if (unlikely(gpu.cmd_len > 0))
46 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
49 if (unlikely(gpu.dma.h > 0))
50 finish_vram_transfer(gpu.dma_start.is_read);
// Full GPU reset: clear the GP1 control register mirrors, re-seed the
// shadow copies of the 0xE0..0xE7 rendering-attribute commands, restore
// the power-on status word and default screen mode, and notify the
// renderer of the new state.
54 static noinline void do_reset(void)
60 memset(gpu.regs, 0, sizeof(gpu.regs));
// ex_regs[i] shadows GP0 command 0xE0+i; seed each entry with just its
// opcode in the top byte (zero parameter bits).
61 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
62 gpu.ex_regs[i] = (0xe0 + i) << 24;
// Power-on GPUSTAT value (display blanked, ready bits set -- see the
// GPUSTAT bit layout in the PSX GPU docs).
63 gpu.status = 0x14802000;
// Default scanout: 256x240, origin at (0,0).
66 gpu.screen.hres = gpu.screen.w = 256;
67 gpu.screen.vres = gpu.screen.h = 240;
68 gpu.screen.x = gpu.screen.y = 0;
// Push the reset 0xEx state into the renderer and announce the
// resolution change.
69 renderer_sync_ecmds(gpu.ex_regs);
70 renderer_notify_res_change();
// Recompute the horizontal display resolution and visible-width window
// from GPUSTAT bits 16-18 and the GP1 horizontal display range
// (screen.x1/x2). Elided branches select between centering modes.
73 static noinline void update_width(void)
// hres per GPUSTAT bits 16-18; hdivs are the matching GPU-clock
// dividers used to convert display-range ticks into pixels.
75 static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
76 static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
77 uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
78 int hres = hres_all[(gpu.status >> 16) & 7];
79 int pal = gpu.status & PSX_GPU_STATUS_PAL;
// sw = displayed width in GPU clock ticks (x2 - x1).
80 int sw = gpu.screen.x2 - gpu.screen.x1;
81 int type = gpu.state.screen_centering_type;
// Fall back to the frontend-provided default centering mode
// (condition elided from this listing).
84 type = gpu.state.screen_centering_type_default;
86 /* nothing displayed? */;
// "Normal" centering: derive x from x1 relative to the nominal
// horizontal sync start for the video standard.
88 int s = pal ? 656 : 608; // or 600? pal is just a guess
89 x = (gpu.screen.x1 - s) / hdiv;
90 x = (x + 1) & ~1; // blitter limitation
92 sw = (sw + 2) & ~3; // according to nocash
// Manual centering: use the frontend-configured x offset.
97 x = gpu.state.screen_centering_x;
100 // correct if slightly miscentered
101 x_auto = (hres - sw) / 2 & ~3;
// Only snap to the auto-centered position when the computed offset is
// close to centered already (small x_auto, small |x|).
102 if ((uint32_t)x_auto <= 8u && abs(x) < 24)
107 // .x range check is done in vout_update()
109 // reduce the unpleasant right border that a few games have
110 if (gpu.state.screen_centering_type == 0
111 && x <= 4 && hres - (x + sw) >= 4)
115 gpu.screen.hres = hres;
116 gpu.state.dims_changed = 1;
117 //printf("xx %d %d -> %2d, %d / %d\n",
118 // gpu.screen.x1, gpu.screen.x2, x, sw, hres);
// Recompute the vertical display resolution and visible-height window
// from GPUSTAT (PAL/DHEIGHT bits) and the GP1 vertical display range
// (screen.y1/y2). Mirrors update_width() above.
121 static noinline void update_height(void)
123 int pal = gpu.status & PSX_GPU_STATUS_PAL;
124 int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
// Offset y1 by the nominal first visible scanline for the standard.
125 int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
126 int sh = gpu.screen.y2 - gpu.screen.y1;
// PAL game using >240 lines (or already in 256-line mode): switch the
// base vertical resolution (branch body elided here).
130 if (pal && (sh > 240 || gpu.screen.vres == 256))
// Interlaced double-height mode: everything vertical doubles.
133 y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
135 /* nothing displayed? */;
// Centering-mode dispatch (cases elided from this listing).
137 switch (gpu.state.screen_centering_type) {
// Manual centering: frontend-configured y offset.
144 y = gpu.state.screen_centering_y;
147 // correct if slightly miscentered
148 if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
156 gpu.screen.vres = vres;
157 gpu.state.dims_changed = 1;
158 //printf("yy %d %d -> %d, %d / %d\n",
159 // gpu.screen.y1, gpu.screen.y2, y, sh, vres);
// Decide whether the upcoming frame should be skipped. Priority:
// frontend force flag > frontend advice > configured 1-in-N pattern.
// When a frame stops being skipped, replay a fill command that was
// deferred while skipping (pending_fill).
162 static noinline void decide_frameskip(void)
164 *gpu.frameskip.dirty = 1;
// If the previous frame was skipped, count it; otherwise a real frame
// was produced (elided else branch resets/advances state).
166 if (gpu.frameskip.active)
169 gpu.frameskip.cnt = 0;
170 gpu.frameskip.frame_ready = 1;
173 if (*gpu.frameskip.force)
174 gpu.frameskip.active = 1;
175 else if (!gpu.frameskip.active && *gpu.frameskip.advice)
176 gpu.frameskip.active = 1;
// Pattern mode: skip while fewer than 'set' consecutive skips so far.
177 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
178 gpu.frameskip.active = 1;
180 gpu.frameskip.active = 0;
// Skipping just ended and a fill was deferred: execute it now so the
// frame starts from the correct cleared background.
182 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
184 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
185 gpu.frameskip.pending_fill[0] = 0;
// Given the current 0xE3 (drawing area top-left) command word, decide
// whether frameskip is permissible: skipping is unsafe when the game
// draws directly into the displayed region (unless interlaced, which
// always draws to display).
189 static noinline int decide_frameskip_allow(uint32_t cmd_e3)
191 // no frameskip if it decides to draw to display area,
192 // but not for interlace since it'll most likely always do that
193 uint32_t x = cmd_e3 & 0x3ff;
194 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
// Unsigned-subtract trick: (x - src_x) >= w is true both when x is
// left of the scanout origin (wraps to a huge value) and when it is
// right of the scanout window -- i.e. drawing is outside the display.
195 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
196 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
197 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
198 return gpu.frameskip.allow;
201 static void flush_cmd_buffer(void);

// Handle a GP1(10h) "get GPU info" query: latch the requested value
// into gpu.gp0 for the next GPUREAD. Buffered commands are flushed
// first so the reported state is current (flush call elided here).
203 static noinline void get_gpu_info(uint32_t data)
205 if (unlikely(gpu.cmd_len > 0))
207 switch (data & 0x0f) {
// Indexes 2..7 (per elided case labels): return a shadowed 0xEx
// register; 20 bits are significant for draw-area style values.
211 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
// Draw offset (0xE5): 22 significant bits.
214 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
// VRAM buffer sizing/alignment. Real PSX VRAM is 1024x512 16bpp
// (1 MiB); the buffer is doubled for overdraw guard plus a 4 KiB pad.
225 // double, for overdraw guard
226 #define VRAM_SIZE ((1024 * 512 * 2 * 2) + 4096)
228 // Minimum 16-byte VRAM alignment needed by gpu_unai's pixel-skipping
229 // renderer/downscaler it uses in high res modes:
231 // On GCW platform (MIPS), align to 8192 bytes (1 TLB entry) to reduce # of
232 // fills. (Will change this value if it ever gets large page support)
// NOTE(review): the two VRAM_ALIGN definitions below are alternate
// branches of an #if/#else elided from this listing (GCW vs generic).
233 #define VRAM_ALIGN 8192
235 #define VRAM_ALIGN 16
238 // vram ptr received from mmap/malloc/alloc (will deallocate using this)
239 static uint16_t *vram_ptr_orig = NULL;
// Default allocation strategy when the build doesn't choose one
// (the #if arms selecting 1 vs 0 are elided here).
241 #ifndef GPULIB_USE_MMAP
243 # define GPULIB_USE_MMAP 1
245 # define GPULIB_USE_MMAP 0
// Allocate the emulated VRAM via the frontend's mmap hook or calloc
// (selected by elided GPULIB_USE_MMAP preprocessor branches), then
// align it: 4 KiB guard page in front, VRAM_ALIGN alignment after.
// Returns success/failure (return statements elided from this listing).
248 static int map_vram(void)
251 gpu.vram = vram_ptr_orig = gpu.mmap(VRAM_SIZE + (VRAM_ALIGN-1));
253 gpu.vram = vram_ptr_orig = calloc(VRAM_SIZE + (VRAM_ALIGN-1), 1);
// mmap failure is reported as MAP_FAILED ((void*)-1), calloc as NULL;
// both are checked here.
255 if (gpu.vram != NULL && gpu.vram != (void *)(intptr_t)-1) {
256 // 4kb guard in front
257 gpu.vram += (4096 / 2);
// Round up to the renderer's required alignment.
259 gpu.vram = (uint16_t*)(((uintptr_t)gpu.vram + (VRAM_ALIGN-1)) & ~(VRAM_ALIGN-1));
263 fprintf(stderr, "could not map vram, expect crashes\n");
// NOTE(review): interior of GPUinit() -- the function header is elided
// from this listing. Initializes the renderer and zeroes plugin state;
// frame/hcnt counters point at a local zero until the frontend
// installs real counters via GPUrearmedCallbacks().
272 ret |= renderer_init();
274 memset(&gpu.state, 0, sizeof(gpu.state));
275 memset(&gpu.frameskip, 0, sizeof(gpu.frameskip));
277 gpu.state.frame_count = &gpu.zero;
278 gpu.state.hcnt = &gpu.zero;
// Dead code kept by the author (commented-out lazy-mmap experiment).
282 /*if (gpu.mmap != NULL) {
// Plugin shutdown: release the VRAM buffer through the same mechanism
// that allocated it (munmap hook vs free -- the branches are elided),
// using the original unaligned pointer saved at allocation time.
289 long GPUshutdown(void)
296 if (vram_ptr_orig != NULL) {
298 gpu.munmap(vram_ptr_orig, VRAM_SIZE);
// Drop both pointers so a later init starts clean.
303 vram_ptr_orig = gpu.vram = NULL;
// GP1 control-port write. Dispatches on the command byte (data >> 24);
// the switch statement and several case labels are elided from this
// listing, so cases are annotated at their visible bodies.
308 void GPUwriteStatus(uint32_t data)
310 uint32_t cmd = data >> 24;
// Cache the register and short-circuit redundant writes (except cmds
// 0/1 which always act, and 5 which is handled below).
313 if (cmd < ARRAY_SIZE(gpu.regs)) {
314 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
316 gpu.regs[cmd] = data;
319 gpu.state.fb_dirty = 1;
// GP1(03h) data=1: display disable (blanking on).
330 gpu.status |= PSX_GPU_STATUS_BLANKING;
331 gpu.state.dims_changed = 1; // for hud clearing
// GP1(03h) data=0: display enable.
334 gpu.status &= ~PSX_GPU_STATUS_BLANKING;
// GP1(04h): DMA direction / data request mode (2 bits).
337 gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
338 gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
// GP1(05h): start of display area (scanout origin in VRAM).
341 src_x = data & 0x3ff; src_y = (data >> 10) & 0x1ff;
342 if (src_x != gpu.screen.src_x || src_y != gpu.screen.src_y) {
343 gpu.screen.src_x = src_x;
344 gpu.screen.src_y = src_y;
345 renderer_notify_scanout_change(src_x, src_y);
// A scanout change is effectively a buffer flip -- re-evaluate
// frameskip and remember the flip frame.
346 if (gpu.frameskip.set) {
347 decide_frameskip_allow(gpu.ex_regs[3]);
348 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
350 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
// GP1(06h): horizontal display range.
356 gpu.screen.x1 = data & 0xfff;
357 gpu.screen.x2 = (data >> 12) & 0xfff;
// GP1(07h): vertical display range.
361 gpu.screen.y1 = data & 0x3ff;
362 gpu.screen.y2 = (data >> 10) & 0x3ff;
// GP1(08h): display mode -> GPUSTAT bits 16-22 (bit 6 of data maps to
// status bit 16 via the << 10 term).
366 gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
369 renderer_notify_res_change();
// GP1(10h..1Fh): GPU info query.
372 if ((cmd & 0xf0) == 0x10)
// Optional platform hook for extra GP1 handling.
377 #ifdef GPUwriteStatus_ext
378 GPUwriteStatus_ext(data);
// Number of extra 32-bit parameter words following each GP0 opcode
// (total packet length = 1 + cmd_lengths[cmd]). Rows of 16, indexed by
// the command byte; the trailing row comments give the row's base
// opcode in hex.
382 const unsigned char cmd_lengths[256] =
384 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
385 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
386 3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
387 5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
388 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
389 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
390 2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
391 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
392 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 80
393 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
394 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // a0
395 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
396 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0
397 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
398 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
399 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
// Address of the 16bpp pixel at (x, y) in the 1024-pixel-wide VRAM.
402 #define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

// Copy l pixels from src to dst, OR-ing the given mask bit pattern
// (msb = mask/semi-transparency bit 15) into every pixel.
404 static void cpy_msb(uint16_t *dst, const uint16_t *src, int l, uint16_t msb)
407 for (i = 0; i < l; i++)
408 dst[i] = src[i] | msb;
// Transfer one horizontal run of l pixels between VRAM at (x, y) and
// the host buffer mem. Direction is read (VRAM -> mem) or write
// (mem -> VRAM); writes go through cpy_msb when the mask-set bit
// (ex_regs[6] bit 0, passed in as msb) must be forced on.
411 static inline void do_vram_line(int x, int y, uint16_t *mem, int l,
412 int is_read, uint16_t msb)
414 uint16_t *vram = VRAM_MEM_XY(x, y);
415 if (unlikely(is_read))
416 memcpy(mem, vram, l * 2);
417 else if (unlikely(msb))
418 cpy_msb(vram, mem, l, msb);
420 memcpy(vram, mem, l * 2);
// Stream (count) 32-bit words of an in-progress VRAM transfer, line by
// line, resuming from gpu.dma.offset mid-row if a previous call ended
// partway through one. Returns how many input words were consumed.
423 static int do_vram_io(uint32_t *data, int count, int is_read)
425 int count_initial = count;
// ex_regs[6] bit 0 = "set mask while drawing"; shifted to pixel bit 15.
426 uint16_t msb = gpu.ex_regs[6] << 15;
427 uint16_t *sdata = (uint16_t *)data;
428 int x = gpu.dma.x, y = gpu.dma.y;
429 int w = gpu.dma.w, h = gpu.dma.h;
430 int o = gpu.dma.offset;
432 count *= 2; // operate in 16bpp pixels
// Finish the partially-transferred row first.
436 if (gpu.dma.offset) {
437 l = w - gpu.dma.offset;
441 do_vram_line(x + o, y, sdata, l, is_read, msb);
// Whole rows while both data and transfer height remain.
454 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
456 do_vram_line(x, y, sdata, w, is_read, msb);
// Trailing partial row: transfer what's left and remember the offset
// (bookkeeping lines elided from this listing).
462 do_vram_line(x, y, sdata, count, is_read, msb);
// Transfer fully consumed -- close it out.
468 finish_vram_transfer(is_read);
// Convert pixels back to 32-bit words consumed.
473 return count_initial - count / 2;
// Begin a GP0 VRAM read (0xC0) or write (0xA0) rectangle transfer:
// decode position/size words, latch the transfer description, and for
// reads prefetch the first word into gpu.gp0 for GPUREAD.
476 static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
479 log_anomaly("start_vram_transfer while old unfinished\n");
481 gpu.dma.x = pos_word & 0x3ff;
482 gpu.dma.y = (pos_word >> 16) & 0x1ff;
// Size fields decode as 1..1024 x 1..512 (0 means maximum).
483 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
484 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
486 gpu.dma.is_read = is_read;
// Keep a copy of the initial parameters; gpu.dma mutates as the
// transfer progresses but finish_vram_transfer needs the original.
487 gpu.dma_start = gpu.dma;
489 renderer_flush_queues();
// Read transfers raise the "ready to send VRAM" status bit.
491 gpu.status |= PSX_GPU_STATUS_IMG;
492 // XXX: wrong for width 1
493 gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
// Track when the game last read VRAM (used by auto-interlace logic).
494 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
497 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
498 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
499 if (gpu.gpu_state_change)
500 gpu.gpu_state_change(PGS_VRAM_TRANSFER_START);
// Close out a VRAM transfer: clear the image-ready status bit for
// reads; for writes, mark the framebuffer dirty and invalidate the
// renderer's caches over the written rectangle.
503 static void finish_vram_transfer(int is_read)
506 gpu.status &= ~PSX_GPU_STATUS_IMG;
508 gpu.state.fb_dirty = 1;
// Use the saved initial rectangle -- gpu.dma was mutated during the
// transfer.
509 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
510 gpu.dma_start.w, gpu.dma_start.h, 0);
512 if (gpu.gpu_state_change)
513 gpu.gpu_state_change(PGS_VRAM_TRANSFER_END);
// GP0(80h) VRAM-to-VRAM rectangle copy. Decodes source/dest/size from
// the parameter words, then copies row by row with X/Y wrap-around,
// optionally forcing the mask bit on each pixel.
516 static void do_vram_copy(const uint32_t *params)
518 const uint32_t sx = LE32TOH(params[0]) & 0x3FF;
519 const uint32_t sy = (LE32TOH(params[0]) >> 16) & 0x1FF;
520 const uint32_t dx = LE32TOH(params[1]) & 0x3FF;
521 const uint32_t dy = (LE32TOH(params[1]) >> 16) & 0x1FF;
// Sizes decode as 1..1024 x 1..512 (0 means maximum).
522 uint32_t w = ((LE32TOH(params[2]) - 1) & 0x3FF) + 1;
523 uint32_t h = (((LE32TOH(params[2]) >> 16) - 1) & 0x1FF) + 1;
// Force-mask bit from ex_regs[6] bit 0, shifted to pixel bit 15.
524 uint16_t msb = gpu.ex_regs[6] << 15;
// Copy onto itself with no mask change: nothing to do.
528 if (sx == dx && sy == dy && msb == 0)
531 renderer_flush_queues();
// Slow path: overlapping ranges, horizontal wrap, or mask forcing --
// stage each row chunk through lbuf with per-pixel X wrap.
533 if (unlikely((sx < dx && dx < sx + w) || sx + w > 1024 || dx + w > 1024 || msb))
535 for (y = 0; y < h; y++)
537 const uint16_t *src = VRAM_MEM_XY(0, (sy + y) & 0x1ff);
538 uint16_t *dst = VRAM_MEM_XY(0, (dy + y) & 0x1ff);
539 for (x = 0; x < w; x += ARRAY_SIZE(lbuf))
541 uint32_t x1, w1 = w - x;
542 if (w1 > ARRAY_SIZE(lbuf))
543 w1 = ARRAY_SIZE(lbuf);
// Gather with X wrap, then scatter with X wrap + mask bit.
544 for (x1 = 0; x1 < w1; x1++)
545 lbuf[x1] = src[(sx + x + x1) & 0x3ff];
546 for (x1 = 0; x1 < w1; x1++)
547 dst[(dx + x + x1) & 0x3ff] = lbuf[x1] | msb;
// Fast path: non-overlapping, non-wrapping rows -- straight memcpy
// with only Y wrap.
553 uint32_t sy1 = sy, dy1 = dy;
554 for (y = 0; y < h; y++, sy1++, dy1++)
555 memcpy(VRAM_MEM_XY(dx, dy1 & 0x1ff), VRAM_MEM_XY(sx, sy1 & 0x1ff), w * 2);
// Invalidate renderer caches over the destination rectangle.
558 renderer_update_caches(dx, dy, w, h, 0);
// Walk a GP0 command list while frameskip is active: consume commands
// without rendering them, but keep side effects that matter (0xEx
// state, large fills, deferred fills) and stop skipping when a command
// makes skipping unsafe. Returns words consumed; *last_cmd gets the
// last opcode seen.
561 static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
563 int cmd = 0, pos = 0, len, dummy, v;
566 gpu.frameskip.pending_fill[0] = 0;
568 while (pos < count && skip) {
569 uint32_t *list = data + pos;
570 cmd = LE32TOH(list[0]) >> 24;
571 len = 1 + cmd_lengths[cmd];
// Fill command (elided case label): a fill larger than the screen is
// probably a framebuffer clear -- execute it; otherwise defer it in
// pending_fill so decide_frameskip() can replay it later.
575 if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
576 // clearing something large, don't skip
577 do_cmd_list(list, 3, &dummy);
579 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
// Textured primitive (elided case): update the shadowed texture-page
// bits of ex_regs[1] from the primitive's texpage word.
585 gpu.ex_regs[1] &= ~0x1ff;
586 gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
// Poly-line variants: scan for the 0x5000_5000 terminator word to find
// the real packet length (shaded variant advances by 2 per vertex).
589 for (v = 3; pos + v < count; v++)
591 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
597 for (v = 4; pos + v < count; v += 2)
599 if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
// 0xE3 (drawing area): re-evaluate whether skipping stays allowed.
606 skip = decide_frameskip_allow(LE32TOH(list[0]));
607 if ((cmd & 0xf8) == 0xe0)
608 gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
612 if (pos + len > count) {
614 break; // incomplete cmd
// VRAM access commands can't be skipped -- bail out to the caller.
616 if (0x80 <= cmd && cmd <= 0xdf)
// Push accumulated 0xEx state into the renderer before returning.
622 renderer_sync_ecmds(gpu.ex_regs);
// Main GP0 command-stream processor: dispatches VRAM transfers, VRAM
// copies and drawing lists (rendered or skipped), updates status-bit
// mirrors, and returns how many words could not be consumed yet.
627 static noinline int do_cmd_buffer(uint32_t *data, int count)
630 uint32_t old_e3 = gpu.ex_regs[3];
634 for (pos = 0; pos < count; )
// An active VRAM write transfer eats raw data words first.
636 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
638 pos += do_vram_io(data + pos, count - pos, 0);
643 cmd = LE32TOH(data[pos]) >> 24;
// 0xA0..0xBF = VRAM write, 0xC0..0xDF = VRAM read.
644 if (0xa0 <= cmd && cmd <= 0xdf) {
645 if (unlikely((pos+2) >= count)) {
646 // incomplete vram write/read cmd, can't consume yet
651 // consume vram write/read cmd
652 start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0)
656 else if ((cmd & 0xe0) == 0x80) {
// 0x80..0x9F = VRAM-to-VRAM copy (needs 3 parameter words).
657 if (unlikely((pos+3) >= count)) {
658 cmd = -1; // incomplete cmd, can't consume yet
661 do_vram_copy(data + pos + 1);
667 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
668 if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
669 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
671 pos += do_cmd_list(data + pos, count - pos, &cmd);
// Mirror texture-page and mask-setting shadow registers back into
// GPUSTAT bits 0-12.
680 gpu.status &= ~0x1fff;
681 gpu.status |= gpu.ex_regs[1] & 0x7ff;
682 gpu.status |= (gpu.ex_regs[6] & 3) << 11;
684 gpu.state.fb_dirty |= vram_dirty;
// Drawing area changed -- re-evaluate frameskip permissibility.
686 if (old_e3 != gpu.ex_regs[3])
687 decide_frameskip_allow(gpu.ex_regs[3]);
// Drain the staging command buffer; any words that couldn't be
// consumed (incomplete trailing command) are moved to the front and
// kept for the next call.
692 static noinline void flush_cmd_buffer(void)
694 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
696 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
// Something was actually consumed -- notify the frontend that
// primitive processing started (unless a VRAM transfer is pending).
697 if (left != gpu.cmd_len) {
698 if (!gpu.dma.h && gpu.gpu_state_change)
699 gpu.gpu_state_change(PGS_PRIMITIVE_START);
// Block GP0 write (DMA-style): flush the staging buffer first, then
// process the caller's words directly; leftovers are an anomaly since
// the caller won't re-offer them.
704 void GPUwriteDataMem(uint32_t *mem, int count)
708 log_io("gpu_dma_write %p %d\n", mem, count);
710 if (unlikely(gpu.cmd_len > 0))
713 left = do_cmd_buffer(mem, count);
715 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
// Single-word GP0 write: stage the (little-endian) word and flush once
// the staging buffer fills up.
718 void GPUwriteData(uint32_t data)
720 log_io("gpu_write %08x\n", data);
721 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
722 if (gpu.cmd_len >= CMD_BUFFER_LEN)
// Process a GP0 linked-list DMA chain starting at start_addr. Each
// node is a header word (len << 24 | next_addr) followed by len
// command words; the chain ends when bit 23 of the address is set.
// Includes a loop detector (games ship self-looping chains) and
// returns an emulated cycle cost.
726 long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
728 uint32_t addr, *list, ld_addr = 0;
729 int len, left, count;
732 preload(rambase + (start_addr & 0x1fffff) / 4);
734 if (unlikely(gpu.cmd_len > 0))
737 log_io("gpu_dma_chain\n");
738 addr = start_addr & 0xffffff;
// Bit 23 set = end-of-chain marker (also used by the loop detector).
739 for (count = 0; (addr & 0x800000) == 0; count++)
741 list = rambase + (addr & 0x1fffff) / 4;
742 len = LE32TOH(list[0]) >> 24;
743 addr = LE32TOH(list[0]) & 0xffffff;
// Prefetch the next node while this one is processed.
744 preload(rambase + (addr & 0x1fffff) / 4);
748 cpu_cycles += 5 + len;
750 log_io(".chain %08lx #%d+%d\n",
751 (long)(list - rambase) * 4, len, gpu.cmd_len);
// Leftover words from a previous call: append this node's payload to
// the staging buffer and flush, instead of processing in place.
752 if (unlikely(gpu.cmd_len > 0)) {
753 if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
754 log_anomaly("cmd_buffer overflow, likely garbage commands\n");
757 memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
// Normal path: process the node payload directly from RAM; stash any
// unconsumed tail in the staging buffer.
764 left = do_cmd_buffer(list + 1, len);
766 memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
768 log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
773 *progress_addr = addr;
// Loop detection: after LD_THRESHOLD nodes, start tagging visited
// nodes with bit 23 so revisiting one terminates the walk.
776 #define LD_THRESHOLD (8*1024)
777 if (count >= LD_THRESHOLD) {
778 if (count == LD_THRESHOLD) {
783 // loop detection marker
784 // (bit23 set causes DMA error on real machine, so
785 // unlikely to be ever set by the game)
786 list[0] |= HTOLE32(0x800000);
// Cleanup pass (condition elided): walk the tagged nodes again and
// strip the markers so game RAM is left unmodified.
791 // remove loop detection markers
792 count -= LD_THRESHOLD + 2;
793 addr = ld_addr & 0x1fffff;
794 while (count-- > 0) {
795 list = rambase + addr / 4;
796 addr = LE32TOH(list[0]) & 0x1fffff;
797 list[0] &= HTOLE32(~0x800000);
// Record list stats for the frontend's lag/progress heuristics.
801 gpu.state.last_list.frame = *gpu.state.frame_count;
802 gpu.state.last_list.hcnt = *gpu.state.hcnt;
803 gpu.state.last_list.cycles = cpu_cycles;
804 gpu.state.last_list.addr = start_addr;
// Block GPUREAD (DMA-style): flush pending commands, then stream the
// active VRAM read transfer into the caller's buffer.
809 void GPUreadDataMem(uint32_t *mem, int count)
811 log_io("gpu_dma_read %p %d\n", mem, count);
813 if (unlikely(gpu.cmd_len > 0))
817 do_vram_io(mem, count, 1);
// Single-word GPUREAD: flush pending commands, pull one word from the
// active VRAM read transfer (endianness conversion and the gpu.gp0
// fallback for info queries are in elided lines).
820 uint32_t GPUreadData(void)
824 if (unlikely(gpu.cmd_len > 0))
830 do_vram_io(&ret, 1, 1);
834 log_io("gpu_read %08x\n", ret);
// GPUSTAT read: flush pending commands first so status bits mirror the
// latest state, then return the status word (elided lines).
838 uint32_t GPUreadStatus(void)
842 if (unlikely(gpu.cmd_len > 0))
846 log_io("gpu_read_status %08x\n", ret);
// Savestate layout fields (interior of struct GPUFreeze; the struct
// header is elided from this listing). Layout is fixed by the PSEmu
// Pro plugin savestate convention.
852 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
853 uint32_t ulStatus; // current gpu status
854 uint32_t ulControl[256]; // latest control register values
855 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
// Savestate save/load. type selects direction (dispatch elided):
// save copies VRAM + GP1 registers + 0xEx shadows + status out;
// load restores them and replays GP1 writes to rebuild derived state.
858 long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
// --- save path: plugin state -> freeze buffer ---
868 memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
869 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
// 0xEx shadow registers are stashed at ulControl[0xe0..].
870 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
871 freeze->ulStatus = gpu.status;
// --- load path: freeze buffer -> plugin state ---
875 memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
876 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
877 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
878 gpu.status = freeze->ulStatus;
// Re-issue GP1 commands 8..1 so derived screen state is rebuilt; the
// XOR pair defeats GPUwriteStatus' redundant-write detection.
880 for (i = 8; i > 0; i--) {
881 gpu.regs[i] ^= 1; // avoid reg change detection
882 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
884 renderer_sync_ecmds(gpu.ex_regs);
// Whole VRAM changed -- invalidate all renderer caches.
885 renderer_update_caches(0, 0, 1024, 512, 0);
// Per-vblank display update: handle blanking, frameskip accounting,
// and (when the framebuffer is dirty) hand the frame to the video-out
// layer (the flip call itself is in elided lines).
892 void GPUupdateLace(void)
896 renderer_flush_queues();
898 #ifndef RAW_FB_DISPLAY
// Display disabled: blank the output once and mark dirty so the next
// enable redraws.
899 if (gpu.status & PSX_GPU_STATUS_BLANKING) {
900 if (!gpu.state.blanked) {
902 gpu.state.blanked = 1;
903 gpu.state.fb_dirty = 1;
908 renderer_notify_update_lace(0);
// Nothing drawn since the last flip -- skip the vout work.
910 if (!gpu.state.fb_dirty)
// Frameskip bookkeeping: if no frame became ready recently, force
// skipping off so the display doesn't stall (9-frame timeout).
914 if (gpu.frameskip.set) {
915 if (!gpu.frameskip.frame_ready) {
916 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
918 gpu.frameskip.active = 0;
920 gpu.frameskip.frame_ready = 0;
// Enhancement (2x render) toggled on: caches refer to the other
// buffer set, invalidate everything.
924 if (gpu.state.enhancement_active && !gpu.state.enhancement_was_active)
925 renderer_update_caches(0, 0, 1024, 512, 1);
926 gpu.state.enhancement_was_active = gpu.state.enhancement_active;
927 gpu.state.fb_dirty = 0;
928 gpu.state.blanked = 0;
929 renderer_notify_update_lace(1);
// Vblank hook from the emulator core: decide whether interlace
// rendering should be active and pass the current field to the
// renderer.
932 void GPUvBlank(int is_vblank, int lcf)
934 int interlace = gpu.state.allow_interlace
935 && (gpu.status & PSX_GPU_STATUS_INTERLACE)
936 && (gpu.status & PSX_GPU_STATUS_DHEIGHT)
937 // interlace doesn't look nice on progressive displays,
938 // so we have this "auto" mode here for games that don't read vram
// Auto mode (2): only interlace if the game read VRAM within the last
// frame (i.e. it actually depends on field contents).
939 if (gpu.state.allow_interlace == 2
940 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
// Re-sync the renderer when interlace is on or its state changed.
944 if (interlace || interlace != gpu.state.old_interlace) {
945 gpu.state.old_interlace = interlace;
949 renderer_flush_queues();
950 renderer_set_interlace(interlace, !lcf);
// Report scanout info to the frontend (the *y assignment is in elided
// lines).
954 void GPUgetScreenInfo(int *y, int *base_hres)
// NOTE(review): *base_hres is filled from the VERTICAL resolution
// (gpu.screen.vres) although the out-parameter name says hres, and the
// DHEIGHT adjustment below also suggests hres was intended -- likely a
// typo for gpu.screen.hres; confirm against upstream and fix.
957 *base_hres = gpu.screen.vres;
958 if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
// Install/refresh frontend callback pointers and configuration. Called
// by the frontend at startup and whenever settings change; re-derives
// screen dimensions when centering options changed (elided lines).
962 void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
// Frameskip configuration and shared flags owned by the frontend.
964 gpu.frameskip.set = cbs->frameskip;
965 gpu.frameskip.advice = &cbs->fskip_advice;
966 gpu.frameskip.force = &cbs->fskip_force;
967 gpu.frameskip.dirty = (void *)&cbs->fskip_dirty;
968 gpu.frameskip.active = 0;
969 gpu.frameskip.frame_ready = 1;
// Frontend-owned frame/hline counters replace the &gpu.zero stand-ins.
970 gpu.state.hcnt = cbs->gpu_hcnt;
971 gpu.state.frame_count = cbs->gpu_frame_count;
972 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
973 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
974 gpu.state.screen_centering_type_default = cbs->screen_centering_type_default;
// Centering settings changed -- store them (update_width/update_height
// recomputation happens in elided lines).
975 if (gpu.state.screen_centering_type != cbs->screen_centering_type
976 || gpu.state.screen_centering_x != cbs->screen_centering_x
977 || gpu.state.screen_centering_y != cbs->screen_centering_y) {
978 gpu.state.screen_centering_type = cbs->screen_centering_type;
979 gpu.state.screen_centering_x = cbs->screen_centering_x;
980 gpu.state.screen_centering_y = cbs->screen_centering_y;
// Memory-management and state-change hooks.
985 gpu.mmap = cbs->mmap;
986 gpu.munmap = cbs->munmap;
987 gpu.gpu_state_change = cbs->gpu_state_change;
// VRAM may not be mapped yet on the first call.
990 if (gpu.vram == NULL)
993 if (cbs->pl_vout_set_raw_vram)
994 cbs->pl_vout_set_raw_vram(gpu.vram);
995 renderer_set_config(cbs);
996 vout_set_config(cbs);
999 // vim:shiftwidth=2:expandtab