gpulib: update gpuinfo
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#ifdef __GNUC__
#define unlikely(x) __builtin_expect((x), 0)
#define preload __builtin_prefetch
#define noinline __attribute__((noinline))
#else
#define unlikely(x) (x)
#define preload(...)
#define noinline
#endif

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu;

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

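// GP1(0x01) command reset: flush any words still buffered from GP0 and
// close an interrupted VRAM transfer (dma.h holds the rows still pending).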
static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < ARRAY_SIZE(gpu.ex_regs); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

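// x1/x2 from GP1(0x06) are in GPU video clock ticks; the full horizontal
// display span is nominally 2560 ticks, so the visible width is that
// fraction of the current horizontal resolution.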
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  // TODO: emulate this properly..
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
    sh *= 2;
  if (sh <= 0 || sh > gpu.screen.vres)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

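// Runs once per frame, on display-address change (flip): advances the skip
// counter and decides whether the next frame is rendered or skipped.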
static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

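// GP1(0x10) "get GPU info": latch the requested value into GP0 so that the
// next GPUREAD returns it. 0x02 = texture window, 0x03/0x04 = draw area
// top-left/bottom-right, 0x05 = draw offset, 0x07 = GPU version (2).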
static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
    case 0x02:
    case 0x03:
    case 0x04:
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      break;
    case 0x05:
      gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
      break;
    case 0x07:
      gpu.gp0 = 2;
      break;
    default:
      // gpu.gp0 unchanged
      break;
  }
}

// double, for overdraw guard
#define VRAM_SIZE (1024 * 512 * 2 * 2)

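// Map the (double-sized) VRAM and step 4 kB into the allocation, presumably
// so that renderers overdrawing slightly before line 0 stay inside the
// mapping.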
static int map_vram(void)
{
  gpu.vram = gpu.mmap(VRAM_SIZE);
  if (gpu.vram != NULL) {
    gpu.vram += 4096 / 2;
    return 0;
  }
  else {
    fprintf(stderr, "could not map vram, expect crashes\n");
    return -1;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  if (gpu.mmap != NULL) {
    if (map_vram() != 0)
      ret = -1;
  }
  return ret;
}

long GPUshutdown(void)
{
  long ret;

  renderer_finish();
  ret = vout_finish();
  if (gpu.vram != NULL) {
    gpu.vram -= 4096 / 2;
    gpu.munmap(gpu.vram, VRAM_SIZE);
  }
  gpu.vram = NULL;

  return ret;
}

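// GP1 display control port. Writes are filtered: a value equal to the last
// one written to the same register is ignored, except for reset (0x00),
// command reset (0x01) and display address (0x05).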
void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      if (data & 1)
        gpu.status |= PSX_GPU_STATUS_BLANKING;
      else
        gpu.status &= ~PSX_GPU_STATUS_BLANKING;
      break;
    case 0x04:
      gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
      gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x1ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
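    // GP1(0x08) display mode: bits 0-5 move to status bits 17-22, bit 6
    // (the 368-pixel hres flag) to status bit 16; the tables above are
    // indexed by status bits 16-18 and 19-20.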
    case 0x08:
      gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3f) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status >> 19) & 3];
      update_width();
      update_height();
      renderer_notify_res_change();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

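// number of parameter words following each GP0 command word (the parsers
// below use 1 + cmd_lengths[cmd]); polylines (0x48-0x4f, 0x58-0x5f) are
// variable length and get special handling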
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

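// VRAM is a single 1024x512 grid of 16-bit pixels; a row is always 1024
// entries regardless of the current display mode.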
#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

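// Copy up to 'count' 32-bit words between the FIFO data and the active VRAM
// rectangle. A partially transferred row is carried across calls in
// dma.offset; returns the number of words actually consumed.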
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status |= PSX_GPU_STATUS_IMG;
    // XXX: wrong for width 1
    gpu.gp0 = LE32TOH(*(uint32_t *)VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status &= ~PSX_GPU_STATUS_IMG;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

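// Parse the command stream while frameskip is active: drawing commands are
// dropped, but state that outlives the frame is still tracked - texpage
// bits from textured quads, the e1-e6 registers, and fills, which are
// executed immediately when larger than the screen and otherwise deferred
// in pending_fill. Polylines are measured by scanning for the 0x5xxx5xxx
// terminator word so the right number of words is consumed.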
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy, v;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = LE32TOH(list[0]) >> 24;
    len = 1 + cmd_lengths[cmd];

    switch (cmd) {
      case 0x02:
        if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w
            || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
          // clearing something large, don't skip
          do_cmd_list(list, 3, &dummy);
        else
          memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
        break;
      case 0x24 ... 0x27:
      case 0x2c ... 0x2f:
      case 0x34 ... 0x37:
      case 0x3c ... 0x3f:
        gpu.ex_regs[1] &= ~0x1ff;
        gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
        break;
      case 0x48 ... 0x4f:
        for (v = 3; pos + v < count; v++)
        {
          if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
            break;
        }
        len += v - 3;
        break;
      case 0x58 ... 0x5f:
        for (v = 4; pos + v < count; v += 2)
        {
          if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
            break;
        }
        len += v - 4;
        break;
      default:
        if (cmd == 0xe3)
          skip = decide_frameskip_allow(LE32TOH(list[0]));
        if ((cmd & 0xf8) == 0xe0)
          gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
        break;
    }

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (0xa0 <= cmd && cmd <= 0xdf)
      break; // image i/o

    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

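// Top-level GP0 parser: splits the incoming words between VRAM transfers,
// the skip path and the renderer's do_cmd_list(), then mirrors the e1/e6
// bits into the status register. Returns the number of words left
// unconsumed (an incomplete trailing command).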
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = LE32TOH(data[pos]) >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      if (unlikely((pos + 2) >= count)) {
        // incomplete vram write/read cmd, can't consume yet
        cmd = -1;
        break;
      }

      // consume vram write/read cmd
      start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]),
          (cmd & 0xe0) == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active
        && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  gpu.status &= ~0x1fff;
  gpu.status |= gpu.ex_regs[1] & 0x7ff;
  gpu.status |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

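// Walk a GPU DMA linked list: each node starts with a header word holding
// the 24-bit address of the next node and the payload length in the top
// byte; an address with bit 23 set terminates the list. Since a malformed
// list can loop forever, nodes past LD_THRESHOLD get bit 23 set in their
// headers as a visited mark, which ends the walk when such a node is
// reached again; the marks are removed afterwards.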
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list, ld_addr = 0;
  int len, left, count;
  long cpu_cycles = 0;

  preload(rambase + (start_addr & 0x1fffff) / 4);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; (addr & 0x800000) == 0; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = LE32TOH(list[0]) >> 24;
    addr = LE32TOH(list[0]) & 0xffffff;
    preload(rambase + (addr & 0x1fffff) / 4);

    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    #define LD_THRESHOLD (8 * 1024)
    if (count >= LD_THRESHOLD) {
      if (count == LD_THRESHOLD) {
        ld_addr = addr;
        continue;
      }

      // loop detection marker
      // (bit23 set causes DMA error on real machine, so
      //  unlikely to be ever set by the game)
      list[0] |= HTOLE32(0x800000);
    }
  }

  if (ld_addr != 0) {
    // remove loop detection markers
    count -= LD_THRESHOLD + 2;
    addr = ld_addr & 0x1fffff;
    while (count-- > 0) {
      list = rambase + addr / 4;
      addr = LE32TOH(list[0]) & 0x1fffff;
      list[0] &= HTOLE32(~0x800000);
    }
  }

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h) {
    ret = HTOLE32(ret);
    do_vram_io(&ret, 1, 1);
    ret = LE32TOH(ret);
  }

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;           // should always be 1 for now (set by main emu)
  uint32_t ulStatus;                  // current gpu status
  uint32_t ulControl[256];            // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
    case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status;
      break;
    case 0: // load
      memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status = freeze->ulStatus;
      gpu.cmd_len = 0;
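      // replay the control registers through GPUwriteStatus(); each reg is
      // first flipped so the written (original) value differs from the
      // stored one and survives the same-value write filter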
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512);
      break;
  }

  return 1;
}

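// Per-frame vout hook: flush everything queued, handle blanking, and only
// push a frame out when something actually changed (fb_dirty), respecting
// the frameskip state machine.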
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status & PSX_GPU_STATUS_BLANKING) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && (gpu.status & PSX_GPU_STATUS_INTERLACE)
    && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab