/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#ifdef __GNUC__
#define unlikely(x) __builtin_expect((x), 0)
#define preload __builtin_prefetch
#define noinline __attribute__((noinline))
#else
#define unlikely(x) x
#define preload(...)
#define noinline
#endif

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu;

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < ARRAY_SIZE(gpu.ex_regs); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
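  // initial GPUSTAT: reading the constant below against the psx-spx
  // GPUSTAT docs, this looks like DMA-ready (bit 28), command-ready
  // (bit 26), display disabled (bit 23) and the interlace field bit (13)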
  gpu.status = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

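// the display range registers (GP1 0x06) are in GPU video clock ticks;
// a full scanline is nominally 2560 ticks regardless of the pixel
// resolution, so the visible width is the configured span scaled into
// the current hres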
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  // TODO: emulate this properly..
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status & PSX_GPU_STATUS_DHEIGHT)
    sh *= 2;
  if (sh <= 0 || sh > gpu.screen.vres)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
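  // unsigned compare handles both bounds at once: when x < screen.x the
  // subtraction wraps to a large value, so a single >= test covers
  // "left of the display area" as well as "right of it"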
  gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

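// GP1(0x10) "get GPU info": indices 2..5 return the mirrored E2..E5
// state (texture window, draw area corners, draw offset), index 7 the
// GPU version (2); index 6 is treated as another draw offset read here
// and everything else reads back as 0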
static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
  case 0x02:
  case 0x03:
  case 0x04:
  case 0x05:
    gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
    break;
  case 0x06:
    gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
    break;
  case 0x07:
    gpu.gp0 = 2;
    break;
  default:
    gpu.gp0 = 0;
    break;
  }
}

// double, for overdraw guard
#define VRAM_SIZE (1024 * 512 * 2 * 2)

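// map twice the real 1MB of VRAM so renderer overdraw past the end of
// the buffer stays inside the allocation; the pointer is then advanced
// by 2048 pixels (4kB), presumably so that small negative offsets from
// unclipped coordinates land in mapped memory as well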
static int map_vram(void)
{
  gpu.vram = gpu.mmap(VRAM_SIZE);
  if (gpu.vram != NULL) {
    gpu.vram += 4096 / 2;
    return 0;
  }
  else {
    fprintf(stderr, "could not map vram, expect crashes\n");
    return -1;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  if (gpu.mmap != NULL) {
    if (map_vram() != 0)
      ret = -1;
  }
  return ret;
}

long GPUshutdown(void)
{
  long ret;

  renderer_finish();
  ret = vout_finish();
  if (gpu.vram != NULL) {
    gpu.vram -= 4096 / 2;
    gpu.munmap(gpu.vram, VRAM_SIZE);
  }
  gpu.vram = NULL;

  return ret;
}

void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
  case 0x00:
    do_reset();
    break;
  case 0x01:
    do_cmd_reset();
    break;
  case 0x03:
    if (data & 1)
      gpu.status |= PSX_GPU_STATUS_BLANKING;
    else
      gpu.status &= ~PSX_GPU_STATUS_BLANKING;
    break;
  case 0x04:
    gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
    gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
    break;
  case 0x05:
    gpu.screen.x = data & 0x3ff;
    gpu.screen.y = (data >> 10) & 0x1ff;
    if (gpu.frameskip.set) {
      decide_frameskip_allow(gpu.ex_regs[3]);
      if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
        decide_frameskip();
        gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
      }
    }
    break;
  case 0x06:
    gpu.screen.x1 = data & 0xfff;
    gpu.screen.x2 = (data >> 12) & 0xfff;
    update_width();
    break;
  case 0x07:
    gpu.screen.y1 = data & 0x3ff;
    gpu.screen.y2 = (data >> 10) & 0x3ff;
    update_height();
    break;
  case 0x08:
    gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3f) << 17) | ((data & 0x40) << 10);
    gpu.screen.hres = hres[(gpu.status >> 16) & 7];
    gpu.screen.vres = vres[(gpu.status >> 19) & 3];
    update_width();
    update_height();
    renderer_notify_res_change();
    break;
  default:
    if ((cmd & 0xf0) == 0x10)
      get_gpu_info(data);
    break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

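// number of parameter words following each GP0 command word (total
// length is this value + 1); variable-length polylines (0x48-0x5f)
// carry their minimum length here and are extended at parse time by
// scanning for the termination word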
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

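// stream an ongoing CPU<->VRAM transfer: finish any partially filled
// line first, then copy whole lines, and buffer whatever remains as the
// new partial line. count is in 32-bit words while the copies work in
// 16bpp pixels, hence the *2 on entry and /2 in the return value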
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

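// the transfer rectangle wraps within the 1024x512 halfword VRAM grid;
// sizes use the usual "((n - 1) & mask) + 1" encoding, so a size field
// of 0 means the full extent, e.g. ((0 - 1) & 0x3ff) + 1 == 1024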
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status |= PSX_GPU_STATUS_IMG;
    // XXX: wrong for width 1
    gpu.gp0 = LE32TOH(*(uint32_t *)VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status &= ~PSX_GPU_STATUS_IMG;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

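// variant of do_cmd_list used while frameskipping: draws nothing, but
// still tracks the state that affects future frameskip decisions.
// Polylines (0x48-0x4f flat, 0x58-0x5f shaded) have no length field;
// they end at the first word matching (w & 0xf000f000) == 0x50005000,
// typically written by games as 0x55555555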
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy, v;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = LE32TOH(list[0]) >> 24;
    len = 1 + cmd_lengths[cmd];

    switch (cmd) {
    case 0x02:
      if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w
          || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(list, 3, &dummy);
      else
        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
      break;
    case 0x24 ... 0x27:
    case 0x2c ... 0x2f:
    case 0x34 ... 0x37:
    case 0x3c ... 0x3f:
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
      break;
    case 0x48 ... 0x4f:
      for (v = 3; pos + v < count; v++)
      {
        if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
          break;
      }
      len += v - 3;
      break;
    case 0x58 ... 0x5f:
      for (v = 4; pos + v < count; v += 2)
      {
        if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
          break;
      }
      len += v - 4;
      break;
    default:
      if (cmd == 0xe3)
        skip = decide_frameskip_allow(LE32TOH(list[0]));
      if ((cmd & 0xf8) == 0xe0)
        gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
      break;
    }

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (0xa0 <= cmd && cmd <= 0xdf)
      break; // image i/o

    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = LE32TOH(data[pos]) >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      if (unlikely((pos + 2) >= count)) {
        // incomplete vram write/read cmd, can't consume yet
        cmd = -1;
        break;
      }

      // consume vram write/read cmd
      start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]),
          (cmd & 0xe0) == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active &&
        (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

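  // mirror the cached rendering state back into GPUSTAT: texpage and
  // draw mode bits 0-10 come from GP0(0xe1), mask bits 11-12 from
  // GP0(0xe6)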
  gpu.status &= ~0x1fff;
  gpu.status |= gpu.ex_regs[1] & 0x7ff;
  gpu.status |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

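// walk a GPU DMA linked list. Each node begins with a header word of
// the form (payload_words << 24) | next_address, followed by payload
// words to feed to the GPU; a next_address with bit 23 set (normally
// 0xffffff) terminates the chain:
//
//   len  = LE32TOH(list[0]) >> 24;      // words to submit from list[1]
//   addr = LE32TOH(list[0]) & 0xffffff; // RAM offset of the next node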
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list, ld_addr = 0;
  int len, left, count;
  long cpu_cycles = 0;

  preload(rambase + (start_addr & 0x1fffff) / 4);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; (addr & 0x800000) == 0; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = LE32TOH(list[0]) >> 24;
    addr = LE32TOH(list[0]) & 0xffffff;
    preload(rambase + (addr & 0x1fffff) / 4);

    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    #define LD_THRESHOLD (8 * 1024)
    if (count >= LD_THRESHOLD) {
      if (count == LD_THRESHOLD) {
        ld_addr = addr;
        continue;
      }

      // loop detection marker
      // (bit23 set causes DMA error on real machine, so
      //  unlikely to be ever set by the game)
      list[0] |= HTOLE32(0x800000);
    }
  }

  if (ld_addr != 0) {
    // remove loop detection markers
    count -= LD_THRESHOLD + 2;
    addr = ld_addr & 0x1fffff;
    while (count-- > 0) {
      list = rambase + addr / 4;
      addr = LE32TOH(list[0]) & 0x1fffff;
      list[0] &= HTOLE32(~0x800000);
    }
  }

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h) {
    ret = HTOLE32(ret);
    do_vram_io(&ret, 1, 1);
    ret = LE32TOH(ret);
  }

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;      // should be always 1 for now (set by main emu)
  uint32_t ulStatus;             // current gpu status
  uint32_t ulControl[256];       // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
  case 1: // save
    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
    memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
    memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
    freeze->ulStatus = gpu.status;
    break;
  case 0: // load
    memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
    memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
    memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
    gpu.status = freeze->ulStatus;
    gpu.cmd_len = 0;
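    // replay control regs 1..8 to rebuild derived state; GPUwriteStatus
    // skips writes whose value matches the stored register, so each
    // stored copy is flipped first to guarantee the write goes through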
    for (i = 8; i > 0; i--) {
      gpu.regs[i] ^= 1; // avoid reg change detection
      GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
    }
    renderer_sync_ecmds(gpu.ex_regs);
    renderer_update_caches(0, 0, 1024, 512);
    break;
  }

  return 1;
}

void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status & PSX_GPU_STATUS_BLANKING) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && (gpu.status & PSX_GPU_STATUS_INTERLACE)
    && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab