still adjusting asm for better as compatibility..
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu;

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < ARRAY_SIZE(gpu.ex_regs); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

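// GP1(0x06) gives the horizontal display range x1/x2 in GPU dotclock
// ticks; 2560 ticks are treated as the full scanline here, so the
// visible width maps into the current hres as w = sw * hres / 2560
// (an approximation of the hardware's dotclock dividers).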
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  // TODO: emulate this properly..
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0 || sh > gpu.screen.vres)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}

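// Note the unsigned compares below: (uint32_t)(x - s) >= (uint32_t)w
// folds the two-sided check (x < s || x >= s + w) into a single branch,
// the standard wrap-around range trick.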
static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
    case 0x02:
    case 0x03:
    case 0x04:
    case 0x05:
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      break;
    case 0x06:
      gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
      break;
    case 0x07:
      gpu.gp0 = 2;
      break;
    default:
      gpu.gp0 = 0;
      break;
  }
}

// double, for overdraw guard
#define VRAM_SIZE (1024 * 512 * 2 * 2)

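// VRAM_SIZE is doubled and the returned pointer is advanced by 4096
// bytes (2048 pixels), so that moderately out-of-range accesses from
// bad coordinates land in guard space instead of faulting; presumably
// the leading gap guards underdraw the same way the tail guards overdraw.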
static int map_vram(void)
{
  gpu.vram = gpu.mmap(VRAM_SIZE);
  if (gpu.vram != NULL) {
    gpu.vram += 4096 / 2;
    return 0;
  }
  else {
    fprintf(stderr, "could not map vram, expect crashes\n");
    return -1;
  }
}

long GPUinit(void)
{
  int ret;
  ret  = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  if (gpu.mmap != NULL) {
    if (map_vram() != 0)
      ret = -1;
  }
  return ret;
}

long GPUshutdown(void)
{
  long ret;

  renderer_finish();
  ret = vout_finish();
  if (gpu.vram != NULL) {
    gpu.vram -= 4096 / 2;
    gpu.munmap(gpu.vram, VRAM_SIZE);
  }
  gpu.vram = NULL;

  return ret;
}

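// GP1 (control) port: the top byte selects the command, the low 24 bits
// carry its parameter. Writes repeating the currently latched value are
// filtered out, except for the reset commands (0x00, 0x01) and display
// start (0x05). E.g. a frontend could move the display start with
// (illustrative only):
//   GPUwriteStatus(0x05000000 | (y << 10) | x);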
void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      gpu.status.blanking = data & 1;
      break;
    case 0x04:
      gpu.status.dma = data & 3;
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x1ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
    case 0x08:
      gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3f) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
      update_width();
      update_height();
      renderer_notify_res_change();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

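// Number of additional parameter words following each GP0 opcode; for
// the variable-length polylines (0x48..0x5f) this is only the minimum,
// and the real length is found by scanning for the 0x5xxx5xxx
// terminator word. VRAM transfers (0xa0/0xc0) carry a pixel payload
// that is counted separately.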
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

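// VRAM is addressed as a 1024x512 grid of 16bpp pixels; the macro below
// turns an (x, y) pair into a pointer into it.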
#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

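// Move FIFO words to/from the VRAM rectangle set up by
// start_vram_transfer. Each 32-bit word carries two 16bpp pixels;
// gpu.dma.offset remembers where a partially transferred row left off
// so the transfer can resume on the next call. Returns the number of
// 32-bit words consumed.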
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

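// Latch a GP0(0xa0)/GP0(0xc0) VRAM rectangle. Per the usual GPU rule,
// the size fields wrap so that 0 means the full 1024x512 extent:
// ((size - 1) & 0x3ff) + 1 yields 1..1024.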
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

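// Variant of list parsing used while a frame is being skipped: drawing
// is not performed, but state that outlives the frame must still be
// tracked - the 0xex ecmds, the texture page bits that textured prims
// patch into ex_regs[1], and large fills that would leave stale display
// contents. Variable-length polylines are measured by scanning for the
// 0x5xxx5xxx terminator. Parsing stops at image i/o or an incomplete
// packet; the last command is reported through *last_cmd (-1 if
// incomplete).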
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy, v;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    switch (cmd) {
      case 0x02:
        if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
          // clearing something large, don't skip
          do_cmd_list(list, 3, &dummy);
        else
          memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
        break;
      case 0x24 ... 0x27:
      case 0x2c ... 0x2f:
      case 0x34 ... 0x37:
      case 0x3c ... 0x3f:
        gpu.ex_regs[1] &= ~0x1ff;
        gpu.ex_regs[1] |= list[4 + ((cmd >> 4) & 1)] & 0x1ff;
        break;
      case 0x48 ... 0x4f:
        for (v = 3; pos + v < count; v++)
        {
          if ((list[v] & 0xf000f000) == 0x50005000)
            break;
        }
        len += v - 3;
        break;
      case 0x58 ... 0x5f:
        for (v = 4; pos + v < count; v += 2)
        {
          if ((list[v] & 0xf000f000) == 0x50005000)
            break;
        }
        len += v - 4;
        break;
      default:
        if (cmd == 0xe3)
          skip = decide_frameskip_allow(list[0]);
        if ((cmd & 0xf8) == 0xe0)
          gpu.ex_regs[cmd & 7] = list[0];
        break;
    }

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (0xa0 <= cmd && cmd <= 0xdf)
      break; // image i/o

    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

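// Feed a raw word stream to the renderer, interleaving any in-progress
// VRAM write with regular command parsing. Returns the number of words
// left unprocessed (an incomplete packet at the end of the buffer).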
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], (cmd & 0xe0) == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

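// Walk a GPU DMA linked list: each node starts with a header word
//   (payload_len << 24) | next_addr
// followed by payload_len command words; next_addr == 0xffffff ends the
// list. Bit 23 of the header doubles as a visited marker for loop
// detection (it would cause a DMA error on real hardware, so games
// shouldn't be using it) and is cleared again once the walk finishes.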
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    //  unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;           // should be always 1 for now (set by main emu)
  uint32_t ulStatus;                  // current gpu status
  uint32_t ulControl[256];            // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

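// Save/load state. On load, GPUwriteStatus is replayed for control regs
// 1..8 to rebuild derived state; each reg is first XORed with 1 so the
// same-value filter in GPUwriteStatus cannot swallow the write.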
long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
    case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
      memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status.reg = freeze->ulStatus;
      gpu.cmd_len = 0;
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512);
      break;
  }

  return 1;
}

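// Per the usual PCSX plugin contract this is called once per vsync:
// flush whatever is pending, then blank, skip (when frameskip dropped
// this frame) or present, depending on state.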
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab