gpulib: fix out-of-bounds reads in do_cmd_buffer()
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 * - GNU GPL, version 2 or later.
 * - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#ifdef __GNUC__
#define unlikely(x) __builtin_expect((x), 0)
#define preload __builtin_prefetch
#define noinline __attribute__((noinline))
#else
// expand to (x) rather than nothing, so `if (unlikely(...))`
// still compiles on non-GNU compilers
#define unlikely(x) (x)
#define preload(...)
#define noinline
#endif

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu;

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

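// note: GP0 data arrives via GPUwriteData()/GPUwriteDataMem() or DMA chains,
// and a command may be split across calls; do_cmd_buffer() consumes as much
// as it can and the callers keep any incomplete tail buffered in
// gpu.cmd_buffer (flushed through flush_cmd_buffer() below)
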
static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

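// the display range registers used below are in GPU clock ticks; the scaling
// assumes a full scanline is roughly 2560 ticks (a rough approximation of
// the real video timing, matching the sw >= 2560 full-width check)
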
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  // TODO: emulate this properly..
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0 || sh > gpu.screen.vres)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

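// GP1(0x10) "get GPU info": latches the requested value into the GP0 read
// latch (gpu.gp0); 0x02-0x05 return the GP0 0xE2-0xE5 attribute state kept
// in ex_regs (texture window, draw area, draw offset), 0x07 reports type 2
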
static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
  case 0x02:
  case 0x03:
  case 0x04:
  case 0x05:
    gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
    break;
  case 0x06:
    gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
    break;
  case 0x07:
    gpu.gp0 = 2;
    break;
  default:
    gpu.gp0 = 0;
    break;
  }
}

// double, for overdraw guard
#define VRAM_SIZE (1024 * 512 * 2 * 2)

static int map_vram(void)
{
  gpu.vram = gpu.mmap(VRAM_SIZE);
  if (gpu.vram != NULL) {
    gpu.vram += 4096 / 2;
    return 0;
  }
  else {
    fprintf(stderr, "could not map vram, expect crashes\n");
    return -1;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  if (gpu.mmap != NULL) {
    if (map_vram() != 0)
      ret = -1;
  }
  return ret;
}

long GPUshutdown(void)
{
  long ret;

  renderer_finish();
  ret = vout_finish();
  if (gpu.vram != NULL) {
    gpu.vram -= 4096 / 2;
    gpu.munmap(gpu.vram, VRAM_SIZE);
  }
  gpu.vram = NULL;

  return ret;
}

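// GP1 (control) write: command in bits 31-24, parameter in the rest.
// commands 0x00/0x01 (resets) and 0x05 (display start) always go through;
// other writes are ignored when the register value is unchanged
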
void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
  case 0x00:
    do_reset();
    break;
  case 0x01:
    do_cmd_reset();
    break;
  case 0x03:
    gpu.status.blanking = data & 1;
    break;
  case 0x04:
    gpu.status.dma = data & 3;
    break;
  case 0x05:
    gpu.screen.x = data & 0x3ff;
    gpu.screen.y = (data >> 10) & 0x1ff;
    if (gpu.frameskip.set) {
      decide_frameskip_allow(gpu.ex_regs[3]);
      if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
        decide_frameskip();
        gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
      }
    }
    break;
  case 0x06:
    gpu.screen.x1 = data & 0xfff;
    gpu.screen.x2 = (data >> 12) & 0xfff;
    update_width();
    break;
  case 0x07:
    gpu.screen.y1 = data & 0x3ff;
    gpu.screen.y2 = (data >> 10) & 0x3ff;
    update_height();
    break;
  case 0x08:
    gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3f) << 17) | ((data & 0x40) << 10);
    gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
    gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
    update_width();
    update_height();
    renderer_notify_res_change();
    break;
  default:
    if ((cmd & 0xf0) == 0x10)
      get_gpu_info(data);
    break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

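// number of 32-bit parameter words following each GP0 command word;
// variable-length commands (the 0x48-0x5f polylines, terminated by a
// 0x5xxx5xxx word) and the 0xa0/0xc0 vram transfers need extra handling
// in the parsers below
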
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

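// push up to 'count' words of the active vram transfer through
// do_vram_line(); a partially transferred row is remembered in
// gpu.dma.offset so the transfer can resume mid-line on the next call.
// returns the number of words consumed
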
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
        gpu.dma_start.w, gpu.dma_start.h);
}

static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy, v;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    switch (cmd) {
    case 0x02:
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(list, 3, &dummy);
      else
        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
      break;
    case 0x24 ... 0x27:
    case 0x2c ... 0x2f:
    case 0x34 ... 0x37:
    case 0x3c ... 0x3f:
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4 + ((cmd >> 4) & 1)] & 0x1ff;
      break;
    case 0x48 ... 0x4f:
      for (v = 3; pos + v < count; v++)
      {
        if ((list[v] & 0xf000f000) == 0x50005000)
          break;
      }
      len += v - 3;
      break;
    case 0x58 ... 0x5f:
      for (v = 4; pos + v < count; v += 2)
      {
        if ((list[v] & 0xf000f000) == 0x50005000)
          break;
      }
      len += v - 4;
      break;
    default:
      if (cmd == 0xe3)
        skip = decide_frameskip_allow(list[0]);
      if ((cmd & 0xf8) == 0xe0)
        gpu.ex_regs[cmd & 7] = list[0];
      break;
    }

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (0xa0 <= cmd && cmd <= 0xdf)
      break; // image i/o

    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

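// main GP0 parser: dispatches vram transfers and hands draw commands to the
// renderer (or the skip path). returns how many words were left unconsumed,
// i.e. an incomplete trailing command the caller must keep buffered. the
// pos + 2 >= count check guards the vram transfer header, whose two
// parameter words may not have arrived yet; decoding them early was the
// out-of-bounds read this commit fixes
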
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      if (unlikely(pos + 2 >= count)) {
        // incomplete vram write/read cmd, can't consume yet
        cmd = -1;
        break;
      }

      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], (cmd & 0xe0) == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

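// DMA linked list walk: every node starts with a header word holding the
// payload length in bits 31-24 and the next node's address in bits 23-0;
// the walk stops when bit 23 of the address is set (normally the 0xffffff
// terminator). bit 23 doubles as the visited marker for loop detection
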
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list, ld_addr = 0;
  int len, left, count;
  long cpu_cycles = 0;

  preload(rambase + (start_addr & 0x1fffff) / 4);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; (addr & 0x800000) == 0; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    preload(rambase + (addr & 0x1fffff) / 4);

    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    #define LD_THRESHOLD (8*1024)
    if (count >= LD_THRESHOLD) {
      if (count == LD_THRESHOLD) {
        ld_addr = addr;
        continue;
      }

      // loop detection marker
      // (bit23 set causes DMA error on real machine, so
      //  unlikely to be ever set by the game)
      list[0] |= 0x800000;
    }
  }

  if (ld_addr != 0) {
    // remove loop detection markers
    count -= LD_THRESHOLD + 2;
    addr = ld_addr & 0x1fffff;
    while (count-- > 0) {
      list = rambase + addr / 4;
      addr = list[0] & 0x1fffff;
      list[0] &= ~0x800000;
    }
  }

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

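// savestate block exchanged with the emu core; ulControl mirrors the GP1
// register file, with slots 0xe0-0xe7 reused to hold the GP0 0xEx state
// (see the memcpy offsets in GPUfreeze below)
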
struct GPUFreeze
{
  uint32_t ulFreezeVersion;           // should be always 1 for now (set by main emu)
  uint32_t ulStatus;                  // current gpu status
  uint32_t ulControl[256];            // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
  case 1: // save
    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
    memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
    memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
    freeze->ulStatus = gpu.status.reg;
    break;
  case 0: // load
    memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
    memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
    memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
    gpu.status.reg = freeze->ulStatus;
    gpu.cmd_len = 0;
    for (i = 8; i > 0; i--) {
      gpu.regs[i] ^= 1; // avoid reg change detection
      GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
    }
    renderer_sync_ecmds(gpu.ex_regs);
    renderer_update_caches(0, 0, 1024, 512);
    break;
  }

  return 1;
}

void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab