fix some random corner cases
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu;

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < ARRAY_SIZE(gpu.ex_regs); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}

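// Note on the comparisons below: casting (coord - start) to uint32_t turns
// the two-sided check "start <= coord < start + size" into a single
// unsigned compare, because values below 'start' wrap to huge numbers.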
static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
    case 0x02:
    case 0x03:
    case 0x04:
    case 0x05:
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      break;
    case 0x06:
      gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
      break;
    case 0x07:
      gpu.gp0 = 2;
      break;
    default:
      gpu.gp0 = 0;
      break;
  }
}

// double, for overdraw guard
#define VRAM_SIZE (1024 * 512 * 2 * 2)

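// Map a double-size VRAM buffer (see VRAM_SIZE above) through the
// frontend-supplied mmap callback. gpu.vram is then advanced by 4096
// bytes (2048 pixels), presumably to keep a small guard area in front
// of the image matching the overdraw guard at the end.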
static int map_vram(void)
{
  gpu.vram = gpu.mmap(VRAM_SIZE);
  if (gpu.vram != NULL) {
    gpu.vram += 4096 / 2;
    return 0;
  }
  else {
    fprintf(stderr, "could not map vram, expect crashes\n");
    return -1;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  if (gpu.mmap != NULL) {
    if (map_vram() != 0)
      ret = -1;
  }
  return ret;
}

long GPUshutdown(void)
{
  long ret;

  renderer_finish();
  ret = vout_finish();
  if (gpu.vram != NULL) {
    gpu.vram -= 4096 / 2;
    gpu.munmap(gpu.vram, VRAM_SIZE);
  }
  gpu.vram = NULL;

  return ret;
}

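// GP1 (control) register write: command in the top byte, argument in the
// low 24 bits. Note the early return below: for most commands a write
// that doesn't change the stored value is ignored, a detail GPUfreeze()
// later has to work around when replaying registers.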
void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      gpu.status.blanking = data & 1;
      break;
    case 0x04:
      gpu.status.dma = data & 3;
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x1ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
    case 0x08:
      gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3f) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
      update_width();
      update_height();
      renderer_notify_res_change();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

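// Number of parameter words following each GP0 command word (the command
// word itself is not counted; the parsers add 1). The 0xa0/0xc0 image
// transfer commands only list their two setup words (position, size)
// here; the pixel payload length comes from the size argument and is
// handled by the VRAM transfer code instead. Variable-length commands
// such as polylines are not fully described by a fixed table entry
// (see the XXX in do_cmd_list_skip below).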
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

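// VRAM is laid out as a 1024x512 grid of 16bpp pixels.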
#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

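// Copy pixels between the incoming word stream and VRAM, row by row.
// A transfer can be split over multiple calls, so progress is kept in
// gpu.dma; 'offset' is the position within a partially transferred row.
// Returns the number of 32-bit words consumed.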
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

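// Set up a GP0 0xa0/0xc0 image transfer. Position and size are masked to
// the 1024x512 VRAM grid; the "subtract 1, mask, add 1" sequence makes a
// size of 0 wrap to the maximum (1024 or 512), which matches the commonly
// documented behavior of the real hardware.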
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  // XXX: polylines are not properly handled
  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    if (cmd == 0x02) {
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(list, 3, &dummy);
      else
        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
    }
    else if ((cmd & 0xf4) == 0x24) {
      // flat textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4] & 0x1ff;
    }
    else if ((cmd & 0xf4) == 0x34) {
      // shaded textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[5] & 0x1ff;
    }
    else if (cmd == 0xe3)
      skip = decide_frameskip_allow(list[0]);

    if ((cmd & 0xf8) == 0xe0)
      gpu.ex_regs[cmd & 7] = list[0];

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (cmd == 0xa0 || cmd == 0xc0)
      break; // image i/o
    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

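// The central command parser: walks the buffer, feeding an in-progress
// VRAM transfer first, then starting image transfers and dispatching
// drawing commands to the renderer (or to the skip path while frameskip
// is active). Returns the number of words left unprocessed, which is
// nonzero only when the buffer ends with an incomplete command.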
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (cmd == 0xa0 || cmd == 0xc0) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

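// Walk a GPU DMA linked list. Each node in PSX RAM starts with a header
// word: bits 31-24 give the payload length in words, bits 23-0 the address
// of the next node, with 0xffffff terminating the list. For example, a
// (hypothetical) header of 0x03001010 means "3 command words follow, next
// node at address 0x001010". Visited nodes are marked by setting bit 23
// of the header so that malformed looping lists still terminate.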
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    //  unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;      // should be always 1 for now (set by main emu)
  uint32_t ulStatus;             // current gpu status
  uint32_t ulControl[256];       // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
    case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
      memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status.reg = freeze->ulStatus;
      gpu.cmd_len = 0;
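      // Replay the control registers through GPUwriteStatus() to rebuild
      // derived state (screen size, position, blanking, ...). Since
      // GPUwriteStatus() ignores writes of unchanged values, each stored
      // reg is flipped first and then the original value is written,
      // forcing the update through.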
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512);
      break;
  }

  return 1;
}

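// Called by the emulator core once per frame. Flushes buffered commands
// and presents the frame, unless the display is blanked, nothing has
// changed, or frameskip decided to drop it.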
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab