add support for software-enhanced rendering
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu __attribute__((aligned(2048)));

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

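/*
 * Convert the GP1(06h)/GP1(07h) display ranges into a width/height in
 * pixels. x1/x2 are presumably in GPU clock units, with 2560 units taken
 * here as a full line; out-of-range values fall back to the nominal
 * horizontal resolution.
 */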
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

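/*
 * Frameskip bookkeeping, run once per display flip (see the GP1(05h) case
 * in GPUwriteStatus): either mark the frame as ready for output or count it
 * as skipped, then decide whether to keep skipping based on external advice
 * or the configured skip count. A screen-sized fill captured during a
 * skipped frame is replayed here so the next shown frame still gets its
 * clear.
 */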
static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

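/*
 * GP1(10h..1Fh) "get GPU info": returns selected internal state through the
 * GP0 read port. The E2..E5 draw settings are mirrored in gpu.ex_regs, so
 * the low 20 bits of the matching entry are returned; 07h reports the GPU
 * version (2).
 */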
static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
    case 0x02:
    case 0x03:
    case 0x04:
    case 0x05:
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      break;
    case 0x06:
      gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
      break;
    case 0x07:
      gpu.gp0 = 2;
      break;
    default:
      gpu.gp0 = 0;
      break;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();

  gpu.state.enhancement_available = 0;
  ret |= renderer_init();

  if (gpu.state.enhancement_available) {
    if (gpu.enhancement_bufer == NULL)
      gpu.enhancement_bufer = malloc(2048 * 1024 * 2 + 1024 * 512 * 2);
    if (gpu.enhancement_bufer == NULL)
      gpu_log("OOM for enhancement buffer\n");
  }
  else if (gpu.enhancement_bufer != NULL) {
    free(gpu.enhancement_bufer);
    gpu.enhancement_bufer = NULL;
  }

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  return ret;
}

long GPUshutdown(void)
{
  return vout_finish();
}

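/*
 * GP1 control port write: the top byte selects the command, the low 24 bits
 * carry its parameter. Values are cached in gpu.regs so repeated identical
 * writes (other than resets and display address changes) can be ignored.
 */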
void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      gpu.status.blanking = data & 1;
      break;
    case 0x04:
      gpu.status.dma = data & 3;
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x3ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
    case 0x08:
      gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
      update_width();
      update_height();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

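/*
 * Number of extra parameter words for each GP0 command; the command word
 * itself is not counted (the parsers use "1 + cmd_lengths[cmd]", see
 * do_cmd_list_skip()). Variable-length cases - polylines and the A0h/C0h
 * image transfers - still need special handling in the parsers.
 */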
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

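/*
 * Move up to 'count' words between the caller's buffer and the VRAM
 * rectangle set up by start_vram_transfer(). Work is done in 16bpp
 * halfwords: any partially transferred line (gpu.dma.offset) is completed
 * first, then whole lines are copied with the row wrapping at 512, and a
 * leftover tail becomes the new offset. Returns the number of 32-bit words
 * consumed.
 */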
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

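/*
 * Set up a GP0(A0h) VRAM write or GP0(C0h) VRAM read rectangle. Position and
 * size arrive packed as (y << 16) | x; width wraps to 1..1024 and height to
 * 1..512, the VRAM dimensions. For reads, the first word is prefetched into
 * gp0 so GPUreadData() has something to return right away.
 */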
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

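/*
 * GP0 parser used while a frame is being skipped: nothing is drawn, but
 * state that must survive the skip is still tracked - texture page bits
 * from textured primitives and the E1..E7 settings. Fills larger than the
 * display are executed anyway so VRAM stays consistent; screen-sized fills
 * are saved in pending_fill and replayed when skipping ends. Returns words
 * consumed; *last_cmd becomes -1 on an incomplete command.
 */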
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  // XXX: polylines are not properly handled
  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    if (cmd == 0x02) {
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(list, 3, &dummy);
      else
        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
    }
    else if ((cmd & 0xf4) == 0x24) {
      // flat textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4] & 0x1ff;
    }
    else if ((cmd & 0xf4) == 0x34) {
      // shaded textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[5] & 0x1ff;
    }
    else if (cmd == 0xe3)
      skip = decide_frameskip_allow(list[0]);

    if ((cmd & 0xf8) == 0xe0)
      gpu.ex_regs[cmd & 7] = list[0];

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (cmd == 0xa0 || cmd == 0xc0)
      break; // image i/o
    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

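/*
 * Main GP0 dispatcher: continues any in-progress VRAM write first, starts
 * image transfers for A0h/C0h itself, and hands everything else to the
 * renderer's do_cmd_list() (or to do_cmd_list_skip() while frameskipping).
 * GPUSTAT bits derived from E1/E6 are refreshed afterwards. Returns the
 * number of words left unprocessed (the tail of an incomplete command).
 */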
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (cmd == 0xa0 || cmd == 0xc0) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

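/*
 * Walk a GPU DMA linked list. Each node is a header word, (len << 24) |
 * next_addr, followed by 'len' GP0 command words; an address of 0xffffff
 * ends the list. Bit 23 of visited headers is set as a loop-detection
 * marker and cleared again afterwards (as noted below, that bit causes a
 * DMA error on real hardware, so games should never set it themselves).
 * The return value is a rough cycle cost used for timing.
 */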
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    //  unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

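/*
 * Savestate support. The freeze image holds raw VRAM, the GP1 control
 * registers and the E1..E7 state (stored at ulControl[0xe0..]). On load,
 * the control registers are replayed through GPUwriteStatus() so derived
 * state (screen size, frameskip decisions) is rebuilt; gpu.regs[i] is
 * toggled first to defeat the unchanged-register early-out.
 */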
struct GPUFreeze
{
  uint32_t ulFreezeVersion;           // should always be 1 for now (set by main emu)
  uint32_t ulStatus;                  // current gpu status
  uint32_t ulControl[256];            // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
    case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
      memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status.reg = freeze->ulStatus;
      gpu.cmd_len = 0;
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512);
      break;
  }

  return 1;
}

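/*
 * Called by the emulator core once per emulated vblank: flush anything
 * still queued, handle display blanking, and present the frame unless
 * frameskip or an unchanged framebuffer means there is nothing new to show.
 */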
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab