some random improvements
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
... / ...
CommitLineData
1/*
2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
3 *
4 * This work is licensed under the terms of any of these licenses
5 * (at your option):
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
9 */
10
11#include <stdio.h>
12#include <string.h>
13#include "gpu.h"
14
15#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
16#ifdef __GNUC__
17#define unlikely(x) __builtin_expect((x), 0)
18#define preload __builtin_prefetch
19#define noinline __attribute__((noinline))
20#else
21#define unlikely(x)
22#define preload(...)
23#define noinline
24#error huh
25#endif
26
27#define gpu_log(fmt, ...) \
28 printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)
29
30//#define log_io gpu_log
31#define log_io(...)
32//#define log_anomaly gpu_log
33#define log_anomaly(...)
34
35struct psx_gpu gpu;
36
37static noinline int do_cmd_buffer(uint32_t *data, int count);
38static void finish_vram_transfer(int is_read);
39
// GP1(0x01)-style command reset: drain any buffered command words,
// then cancel an in-progress VRAM transfer. The pending transfer is
// finished first so renderer caches are updated before dma.h is cleared.
static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}
50
51static noinline void do_reset(void)
52{
53 unsigned int i;
54
55 do_cmd_reset();
56
57 memset(gpu.regs, 0, sizeof(gpu.regs));
58 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
59 gpu.ex_regs[i] = (0xe0 + i) << 24;
60 gpu.status.reg = 0x14802000;
61 gpu.gp0 = 0;
62 gpu.regs[3] = 1;
63 gpu.screen.hres = gpu.screen.w = 256;
64 gpu.screen.vres = gpu.screen.h = 240;
65}
66
67static noinline void update_width(void)
68{
69 int sw = gpu.screen.x2 - gpu.screen.x1;
70 if (sw <= 0 || sw >= 2560)
71 // full width
72 gpu.screen.w = gpu.screen.hres;
73 else
74 gpu.screen.w = sw * gpu.screen.hres / 2560;
75}
76
77static noinline void update_height(void)
78{
79 // TODO: emulate this properly..
80 int sh = gpu.screen.y2 - gpu.screen.y1;
81 if (gpu.status.dheight)
82 sh *= 2;
83 if (sh <= 0 || sh > gpu.screen.vres)
84 sh = gpu.screen.vres;
85
86 gpu.screen.h = sh;
87}
88
89static noinline void decide_frameskip(void)
90{
91 if (gpu.frameskip.active)
92 gpu.frameskip.cnt++;
93 else {
94 gpu.frameskip.cnt = 0;
95 gpu.frameskip.frame_ready = 1;
96 }
97
98 if (!gpu.frameskip.active && *gpu.frameskip.advice)
99 gpu.frameskip.active = 1;
100 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
101 gpu.frameskip.active = 1;
102 else
103 gpu.frameskip.active = 0;
104
105 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
106 int dummy;
107 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
108 gpu.frameskip.pending_fill[0] = 0;
109 }
110}
111
112static noinline int decide_frameskip_allow(uint32_t cmd_e3)
113{
114 // no frameskip if it decides to draw to display area,
115 // but not for interlace since it'll most likely always do that
116 uint32_t x = cmd_e3 & 0x3ff;
117 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
118 gpu.frameskip.allow = gpu.status.interlace ||
119 (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
120 (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
121 return gpu.frameskip.allow;
122}
123
124static noinline void get_gpu_info(uint32_t data)
125{
126 switch (data & 0x0f) {
127 case 0x02:
128 case 0x03:
129 case 0x04:
130 case 0x05:
131 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
132 break;
133 case 0x06:
134 gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
135 break;
136 case 0x07:
137 gpu.gp0 = 2;
138 break;
139 default:
140 gpu.gp0 = 0;
141 break;
142 }
143}
144
145// double, for overdraw guard
146#define VRAM_SIZE (1024 * 512 * 2 * 2)
147
148static int map_vram(void)
149{
150 gpu.vram = gpu.mmap(VRAM_SIZE);
151 if (gpu.vram != NULL) {
152 gpu.vram += 4096 / 2;
153 return 0;
154 }
155 else {
156 fprintf(stderr, "could not map vram, expect crashes\n");
157 return -1;
158 }
159}
160
161long GPUinit(void)
162{
163 int ret;
164 ret = vout_init();
165 ret |= renderer_init();
166
167 gpu.state.frame_count = &gpu.zero;
168 gpu.state.hcnt = &gpu.zero;
169 gpu.frameskip.active = 0;
170 gpu.cmd_len = 0;
171 do_reset();
172
173 if (gpu.mmap != NULL) {
174 if (map_vram() != 0)
175 ret = -1;
176 }
177 return ret;
178}
179
180long GPUshutdown(void)
181{
182 long ret;
183
184 renderer_finish();
185 ret = vout_finish();
186 if (gpu.vram != NULL) {
187 gpu.vram -= 4096 / 2;
188 gpu.munmap(gpu.vram, VRAM_SIZE);
189 }
190 gpu.vram = NULL;
191
192 return ret;
193}
194
// GP1 (control/status) register write: reset, display configuration,
// DMA mode and "get info" commands. Top byte selects the command.
void GPUwriteStatus(uint32_t data)
{
  // display mode tables, indexed by the GP1(0x08) bit fields
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    // drop writes with an unchanged value, except for the resets
    // (0x00/0x01) and display address (0x05) which must always run
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
  case 0x00:
    do_reset();
    break;
  case 0x01:
    do_cmd_reset();
    break;
  case 0x03:
    gpu.status.blanking = data & 1;
    break;
  case 0x04:
    gpu.status.dma = data & 3;
    break;
  case 0x05:
    // start of display area in VRAM
    gpu.screen.x = data & 0x3ff;
    gpu.screen.y = (data >> 10) & 0x1ff;
    if (gpu.frameskip.set) {
      decide_frameskip_allow(gpu.ex_regs[3]);
      // re-evaluate frameskip at most once per emulated frame
      if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
        decide_frameskip();
        gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
      }
    }
    break;
  case 0x06:
    // horizontal display range
    gpu.screen.x1 = data & 0xfff;
    gpu.screen.x2 = (data >> 12) & 0xfff;
    update_width();
    break;
  case 0x07:
    // vertical display range
    gpu.screen.y1 = data & 0x3ff;
    gpu.screen.y2 = (data >> 10) & 0x3ff;
    update_height();
    break;
  case 0x08:
    // display mode: fold the mode bits into status, refresh resolution
    gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
    gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
    gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
    update_width();
    update_height();
    renderer_notify_res_change();
    break;
  default:
    if ((cmd & 0xf0) == 0x10)
      get_gpu_info(data);
    break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}
261
// Number of extra argument words each GP0 command consumes (command
// word itself not counted). Poly-lines (0x48.. / 0x58..) are variable
// length; their listed value is only the minimum and the parsers scan
// for the 0x50005000 terminator themselves.
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
281
282#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
283
// Copy one span of l 16bpp pixels between VRAM at (x, y) and the
// caller's buffer; direction chosen by is_read.
static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *line = VRAM_MEM_XY(x, y);
  void *dst = is_read ? (void *)mem : (void *)line;
  const void *src = is_read ? (const void *)line : (const void *)mem;
  memcpy(dst, src, l * 2);
}
292
// Transfer up to 'count' 32-bit words between the data stream and the
// VRAM rectangle set up by start_vram_transfer(); direction per is_read.
// Progress is kept in gpu.dma (y, h, offset) so a transfer can resume
// mid-line on the next call. Returns the number of words consumed.
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  // finish the partially transferred line from the previous call first
  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      // line complete - move to the next row
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  // bulk transfer of whole lines
  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;  // VRAM is 512 lines tall, wrap vertically
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    // data ran out mid-line; remember how far we got
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}
342
// Begin a GP0 0xa0 (CPU->VRAM) or 0xc0 (VRAM->CPU) rectangle transfer:
// decode position/size into gpu.dma and snapshot it in gpu.dma_start.
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  // the -1/+1 dance maps a size field of 0 to the maximum (1024/512)
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  // make sure queued draws hit VRAM before we read/overwrite it
  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // latch the first two pixels into gp0 for the next GPUREAD
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}
367
368static void finish_vram_transfer(int is_read)
369{
370 if (is_read)
371 gpu.status.img = 0;
372 else
373 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
374 gpu.dma_start.w, gpu.dma_start.h);
375}
376
// Parse GP0 commands while the frame is being skipped: keep the cached
// draw-environment state (e-regs, texture page) up to date without
// rendering, except large fills which are executed anyway. Stops early
// at image i/o commands or when an e3 write makes skipping disallowed.
// Returns words consumed; *last_cmd gets the last command seen, or -1
// if the final command was incomplete.
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy, v;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    switch (cmd) {
    case 0x02:
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(list, 3, &dummy);
      else
        // small fill - defer it until we know the frame is shown
        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
      break;
    case 0x24 ... 0x27:
    case 0x2c ... 0x2f:
    case 0x34 ... 0x37:
    case 0x3c ... 0x3f:
      // textured polygons: track the texture page bits in e-reg 1
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4 + ((cmd >> 4) & 1)] & 0x1ff;
      break;
    case 0x48 ... 0x4F:
      // poly-line: variable length, scan for the 0x50005000 terminator
      for (v = 3; pos + v < count; v++)
      {
        if ((list[v] & 0xf000f000) == 0x50005000)
          break;
      }
      len += v - 3;
      break;
    case 0x58 ... 0x5F:
      // shaded poly-line: color+vertex pairs until the terminator
      for (v = 4; pos + v < count; v += 2)
      {
        if ((list[v] & 0xf000f000) == 0x50005000)
          break;
      }
      len += v - 4;
      break;
    default:
      if (cmd == 0xe3)
        // drawing-area change may end the skip-allowed condition
        skip = decide_frameskip_allow(list[0]);
      if ((cmd & 0xf8) == 0xe0)
        gpu.ex_regs[cmd & 7] = list[0];
      break;
    }

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (0xa0 <= cmd && cmd <= 0xdf)
      break; // image i/o

    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}
442
// Process up to 'count' words of GP0 data: route them to an active
// VRAM transfer, start new transfers, or hand command runs to either
// the skip parser or the real renderer. Returns how many words were
// left unprocessed (tail of an incomplete command).
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      // an ongoing CPU->VRAM transfer consumes the words directly
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], (cmd & 0xe0) == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  // mirror the cached e1/e6 draw-env bits into the status register
  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  // drawing area may have changed - re-check whether skipping is allowed
  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}
491
492static void flush_cmd_buffer(void)
493{
494 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
495 if (left > 0)
496 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
497 gpu.cmd_len = left;
498}
499
500void GPUwriteDataMem(uint32_t *mem, int count)
501{
502 int left;
503
504 log_io("gpu_dma_write %p %d\n", mem, count);
505
506 if (unlikely(gpu.cmd_len > 0))
507 flush_cmd_buffer();
508
509 left = do_cmd_buffer(mem, count);
510 if (left)
511 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
512}
513
514void GPUwriteData(uint32_t data)
515{
516 log_io("gpu_write %08x\n", data);
517 gpu.cmd_buffer[gpu.cmd_len++] = data;
518 if (gpu.cmd_len >= CMD_BUFFER_LEN)
519 flush_cmd_buffer();
520}
521
// Walk and execute a GP0 linked-list DMA chain starting at start_addr.
// Loop protection: every visited node gets bit 23 of its header set
// while walking (that bit causes a DMA error on real hardware, so games
// never set it); the walk stops on a marked node and all marks are
// removed afterwards. Returns an approximate cpu cycle cost.
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  preload(rambase + (start_addr & 0x1fffff) / 4);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends it's main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    // pre-mark last frame's list head so the walk stops if it reappears
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;        // packet word count
    addr = list[0] & 0xffffff;  // next link
    preload(rambase + (addr & 0x1fffff) / 4);

    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    // unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    // next link already marked: end of list or a loop
    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  // remember this list for the duplicate-submission check above
  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}
590
591void GPUreadDataMem(uint32_t *mem, int count)
592{
593 log_io("gpu_dma_read %p %d\n", mem, count);
594
595 if (unlikely(gpu.cmd_len > 0))
596 flush_cmd_buffer();
597
598 if (gpu.dma.h)
599 do_vram_io(mem, count, 1);
600}
601
602uint32_t GPUreadData(void)
603{
604 uint32_t ret;
605
606 if (unlikely(gpu.cmd_len > 0))
607 flush_cmd_buffer();
608
609 ret = gpu.gp0;
610 if (gpu.dma.h)
611 do_vram_io(&ret, 1, 1);
612
613 log_io("gpu_read %08x\n", ret);
614 return ret;
615}
616
617uint32_t GPUreadStatus(void)
618{
619 uint32_t ret;
620
621 if (unlikely(gpu.cmd_len > 0))
622 flush_cmd_buffer();
623
624 ret = gpu.status.reg;
625 log_io("gpu_read_status %08x\n", ret);
626 return ret;
627}
628
// Savestate container exchanged with the main emulator.
// NOTE(review): layout appears to be a shared plugin-interface format -
// keep it binary-compatible; do not reorder or resize fields.
struct GPUFreeze
{
 uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
 uint32_t ulStatus; // current gpu status
 uint32_t ulControl[256]; // latest control register values
 unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};
636
// Savestate entry point: type 1 saves the GPU state into *freeze,
// type 0 restores from it. Always returns 1.
long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
  case 1: // save
    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
    memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
    // e0-e7 state is stashed in the 0xe0.. slots of ulControl
    memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
    freeze->ulStatus = gpu.status.reg;
    break;
  case 0: // load
    memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
    memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
    memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
    gpu.status.reg = freeze->ulStatus;
    gpu.cmd_len = 0;
    // replay control regs 1..8 to rebuild derived state (screen size
    // etc.); the xor defeats GPUwriteStatus' unchanged-value shortcut
    for (i = 8; i > 0; i--) {
      gpu.regs[i] ^= 1; // avoid reg change detection
      GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
    }
    renderer_sync_ecmds(gpu.ex_regs);
    renderer_update_caches(0, 0, 1024, 512);
    break;
  }

  return 1;
}
667
// Per-frame hook: flush pending work and present the frame, honoring
// display blanking and the frameskip state machine.
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking) {
    // display disabled - blank once, then stay idle until re-enabled
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  // nothing drawn since the last update - nothing to present
  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      // no frame ready; force one out if we've been skipping ~9 frames
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}
699
// Vblank hook: decide whether interlaced rendering should be in effect
// and tell the renderer the current field (lcf) when it matters.
void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  // notify every field while interlacing, or once when it switches off
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}
720
721#include "../../frontend/plugin_lib.h"
722
// Accept configuration and shared-state pointers from the frontend.
// Order matters: gpu.mmap must be set before the delayed map_vram().
void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}
746
747// vim:shiftwidth=2:expandtab