frontend: export fps stats to plugins
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 *  See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu __attribute__((aligned(2048)));

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}
58
8dd855cd 59static noinline void update_width(void)
60{
61 int sw = gpu.screen.x2 - gpu.screen.x1;
62 if (sw <= 0 || sw >= 2560)
63 // full width
64 gpu.screen.w = gpu.screen.hres;
65 else
66 gpu.screen.w = sw * gpu.screen.hres / 2560;
67}
68
69static noinline void update_height(void)
70{
71 int sh = gpu.screen.y2 - gpu.screen.y1;
72 if (gpu.status.dheight)
73 sh *= 2;
74 if (sh <= 0)
75 sh = gpu.screen.vres;
76
77 gpu.screen.h = sh;
78}
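
/*
 * Worked example for update_width() above: the x1/x2 display range
 * (set through GPUwriteStatus case 0x06) spans up to 2560 units for a
 * full-width picture, so with hres = 320 a span of x2 - x1 == 2560
 * yields w = 320 and 1280 yields w = 160; spans outside (0, 2560) fall
 * back to the full hres.
 */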

static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;
}
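
/*
 * Rough sketch of the skip cycle above with frameskip.set = N: frames
 * keep being skipped while cnt < N, then skipping is dropped, cnt resets
 * and frame_ready is raised so GPUupdateLace() can present the next
 * rendered frame; *advice from the frontend (see GPUrearmedCallbacks)
 * can also switch skipping on.
 */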

static noinline void decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
}
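
/*
 * The unsigned comparisons above form a single range check: because the
 * subtraction wraps around for x < gpu.screen.x, the expression is true
 * both when the E3 drawing-area origin lies left of / above the displayed
 * rectangle and when it lies past its right / bottom edge, i.e. drawing
 * happens outside the visible screen and skipping stays allowed.
 */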

static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
    case 0x02:
    case 0x03:
    case 0x04:
    case 0x05:
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      break;
    case 0x06:
      gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
      break;
    case 0x07:
      gpu.gp0 = 2;
      break;
    default:
      gpu.gp0 = 0;
      break;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  return ret;
}

long GPUshutdown(void)
{
  return vout_finish();
}

void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      gpu.status.blanking = data & 1;
      break;
    case 0x04:
      gpu.status.dma = data & 3;
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x3ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
    case 0x08:
      gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
      update_width();
      update_height();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
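
/*
 * cmd_lengths[] gives the number of parameter words following each GP0
 * command word, so do_cmd_buffer() below treats 1 + cmd_lengths[cmd] as
 * the full command size.  For example, entry 0x20 (monochrome triangle)
 * is 3 for its three vertex words, and 0x2c (textured quad) is 8 for the
 * four vertex/texcoord pairs; 0xa0/0xc0 image transfers list only their
 * two setup words here, the pixel data being consumed separately.
 */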

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);

  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}
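
/*
 * do_vram_io() streams a VRAM rectangle one scanline at a time:
 * dma.offset remembers how far into the current line a previous, partial
 * transfer stopped, the main loop copies whole lines, and any words left
 * over at the end are stored as a new partial line so the next call can
 * resume where this one ended.  The return value is the number of 32-bit
 * words consumed (count is doubled above to work in 16bpp pixels).
 */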

static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}
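
/*
 * The size decoding above follows the usual hardware convention of
 * masking after subtracting 1, so a size field of 0 means the maximum:
 * ((0 - 1) & 0x3ff) + 1 == 1024 columns and ((0 - 1) & 0x1ff) + 1 == 512
 * rows, i.e. the whole VRAM width/height.
 */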

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int len, cmd, start, pos;
  int vram_dirty = 0;

  // process buffer
  for (start = pos = 0; pos < count; )
  {
    cmd = -1;
    len = 0;

    if (gpu.dma.h) {
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
      start = pos;
    }

    // do look-ahead pass to detect SR changes and VRAM i/o
    while (pos < count) {
      uint32_t *list = data + pos;
      cmd = list[0] >> 24;
      len = 1 + cmd_lengths[cmd];

      //printf("  %3d: %02x %d\n", pos, cmd, len);
      if ((cmd & 0xf4) == 0x24) {
        // flat textured prim
        gpu.ex_regs[1] &= ~0x1ff;
        gpu.ex_regs[1] |= list[4] & 0x1ff;
      }
      else if ((cmd & 0xf4) == 0x34) {
        // shaded textured prim
        gpu.ex_regs[1] &= ~0x1ff;
        gpu.ex_regs[1] |= list[5] & 0x1ff;
      }
      else if (cmd == 0xe3)
        decide_frameskip_allow(list[0]);

      if (2 <= cmd && cmd < 0xc0)
        vram_dirty = 1;
      else if ((cmd & 0xf8) == 0xe0)
        gpu.ex_regs[cmd & 7] = list[0];

      if (pos + len > count) {
        cmd = -1;
        break; // incomplete cmd
      }
      if (cmd == 0xa0 || cmd == 0xc0)
        break; // image i/o
      pos += len;
    }

    if (pos - start > 0) {
      if (!gpu.frameskip.active || !gpu.frameskip.allow)
        do_cmd_list(data + start, pos - start);
      start = pos;
    }

    if (cmd == 0xa0 || cmd == 0xc0) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
      pos += len;
    }
    else if (cmd == -1)
      break;
  }

  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  if (gpu.frameskip.active)
    renderer_sync_ecmds(gpu.ex_regs);
  gpu.state.fb_dirty |= vram_dirty;

  return count - pos;
}
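
/*
 * Summary of the loop above: each iteration first drains any in-progress
 * VRAM transfer, then scans ahead over complete commands, mirroring the
 * texture page / e1-e6 state into gpu.ex_regs and noting whether VRAM
 * will be modified.  The scanned slice is only handed to the renderer via
 * do_cmd_list() when frameskip does not apply; image load/store commands
 * (0xa0/0xc0) and incomplete commands end the scan, and whatever could
 * not be consumed is reported back so the caller can keep it buffered.
 */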

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    //  unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}
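
/*
 * Each linked-list packet starts with a header word of the form
 * (len << 24) | next_addr: 'len' command words follow the header and
 * next_addr points at the next packet, with 0xffffff terminating the
 * chain.  Marking visited headers with bit 23 catches self-referencing
 * lists, and the markers are stripped again once the walk finishes; the
 * FF7 workaround pre-marks the previously submitted list for the same
 * reason.  cpu_cycles is a rough cost estimate returned to the emulator
 * core for DMA timing.
 */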

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;      // should be always 1 for now (set by main emu)
  uint32_t ulStatus;             // current gpu status
  uint32_t ulControl[256];       // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
    case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
      memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status.reg = freeze->ulStatus;
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512);
      break;
  }

  return 1;
}

void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking || !gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
}
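
/*
 * Presentation policy above: nothing is shown while the display is
 * blanked or the frame buffer is unchanged.  With frameskip enabled a
 * skipped frame is normally not presented, but once 9 or more frames
 * have passed since the last flip the skip state is dropped and the
 * current contents are flushed anyway, so the screen cannot stall
 * indefinitely.
 */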

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab