gpulib: eliminate list scan-ahead
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 *  See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

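// the single global GPU state blob; the 2048-byte alignment is presumably
// there so optimized (asm/NEON) renderer code can address fields with
// small fixed offsets -- an assumption, not documented in this file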
struct psx_gpu gpu __attribute__((aligned(2048)));

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

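// finish whatever the GPU front-end was in the middle of: run any
// buffered command words and complete a pending VRAM transfer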
static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

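// full GP1 reset: clears the registers and restores the power-on
// defaults (status 0x14802000, 256x240 display)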
static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < ARRAY_SIZE(gpu.ex_regs); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

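// the display width is derived from the x1/x2 display range, which is
// given in video clock ticks; 2560 ticks appears to correspond to a
// full-width scanline here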
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

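// advance the frameskip state machine once per flip: either count a
// skipped frame or mark a rendered frame as ready to be shown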
static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // don't skip frames if the game draws to the displayed area,
  // except in interlace mode, where it will almost always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

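// GP1(0x10) info query: exposes the e2-e5 state words (texture window,
// draw area, draw offset) and the GPU version through the GP0 read port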
static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
  case 0x02:
  case 0x03:
  case 0x04:
  case 0x05:
    gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
    break;
  case 0x06:
    gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
    break;
  case 0x07:
    gpu.gp0 = 2;
    break;
  default:
    gpu.gp0 = 0;
    break;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  return ret;
}

long GPUshutdown(void)
{
  return vout_finish();
}

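// GP1 (control port) write; for most commands a write of an unchanged
// value is ignored, but reset (0x00/0x01) and display start (0x05)
// always take effect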
void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      gpu.status.blanking = data & 1;
      break;
    case 0x04:
      gpu.status.dma = data & 3;
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x3ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
    case 0x08:
      gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
      update_width();
      update_height();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

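// number of parameter words each GP0 command carries after the command
// word itself (callers compute 1 + cmd_lengths[cmd]); image transfers
// (0xa0/0xc0) have an additional variable-length pixel payload that is
// handled separately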
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

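// move data between the command stream and the VRAM transfer rectangle;
// resumes a partially transferred line via dma.offset, wraps y at 512,
// and returns the number of 32-bit words consumed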
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);

  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

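// on completion of a write transfer, tell the renderer which VRAM
// rectangle changed so it can invalidate its texture caches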
static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

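// walk a command list without rendering while a frame is being skipped:
// drawing commands are dropped, but state that outlives the frame is
// still tracked (e0-e7 words, texpage bits of textured prims), large
// fills are still executed, and e3 (draw area) may cancel the skip;
// stops at image i/o or an incomplete command, returning words consumed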
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy;
  int skip = 1;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    if (cmd == 0x02) {
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(data + pos, 3, &dummy);
    }
    else if ((cmd & 0xf4) == 0x24) {
      // flat textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4] & 0x1ff;
    }
    else if ((cmd & 0xf4) == 0x34) {
      // shaded textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[5] & 0x1ff;
    }
    else if (cmd == 0xe3)
      skip = decide_frameskip_allow(list[0]);

    if ((cmd & 0xf8) == 0xe0)
      gpu.ex_regs[cmd & 7] = list[0];

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (cmd == 0xa0 || cmd == 0xc0)
      break; // image i/o
    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

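// main command word dispatcher: feeds pending VRAM i/o, consumes
// 0xa0/0xc0 transfer setup words, and hands everything else to the
// renderer (or the skip walker above); returns words left unprocessed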
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (cmd == 0xa0 || cmd == 0xc0) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
      pos += 3;
      continue;
    }

    if (gpu.frameskip.active && gpu.frameskip.allow)
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  // reflect the current e1 (texpage etc.) and e6 (mask) state in GPUSTAT
  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

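// DMA2 linked-list walker: each node is a 32-bit header (next address
// in the low 24 bits, payload length in the high 8) followed by GP0
// words; bit 23 of visited headers doubles as a loop-detection marker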
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // FF7 sends its main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (a set bit 23 causes a DMA error on the real machine, so it's
    // unlikely to ever be set by a game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;           // should be always 1 for now (set by main emu)
  uint32_t ulStatus;                  // current gpu status
  uint32_t ulControl[256];            // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
  case 1: // save
    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
    memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
    memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
    freeze->ulStatus = gpu.status.reg;
    break;
  case 0: // load
    memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
    memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
    memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
    gpu.status.reg = freeze->ulStatus;
    // replay the control writes to rebuild derived state; the xor
    // defeats the same-value write filter in GPUwriteStatus()
    for (i = 8; i > 0; i--) {
      gpu.regs[i] ^= 1; // avoid reg change detection
      GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
    }
    renderer_sync_ecmds(gpu.ex_regs);
    renderer_update_caches(0, 0, 1024, 512);
    break;
  }

  return 1;
}

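// called by the frontend once per emulated frame: flush everything,
// then present the frame unless the display is blanked, nothing was
// drawn, or the frameskip logic decides to drop it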
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking || !gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab