psx_gpu: fix line cmd length and move w/h masking
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 *  See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu __attribute__((aligned(2048)));

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}
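
/*
 * Note: x1/x2 come from the display range register and are measured in
 * GPU clocks; this code treats 2560 of them as a full scanline, so e.g.
 * sw == 1280 maps to hres / 2 (illustrative value, not from any
 * particular game).
 */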

static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}
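
/*
 * The unsigned casts above give a single-compare range check:
 * (uint32_t)(x - gpu.screen.x) >= gpu.screen.w is true both when
 * x < gpu.screen.x (the subtraction wraps to a huge value) and when
 * x lies past the right edge, i.e. the draw area misses the display.
 */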

static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
    case 0x02:
    case 0x03:
    case 0x04:
    case 0x05:
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      break;
    case 0x06:
      gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
      break;
    case 0x07:
      gpu.gp0 = 2;
      break;
    default:
      gpu.gp0 = 0;
      break;
  }
}
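
/*
 * GP1(0x10) info requests: 0x02..0x05 return the latched e2-e5 state
 * (texture window, draw area, draw offset), 0x06 repeats the draw
 * offset here, and 0x07 reports GPU version 2, as the newer GPU
 * revision is commonly documented to do.
 */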

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  return ret;
}

long GPUshutdown(void)
{
  return vout_finish();
}

void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      gpu.status.blanking = data & 1;
      break;
    case 0x04:
      gpu.status.dma = data & 3;
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x3ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
    case 0x08:
      gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
      update_width();
      update_height();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
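
/*
 * Each entry is the number of parameter words following the GP0
 * command word, so a packet takes 1 + cmd_lengths[cmd] words; e.g.
 * 0x20 (flat triangle) is the color/command word plus 3 vertex words.
 * Polylines (0x48+) end with a terminator word, so for them the table
 * can only give the minimum length; presumably the 0x40 row is what
 * the "fix line cmd length" part of this commit adjusts.
 */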

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
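// (vram is a 1024x512, 16bpp image, hence the stride of 1024 pixels per row)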

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);

  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}
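
/*
 * Note: do_vram_io() works in 16bpp pixels internally (count *= 2 at
 * entry) while its argument and return value count 32-bit FIFO words,
 * hence the final count / 2.
 */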

static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}
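
/*
 * The ((v - 1) & mask) + 1 form above makes a size of 0 act as the
 * maximum (1024x512), which is how zero-sized transfers are commonly
 * documented to behave; applying it here at transfer start appears to
 * be the "move w/h masking" part of this commit.
 */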

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}
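
// A completed cpu->vram write may overlap data the renderer has
// cached, so it is notified of the touched rectangle here.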

static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy;
  int skip = 1;

  // XXX: polylines are not properly handled
  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    if (cmd == 0x02) {
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(data + pos, 3, &dummy);
    }
    else if ((cmd & 0xf4) == 0x24) {
      // flat textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4] & 0x1ff;
    }
    else if ((cmd & 0xf4) == 0x34) {
      // shaded textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[5] & 0x1ff;
    }
    else if (cmd == 0xe3)
      skip = decide_frameskip_allow(list[0]);

    if ((cmd & 0xf8) == 0xe0)
      gpu.ex_regs[cmd & 7] = list[0];

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (cmd == 0xa0 || cmd == 0xc0)
      break; // image i/o
    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}
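
/*
 * Even while skipping, e1-e7 state and the texpage bits of textured
 * prims are still tracked and synced to the renderer above, so GPU
 * state stays consistent for the frames that do get drawn.
 */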

static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (cmd == 0xa0 || cmd == 0xc0) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}
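
/*
 * Status word bits 0-10 mirror the e1 state and bits 11-12 the mask
 * settings from e6, which is why they are rebuilt from gpu.ex_regs
 * after each batch.
 */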

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}
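
// Words left over from an incomplete command are moved to the start
// of cmd_buffer and kept until more data arrives.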

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    //  unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}
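
/*
 * The returned cpu_cycles value is a rough cost estimate of walking
 * the list (10 per entry, plus 5 + len when a packet is present); it
 * also feeds the ff7 double-list heuristic above via last_list.cycles.
 */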

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}
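
// Reads return the gp0 latch (set by info requests and vram reads);
// an active vram->cpu transfer advances by one word per read.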

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;      // should be always 1 for now (set by main emu)
  uint32_t ulStatus;             // current gpu status
  uint32_t ulControl[256];       // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
    case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
      memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status.reg = freeze->ulStatus;
      gpu.cmd_len = 0;
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512);
      break;
  }

  return 1;
}
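
/*
 * The reg ^= 1 trick on load: GPUwriteStatus() ignores writes whose
 * value equals the stored gpu.regs[cmd], so each register is
 * temporarily flipped to force the handler to run and re-derive
 * screen state from the restored values.
 */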

void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking || !gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
}
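
// If frameskip hasn't produced a ready frame for ~9 flips, skipping
// is forced off above so the display can't stay blank indefinitely.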

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab