// pcsx_rearmed: plugins/gpulib/gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu __attribute__((aligned(2048)));

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < ARRAY_SIZE(gpu.ex_regs); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

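/* Display width comes from the GP1(0x06) horizontal display range.
 * As far as the usual hardware docs go, x1/x2 are in GPU dot clocks
 * with roughly 2560 clocks spanning a full scanline, hence the
 * scaling below. */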
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

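/* Called once per display flip. With frameskip.set == n this in effect
 * renders one frame and then skips up to n, i.e. roughly one displayed
 * frame per n+1 flips; in automatic mode it instead follows the
 * frontend's *advice pointer. */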
static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if it decides to draw to display area,
  // but not for interlace since it'll most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  // the unsigned compares below fold "screen.x <= x < screen.x + w"
  // into a single test each: the subtraction wraps when x < screen.x
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

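/* GP1(0x10) "get GPU info". The mapping below matches the commonly
 * documented behavior: 2 = texture window, 3/4 = draw area corners,
 * 5 (and, seemingly, 6) = draw offset, 7 = GPU type (2 = newer GPU). */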
static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
    case 0x02:
    case 0x03:
    case 0x04:
    case 0x05:
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      break;
    case 0x06:
      gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
      break;
    case 0x07:
      gpu.gp0 = 2;
      break;
    default:
      gpu.gp0 = 0;
      break;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  return ret;
}

long GPUshutdown(void)
{
  return vout_finish();
}

void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      gpu.status.blanking = data & 1;
      break;
    case 0x04:
      gpu.status.dma = data & 3;
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x3ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
    case 0x08:
      // display mode: data bits 0-5 go to status bits 17-22, bit 6 to bit 16
      gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
      update_width();
      update_height();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

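/* Number of extra parameter words each GP0 command takes after the
 * command word itself, so a full packet is 1 + cmd_lengths[cmd] words.
 * The image i/o commands 0xa0/0xc0 carry a variable-sized payload and
 * are handled separately. */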
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

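/* PSX vram is a single 1024x512 array of 16bit pixels, so a "line"
 * here is a horizontal run of up to 1024 halfwords. */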
static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

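/* Moves data between the command stream and the vram rectangle set up
 * by start_vram_transfer, in three steps: finish a partially done line,
 * copy whole lines, then start another partial line if pixels remain.
 * Returns the number of 32bit words consumed. */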
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);

  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

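/* GP0(0xa0)/GP0(0xc0) image transfer setup. The position and size words
 * pack x in bits 0-9 and y in bits 16-24; the "((v - 1) & mask) + 1"
 * dance makes a size of 0 wrap to the maximum, so 1024x512 covers the
 * whole vram. */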
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

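/* Command list parser used while frameskipping: it doesn't render, but
 * still tracks state the skipped commands would change - the e0-e7
 * registers and the texture page of textured prims (ex_regs[1]) - and
 * it still executes large fills so vram doesn't go stale. Returns the
 * number of words consumed, with the last command seen in *last_cmd
 * (-1 for an incomplete one). */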
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy;
  int skip = 1;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    if (cmd == 0x02) {
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(data + pos, 3, &dummy);
    }
    else if ((cmd & 0xf4) == 0x24) {
      // flat textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4] & 0x1ff;
    }
    else if ((cmd & 0xf4) == 0x34) {
      // shaded textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[5] & 0x1ff;
    }
    else if (cmd == 0xe3)
      skip = decide_frameskip_allow(list[0]);

    if ((cmd & 0xf8) == 0xe0)
      gpu.ex_regs[cmd & 7] = list[0];

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (cmd == 0xa0 || cmd == 0xc0)
      break; // image i/o
    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

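/* Top-level GP0 command processor: feeds a pending vram transfer first,
 * hands 0xa0/0xc0 off to start_vram_transfer, and passes everything
 * else to the renderer (or to the skip parser while a frame is being
 * skipped). Returns the number of words left unprocessed, i.e. an
 * incomplete packet that has to wait for more data. */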
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (cmd == 0xa0 || cmd == 0xc0) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  // mirror the e1 (texpage) and e6 (mask) bits into the status register
  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

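/* Walks a GP0 DMA linked list: each node is a header word with the word
 * count in the top byte and the address of the next node in the low 24
 * bits (0xffffff terminates the list). The return value is a rough
 * cycle cost estimate that the caller can use for DMA timing. */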
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    //  unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

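/* Save-state interface; the layout appears to follow the old PSEmu Pro
 * plugin spec (status word, control registers, full 2MB vram image). */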
struct GPUFreeze
{
  uint32_t ulFreezeVersion;      // should be always 1 for now (set by main emu)
  uint32_t ulStatus;             // current gpu status
  uint32_t ulControl[256];       // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
    case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
      memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status.reg = freeze->ulStatus;
      // replay control writes 1..8; the stored value is toggled first so
      // GPUwriteStatus' same-value filter doesn't drop the write
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512);
      break;
  }

  return 1;
}

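/* Called by the frontend once per emulated frame. The "< 9" check below
 * looks like a safety net: even if no frame was marked ready, force a
 * flip after 9 frames so frameskip can't blank the screen entirely. */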
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking || !gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab