psx_gpu: do enhanced lines
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu __attribute__((aligned(2048)));

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

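// Command reset (GP1(0x01), also part of the full reset): process any
// buffered GP0 words, then terminate a still-active VRAM transfer.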
static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

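// Derive the visible width in pixels from the horizontal display range
// set by GP1(0x06). x1/x2 are in GPU clock ticks; 2560 ticks appears to
// cover the full scanline, so degenerate ranges fall back to full hres.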
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

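// Called when the display address changes (at most once per frame):
// track how many frames in a row were skipped, start skipping on the
// frontend's advice, stop after frameskip.set consecutive skips, and
// replay a VRAM fill that was deferred while skipping was active.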
static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // no frameskip if the game draws to the displayed area,
  // except in interlace mode, which will most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

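// GP1(0x10) "get GPU info" requests: latch the answer into the GP0 read
// port. 0x02..0x05 return the cached e2-e5 words (texture window, draw
// area, draw offset), 0x06 mirrors e5, and 0x07 reads as GPU version 2;
// anything else reads as 0.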
static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
    case 0x02:
    case 0x03:
    case 0x04:
    case 0x05:
      gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
      break;
    case 0x06:
      gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
      break;
    case 0x07:
      gpu.gp0 = 2;
      break;
    default:
      gpu.gp0 = 0;
      break;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  return ret;
}

long GPUshutdown(void)
{
  renderer_finish();
  return vout_finish();
}

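// GP1 control port write: display control (reset, blanking, DMA mode,
// display address/range/mode) plus the 0x1x info requests above.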
void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
    case 0x00:
      do_reset();
      break;
    case 0x01:
      do_cmd_reset();
      break;
    case 0x03:
      gpu.status.blanking = data & 1;
      break;
    case 0x04:
      gpu.status.dma = data & 3;
      break;
    case 0x05:
      gpu.screen.x = data & 0x3ff;
      gpu.screen.y = (data >> 10) & 0x3ff;
      if (gpu.frameskip.set) {
        decide_frameskip_allow(gpu.ex_regs[3]);
        if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
          decide_frameskip();
          gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
        }
      }
      break;
    case 0x06:
      gpu.screen.x1 = data & 0xfff;
      gpu.screen.x2 = (data >> 12) & 0xfff;
      update_width();
      break;
    case 0x07:
      gpu.screen.y1 = data & 0x3ff;
      gpu.screen.y2 = (data >> 10) & 0x3ff;
      update_height();
      break;
    case 0x08:
      gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
      gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
      gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
      update_width();
      update_height();
      renderer_notify_res_change();
      break;
    default:
      if ((cmd & 0xf0) == 0x10)
        get_gpu_info(data);
      break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

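// Parameter words following each GP0 opcode (the opcode word itself is
// not counted). Variable-length commands (image i/o payloads, polylines)
// only have their fixed part listed here and are special-cased by the
// parsers.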
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

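// Copy pixels between the command stream and VRAM for an ongoing image
// transfer: first complete a partially transferred line, then move whole
// lines, and remember the offset into a trailing partial line so the
// transfer can resume on the next call. Returns 32-bit words consumed.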
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);

  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

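// Set up a GP0(0xa0) VRAM write or GP0(0xc0) read. A size of 0 wraps to
// the maximum (1024x512) due to the -1/+1 masking. Queued draw commands
// are flushed first, and for reads the first word is latched into gp0.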
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

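// A finished read clears the status img bit; a finished write tells the
// renderer to invalidate its caches over the rectangle just updated.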
static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

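// Walk a command list without drawing (frameskip active): large VRAM
// fills still execute, small ones are deferred into pending_fill;
// texpage bits from textured prims and the e1-e7 words are tracked so
// renderer state stays in sync, and an e3 word may re-allow drawing and
// end the skip. Returns words consumed, with the last opcode (or -1 for
// an incomplete command) stored in *last_cmd.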
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  // XXX: polylines are not properly handled
  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    if (cmd == 0x02) {
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(list, 3, &dummy);
      else
        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
    }
    else if ((cmd & 0xf4) == 0x24) {
      // flat textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4] & 0x1ff;
    }
    else if ((cmd & 0xf4) == 0x34) {
      // shaded textured prim
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[5] & 0x1ff;
    }
    else if (cmd == 0xe3)
      skip = decide_frameskip_allow(list[0]);

    if ((cmd & 0xf8) == 0xe0)
      gpu.ex_regs[cmd & 7] = list[0];

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (cmd == 0xa0 || cmd == 0xc0)
      break; // image i/o
    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

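// Main GP0 dispatcher: feed an active VRAM write first, start transfers
// on 0xa0/0xc0, pass everything else to the renderer (or to the skip
// parser while frameskipping), then mirror e1/e6 state into the status
// register. Returns how many words were left unprocessed because the
// trailing command was incomplete.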
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (cmd == 0xa0 || cmd == 0xc0) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

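// Drain the GPUwriteData accumulation buffer, moving any incomplete
// trailing command to the front for the next call.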
static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

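// Walk and execute a DMA linked list: each node is a header word with
// the next address in the low 24 bits and payload length in the top 8,
// followed by the payload. Bit 23 of visited headers serves as a
// loop-detection marker (per the comment below, it would raise a DMA
// error on real hardware, so games leave it clear) and is removed again
// afterwards. Returns an approximate cycle cost.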
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice, detect this
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    //  unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;           // should be always 1 for now (set by main emu)
  uint32_t ulStatus;                  // current gpu status
  uint32_t ulControl[256];            // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

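// Savestate load/save. On load, control registers are replayed through
// GPUwriteStatus(); each gpu.regs entry is toggled first so the
// same-value early-return in GPUwriteStatus can't swallow the replay.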
long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
    case 1: // save
      if (gpu.cmd_len > 0)
        flush_cmd_buffer();
      memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
      memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
      memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
      freeze->ulStatus = gpu.status.reg;
      break;
    case 0: // load
      memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
      memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
      memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
      gpu.status.reg = freeze->ulStatus;
      gpu.cmd_len = 0;
      for (i = 8; i > 0; i--) {
        gpu.regs[i] ^= 1; // avoid reg change detection
        GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
      }
      renderer_sync_ecmds(gpu.ex_regs);
      renderer_update_caches(0, 0, 1024, 512);
      break;
  }

  return 1;
}

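// Per-frame presentation: flush pending commands, blank the output when
// display is disabled, and flip via vout_update() unless frameskip is
// holding the frame back (capped at roughly 9 frames without a flip so
// skipping can't stall the display indefinitely).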
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}

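// Vblank hook: compute the effective interlace mode, with an "auto"
// setting (allow_interlace == 2) that only honors interlace while the
// game is reading VRAM back, since true interlace tends to look bad on
// progressive displays.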
void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

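// PCSX-ReARMed-specific configuration hook: pick up frameskip settings,
// frame/hclock counters and interlace/enhancement options from the
// frontend, and forward the config to the renderer and video backend.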
void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab