/* plugins/gpulib/gpu.c */
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 *  See the COPYING file in the top-level directory.
 */
10
d30279e2 11#include <stdio.h>
0b02eb77 12#include <stdlib.h>
1ab64c54 13#include <string.h>
56f08d83 14#include "gpu.h"
1ab64c54
GI
15
16#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
d30279e2 17#define unlikely(x) __builtin_expect((x), 0)
8dd855cd 18#define noinline __attribute__((noinline))
1ab64c54 19
deb18d24 20#define gpu_log(fmt, ...) \
3ece2f0c 21 printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)
deb18d24 22
23//#define log_io gpu_log
56f08d83 24#define log_io(...)
9394ada5 25//#define log_anomaly gpu_log
26#define log_anomaly(...)
56f08d83 27
7d993ee2 28struct psx_gpu gpu __attribute__((aligned(2048)));
1ab64c54 29
48f3d210 30static noinline int do_cmd_buffer(uint32_t *data, int count);
05740673 31static void finish_vram_transfer(int is_read);
48f3d210 32
33static noinline void do_cmd_reset(void)
34{
35 if (unlikely(gpu.cmd_len > 0))
36 do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
48f3d210 37 gpu.cmd_len = 0;
05740673 38
39 if (unlikely(gpu.dma.h > 0))
40 finish_vram_transfer(gpu.dma_start.is_read);
48f3d210 41 gpu.dma.h = 0;
42}
43
6e9bdaef 44static noinline void do_reset(void)
1ab64c54 45{
7841712d 46 unsigned int i;
48f3d210 47
48 do_cmd_reset();
49
6e9bdaef 50 memset(gpu.regs, 0, sizeof(gpu.regs));
48f3d210 51 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
52 gpu.ex_regs[i] = (0xe0 + i) << 24;
d30279e2 53 gpu.status.reg = 0x14802000;
6e9bdaef 54 gpu.gp0 = 0;
fc84f618 55 gpu.regs[3] = 1;
6e9bdaef 56 gpu.screen.hres = gpu.screen.w = 256;
fc84f618 57 gpu.screen.vres = gpu.screen.h = 240;
1ab64c54
GI
58}
59
8dd855cd 60static noinline void update_width(void)
61{
62 int sw = gpu.screen.x2 - gpu.screen.x1;
63 if (sw <= 0 || sw >= 2560)
64 // full width
65 gpu.screen.w = gpu.screen.hres;
66 else
67 gpu.screen.w = sw * gpu.screen.hres / 2560;
68}
69
70static noinline void update_height(void)
71{
72 int sh = gpu.screen.y2 - gpu.screen.y1;
73 if (gpu.status.dheight)
74 sh *= 2;
75 if (sh <= 0)
76 sh = gpu.screen.vres;
77
78 gpu.screen.h = sh;
79}
80
fc84f618 81static noinline void decide_frameskip(void)
82{
9fe27e25 83 if (gpu.frameskip.active)
84 gpu.frameskip.cnt++;
85 else {
86 gpu.frameskip.cnt = 0;
87 gpu.frameskip.frame_ready = 1;
88 }
fc84f618 89
9fe27e25 90 if (!gpu.frameskip.active && *gpu.frameskip.advice)
91 gpu.frameskip.active = 1;
92 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
fc84f618 93 gpu.frameskip.active = 1;
94 else
95 gpu.frameskip.active = 0;
fbb4bfff 96
97 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
98 int dummy;
99 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
100 gpu.frameskip.pending_fill[0] = 0;
101 }
fc84f618 102}
103
b243416b 104static noinline int decide_frameskip_allow(uint32_t cmd_e3)
9fe27e25 105{
106 // no frameskip if it decides to draw to display area,
107 // but not for interlace since it'll most likely always do that
108 uint32_t x = cmd_e3 & 0x3ff;
109 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
110 gpu.frameskip.allow = gpu.status.interlace ||
111 (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
112 (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
b243416b 113 return gpu.frameskip.allow;
9fe27e25 114}
115
6e9bdaef 116static noinline void get_gpu_info(uint32_t data)
117{
118 switch (data & 0x0f) {
119 case 0x02:
120 case 0x03:
121 case 0x04:
122 case 0x05:
123 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
124 break;
125 case 0x06:
126 gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
127 break;
128 case 0x07:
129 gpu.gp0 = 2;
130 break;
131 default:
132 gpu.gp0 = 0;
133 break;
134 }
135}
136
137long GPUinit(void)
138{
9394ada5 139 int ret;
140 ret = vout_init();
0b02eb77 141
142 gpu.state.enhancement_available = 0;
9394ada5 143 ret |= renderer_init();
144
0b02eb77 145 if (gpu.state.enhancement_available) {
146 if (gpu.enhancement_bufer == NULL)
147 gpu.enhancement_bufer = malloc(2048 * 1024 * 2 + 1024 * 512 * 2);
148 if (gpu.enhancement_bufer == NULL)
149 gpu_log("OOM for enhancement buffer\n");
150 }
151 else if (gpu.enhancement_bufer != NULL) {
152 free(gpu.enhancement_bufer);
153 gpu.enhancement_bufer = NULL;
154 }
155
3ece2f0c 156 gpu.state.frame_count = &gpu.zero;
deb18d24 157 gpu.state.hcnt = &gpu.zero;
48f3d210 158 gpu.frameskip.active = 0;
159 gpu.cmd_len = 0;
9394ada5 160 do_reset();
48f3d210 161
6e9bdaef 162 return ret;
163}
164
165long GPUshutdown(void)
166{
167 return vout_finish();
168}
169
1ab64c54
GI
170void GPUwriteStatus(uint32_t data)
171{
172 static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
173 static const short vres[4] = { 240, 480, 256, 480 };
174 uint32_t cmd = data >> 24;
175
fc84f618 176 if (cmd < ARRAY_SIZE(gpu.regs)) {
48f3d210 177 if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
fc84f618 178 return;
8dd855cd 179 gpu.regs[cmd] = data;
fc84f618 180 }
181
182 gpu.state.fb_dirty = 1;
8dd855cd 183
184 switch (cmd) {
1ab64c54 185 case 0x00:
6e9bdaef 186 do_reset();
1ab64c54 187 break;
48f3d210 188 case 0x01:
189 do_cmd_reset();
190 break;
1ab64c54 191 case 0x03:
d30279e2 192 gpu.status.blanking = data & 1;
1ab64c54
GI
193 break;
194 case 0x04:
195 gpu.status.dma = data & 3;
196 break;
197 case 0x05:
198 gpu.screen.x = data & 0x3ff;
199 gpu.screen.y = (data >> 10) & 0x3ff;
9fe27e25 200 if (gpu.frameskip.set) {
201 decide_frameskip_allow(gpu.ex_regs[3]);
202 if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
203 decide_frameskip();
204 gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
205 }
fb4c6fba 206 }
1ab64c54 207 break;
8dd855cd 208 case 0x06:
209 gpu.screen.x1 = data & 0xfff;
210 gpu.screen.x2 = (data >> 12) & 0xfff;
211 update_width();
212 break;
1ab64c54
GI
213 case 0x07:
214 gpu.screen.y1 = data & 0x3ff;
215 gpu.screen.y2 = (data >> 10) & 0x3ff;
8dd855cd 216 update_height();
1ab64c54
GI
217 break;
218 case 0x08:
219 gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
8dd855cd 220 gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
221 gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
222 update_width();
223 update_height();
1ab64c54 224 break;
deb18d24 225 default:
226 if ((cmd & 0xf0) == 0x10)
227 get_gpu_info(data);
6e9bdaef 228 break;
1ab64c54 229 }
7890a708 230
231#ifdef GPUwriteStatus_ext
232 GPUwriteStatus_ext(data);
233#endif
1ab64c54
GI
234}
235
// Number of parameter words following each GP0 command byte
// (the command word itself is not counted).
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 10
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20 polygons
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11, // 30
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40 lines
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, // 50
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60 rects
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, // 70
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80 vram->vram
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 90
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0 cpu->vram
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // b0
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0 vram->cpu
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // d0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0 settings
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0  // f0
};
255
d30279e2
GI
256#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
257
258static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
1ab64c54 259{
d30279e2
GI
260 uint16_t *vram = VRAM_MEM_XY(x, y);
261 if (is_read)
262 memcpy(mem, vram, l * 2);
263 else
264 memcpy(vram, mem, l * 2);
265}
266
267static int do_vram_io(uint32_t *data, int count, int is_read)
268{
269 int count_initial = count;
270 uint16_t *sdata = (uint16_t *)data;
271 int x = gpu.dma.x, y = gpu.dma.y;
272 int w = gpu.dma.w, h = gpu.dma.h;
ddd56f6e 273 int o = gpu.dma.offset;
d30279e2
GI
274 int l;
275 count *= 2; // operate in 16bpp pixels
276
277 if (gpu.dma.offset) {
278 l = w - gpu.dma.offset;
ddd56f6e 279 if (count < l)
d30279e2 280 l = count;
ddd56f6e 281
282 do_vram_line(x + o, y, sdata, l, is_read);
283
284 if (o + l < w)
285 o += l;
286 else {
287 o = 0;
288 y++;
289 h--;
290 }
d30279e2
GI
291 sdata += l;
292 count -= l;
d30279e2
GI
293 }
294
295 for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
296 y &= 511;
297 do_vram_line(x, y, sdata, w, is_read);
298 }
299
05740673 300 if (h > 0) {
301 if (count > 0) {
302 y &= 511;
303 do_vram_line(x, y, sdata, count, is_read);
304 o = count;
305 count = 0;
306 }
d30279e2 307 }
05740673 308 else
309 finish_vram_transfer(is_read);
d30279e2
GI
310 gpu.dma.y = y;
311 gpu.dma.h = h;
ddd56f6e 312 gpu.dma.offset = o;
d30279e2 313
6e9bdaef 314 return count_initial - count / 2;
d30279e2
GI
315}
316
317static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
318{
ddd56f6e 319 if (gpu.dma.h)
320 log_anomaly("start_vram_transfer while old unfinished\n");
321
5440b88e 322 gpu.dma.x = pos_word & 0x3ff;
323 gpu.dma.y = (pos_word >> 16) & 0x1ff;
48f3d210 324 gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
325 gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
d30279e2 326 gpu.dma.offset = 0;
05740673 327 gpu.dma.is_read = is_read;
328 gpu.dma_start = gpu.dma;
d30279e2 329
9e146206 330 renderer_flush_queues();
331 if (is_read) {
d30279e2 332 gpu.status.img = 1;
9e146206 333 // XXX: wrong for width 1
334 memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
5440b88e 335 gpu.state.last_vram_read_frame = *gpu.state.frame_count;
9e146206 336 }
d30279e2 337
6e9bdaef 338 log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
339 gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
d30279e2
GI
340}
341
05740673 342static void finish_vram_transfer(int is_read)
343{
344 if (is_read)
345 gpu.status.img = 0;
346 else
347 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
348 gpu.dma_start.w, gpu.dma_start.h);
349}
350
b243416b 351static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
352{
353 int cmd = 0, pos = 0, len, dummy;
354 int skip = 1;
355
fbb4bfff 356 gpu.frameskip.pending_fill[0] = 0;
357
b8d961ef 358 // XXX: polylines are not properly handled
b243416b 359 while (pos < count && skip) {
360 uint32_t *list = data + pos;
361 cmd = list[0] >> 24;
362 len = 1 + cmd_lengths[cmd];
363
364 if (cmd == 0x02) {
365 if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
366 // clearing something large, don't skip
fbb4bfff 367 do_cmd_list(list, 3, &dummy);
368 else
369 memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
b243416b 370 }
371 else if ((cmd & 0xf4) == 0x24) {
372 // flat textured prim
373 gpu.ex_regs[1] &= ~0x1ff;
374 gpu.ex_regs[1] |= list[4] & 0x1ff;
375 }
376 else if ((cmd & 0xf4) == 0x34) {
377 // shaded textured prim
378 gpu.ex_regs[1] &= ~0x1ff;
379 gpu.ex_regs[1] |= list[5] & 0x1ff;
380 }
381 else if (cmd == 0xe3)
382 skip = decide_frameskip_allow(list[0]);
383
384 if ((cmd & 0xf8) == 0xe0)
385 gpu.ex_regs[cmd & 7] = list[0];
386
387 if (pos + len > count) {
388 cmd = -1;
389 break; // incomplete cmd
390 }
391 if (cmd == 0xa0 || cmd == 0xc0)
392 break; // image i/o
393 pos += len;
394 }
395
396 renderer_sync_ecmds(gpu.ex_regs);
397 *last_cmd = cmd;
398 return pos;
399}
400
48f3d210 401static noinline int do_cmd_buffer(uint32_t *data, int count)
d30279e2 402{
b243416b 403 int cmd, pos;
404 uint32_t old_e3 = gpu.ex_regs[3];
fc84f618 405 int vram_dirty = 0;
d30279e2 406
d30279e2 407 // process buffer
b243416b 408 for (pos = 0; pos < count; )
d30279e2 409 {
b243416b 410 if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
411 vram_dirty = 1;
d30279e2 412 pos += do_vram_io(data + pos, count - pos, 0);
ddd56f6e 413 if (pos == count)
414 break;
d30279e2
GI
415 }
416
b243416b 417 cmd = data[pos] >> 24;
d30279e2
GI
418 if (cmd == 0xa0 || cmd == 0xc0) {
419 // consume vram write/read cmd
420 start_vram_transfer(data[pos + 1], data[pos + 2], cmd == 0xc0);
b243416b 421 pos += 3;
422 continue;
d30279e2 423 }
b243416b 424
1e07f71d 425 // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
426 if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
b243416b 427 pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
428 else {
429 pos += do_cmd_list(data + pos, count - pos, &cmd);
430 vram_dirty = 1;
431 }
432
433 if (cmd == -1)
434 // incomplete cmd
ddd56f6e 435 break;
d30279e2 436 }
ddd56f6e 437
a3a9f519 438 gpu.status.reg &= ~0x1fff;
439 gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
440 gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;
441
fc84f618 442 gpu.state.fb_dirty |= vram_dirty;
443
b243416b 444 if (old_e3 != gpu.ex_regs[3])
445 decide_frameskip_allow(gpu.ex_regs[3]);
446
ddd56f6e 447 return count - pos;
d30279e2
GI
448}
449
5440b88e 450static void flush_cmd_buffer(void)
d30279e2 451{
48f3d210 452 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
d30279e2
GI
453 if (left > 0)
454 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
455 gpu.cmd_len = left;
1ab64c54
GI
456}
457
458void GPUwriteDataMem(uint32_t *mem, int count)
459{
d30279e2
GI
460 int left;
461
56f08d83 462 log_io("gpu_dma_write %p %d\n", mem, count);
463
d30279e2
GI
464 if (unlikely(gpu.cmd_len > 0))
465 flush_cmd_buffer();
56f08d83 466
48f3d210 467 left = do_cmd_buffer(mem, count);
d30279e2 468 if (left)
56f08d83 469 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
1ab64c54
GI
470}
471
d30279e2 472void GPUwriteData(uint32_t data)
1ab64c54 473{
56f08d83 474 log_io("gpu_write %08x\n", data);
d30279e2
GI
475 gpu.cmd_buffer[gpu.cmd_len++] = data;
476 if (gpu.cmd_len >= CMD_BUFFER_LEN)
477 flush_cmd_buffer();
1ab64c54
GI
478}
479
ddd56f6e 480long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
1ab64c54 481{
ddd56f6e 482 uint32_t addr, *list;
deb18d24 483 uint32_t *llist_entry = NULL;
ddd56f6e 484 int len, left, count;
1c72b1c2 485 long cpu_cycles = 0;
d30279e2
GI
486
487 if (unlikely(gpu.cmd_len > 0))
488 flush_cmd_buffer();
489
deb18d24 490 // ff7 sends it's main list twice, detect this
3ece2f0c 491 if (*gpu.state.frame_count == gpu.state.last_list.frame &&
492 *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
1c72b1c2 493 gpu.state.last_list.cycles > 2048)
deb18d24 494 {
495 llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
496 *llist_entry |= 0x800000;
497 }
498
56f08d83 499 log_io("gpu_dma_chain\n");
ddd56f6e 500 addr = start_addr & 0xffffff;
501 for (count = 0; addr != 0xffffff; count++)
502 {
ddd56f6e 503 list = rambase + (addr & 0x1fffff) / 4;
d30279e2
GI
504 len = list[0] >> 24;
505 addr = list[0] & 0xffffff;
1c72b1c2 506 cpu_cycles += 10;
507 if (len > 0)
508 cpu_cycles += 5 + len;
deb18d24 509
510 log_io(".chain %08x #%d\n", (list - rambase) * 4, len);
ddd56f6e 511
512 // loop detection marker
513 // (bit23 set causes DMA error on real machine, so
514 // unlikely to be ever set by the game)
515 list[0] |= 0x800000;
516
56f08d83 517 if (len) {
48f3d210 518 left = do_cmd_buffer(list + 1, len);
56f08d83 519 if (left)
deb18d24 520 log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
56f08d83 521 }
ddd56f6e 522
523 if (addr & 0x800000)
524 break;
525 }
526
527 // remove loop detection markers
528 addr = start_addr & 0x1fffff;
529 while (count-- > 0) {
530 list = rambase + addr / 4;
531 addr = list[0] & 0x1fffff;
532 list[0] &= ~0x800000;
d30279e2 533 }
deb18d24 534 if (llist_entry)
535 *llist_entry &= ~0x800000;
d30279e2 536
3ece2f0c 537 gpu.state.last_list.frame = *gpu.state.frame_count;
deb18d24 538 gpu.state.last_list.hcnt = *gpu.state.hcnt;
1c72b1c2 539 gpu.state.last_list.cycles = cpu_cycles;
deb18d24 540 gpu.state.last_list.addr = start_addr;
541
1c72b1c2 542 return cpu_cycles;
1ab64c54
GI
543}
544
d30279e2
GI
545void GPUreadDataMem(uint32_t *mem, int count)
546{
56f08d83 547 log_io("gpu_dma_read %p %d\n", mem, count);
548
d30279e2
GI
549 if (unlikely(gpu.cmd_len > 0))
550 flush_cmd_buffer();
56f08d83 551
d30279e2
GI
552 if (gpu.dma.h)
553 do_vram_io(mem, count, 1);
554}
555
556uint32_t GPUreadData(void)
557{
9e146206 558 uint32_t ret;
56f08d83 559
560 if (unlikely(gpu.cmd_len > 0))
561 flush_cmd_buffer();
562
9e146206 563 ret = gpu.gp0;
56f08d83 564 if (gpu.dma.h)
9e146206 565 do_vram_io(&ret, 1, 1);
56f08d83 566
9e146206 567 log_io("gpu_read %08x\n", ret);
568 return ret;
d30279e2
GI
569}
570
571uint32_t GPUreadStatus(void)
572{
ddd56f6e 573 uint32_t ret;
56f08d83 574
d30279e2
GI
575 if (unlikely(gpu.cmd_len > 0))
576 flush_cmd_buffer();
577
24de2dd4 578 ret = gpu.status.reg;
ddd56f6e 579 log_io("gpu_read_status %08x\n", ret);
580 return ret;
d30279e2
GI
581}
582
// Savestate layout shared with the emulator core — do not change
// field order or sizes.
struct GPUFreeze
{
  uint32_t ulFreezeVersion;           // should be always 1 for now (set by main emu)
  uint32_t ulStatus;                  // current gpu status
  uint32_t ulControl[256];            // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};
1ab64c54 590
096ec49b 591long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
1ab64c54 592{
fc84f618 593 int i;
594
1ab64c54
GI
595 switch (type) {
596 case 1: // save
d30279e2
GI
597 if (gpu.cmd_len > 0)
598 flush_cmd_buffer();
1ab64c54
GI
599 memcpy(freeze->psxVRam, gpu.vram, sizeof(gpu.vram));
600 memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
6e9bdaef 601 memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
1ab64c54 602 freeze->ulStatus = gpu.status.reg;
1ab64c54
GI
603 break;
604 case 0: // load
605 memcpy(gpu.vram, freeze->psxVRam, sizeof(gpu.vram));
606 memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
6e9bdaef 607 memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
1ab64c54 608 gpu.status.reg = freeze->ulStatus;
3d47ef17 609 gpu.cmd_len = 0;
fc84f618 610 for (i = 8; i > 0; i--) {
611 gpu.regs[i] ^= 1; // avoid reg change detection
612 GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
613 }
5b745e5b 614 renderer_sync_ecmds(gpu.ex_regs);
05740673 615 renderer_update_caches(0, 0, 1024, 512);
1ab64c54
GI
616 break;
617 }
618
619 return 1;
620}
621
5440b88e 622void GPUupdateLace(void)
623{
624 if (gpu.cmd_len > 0)
625 flush_cmd_buffer();
626 renderer_flush_queues();
627
aafcb4dd 628 if (gpu.status.blanking) {
629 if (!gpu.state.blanked) {
630 vout_blank();
631 gpu.state.blanked = 1;
632 gpu.state.fb_dirty = 1;
633 }
634 return;
635 }
636
637 if (!gpu.state.fb_dirty)
5440b88e 638 return;
639
640 if (gpu.frameskip.set) {
641 if (!gpu.frameskip.frame_ready) {
642 if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
643 return;
644 gpu.frameskip.active = 0;
645 }
646 gpu.frameskip.frame_ready = 0;
647 }
648
649 vout_update();
650 gpu.state.fb_dirty = 0;
aafcb4dd 651 gpu.state.blanked = 0;
5440b88e 652}
653
72e5023f 654void GPUvBlank(int is_vblank, int lcf)
655{
5440b88e 656 int interlace = gpu.state.allow_interlace
657 && gpu.status.interlace && gpu.status.dheight;
658 // interlace doesn't look nice on progressive displays,
659 // so we have this "auto" mode here for games that don't read vram
660 if (gpu.state.allow_interlace == 2
661 && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
662 {
663 interlace = 0;
664 }
665 if (interlace || interlace != gpu.state.old_interlace) {
666 gpu.state.old_interlace = interlace;
667
668 if (gpu.cmd_len > 0)
669 flush_cmd_buffer();
670 renderer_flush_queues();
671 renderer_set_interlace(interlace, !lcf);
672 }
673}
674
675#include "../../frontend/plugin_lib.h"
676
677void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
678{
679 gpu.frameskip.set = cbs->frameskip;
680 gpu.frameskip.advice = &cbs->fskip_advice;
681 gpu.frameskip.active = 0;
682 gpu.frameskip.frame_ready = 1;
683 gpu.state.hcnt = cbs->gpu_hcnt;
684 gpu.state.frame_count = cbs->gpu_frame_count;
685 gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
0b02eb77 686 gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
5440b88e 687
688 if (cbs->pl_vout_set_raw_vram)
689 cbs->pl_vout_set_raw_vram(gpu.vram);
690 renderer_set_config(cbs);
691 vout_set_config(cbs);
72e5023f 692}
693
1ab64c54 694// vim:shiftwidth=2:expandtab