gpulib: limit height
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
CommitLineData
1ab64c54 1/*
05740673 2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
1ab64c54
GI
3 *
4 * This work is licensed under the terms of any of these licenses
5 * (at your option):
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
9 */
10
d30279e2 11#include <stdio.h>
1ab64c54 12#include <string.h>
56f08d83 13#include "gpu.h"
1ab64c54
GI
14
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
// branch-prediction hint: the condition is expected to be false
#define unlikely(x) __builtin_expect((x), 0)
#define noinline __attribute__((noinline))

// log line prefixed with the frame and hsync counters for timing context
#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

// verbose logging is compiled out by default; redefine to gpu_log to enable
//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

// the single global GPU state instance shared with the renderer/vout backends
struct psx_gpu gpu;

// forward declarations for the command processor and transfer finalizer
static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);
48f3d210 31
// GP1(01h)-style reset: drain any buffered GP0 words, then abort a
// partially-done vram transfer (flushed first so its effects are visible).
static noinline void do_cmd_reset(void)
{
  // process whatever is still queued so no commands are silently lost
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  // an unfinished vram transfer must be finalized (renderer cache update /
  // status flag clear) before its state is discarded
  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}
42
6e9bdaef 43static noinline void do_reset(void)
1ab64c54 44{
7841712d 45 unsigned int i;
48f3d210 46
47 do_cmd_reset();
48
6e9bdaef 49 memset(gpu.regs, 0, sizeof(gpu.regs));
48f3d210 50 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
51 gpu.ex_regs[i] = (0xe0 + i) << 24;
d30279e2 52 gpu.status.reg = 0x14802000;
6e9bdaef 53 gpu.gp0 = 0;
fc84f618 54 gpu.regs[3] = 1;
6e9bdaef 55 gpu.screen.hres = gpu.screen.w = 256;
fc84f618 56 gpu.screen.vres = gpu.screen.h = 240;
1ab64c54
GI
57}
58
8dd855cd 59static noinline void update_width(void)
60{
61 int sw = gpu.screen.x2 - gpu.screen.x1;
62 if (sw <= 0 || sw >= 2560)
63 // full width
64 gpu.screen.w = gpu.screen.hres;
65 else
66 gpu.screen.w = sw * gpu.screen.hres / 2560;
67}
68
69static noinline void update_height(void)
70{
74df5906 71 // TODO: emulate this properly..
8dd855cd 72 int sh = gpu.screen.y2 - gpu.screen.y1;
73 if (gpu.status.dheight)
74 sh *= 2;
74df5906 75 if (sh <= 0 || sh > gpu.screen.vres)
8dd855cd 76 sh = gpu.screen.vres;
77
78 gpu.screen.h = sh;
79}
80
// Decide whether the next frame should be skipped. Called from
// GPUwriteStatus on a display-address change (once per emulated flip).
static noinline void decide_frameskip(void)
{
  // track consecutive skipped frames; a non-skipped frame becomes
  // "ready" for the frontend to display
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  // frontend advice can start a skip run; otherwise keep skipping
  // until .set frames in a row have been skipped
  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  // a fill command (0x02) postponed during skipping is executed now so
  // vram is consistent for the frame that will actually be rendered
  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}
103
b243416b 104static noinline int decide_frameskip_allow(uint32_t cmd_e3)
9fe27e25 105{
106 // no frameskip if it decides to draw to display area,
107 // but not for interlace since it'll most likely always do that
108 uint32_t x = cmd_e3 & 0x3ff;
109 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
110 gpu.frameskip.allow = gpu.status.interlace ||
111 (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
112 (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
b243416b 113 return gpu.frameskip.allow;
9fe27e25 114}
115
6e9bdaef 116static noinline void get_gpu_info(uint32_t data)
117{
118 switch (data & 0x0f) {
119 case 0x02:
120 case 0x03:
121 case 0x04:
122 case 0x05:
123 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
124 break;
125 case 0x06:
126 gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
127 break;
128 case 0x07:
129 gpu.gp0 = 2;
130 break;
131 default:
132 gpu.gp0 = 0;
133 break;
134 }
135}
136
9ee0fd5b 137// double, for overdraw guard
138#define VRAM_SIZE (1024 * 512 * 2 * 2)
139
140static int map_vram(void)
141{
142 gpu.vram = gpu.mmap(VRAM_SIZE);
143 if (gpu.vram != NULL) {
144 gpu.vram += 4096 / 2;
145 return 0;
146 }
147 else {
148 fprintf(stderr, "could not map vram, expect crashes\n");
149 return -1;
150 }
151}
152
// Plugin entry point: initialize output, renderer and GPU state.
// Returns 0 on success, non-zero on any sub-init failure.
long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  // point counters at a dummy zero until the frontend installs real ones
  // via GPUrearmedCallbacks
  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  // the mmap callback may not be set yet; mapping is retried later
  // from GPUrearmedCallbacks in that case
  if (gpu.mmap != NULL) {
    if (map_vram() != 0)
      ret = -1;
  }
  return ret;
}
171
// Plugin teardown: finish renderer/output and release the vram mapping.
long GPUshutdown(void)
{
  long ret;

  renderer_finish();
  ret = vout_finish();
  if (gpu.vram != NULL) {
    // undo the offset applied in map_vram before unmapping
    gpu.vram -= 4096 / 2;
    gpu.munmap(gpu.vram, VRAM_SIZE);
  }
  gpu.vram = NULL;

  return ret;
}
186
1ab64c54
GI
// GP1 control register write: dispatch on the command byte (data >> 24).
void GPUwriteStatus(uint32_t data)
{
  // lookup tables for GP1(08h) display mode bits
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    // skip redundant writes, except for reset-type (0,1) and
    // display-address (5) commands which have side effects
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
  case 0x00:  // full GPU reset
    do_reset();
    break;
  case 0x01:  // reset command buffer
    do_cmd_reset();
    break;
  case 0x03:  // display enable/disable
    gpu.status.blanking = data & 1;
    break;
  case 0x04:  // DMA direction
    gpu.status.dma = data & 3;
    break;
  case 0x05:  // start of display area (the "flip")
    gpu.screen.x = data & 0x3ff;
    gpu.screen.y = (data >> 10) & 0x1ff;
    if (gpu.frameskip.set) {
      decide_frameskip_allow(gpu.ex_regs[3]);
      // only re-decide once per emulated frame
      if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
        decide_frameskip();
        gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
      }
    }
    break;
  case 0x06:  // horizontal display range
    gpu.screen.x1 = data & 0xfff;
    gpu.screen.x2 = (data >> 12) & 0xfff;
    update_width();
    break;
  case 0x07:  // vertical display range
    gpu.screen.y1 = data & 0x3ff;
    gpu.screen.y2 = (data >> 10) & 0x3ff;
    update_height();
    break;
  case 0x08:  // display mode: rearrange bits into the status register
    gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
    gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
    gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
    update_width();
    update_height();
    renderer_notify_res_change();
    break;
  default:
    // 0x10..0x1f: GPU info readback
    if ((cmd & 0xf0) == 0x10)
      get_gpu_info(data);
    break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}
253
// Number of extra argument words each GP0 command consumes (the command
// word itself not counted). Variable-length commands (polylines, image
// transfers) are handled specially by the command parsers.
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
273
d30279e2
GI
274#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
275
276static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
1ab64c54 277{
d30279e2
GI
278 uint16_t *vram = VRAM_MEM_XY(x, y);
279 if (is_read)
280 memcpy(mem, vram, l * 2);
281 else
282 memcpy(vram, mem, l * 2);
283}
284
// Stream (part of) a vram transfer: move up to count words between data
// and vram according to the active gpu.dma rectangle. May be called
// multiple times for one transfer; resume position is kept in gpu.dma.
// Returns the number of 32-bit words consumed/produced.
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  // finish a partially-transferred row left over from a previous call
  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    // advance within the row, or wrap to the next row when it completes
    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  // full rows; y wraps at the 512-line vram height
  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    // transfer not finished; store a partial row if any data remains
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);

  // save resume state for the next chunk
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}
334
// Begin a vram transfer (GP0 0xa0.. write / 0xc0.. read): decode the
// destination rectangle from the position and size words into gpu.dma.
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  // size of 0 means max (1024/512); mask after -1 to get 1..1024 / 1..512
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  // keep a copy of the initial rect for finish_vram_transfer / cmd reset
  gpu.dma_start = gpu.dma;

  // make sure queued draws land in vram before it is read or overwritten
  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // pre-fill gp0 with the first data word so a GPUreadData can see it
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}
359
05740673 360static void finish_vram_transfer(int is_read)
361{
362 if (is_read)
363 gpu.status.img = 0;
364 else
365 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
366 gpu.dma_start.w, gpu.dma_start.h);
367}
368
// Walk a command list while frameskip is active: don't draw, but still
// track state-affecting commands (ex_regs, fills, variable-length
// polylines). Stops when skipping becomes disallowed, on image i/o, or
// on an incomplete command. Returns words consumed; *last_cmd gets the
// last command byte, or -1 if the final command was incomplete.
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy, v;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    switch (cmd) {
      case 0x02:
        if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
          // clearing something large, don't skip
          do_cmd_list(list, 3, &dummy);
        else
          // small fill: postpone until frameskip ends (see decide_frameskip)
          memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
        break;
      case 0x24 ... 0x27:  // textured polygons: track texpage in ex_regs[1]
      case 0x2c ... 0x2f:
      case 0x34 ... 0x37:
      case 0x3c ... 0x3f:
        gpu.ex_regs[1] &= ~0x1ff;
        gpu.ex_regs[1] |= list[4 + ((cmd >> 4) & 1)] & 0x1ff;
        break;
      case 0x48 ... 0x4F:  // polylines: scan for the 0x5xxx5xxx terminator
        for (v = 3; pos + v < count; v++)
        {
          if ((list[v] & 0xf000f000) == 0x50005000)
            break;
        }
        len += v - 3;
        break;
      case 0x58 ... 0x5F:  // shaded polylines: terminator on every 2nd word
        for (v = 4; pos + v < count; v += 2)
        {
          if ((list[v] & 0xf000f000) == 0x50005000)
            break;
        }
        len += v - 4;
        break;
      default:
        // 0xe3 (draw area) may re-enable drawing to the display area
        if (cmd == 0xe3)
          skip = decide_frameskip_allow(list[0]);
        // mirror 0xe0..0xe7 setup commands
        if ((cmd & 0xf8) == 0xe0)
          gpu.ex_regs[cmd & 7] = list[0];
        break;
    }

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (0xa0 <= cmd && cmd <= 0xdf)
      break; // image i/o

    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}
434
// Core GP0 command dispatcher: feeds words to the active vram transfer,
// starts new transfers, and hands drawing commands to the renderer (or to
// the skip parser while frameskipping). Returns the number of words NOT
// consumed (non-zero when the buffer ends mid-command).
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    // an active cpu->vram transfer consumes the data stream first
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], (cmd & 0xe0) == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  // refresh the status register bits mirrored from ex_regs (texpage etc.)
  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  // draw area change may alter whether skipping is allowed
  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}
483
5440b88e 484static void flush_cmd_buffer(void)
d30279e2 485{
48f3d210 486 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
d30279e2
GI
487 if (left > 0)
488 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
489 gpu.cmd_len = left;
1ab64c54
GI
490}
491
492void GPUwriteDataMem(uint32_t *mem, int count)
493{
d30279e2
GI
494 int left;
495
56f08d83 496 log_io("gpu_dma_write %p %d\n", mem, count);
497
d30279e2
GI
498 if (unlikely(gpu.cmd_len > 0))
499 flush_cmd_buffer();
56f08d83 500
48f3d210 501 left = do_cmd_buffer(mem, count);
d30279e2 502 if (left)
56f08d83 503 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
1ab64c54
GI
504}
505
d30279e2 506void GPUwriteData(uint32_t data)
1ab64c54 507{
56f08d83 508 log_io("gpu_write %08x\n", data);
d30279e2
GI
509 gpu.cmd_buffer[gpu.cmd_len++] = data;
510 if (gpu.cmd_len >= CMD_BUFFER_LEN)
511 flush_cmd_buffer();
1ab64c54
GI
512}
513
// Walk a linked-list DMA chain in emulated RAM and execute each packet.
// Uses bit 23 of the header word as an in-place loop-detection marker
// (removed afterwards). Returns an approximate cycle cost.
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice, detect this: if the previous (large)
  // list was sent within the last hsync, pre-mark its head so the walk
  // below stops there instead of re-executing it
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; addr != 0xffffff; count++)  // 0xffffff terminates the chain
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;       // payload words in this packet
    addr = list[0] & 0xffffff; // next packet address
    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    // loop detection marker
    // (bit23 set causes DMA error on real machine, so
    //  unlikely to be ever set by the game)
    list[0] |= 0x800000;

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

    // next address already marked -> we've looped
    if (addr & 0x800000)
      break;
  }

  // remove loop detection markers
  addr = start_addr & 0x1fffff;
  while (count-- > 0) {
    list = rambase + addr / 4;
    addr = list[0] & 0x1fffff;
    list[0] &= ~0x800000;
  }
  if (llist_entry)
    *llist_entry &= ~0x800000;

  // remember this list for the duplicate-send detection above
  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}
578
d30279e2
GI
// Block DMA read from GPUREAD: fill mem with count words from the
// active vram->cpu transfer, if one is in progress.
void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  // flush first so a pending read-transfer setup command takes effect
  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}
589
// Single-word GPUREAD: returns the next vram-transfer word if a read
// transfer is active, otherwise the latched gp0 value (GPU info etc.).
uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}
604
605uint32_t GPUreadStatus(void)
606{
ddd56f6e 607 uint32_t ret;
56f08d83 608
d30279e2
GI
609 if (unlikely(gpu.cmd_len > 0))
610 flush_cmd_buffer();
611
24de2dd4 612 ret = gpu.status.reg;
ddd56f6e 613 log_io("gpu_read_status %08x\n", ret);
614 return ret;
d30279e2
GI
615}
616
// Savestate layout; shared with other PCSX-family GPU plugins, so the
// field sizes and order must not change.
struct GPUFreeze
{
  uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
  uint32_t ulStatus; // current gpu status
  uint32_t ulControl[256]; // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};
1ab64c54 624
// Save (type 1) or load (type 0) the GPU state. Returns 1 always.
long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
  case 1: // save
    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
    memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
    // ex_regs are stashed in the unused upper part of ulControl
    memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
    freeze->ulStatus = gpu.status.reg;
    break;
  case 0: // load
    memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
    memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
    memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
    gpu.status.reg = freeze->ulStatus;
    gpu.cmd_len = 0;
    // replay control registers 8..1 so derived state (screen geometry
    // etc.) is rebuilt; the xor defeats the redundant-write check
    for (i = 8; i > 0; i--) {
      gpu.regs[i] ^= 1; // avoid reg change detection
      GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
    }
    renderer_sync_ecmds(gpu.ex_regs);
    // whole vram was replaced; invalidate all renderer caches
    renderer_update_caches(0, 0, 1024, 512);
    break;
  }

  return 1;
}
655
// Per-vsync output update: present the frame if dirty, handle blanking,
// and apply the frameskip "frame ready" gating.
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  // display disabled: blank the output once, then do nothing
  if (gpu.status.blanking) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      // safety valve: force a frame out if 9+ frames passed with no flip
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}
687
// Vblank notification: decide interlace rendering mode and pass the
// current field (lcf) to the renderer when it changes or is active.
void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    // flush so the field switch applies at a command boundary
    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}
708
709#include "../../frontend/plugin_lib.h"
710
// Install frontend-provided callbacks and settings (rearmed extension).
// Called after GPUinit; also performs the delayed vram mapping.
void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  // replace the dummy counters set up in GPUinit with the real ones
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap (GPUinit may have run before mmap was available)
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}
734
1ab64c54 735// vim:shiftwidth=2:expandtab