frontend: fix minor glitches on video mode changes
[pcsx_rearmed.git] / plugins/gpulib/gpu.c
/*
 * (C) Gražvydas "notaz" Ignotas, 2011-2012
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>
#include <string.h>
#include "gpu.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#ifdef __GNUC__
#define unlikely(x) __builtin_expect((x), 0)
#define preload __builtin_prefetch
#define noinline __attribute__((noinline))
#else
#define unlikely(x)
#define preload(...)
#define noinline
#error huh
#endif

#define gpu_log(fmt, ...) \
  printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)

//#define log_io gpu_log
#define log_io(...)
//#define log_anomaly gpu_log
#define log_anomaly(...)

struct psx_gpu gpu;

static noinline int do_cmd_buffer(uint32_t *data, int count);
static void finish_vram_transfer(int is_read);

static noinline void do_cmd_reset(void)
{
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}

static noinline void do_reset(void)
{
  unsigned int i;

  do_cmd_reset();

  memset(gpu.regs, 0, sizeof(gpu.regs));
  for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
    gpu.ex_regs[i] = (0xe0 + i) << 24;
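  // 0x14802000: display blanked, ready to receive cmd word / DMA block
  // (presumably matching the documented GPUSTAT value after a GP1(00h) reset)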
  gpu.status.reg = 0x14802000;
  gpu.gp0 = 0;
  gpu.regs[3] = 1;
  gpu.screen.hres = gpu.screen.w = 256;
  gpu.screen.vres = gpu.screen.h = 240;
}

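// the display range x1/x2 are in GPU clock ticks; a full scanline is
// nominally 2560 ticks, so scale the visible span down to pixels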
static noinline void update_width(void)
{
  int sw = gpu.screen.x2 - gpu.screen.x1;
  if (sw <= 0 || sw >= 2560)
    // full width
    gpu.screen.w = gpu.screen.hres;
  else
    gpu.screen.w = sw * gpu.screen.hres / 2560;
}

static noinline void update_height(void)
{
  // TODO: emulate this properly..
  int sh = gpu.screen.y2 - gpu.screen.y1;
  if (gpu.status.dheight)
    sh *= 2;
  if (sh <= 0 || sh > gpu.screen.vres)
    sh = gpu.screen.vres;

  gpu.screen.h = sh;
}

static noinline void decide_frameskip(void)
{
  if (gpu.frameskip.active)
    gpu.frameskip.cnt++;
  else {
    gpu.frameskip.cnt = 0;
    gpu.frameskip.frame_ready = 1;
  }

  if (!gpu.frameskip.active && *gpu.frameskip.advice)
    gpu.frameskip.active = 1;
  else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
    gpu.frameskip.active = 1;
  else
    gpu.frameskip.active = 0;

  if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
    int dummy;
    do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
    gpu.frameskip.pending_fill[0] = 0;
  }
}

static noinline int decide_frameskip_allow(uint32_t cmd_e3)
{
  // don't skip when the game draws into the visible display area,
  // except under interlace, where it will most likely always do that
  uint32_t x = cmd_e3 & 0x3ff;
  uint32_t y = (cmd_e3 >> 10) & 0x3ff;
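  // the unsigned compare below also treats x < gpu.screen.x
  // (wrapped-around negative) as out of range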
  gpu.frameskip.allow = gpu.status.interlace ||
    (uint32_t)(x - gpu.screen.x) >= (uint32_t)gpu.screen.w ||
    (uint32_t)(y - gpu.screen.y) >= (uint32_t)gpu.screen.h;
  return gpu.frameskip.allow;
}

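// GP1(10h..1Fh): info requests; returns texture window (e2), draw area
// (e3/e4), draw offset (e5) or the GPU version, mirroring GP0(e2h..e5h)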
static noinline void get_gpu_info(uint32_t data)
{
  switch (data & 0x0f) {
  case 0x02:
  case 0x03:
  case 0x04:
  case 0x05:
    gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
    break;
  case 0x06:
    gpu.gp0 = gpu.ex_regs[5] & 0xfffff;
    break;
  case 0x07:
    gpu.gp0 = 2;
    break;
  default:
    gpu.gp0 = 0;
    break;
  }
}

// double, for overdraw guard
#define VRAM_SIZE (1024 * 512 * 2 * 2)

static int map_vram(void)
{
  gpu.vram = gpu.mmap(VRAM_SIZE);
  if (gpu.vram != NULL) {
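    // start 4k into the mapping, presumably so small negative offsets
    // from overdraw cannot underflow the allocation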
    gpu.vram += 4096 / 2;
    return 0;
  }
  else {
    fprintf(stderr, "could not map vram, expect crashes\n");
    return -1;
  }
}

long GPUinit(void)
{
  int ret;
  ret = vout_init();
  ret |= renderer_init();

  gpu.state.frame_count = &gpu.zero;
  gpu.state.hcnt = &gpu.zero;
  gpu.frameskip.active = 0;
  gpu.cmd_len = 0;
  do_reset();

  if (gpu.mmap != NULL) {
    if (map_vram() != 0)
      ret = -1;
  }
  return ret;
}

long GPUshutdown(void)
{
  long ret;

  renderer_finish();
  ret = vout_finish();
  if (gpu.vram != NULL) {
    gpu.vram -= 4096 / 2;
    gpu.munmap(gpu.vram, VRAM_SIZE);
  }
  gpu.vram = NULL;

  return ret;
}

void GPUwriteStatus(uint32_t data)
{
  static const short hres[8] = { 256, 368, 320, 384, 512, 512, 640, 640 };
  static const short vres[4] = { 240, 480, 256, 480 };
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
  case 0x00:
    do_reset();
    break;
  case 0x01:
    do_cmd_reset();
    break;
  case 0x03:
    gpu.status.blanking = data & 1;
    break;
  case 0x04:
    gpu.status.dma = data & 3;
    break;
  case 0x05:
    gpu.screen.x = data & 0x3ff;
    gpu.screen.y = (data >> 10) & 0x1ff;
    if (gpu.frameskip.set) {
      decide_frameskip_allow(gpu.ex_regs[3]);
      if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
        decide_frameskip();
        gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
      }
    }
    break;
  case 0x06:
    gpu.screen.x1 = data & 0xfff;
    gpu.screen.x2 = (data >> 12) & 0xfff;
    update_width();
    break;
  case 0x07:
    gpu.screen.y1 = data & 0x3ff;
    gpu.screen.y2 = (data >> 10) & 0x3ff;
    update_height();
    break;
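  // GP1(08h) display mode: bits 0-5 map to status bits 17-22,
  // bit 6 (the 368-pixel "hres 2" flag) to status bit 16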
  case 0x08:
    gpu.status.reg = (gpu.status.reg & ~0x7f0000) | ((data & 0x3f) << 17) | ((data & 0x40) << 10);
    gpu.screen.hres = hres[(gpu.status.reg >> 16) & 7];
    gpu.screen.vres = vres[(gpu.status.reg >> 19) & 3];
    update_width();
    update_height();
    renderer_notify_res_change();
    break;
  default:
    if ((cmd & 0xf0) == 0x10)
      get_gpu_info(data);
    break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}

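// number of extra parameter words each GP0 command takes after the
// command word itself (variable-length commands are special-cased later)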
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]

static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  if (is_read)
    memcpy(mem, vram, l * 2);
  else
    memcpy(vram, mem, l * 2);
}

static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset;
  int l;
  count *= 2; // operate in 16bpp pixels

  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

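  // whole lines; y wraps around VRAM's 512-line height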
  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511;
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);

  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}

static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

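  // position wraps to VRAM bounds (1024x512); a size of 0 maps to the maximum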
  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma;

  renderer_flush_queues();
  if (is_read) {
    gpu.status.img = 1;
    // XXX: wrong for width 1
    memcpy(&gpu.gp0, VRAM_MEM_XY(gpu.dma.x, gpu.dma.y), 4);
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}

static void finish_vram_transfer(int is_read)
{
  if (is_read)
    gpu.status.img = 0;
  else
    renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
                           gpu.dma_start.w, gpu.dma_start.h);
}

static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy, v;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = list[0] >> 24;
    len = 1 + cmd_lengths[cmd];

    switch (cmd) {
    case 0x02:
      if ((list[2] & 0x3ff) > gpu.screen.w || ((list[2] >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(list, 3, &dummy);
      else
        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
      break;
    case 0x24 ... 0x27:
    case 0x2c ... 0x2f:
    case 0x34 ... 0x37:
    case 0x3c ... 0x3f:
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= list[4 + ((cmd >> 4) & 1)] & 0x1ff;
      break;
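    // polylines are variable length, terminated by a word matching
    // the 0x5xxx5xxx marker pattern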
    case 0x48 ... 0x4f:
      for (v = 3; pos + v < count; v++)
      {
        if ((list[v] & 0xf000f000) == 0x50005000)
          break;
      }
      len += v - 3;
      break;
    case 0x58 ... 0x5f:
      for (v = 4; pos + v < count; v += 2)
      {
        if ((list[v] & 0xf000f000) == 0x50005000)
          break;
      }
      len += v - 4;
      break;
    default:
      if (cmd == 0xe3)
        skip = decide_frameskip_allow(list[0]);
      if ((cmd & 0xf8) == 0xe0)
        gpu.ex_regs[cmd & 7] = list[0];
      break;
    }

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (0xa0 <= cmd && cmd <= 0xdf)
      break; // image i/o

    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}

static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = data[pos] >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      // consume vram write/read cmd
      start_vram_transfer(data[pos + 1], data[pos + 2], (cmd & 0xe0) == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((data[pos] >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

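  // mirror the texpage (e1) and mask (e6) settings into status bits 0-12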
  gpu.status.reg &= ~0x1fff;
  gpu.status.reg |= gpu.ex_regs[1] & 0x7ff;
  gpu.status.reg |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}

static void flush_cmd_buffer(void)
{
  int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  if (left > 0)
    memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
  gpu.cmd_len = left;
}

void GPUwriteDataMem(uint32_t *mem, int count)
{
  int left;

  log_io("gpu_dma_write %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  left = do_cmd_buffer(mem, count);
  if (left)
    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
}

void GPUwriteData(uint32_t data)
{
  log_io("gpu_write %08x\n", data);
  gpu.cmd_buffer[gpu.cmd_len++] = data;
  if (gpu.cmd_len >= CMD_BUFFER_LEN)
    flush_cmd_buffer();
}

long GPUdmaChain(uint32_t *rambase, uint32_t start_addr)
{
  uint32_t addr, *list, ld_addr = 0;
  uint32_t *llist_entry = NULL;
  int len, left, count;
  long cpu_cycles = 0;

  preload(rambase + (start_addr & 0x1fffff) / 4);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  // ff7 sends its main list twice; detect this and mark the head
  // so the repeated walk terminates right away
  if (*gpu.state.frame_count == gpu.state.last_list.frame &&
      *gpu.state.hcnt - gpu.state.last_list.hcnt <= 1 &&
      gpu.state.last_list.cycles > 2048)
  {
    llist_entry = rambase + (gpu.state.last_list.addr & 0x1fffff) / 4;
    *llist_entry |= 0x800000;
  }

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; (addr & 0x800000) == 0; count++)
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = list[0] >> 24;
    addr = list[0] & 0xffffff;
    preload(rambase + (addr & 0x1fffff) / 4);

    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08x #%d\n", (list - rambase) * 4, len);

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left)
        log_anomaly("GPUdmaChain: discarded %d/%d words\n", left, len);
    }

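    // protect against endless lists: past LD_THRESHOLD entries, mark
    // each visited link word so reaching a marked one ends the walk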
    #define LD_THRESHOLD (8 * 1024)
    if (count >= LD_THRESHOLD) {
      if (count == LD_THRESHOLD) {
        ld_addr = addr;
        continue;
      }

      // loop detection marker
      // (bit23 set causes DMA error on real machine, so
      // unlikely to be ever set by the game)
      list[0] |= 0x800000;
    }
  }

  if (ld_addr != 0) {
    // remove loop detection markers
    count -= LD_THRESHOLD + 2;
    addr = ld_addr & 0x1fffff;
    while (count-- > 0) {
      list = rambase + addr / 4;
      addr = list[0] & 0x1fffff;
      list[0] &= ~0x800000;
    }
  }

  if (llist_entry)
    *llist_entry &= ~0x800000;

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}

void GPUreadDataMem(uint32_t *mem, int count)
{
  log_io("gpu_dma_read %p %d\n", mem, count);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  if (gpu.dma.h)
    do_vram_io(mem, count, 1);
}

uint32_t GPUreadData(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.gp0;
  if (gpu.dma.h)
    do_vram_io(&ret, 1, 1);

  log_io("gpu_read %08x\n", ret);
  return ret;
}

uint32_t GPUreadStatus(void)
{
  uint32_t ret;

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  ret = gpu.status.reg;
  log_io("gpu_read_status %08x\n", ret);
  return ret;
}

struct GPUFreeze
{
  uint32_t ulFreezeVersion;      // should be always 1 for now (set by main emu)
  uint32_t ulStatus;             // current gpu status
  uint32_t ulControl[256];       // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};

long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
  case 1: // save
    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
    memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
    memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
    freeze->ulStatus = gpu.status.reg;
    break;
  case 0: // load
    memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
    memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
    memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
    gpu.status.reg = freeze->ulStatus;
    gpu.cmd_len = 0;
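    // rewrite the control regs through GPUwriteStatus; the XOR makes the
    // stored value differ so its same-value check can't skip the write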
    for (i = 8; i > 0; i--) {
      gpu.regs[i] ^= 1; // avoid reg change detection
      GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
    }
    renderer_sync_ecmds(gpu.ex_regs);
    renderer_update_caches(0, 0, 1024, 512);
    break;
  }

  return 1;
}

void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status.blanking) {
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

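  // when skipping, wait for a rendered frame, but force a flip after
  // ~9 skipped frames so the screen doesn't appear frozen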
  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}

void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && gpu.status.interlace && gpu.status.dheight;
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}

#include "../../frontend/plugin_lib.h"

void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}

// vim:shiftwidth=2:expandtab