frontend: accept more bios
[pcsx_rearmed.git] / plugins / gpulib / gpu.c
... / ...
CommitLineData
1/*
2 * (C) Gražvydas "notaz" Ignotas, 2011-2012
3 *
4 * This work is licensed under the terms of any of these licenses
5 * (at your option):
6 * - GNU GPL, version 2 or later.
7 * - GNU LGPL, version 2.1 or later.
8 * See the COPYING file in the top-level directory.
9 */
10
11#include <stdio.h>
12#include <stdlib.h>
13#include <string.h>
14#include "gpu.h"
15
16#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
17#ifdef __GNUC__
18#define unlikely(x) __builtin_expect((x), 0)
19#define preload __builtin_prefetch
20#define noinline __attribute__((noinline))
21#else
22#define unlikely(x)
23#define preload(...)
24#define noinline
25#endif
26
27#define gpu_log(fmt, ...) \
28 printf("%d:%03d: " fmt, *gpu.state.frame_count, *gpu.state.hcnt, ##__VA_ARGS__)
29
30//#define log_io gpu_log
31#define log_io(...)
32//#define log_anomaly gpu_log
33#define log_anomaly(...)
34
35struct psx_gpu gpu;
36
37static noinline int do_cmd_buffer(uint32_t *data, int count);
38static void finish_vram_transfer(int is_read);
39
// GP1(01h)-style command reset: consume any buffered command words and
// terminate an unfinished VRAM transfer, leaving the queues empty.
static noinline void do_cmd_reset(void)
{
  // feed leftover words to the parser first so partial state is consumed
  if (unlikely(gpu.cmd_len > 0))
    do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
  gpu.cmd_len = 0;

  // checked after do_cmd_buffer(), which may itself have started a transfer
  if (unlikely(gpu.dma.h > 0))
    finish_vram_transfer(gpu.dma_start.is_read);
  gpu.dma.h = 0;
}
50
51static noinline void do_reset(void)
52{
53 unsigned int i;
54
55 do_cmd_reset();
56
57 memset(gpu.regs, 0, sizeof(gpu.regs));
58 for (i = 0; i < sizeof(gpu.ex_regs) / sizeof(gpu.ex_regs[0]); i++)
59 gpu.ex_regs[i] = (0xe0 + i) << 24;
60 gpu.status = 0x14802000;
61 gpu.gp0 = 0;
62 gpu.regs[3] = 1;
63 gpu.screen.hres = gpu.screen.w = 256;
64 gpu.screen.vres = gpu.screen.h = 240;
65 gpu.screen.x = gpu.screen.y = 0;
66}
67
// Recompute visible width, x offset and output hres from the GP1
// horizontal display range (screen.x1/x2) and the hres bits of the
// status word.  Results go to gpu.screen.{x,w,hres}.
static noinline void update_width(void)
{
  // output width and dot clock divider, indexed by status bits 16-18
  static const short hres_all[8] = { 256, 368, 320, 368, 512, 368, 640, 368 };
  static const uint8_t hdivs[8] = { 10, 7, 8, 7, 5, 7, 4, 7 };
  uint8_t hdiv = hdivs[(gpu.status >> 16) & 7];
  int hres = hres_all[(gpu.status >> 16) & 7];
  int pal = gpu.status & PSX_GPU_STATUS_PAL;
  int sw = gpu.screen.x2 - gpu.screen.x1; // display span in dotclock units
  int x = 0, x_auto;
  if (sw <= 0)
    /* nothing displayed? */;
  else {
    int s = pal ? 656 : 608; // or 600? pal is just a guess
    x = (gpu.screen.x1 - s) / hdiv;
    x = (x + 1) & ~1; // blitter limitation
    sw /= hdiv;
    sw = (sw + 2) & ~3; // according to nocash
    switch (gpu.state.screen_centering_type) {
    case 1:
      // game-controlled: keep the raw register-derived values
      break;
    case 2:
      // frontend-supplied override
      x = gpu.state.screen_centering_x;
      break;
    default:
      // correct if slightly miscentered
      x_auto = (hres - sw) / 2 & ~3;
      if ((uint32_t)x_auto <= 8u && abs(x) < 24)
        x = x_auto;
    }
    if (x + sw > hres)
      sw = hres - x;
    // .x range check is done in vout_update()
  }
  // reduce the unpleasant right border that a few games have
  if (gpu.state.screen_centering_type == 0
      && x <= 4 && hres - (x + sw) >= 4)
    hres -= 4;
  gpu.screen.x = x;
  gpu.screen.w = sw;
  gpu.screen.hres = hres;
  gpu.state.dims_changed = 1;
  //printf("xx %d %d -> %2d, %d / %d\n",
  //  gpu.screen.x1, gpu.screen.x2, x, sw, hres);
}
112
// Recompute visible height, y offset and vres from the GP1 vertical
// display range (screen.y1/y2) and the PAL / double-height status bits.
// Results go to gpu.screen.{y,h,vres}.
static noinline void update_height(void)
{
  int pal = gpu.status & PSX_GPU_STATUS_PAL;
  int dheight = gpu.status & PSX_GPU_STATUS_DHEIGHT;
  int y = gpu.screen.y1 - (pal ? 39 : 16); // 39 for spyro
  int sh = gpu.screen.y2 - gpu.screen.y1;
  int center_tol = 16;
  int vres = 240;

  if (pal && (sh > 240 || gpu.screen.vres == 256))
    vres = 256;
  if (dheight)
    // interlaced double-height: everything scales by 2
    y *= 2, sh *= 2, vres *= 2, center_tol *= 2;
  if (sh <= 0)
    /* nothing displayed? */;
  else {
    switch (gpu.state.screen_centering_type) {
    case 1:
      // game-controlled: keep the raw register-derived values
      break;
    case 2:
      // frontend-supplied override
      y = gpu.state.screen_centering_y;
      break;
    default:
      // correct if slightly miscentered
      if ((uint32_t)(vres - sh) <= 1 && abs(y) <= center_tol)
        y = 0;
    }
    if (y + sh > vres)
      sh = vres - y; // clamp to the output resolution
  }
  gpu.screen.y = y;
  gpu.screen.h = sh;
  gpu.screen.vres = vres;
  gpu.state.dims_changed = 1;
  //printf("yy %d %d -> %d, %d / %d\n",
  //  gpu.screen.y1, gpu.screen.y2, y, sh, vres);
}
150
151static noinline void decide_frameskip(void)
152{
153 if (gpu.frameskip.active)
154 gpu.frameskip.cnt++;
155 else {
156 gpu.frameskip.cnt = 0;
157 gpu.frameskip.frame_ready = 1;
158 }
159
160 if (!gpu.frameskip.active && *gpu.frameskip.advice)
161 gpu.frameskip.active = 1;
162 else if (gpu.frameskip.set > 0 && gpu.frameskip.cnt < gpu.frameskip.set)
163 gpu.frameskip.active = 1;
164 else
165 gpu.frameskip.active = 0;
166
167 if (!gpu.frameskip.active && gpu.frameskip.pending_fill[0] != 0) {
168 int dummy;
169 do_cmd_list(gpu.frameskip.pending_fill, 3, &dummy);
170 gpu.frameskip.pending_fill[0] = 0;
171 }
172}
173
174static noinline int decide_frameskip_allow(uint32_t cmd_e3)
175{
176 // no frameskip if it decides to draw to display area,
177 // but not for interlace since it'll most likely always do that
178 uint32_t x = cmd_e3 & 0x3ff;
179 uint32_t y = (cmd_e3 >> 10) & 0x3ff;
180 gpu.frameskip.allow = (gpu.status & PSX_GPU_STATUS_INTERLACE) ||
181 (uint32_t)(x - gpu.screen.src_x) >= (uint32_t)gpu.screen.w ||
182 (uint32_t)(y - gpu.screen.src_y) >= (uint32_t)gpu.screen.h;
183 return gpu.frameskip.allow;
184}
185
186static noinline void get_gpu_info(uint32_t data)
187{
188 switch (data & 0x0f) {
189 case 0x02:
190 case 0x03:
191 case 0x04:
192 gpu.gp0 = gpu.ex_regs[data & 7] & 0xfffff;
193 break;
194 case 0x05:
195 gpu.gp0 = gpu.ex_regs[5] & 0x3fffff;
196 break;
197 case 0x07:
198 gpu.gp0 = 2;
199 break;
200 default:
201 // gpu.gp0 unchanged
202 break;
203 }
204}
205
206// double, for overdraw guard
207#define VRAM_SIZE (1024 * 512 * 2 * 2)
208
209static int map_vram(void)
210{
211 gpu.vram = gpu.mmap(VRAM_SIZE);
212 if (gpu.vram != NULL) {
213 gpu.vram += 4096 / 2;
214 return 0;
215 }
216 else {
217 fprintf(stderr, "could not map vram, expect crashes\n");
218 return -1;
219 }
220}
221
222long GPUinit(void)
223{
224 int ret;
225 ret = vout_init();
226 ret |= renderer_init();
227
228 gpu.state.frame_count = &gpu.zero;
229 gpu.state.hcnt = &gpu.zero;
230 gpu.frameskip.active = 0;
231 gpu.cmd_len = 0;
232 do_reset();
233
234 if (gpu.mmap != NULL) {
235 if (map_vram() != 0)
236 ret = -1;
237 }
238 return ret;
239}
240
241long GPUshutdown(void)
242{
243 long ret;
244
245 renderer_finish();
246 ret = vout_finish();
247 if (gpu.vram != NULL) {
248 gpu.vram -= 4096 / 2;
249 gpu.munmap(gpu.vram, VRAM_SIZE);
250 }
251 gpu.vram = NULL;
252
253 return ret;
254}
255
// GP1 display control port write (0x1f801814).
void GPUwriteStatus(uint32_t data)
{
  uint32_t cmd = data >> 24;

  if (cmd < ARRAY_SIZE(gpu.regs)) {
    // drop redundant writes, except for the reset commands (0,1) and
    // display address (5) which have side effects even when unchanged
    if (cmd > 1 && cmd != 5 && gpu.regs[cmd] == data)
      return;
    gpu.regs[cmd] = data;
  }

  gpu.state.fb_dirty = 1;

  switch (cmd) {
  case 0x00: // reset GPU
    do_reset();
    break;
  case 0x01: // reset command buffer
    do_cmd_reset();
    break;
  case 0x03: // display enable/disable
    if (data & 1) {
      gpu.status |= PSX_GPU_STATUS_BLANKING;
      gpu.state.dims_changed = 1; // for hud clearing
    }
    else
      gpu.status &= ~PSX_GPU_STATUS_BLANKING;
    break;
  case 0x04: // DMA direction
    gpu.status &= ~PSX_GPU_STATUS_DMA_MASK;
    gpu.status |= PSX_GPU_STATUS_DMA(data & 3);
    break;
  case 0x05: // start of display area in vram
    gpu.screen.src_x = data & 0x3ff;
    gpu.screen.src_y = (data >> 10) & 0x1ff;
    if (gpu.frameskip.set) {
      // this is effectively a frame flip; advance frameskip once per frame
      decide_frameskip_allow(gpu.ex_regs[3]);
      if (gpu.frameskip.last_flip_frame != *gpu.state.frame_count) {
        decide_frameskip();
        gpu.frameskip.last_flip_frame = *gpu.state.frame_count;
      }
    }
    break;
  case 0x06: // horizontal display range
    gpu.screen.x1 = data & 0xfff;
    gpu.screen.x2 = (data >> 12) & 0xfff;
    update_width();
    break;
  case 0x07: // vertical display range
    gpu.screen.y1 = data & 0x3ff;
    gpu.screen.y2 = (data >> 10) & 0x3ff;
    update_height();
    break;
  case 0x08: // display mode; bits map into status 16..22
    gpu.status = (gpu.status & ~0x7f0000) | ((data & 0x3F) << 17) | ((data & 0x40) << 10);
    update_width();
    update_height();
    renderer_notify_res_change();
    break;
  default:
    if ((cmd & 0xf0) == 0x10) // 0x10..0x1f: get GPU info
      get_gpu_info(data);
    break;
  }

#ifdef GPUwriteStatus_ext
  GPUwriteStatus_ext(data);
#endif
}
324
// Number of parameter words following each GP0 command word; the total
// packet length is 1 + cmd_lengths[cmd] (see do_cmd_list_skip()).
// Variable-length poly-lines (0x48-0x5f) are extended at parse time.
const unsigned char cmd_lengths[256] =
{
  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8, // 20
  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, // 40
  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, // 60
  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,
  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 80
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // a0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // c0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // e0
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
344
345#define VRAM_MEM_XY(x, y) &gpu.vram[(y) * 1024 + (x)]
346
// Copy one horizontal run of `l` 16bpp pixels at (x, y); direction is
// VRAM->mem when is_read is set, mem->VRAM otherwise.
static inline void do_vram_line(int x, int y, uint16_t *mem, int l, int is_read)
{
  uint16_t *vram = VRAM_MEM_XY(x, y);
  size_t bytes = (size_t)l * 2;

  if (is_read)
    memcpy(mem, vram, bytes);
  else
    memcpy(vram, mem, bytes);
}
355
// Transfer up to `count` 32bit words between `data` and VRAM for the
// transfer described by gpu.dma; is_read selects VRAM->CPU direction.
// A transfer may span multiple calls: progress (current line, remaining
// rows, offset into the current line) is kept in gpu.dma.
// Returns the number of words consumed/produced.
static int do_vram_io(uint32_t *data, int count, int is_read)
{
  int count_initial = count;
  uint16_t *sdata = (uint16_t *)data;
  int x = gpu.dma.x, y = gpu.dma.y;
  int w = gpu.dma.w, h = gpu.dma.h;
  int o = gpu.dma.offset; // pixels already done on the current line
  int l;
  count *= 2; // operate in 16bpp pixels

  // finish the partially-transferred line from the previous call, if any
  if (gpu.dma.offset) {
    l = w - gpu.dma.offset;
    if (count < l)
      l = count;

    do_vram_line(x + o, y, sdata, l, is_read);

    if (o + l < w)
      o += l;
    else {
      o = 0;
      y++;
      h--;
    }
    sdata += l;
    count -= l;
  }

  // bulk: whole lines
  for (; h > 0 && count >= w; sdata += w, count -= w, y++, h--) {
    y &= 511; // vram wraps vertically
    do_vram_line(x, y, sdata, w, is_read);
  }

  if (h > 0) {
    // out of data before the transfer completed; save partial-line state
    if (count > 0) {
      y &= 511;
      do_vram_line(x, y, sdata, count, is_read);
      o = count;
      count = 0;
    }
  }
  else
    finish_vram_transfer(is_read);
  gpu.dma.y = y;
  gpu.dma.h = h;
  gpu.dma.offset = o;

  return count_initial - count / 2;
}
405
// Set up gpu.dma for a GP0 image load/store (0xa0/0xc0 family; see
// do_cmd_buffer).  pos_word/size_word are the two parameter words.
static void start_vram_transfer(uint32_t pos_word, uint32_t size_word, int is_read)
{
  if (gpu.dma.h)
    log_anomaly("start_vram_transfer while old unfinished\n");

  gpu.dma.x = pos_word & 0x3ff;
  gpu.dma.y = (pos_word >> 16) & 0x1ff;
  // the -1/&mask/+1 dance makes a size of 0 encode the maximum (1024x512)
  gpu.dma.w = ((size_word - 1) & 0x3ff) + 1;
  gpu.dma.h = (((size_word >> 16) - 1) & 0x1ff) + 1;
  gpu.dma.offset = 0;
  gpu.dma.is_read = is_read;
  gpu.dma_start = gpu.dma; // snapshot for finish_vram_transfer()

  renderer_flush_queues();
  if (is_read) {
    gpu.status |= PSX_GPU_STATUS_IMG;
    // pre-latch the first word for GPUREAD
    // XXX: wrong for width 1
    gpu.gp0 = LE32TOH(*(uint32_t *) VRAM_MEM_XY(gpu.dma.x, gpu.dma.y));
    gpu.state.last_vram_read_frame = *gpu.state.frame_count;
  }

  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
    gpu.dma.x, gpu.dma.y, gpu.dma.w, gpu.dma.h);
}
430
431static void finish_vram_transfer(int is_read)
432{
433 if (is_read)
434 gpu.status &= ~PSX_GPU_STATUS_IMG;
435 else
436 renderer_update_caches(gpu.dma_start.x, gpu.dma_start.y,
437 gpu.dma_start.w, gpu.dma_start.h);
438}
439
// Command list walker used while a frame is being skipped: executes only
// state-affecting commands (0xex group, large fills) and measures the
// lengths of everything else without drawing.  Stops when an 0xe3 write
// makes skipping unsafe, or on image i/o, or on an incomplete command.
// Returns words consumed; *last_cmd receives the last command seen
// (-1 for an incomplete one).
static noinline int do_cmd_list_skip(uint32_t *data, int count, int *last_cmd)
{
  int cmd = 0, pos = 0, len, dummy, v;
  int skip = 1;

  gpu.frameskip.pending_fill[0] = 0;

  while (pos < count && skip) {
    uint32_t *list = data + pos;
    cmd = LE32TOH(list[0]) >> 24;
    len = 1 + cmd_lengths[cmd];

    switch (cmd) {
    case 0x02:
      if ((LE32TOH(list[2]) & 0x3ff) > gpu.screen.w || ((LE32TOH(list[2]) >> 16) & 0x1ff) > gpu.screen.h)
        // clearing something large, don't skip
        do_cmd_list(list, 3, &dummy);
      else
        // small fill: defer until skipping ends (see decide_frameskip)
        memcpy(gpu.frameskip.pending_fill, list, 3 * 4);
      break;
    case 0x24 ... 0x27:
    case 0x2c ... 0x2f:
    case 0x34 ... 0x37:
    case 0x3c ... 0x3f:
      // keep ex_regs[1] texture-state bits in sync with the in-command
      // attribute word even though the primitive itself is skipped
      gpu.ex_regs[1] &= ~0x1ff;
      gpu.ex_regs[1] |= LE32TOH(list[4 + ((cmd >> 4) & 1)]) & 0x1ff;
      break;
    case 0x48 ... 0x4F:
      // variable-length poly-line, terminated by an 0x5xxx5xxx word
      for (v = 3; pos + v < count; v++)
      {
        if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
          break;
      }
      len += v - 3;
      break;
    case 0x58 ... 0x5F:
      // shaded variant: two words per vertex
      for (v = 4; pos + v < count; v += 2)
      {
        if ((list[v] & HTOLE32(0xf000f000)) == HTOLE32(0x50005000))
          break;
      }
      len += v - 4;
      break;
    default:
      if (cmd == 0xe3)
        // draw-offset change may target the display area
        skip = decide_frameskip_allow(LE32TOH(list[0]));
      if ((cmd & 0xf8) == 0xe0)
        gpu.ex_regs[cmd & 7] = LE32TOH(list[0]);
      break;
    }

    if (pos + len > count) {
      cmd = -1;
      break; // incomplete cmd
    }
    if (0xa0 <= cmd && cmd <= 0xdf)
      break; // image i/o

    pos += len;
  }

  renderer_sync_ecmds(gpu.ex_regs);
  *last_cmd = cmd;
  return pos;
}
505
// Parse and execute up to `count` words of GP0 command stream.
// Returns the number of words NOT consumed (an incomplete trailing
// command is left for the caller to buffer and retry).
static noinline int do_cmd_buffer(uint32_t *data, int count)
{
  int cmd, pos;
  uint32_t old_e3 = gpu.ex_regs[3];
  int vram_dirty = 0;

  // process buffer
  for (pos = 0; pos < count; )
  {
    if (gpu.dma.h && !gpu.dma_start.is_read) { // XXX: need to verify
      // active CPU->VRAM transfer consumes the stream first
      vram_dirty = 1;
      pos += do_vram_io(data + pos, count - pos, 0);
      if (pos == count)
        break;
    }

    cmd = LE32TOH(data[pos]) >> 24;
    if (0xa0 <= cmd && cmd <= 0xdf) {
      if (unlikely((pos+2) >= count)) {
        // incomplete vram write/read cmd, can't consume yet
        cmd = -1;
        break;
      }

      // consume vram write/read cmd (0xc0.. = read)
      start_vram_transfer(LE32TOH(data[pos + 1]), LE32TOH(data[pos + 2]), (cmd & 0xe0) == 0xc0);
      pos += 3;
      continue;
    }

    // 0xex cmds might affect frameskip.allow, so pass to do_cmd_list_skip
    if (gpu.frameskip.active && (gpu.frameskip.allow || ((LE32TOH(data[pos]) >> 24) & 0xf0) == 0xe0))
      pos += do_cmd_list_skip(data + pos, count - pos, &cmd);
    else {
      pos += do_cmd_list(data + pos, count - pos, &cmd);
      vram_dirty = 1;
    }

    if (cmd == -1)
      // incomplete cmd
      break;
  }

  // mirror ex_regs state into the low status bits
  gpu.status &= ~0x1fff;
  gpu.status |= gpu.ex_regs[1] & 0x7ff;
  gpu.status |= (gpu.ex_regs[6] & 3) << 11;

  gpu.state.fb_dirty |= vram_dirty;

  // 0xe3 register changed -> re-evaluate whether skipping is still safe
  if (old_e3 != gpu.ex_regs[3])
    decide_frameskip_allow(gpu.ex_regs[3]);

  return count - pos;
}
560
561static void flush_cmd_buffer(void)
562{
563 int left = do_cmd_buffer(gpu.cmd_buffer, gpu.cmd_len);
564 if (left > 0)
565 memmove(gpu.cmd_buffer, gpu.cmd_buffer + gpu.cmd_len - left, left * 4);
566 gpu.cmd_len = left;
567}
568
569void GPUwriteDataMem(uint32_t *mem, int count)
570{
571 int left;
572
573 log_io("gpu_dma_write %p %d\n", mem, count);
574
575 if (unlikely(gpu.cmd_len > 0))
576 flush_cmd_buffer();
577
578 left = do_cmd_buffer(mem, count);
579 if (left)
580 log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
581}
582
583void GPUwriteData(uint32_t data)
584{
585 log_io("gpu_write %08x\n", data);
586 gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
587 if (gpu.cmd_len >= CMD_BUFFER_LEN)
588 flush_cmd_buffer();
589}
590
// Walk and execute a GPU DMA linked list starting at start_addr.
// Each node is a header word (next address in the low 24 bits, payload
// word count in the top 8) followed by that many command words.
// If progress_addr is non-NULL only one node is processed and the next
// address is reported through it.  Returns an approximate cycle cost.
long GPUdmaChain(uint32_t *rambase, uint32_t start_addr, uint32_t *progress_addr)
{
  uint32_t addr, *list, ld_addr = 0;
  int len, left, count;
  long cpu_cycles = 0;

  preload(rambase + (start_addr & 0x1fffff) / 4);

  if (unlikely(gpu.cmd_len > 0))
    flush_cmd_buffer();

  log_io("gpu_dma_chain\n");
  addr = start_addr & 0xffffff;
  for (count = 0; (addr & 0x800000) == 0; count++) // bit23 = end marker
  {
    list = rambase + (addr & 0x1fffff) / 4;
    len = LE32TOH(list[0]) >> 24;
    addr = LE32TOH(list[0]) & 0xffffff;
    preload(rambase + (addr & 0x1fffff) / 4);

    cpu_cycles += 10;
    if (len > 0)
      cpu_cycles += 5 + len;

    log_io(".chain %08lx #%d+%d\n",
      (long)(list - rambase) * 4, len, gpu.cmd_len);
    if (unlikely(gpu.cmd_len > 0)) {
      // leftover from an earlier incomplete command: append and retry
      memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
      gpu.cmd_len += len;
      flush_cmd_buffer();
      continue;
    }

    if (len) {
      left = do_cmd_buffer(list + 1, len);
      if (left) {
        // buffer the incomplete tail for the next call
        memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
        gpu.cmd_len = left;
        log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
      }
    }

    if (progress_addr) {
      *progress_addr = addr;
      break;
    }
    #define LD_THRESHOLD (8*1024)
    if (count >= LD_THRESHOLD) {
      if (count == LD_THRESHOLD) {
        ld_addr = addr;
        continue;
      }

      // loop detection marker
      // (bit23 set causes DMA error on real machine, so
      // unlikely to be ever set by the game)
      list[0] |= HTOLE32(0x800000);
    }
  }

  if (ld_addr != 0) {
    // remove loop detection markers
    count -= LD_THRESHOLD + 2;
    addr = ld_addr & 0x1fffff;
    while (count-- > 0) {
      list = rambase + addr / 4;
      addr = LE32TOH(list[0]) & 0x1fffff;
      list[0] &= HTOLE32(~0x800000);
    }
  }

  gpu.state.last_list.frame = *gpu.state.frame_count;
  gpu.state.last_list.hcnt = *gpu.state.hcnt;
  gpu.state.last_list.cycles = cpu_cycles;
  gpu.state.last_list.addr = start_addr;

  return cpu_cycles;
}
669
670void GPUreadDataMem(uint32_t *mem, int count)
671{
672 log_io("gpu_dma_read %p %d\n", mem, count);
673
674 if (unlikely(gpu.cmd_len > 0))
675 flush_cmd_buffer();
676
677 if (gpu.dma.h)
678 do_vram_io(mem, count, 1);
679}
680
681uint32_t GPUreadData(void)
682{
683 uint32_t ret;
684
685 if (unlikely(gpu.cmd_len > 0))
686 flush_cmd_buffer();
687
688 ret = gpu.gp0;
689 if (gpu.dma.h) {
690 ret = HTOLE32(ret);
691 do_vram_io(&ret, 1, 1);
692 ret = LE32TOH(ret);
693 }
694
695 log_io("gpu_read %08x\n", ret);
696 return ret;
697}
698
699uint32_t GPUreadStatus(void)
700{
701 uint32_t ret;
702
703 if (unlikely(gpu.cmd_len > 0))
704 flush_cmd_buffer();
705
706 ret = gpu.status;
707 log_io("gpu_read_status %08x\n", ret);
708 return ret;
709}
710
// Savestate container exchanged with the main emulator via GPUfreeze();
// field order and sizes look like a fixed external format — do not
// change without checking the frontend side.
struct GPUFreeze
{
  uint32_t ulFreezeVersion; // should be always 1 for now (set by main emu)
  uint32_t ulStatus; // current gpu status
  uint32_t ulControl[256]; // latest control register values
  unsigned char psxVRam[1024*1024*2]; // current VRam image (full 2 MB for ZN)
};
718
// Save (type==1) or load (type==0) the full GPU state into/from
// `freeze`.  Always returns 1.
long GPUfreeze(uint32_t type, struct GPUFreeze *freeze)
{
  int i;

  switch (type) {
  case 1: // save
    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    memcpy(freeze->psxVRam, gpu.vram, 1024 * 512 * 2);
    memcpy(freeze->ulControl, gpu.regs, sizeof(gpu.regs));
    // ex_regs (GP0 0xe0..0xe7 state) stored at offset 0xe0 of ulControl
    memcpy(freeze->ulControl + 0xe0, gpu.ex_regs, sizeof(gpu.ex_regs));
    freeze->ulStatus = gpu.status;
    break;
  case 0: // load
    memcpy(gpu.vram, freeze->psxVRam, 1024 * 512 * 2);
    memcpy(gpu.regs, freeze->ulControl, sizeof(gpu.regs));
    memcpy(gpu.ex_regs, freeze->ulControl + 0xe0, sizeof(gpu.ex_regs));
    gpu.status = freeze->ulStatus;
    gpu.cmd_len = 0;
    // replay control regs 1..8 to rebuild derived state (screen dims etc.);
    // the xor dance defeats GPUwriteStatus's redundant-write filter
    for (i = 8; i > 0; i--) {
      gpu.regs[i] ^= 1; // avoid reg change detection
      GPUwriteStatus((i << 24) | (gpu.regs[i] ^ 1));
    }
    renderer_sync_ecmds(gpu.ex_regs);
    renderer_update_caches(0, 0, 1024, 512);
    break;
  }

  return 1;
}
749
// Per-frame (vblank) hook: flush pending work and present the frame,
// honoring display blanking and the frameskip state machine.
void GPUupdateLace(void)
{
  if (gpu.cmd_len > 0)
    flush_cmd_buffer();
  renderer_flush_queues();

  if (gpu.status & PSX_GPU_STATUS_BLANKING) {
    // display disabled: blank the output once, then idle until re-enabled
    if (!gpu.state.blanked) {
      vout_blank();
      gpu.state.blanked = 1;
      gpu.state.fb_dirty = 1;
    }
    return;
  }

  if (!gpu.state.fb_dirty)
    return;

  if (gpu.frameskip.set) {
    if (!gpu.frameskip.frame_ready) {
      // still skipping; but force a frame out if no flip happened
      // for a while so the screen doesn't freeze
      if (*gpu.state.frame_count - gpu.frameskip.last_flip_frame < 9)
        return;
      gpu.frameskip.active = 0;
    }
    gpu.frameskip.frame_ready = 0;
  }

  vout_update();
  gpu.state.fb_dirty = 0;
  gpu.state.blanked = 0;
}
781
// Vblank notification from the emulator core: decide whether interlaced
// rendering should be active and tell the renderer which field (lcf)
// to draw next.
void GPUvBlank(int is_vblank, int lcf)
{
  int interlace = gpu.state.allow_interlace
    && (gpu.status & PSX_GPU_STATUS_INTERLACE)
    && (gpu.status & PSX_GPU_STATUS_DHEIGHT);
  // interlace doesn't look nice on progressive displays,
  // so we have this "auto" mode here for games that don't read vram
  if (gpu.state.allow_interlace == 2
      && *gpu.state.frame_count - gpu.state.last_vram_read_frame > 1)
  {
    interlace = 0;
  }
  // notify on every interlaced frame, and once when interlace turns off
  if (interlace || interlace != gpu.state.old_interlace) {
    gpu.state.old_interlace = interlace;

    if (gpu.cmd_len > 0)
      flush_cmd_buffer();
    renderer_flush_queues();
    renderer_set_interlace(interlace, !lcf);
  }
}
803
804#include "../../frontend/plugin_lib.h"
805
// Frontend -> plugin configuration update; also performs the delayed
// VRAM mapping once the frontend has supplied mmap/munmap callbacks.
void GPUrearmedCallbacks(const struct rearmed_cbs *cbs)
{
  gpu.frameskip.set = cbs->frameskip;
  gpu.frameskip.advice = &cbs->fskip_advice;
  gpu.frameskip.active = 0;
  gpu.frameskip.frame_ready = 1;
  gpu.state.hcnt = cbs->gpu_hcnt;
  gpu.state.frame_count = cbs->gpu_frame_count;
  gpu.state.allow_interlace = cbs->gpu_neon.allow_interlace;
  gpu.state.enhancement_enable = cbs->gpu_neon.enhancement_enable;
  // re-derive the screen layout only when centering settings changed
  if (gpu.state.screen_centering_type != cbs->screen_centering_type
      || gpu.state.screen_centering_x != cbs->screen_centering_x
      || gpu.state.screen_centering_y != cbs->screen_centering_y) {
    gpu.state.screen_centering_type = cbs->screen_centering_type;
    gpu.state.screen_centering_x = cbs->screen_centering_x;
    gpu.state.screen_centering_y = cbs->screen_centering_y;
    update_width();
    update_height();
  }

  gpu.mmap = cbs->mmap;
  gpu.munmap = cbs->munmap;

  // delayed vram mmap
  if (gpu.vram == NULL)
    map_vram();

  if (cbs->pl_vout_set_raw_vram)
    cbs->pl_vout_set_raw_vram(gpu.vram);
  renderer_set_config(cbs);
  vout_set_config(cbs);
}
838
839// vim:shiftwidth=2:expandtab