/*
 * (C) Gražvydas "notaz" Ignotas, 2011
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */

#include <stdio.h>

#ifdef _WIN32
#include <mman.h>
#else
#include <sys/mman.h>
#endif

extern const unsigned char cmd_lengths[256];
#define command_lengths cmd_lengths

static unsigned int *ex_regs;
static int initialized;

#define PCSX
#define SET_Ex(r, v) \
  ex_regs[r] = v

#include "psx_gpu/psx_gpu.c"
#include "psx_gpu/psx_gpu_parse.c"
#include "../gpulib/gpu.h"

static psx_gpu_struct egpu __attribute__((aligned(256)));

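// gpulib entry point for GP0 command list data.  `count` appears to be in
// 32-bit words (it is converted to a byte size for the parser); afterwards
// the low 9 bits of ex_regs[1] are refreshed from the parser's texture
// settings so the core sees texture page changes made by the list.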
int do_cmd_list(uint32_t *list, int count, int *last_cmd)
{
  int ret;

#if defined(__arm__) && defined(NEON_BUILD) && !defined(SIMD_BUILD)
  // the asm doesn't bother to save callee-save vector regs, so do it here
  __asm__ __volatile__("":::"q4","q5","q6","q7");
#endif

  if (gpu.state.enhancement_active)
    ret = gpu_parse_enhanced(&egpu, list, count * 4, (u32 *)last_cmd);
  else
    ret = gpu_parse(&egpu, list, count * 4, (u32 *)last_cmd);

#if defined(__arm__) && defined(NEON_BUILD) && !defined(SIMD_BUILD)
  __asm__ __volatile__("":::"q4","q5","q6","q7");
#endif

  ex_regs[1] &= ~0x1ff;
  ex_regs[1] |= egpu.texture_settings & 0x1ff;
  return ret;
}

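// Backing store for the 2x enhancement render target: four 1024x1024
// 16bpp buffers (8 MiB), plus 4096 bytes of slack at each end of the
// mapping (the leading pad is skipped in map_enhancement_buffer()).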
#define ENHANCEMENT_BUF_SIZE (1024 * 1024 * 2 * 4 + 4096 * 2)

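// gpulib callback (the "bufer" spelling matches the gpulib struct member):
// maps a VRAM rectangle to its location in the 2x-scaled enhancement
// buffer by doubling the coordinates and reporting the doubled VRAM height.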
static uint16_t *get_enhancement_bufer(int *x, int *y, int *w, int *h,
  int *vram_h)
{
  uint16_t *ret = select_enhancement_buf_ptr(&egpu, *x);

  *x *= 2;
  *y *= 2;
  *w *= 2;
  *h *= 2;
  *vram_h = 1024;
  return ret;
}

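// Allocates the enhancement buffer through the gpu.mmap() callback (the
// callers check that it is set) and skips the 4096-byte pad reserved at
// the start of the mapping; the pointer is u16 *, hence the /2 below.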
static void map_enhancement_buffer(void)
{
  // currently we use 4x 1024*1024 buffers instead of a single 2048*1024 one
  // to be able to reuse the 1024-width code better (triangle setup,
  // dithering phase, lines).
  egpu.enhancement_buf_ptr = gpu.mmap(ENHANCEMENT_BUF_SIZE);
  if (egpu.enhancement_buf_ptr == NULL) {
    fprintf(stderr, "failed to map enhancement buffer\n");
    gpu.get_enhancement_bufer = NULL;
  }
  else {
    egpu.enhancement_buf_ptr += 4096 / 2;
    gpu.get_enhancement_bufer = get_enhancement_bufer;
  }
}

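// gpulib init hook.  If VRAM is not mapped yet, initialization is
// deferred; renderer_set_config() retries it while `initialized` is 0.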
int renderer_init(void)
{
  if (gpu.vram != NULL) {
    initialize_psx_gpu(&egpu, gpu.vram);
    initialized = 1;
  }

  if (gpu.mmap != NULL && egpu.enhancement_buf_ptr == NULL)
    map_enhancement_buffer();

  ex_regs = gpu.ex_regs;
  return 0;
}

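// Teardown counterpart of renderer_init(): returns the enhancement
// mapping (including the leading pad) and clears the buffer pointers.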
void renderer_finish(void)
{
  if (egpu.enhancement_buf_ptr != NULL) {
    egpu.enhancement_buf_ptr -= 4096 / 2;
    gpu.munmap(egpu.enhancement_buf_ptr, ENHANCEMENT_BUF_SIZE);
  }
  egpu.enhancement_buf_ptr = NULL;
  egpu.enhancement_current_buf_ptr = NULL;
  initialized = 0;
}

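// Rescales a VRAM rectangle into the enhancement buffers using
// scale2x_tiles8().  step_x is the x granularity covered by one
// enhancement_buf_by_x16 entry; each loop iteration handles a run of
// columns that map to the same destination buffer.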
static __attribute__((noinline)) void
sync_enhancement_buffers(int x, int y, int w, int h)
{
  const int step_x = 1024 / sizeof(egpu.enhancement_buf_by_x16);
  u16 *src, *dst;
  int w1, fb_index;

  w += x & (step_x - 1);
  x &= ~(step_x - 1);
  w = (w + step_x - 1) & ~(step_x - 1);
  if (y + h > 512)
    h = 512 - y;

  while (w > 0) {
    fb_index = egpu.enhancement_buf_by_x16[x / step_x];
    for (w1 = 0; w > 0; w1++, w -= step_x)
      if (fb_index != egpu.enhancement_buf_by_x16[x / step_x + w1])
        break;

    src = gpu.vram + y * 1024 + x;
    dst = select_enhancement_buf_ptr(&egpu, x);
    dst += (y * 1024 + x) * 2;
    scale2x_tiles8(dst, src, w1 * step_x / 8, h);

    x += w1 * step_x;
  }
}

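// Replays the buffered GP0(E1..E6) setup commands (ecmds[1..6] as kept
// by gpulib) through the parser so egpu's state matches the core, e.g.
// after a savestate load.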
void renderer_sync_ecmds(uint32_t *ecmds)
{
  gpu_parse(&egpu, ecmds + 1, 6 * 4, NULL);
}

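// VRAM was modified behind the renderer's back (DMA/CPU transfer):
// invalidate the texture cache for that region and, when the enhancement
// is active and the display is not in 24bpp mode, refresh the scaled copy.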
void renderer_update_caches(int x, int y, int w, int h)
{
  update_texture_cache_region(&egpu, x, y, x + w - 1, y + h - 1);
  if (gpu.state.enhancement_active && !(gpu.status & PSX_GPU_STATUS_RGB24))
    sync_enhancement_buffers(x, y, w, h);
}

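// Drains psx_gpu's batched render blocks so VRAM is up to date,
// presumably ahead of VRAM readbacks or display updates.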
void renderer_flush_queues(void)
{
  flush_render_block_buffer(&egpu);
}

void renderer_set_interlace(int enable, int is_odd)
{
  egpu.render_mode &= ~(RENDER_INTERLACE_ENABLED|RENDER_INTERLACE_ODD);
  if (enable)
    egpu.render_mode |= RENDER_INTERLACE_ENABLED;
  if (is_odd)
    egpu.render_mode |= RENDER_INTERLACE_ODD;
}

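// Horizontal resolution changed; refresh the buffer lookup table so the
// VRAM-to-enhancement-buffer mapping follows the new display width.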
void renderer_notify_res_change(void)
{
  // note: enhancement_x_threshold must be kept a multiple of 8
  if (egpu.enhancement_x_threshold != gpu.screen.hres)
  {
    egpu.enhancement_x_threshold = gpu.screen.hres;
    update_enhancement_buf_table_from_hres(&egpu);
  }
}

#include "../../frontend/plugin_lib.h"

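// Applies frontend options.  When the enhancement is switched on at
// runtime, all of the 1024x512 VRAM is rescaled once so the buffers start
// out consistent; init and buffer mapping are also retried here in case
// renderer_init() ran before VRAM or gpu.mmap were available.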
void renderer_set_config(const struct rearmed_cbs *cbs)
{
  static int enhancement_was_on;

  disable_main_render = cbs->gpu_neon.enhancement_no_main;
  if (egpu.enhancement_buf_ptr != NULL && cbs->gpu_neon.enhancement_enable
      && !enhancement_was_on)
  {
    sync_enhancement_buffers(0, 0, 1024, 512);
  }
  enhancement_was_on = cbs->gpu_neon.enhancement_enable;

  if (!initialized) {
    initialize_psx_gpu(&egpu, gpu.vram);
    initialized = 1;
  }

  if (gpu.mmap != NULL && egpu.enhancement_buf_ptr == NULL)
    map_enhancement_buffer();
  if (cbs->pl_set_gpu_caps)
    cbs->pl_set_gpu_caps(GPU_CAP_SUPPORTS_2X);

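  // Either a flat (all-zero) table when dithering is not allowed, or the
  // standard PSX 4x4 ordered-dither matrix.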
  egpu.use_dithering = cbs->gpu_neon.allow_dithering;
  if (!egpu.use_dithering) {
    egpu.dither_table[0] = dither_table_row(0, 0, 0, 0);
    egpu.dither_table[1] = dither_table_row(0, 0, 0, 0);
    egpu.dither_table[2] = dither_table_row(0, 0, 0, 0);
    egpu.dither_table[3] = dither_table_row(0, 0, 0, 0);
  } else {
    egpu.dither_table[0] = dither_table_row(-4, 0, -3, 1);
    egpu.dither_table[1] = dither_table_row(2, -2, 3, -1);
    egpu.dither_table[2] = dither_table_row(-3, 1, -4, 0);
    egpu.dither_table[3] = dither_table_row(3, -1, 2, -2);
  }
}
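
// Remaining gpulib renderer hooks; nothing to do for them in this renderer.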
void renderer_sync(void)
{
}

void renderer_notify_update_lace(int updated)
{
}