/*
 * (C) Gražvydas "notaz" Ignotas, 2011
 *
 * This work is licensed under the terms of any of these licenses
 * (at your option):
 *  - GNU GPL, version 2 or later.
 *  - GNU LGPL, version 2.1 or later.
 * See the COPYING file in the top-level directory.
 */
// Element count of a true array (invalid on pointers/decayed parameters).
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
// Guarded: min/max may already come from system or project headers.
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif

// Per-opcode GP0 packet word counts; the definition lives in the parser
// sources included below.
extern const unsigned char cmd_lengths[256];
#define command_lengths cmd_lengths

// gpulib-owned "extra" GPU register block; set in renderer_init().
static unsigned int *ex_regs;
// Nonzero once initialize_psx_gpu() has run (see renderer_init/set_config).
static int initialized;

// Used by the included parser code to publish register state back to gpulib.
// NOTE(review): the continuation line was lost in extraction; restored from
// upstream pcsx_rearmed — verify.
#define SET_Ex(r, v) \
  ex_regs[r] = v

// Forward declaration: defined later in this file, but referenced by the
// psx_gpu sources included below.
static __attribute__((noinline)) void
sync_enhancement_buffers(int x, int y, int w, int h);
35 #include "../gpulib/gpu.h"
36 #include "psx_gpu/psx_gpu.c"
37 #include "psx_gpu/psx_gpu_parse.c"
39 static psx_gpu_struct egpu __attribute__((aligned(256)));
41 int do_cmd_list(uint32_t *list, int count,
42 int *cycles_sum, int *cycles_last, int *last_cmd)
46 #if defined(__arm__) && defined(NEON_BUILD) && !defined(SIMD_BUILD)
47 // the asm doesn't bother to save callee-save vector regs, so do it here
48 __asm__ __volatile__("":::"q4","q5","q6","q7");
51 if (gpu.state.enhancement_active)
52 ret = gpu_parse_enhanced(&egpu, list, count * 4,
53 cycles_sum, cycles_last, (u32 *)last_cmd);
55 ret = gpu_parse(&egpu, list, count * 4,
56 cycles_sum, cycles_last, (u32 *)last_cmd);
58 #if defined(__arm__) && defined(NEON_BUILD) && !defined(SIMD_BUILD)
59 __asm__ __volatile__("":::"q4","q5","q6","q7");
63 ex_regs[1] |= egpu.texture_settings & 0x1ff;
67 #define ENHANCEMENT_BUF_SIZE (1024 * 1024 * 2 * 4 + 4096 * 2)
69 static void *get_enhancement_bufer(int *x, int *y, int *w, int *h,
72 uint16_t *ret = select_enhancement_buf_ptr(&egpu, *x, *y);
84 static void map_enhancement_buffer(void)
86 // currently we use 4x 1024*1024 buffers instead of single 2048*1024
87 // to be able to reuse 1024-width code better (triangle setup,
88 // dithering phase, lines).
89 egpu.enhancement_buf_ptr = gpu.mmap(ENHANCEMENT_BUF_SIZE);
90 if (egpu.enhancement_buf_ptr == NULL) {
91 fprintf(stderr, "failed to map enhancement buffer\n");
92 gpu.get_enhancement_bufer = NULL;
95 egpu.enhancement_buf_ptr += 4096 / 2;
96 gpu.get_enhancement_bufer = get_enhancement_bufer;
100 int renderer_init(void)
102 if (gpu.vram != NULL) {
103 initialize_psx_gpu(&egpu, gpu.vram);
107 if (gpu.mmap != NULL && egpu.enhancement_buf_ptr == NULL)
108 map_enhancement_buffer();
110 ex_regs = gpu.ex_regs;
114 void renderer_finish(void)
116 if (egpu.enhancement_buf_ptr != NULL) {
117 egpu.enhancement_buf_ptr -= 4096 / 2;
118 gpu.munmap(egpu.enhancement_buf_ptr, ENHANCEMENT_BUF_SIZE);
120 egpu.enhancement_buf_ptr = NULL;
121 egpu.enhancement_current_buf_ptr = NULL;
125 static __attribute__((noinline)) void
126 sync_enhancement_buffers(int x, int y, int w, int h)
128 int i, right = x + w, bottom = y + h;
129 const u16 *src = gpu.vram;
130 // use these because the scanout struct may hold reduced w, h
131 // due to intersection stuff, see the update_enhancement_buf_scanouts() mess
132 int s_w = max(gpu.screen.hres, gpu.screen.w);
133 int s_h = gpu.screen.vres;
134 if (gpu.screen.y < 0)
137 for (i = 0; i < ARRAY_SIZE(egpu.enhancement_scanouts); i++) {
138 const struct psx_gpu_scanout *s = &egpu.enhancement_scanouts[i];
139 u16 *dst = select_enhancement_buf_by_index(&egpu, i);
141 if (s->w == 0) continue;
142 if (s->x >= right) continue;
143 if (s->x + s_w <= x) continue;
144 if (s->y >= bottom) continue;
145 if (s->y + s_h <= y) continue;
147 x2 = min(right, s->x + s_w);
149 y2 = min(bottom, s->y + s_h);
150 // 16-byte align for the asm version
153 scale2x_tiles8(dst + y1 * 1024*2 + x1 * 2,
154 src + y1 * 1024 + x1, (x2 - x1 + 7) / 8u, y2 - y1);
158 void renderer_sync_ecmds(uint32_t *ecmds)
162 gpu_parse(&egpu, ecmds + 1, 6 * 4, &dummy0, &dummy0, &dummy1);
165 void renderer_update_caches(int x, int y, int w, int h, int state_changed)
167 update_texture_cache_region(&egpu, x, y, x + w - 1, y + h - 1);
169 if (gpu.state.enhancement_active) {
171 int vres = gpu.screen.vres;
172 if (gpu.screen.y < 0)
173 vres -= gpu.screen.y;
174 memset(egpu.enhancement_scanouts, 0, sizeof(egpu.enhancement_scanouts));
175 egpu.enhancement_scanout_eselect = 0;
176 update_enhancement_buf_scanouts(&egpu,
177 gpu.screen.src_x, gpu.screen.src_y, gpu.screen.hres, vres);
180 sync_enhancement_buffers(x, y, w, h);
184 void renderer_flush_queues(void)
186 flush_render_block_buffer(&egpu);
189 void renderer_set_interlace(int enable, int is_odd)
191 egpu.render_mode &= ~(RENDER_INTERLACE_ENABLED|RENDER_INTERLACE_ODD);
193 egpu.render_mode |= RENDER_INTERLACE_ENABLED;
195 egpu.render_mode |= RENDER_INTERLACE_ODD;
198 void renderer_notify_res_change(void)
200 renderer_notify_scanout_change(gpu.screen.src_x, gpu.screen.src_y);
203 void renderer_notify_scanout_change(int x, int y)
205 int vres = gpu.screen.vres;
206 if (!gpu.state.enhancement_active || !egpu.enhancement_buf_ptr)
209 if (gpu.screen.y < 0)
210 vres -= gpu.screen.y;
211 update_enhancement_buf_scanouts(&egpu, x, y, gpu.screen.hres, vres);
214 #include "../../frontend/plugin_lib.h"
216 void renderer_set_config(const struct rearmed_cbs *cbs)
219 initialize_psx_gpu(&egpu, gpu.vram);
222 if (cbs->pl_set_gpu_caps)
223 cbs->pl_set_gpu_caps(GPU_CAP_SUPPORTS_2X);
225 egpu.use_dithering = cbs->gpu_neon.allow_dithering;
226 if(!egpu.use_dithering) {
227 egpu.dither_table[0] = dither_table_row(0, 0, 0, 0);
228 egpu.dither_table[1] = dither_table_row(0, 0, 0, 0);
229 egpu.dither_table[2] = dither_table_row(0, 0, 0, 0);
230 egpu.dither_table[3] = dither_table_row(0, 0, 0, 0);
232 egpu.dither_table[0] = dither_table_row(-4, 0, -3, 1);
233 egpu.dither_table[1] = dither_table_row(2, -2, 3, -1);
234 egpu.dither_table[2] = dither_table_row(-3, 1, -4, 0);
235 egpu.dither_table[3] = dither_table_row(3, -1, 2, -2);
238 disable_main_render = cbs->gpu_neon.enhancement_no_main;
239 if (gpu.state.enhancement_enable) {
240 if (gpu.mmap != NULL && egpu.enhancement_buf_ptr == NULL)
241 map_enhancement_buffer();
245 void renderer_sync(void)
249 void renderer_notify_update_lace(int updated)
// vim:ts=2:sw=2:expandtab