| 1 | /* |
 * (C) Gražvydas "notaz" Ignotas, 2011
| 3 | * |
| 4 | * This work is licensed under the terms of any of these licenses |
| 5 | * (at your option): |
| 6 | * - GNU GPL, version 2 or later. |
| 7 | * - GNU LGPL, version 2.1 or later. |
| 8 | * See the COPYING file in the top-level directory. |
| 9 | */ |
| 10 | |
| 11 | #include <stdio.h> |
| 12 | |
| 13 | #ifdef _WIN32 |
| 14 | #include <mman.h> |
| 15 | #else |
| 16 | #include <sys/mman.h> |
| 17 | #endif |
| 18 | |
| 19 | extern const unsigned char cmd_lengths[256]; |
| 20 | #define command_lengths cmd_lengths |
| 21 | |
| 22 | static unsigned int *ex_regs; |
| 23 | static int initialized; |
| 24 | |
| 25 | #define PCSX |
| 26 | #define SET_Ex(r, v) \ |
| 27 | ex_regs[r] = v |
| 28 | |
| 29 | #include "psx_gpu/psx_gpu.c" |
| 30 | #include "psx_gpu/psx_gpu_parse.c" |
| 31 | #include "../gpulib/gpu.h" |
| 32 | |
| 33 | static psx_gpu_struct egpu __attribute__((aligned(256))); |
| 34 | |
| 35 | int do_cmd_list(uint32_t *list, int count, int *last_cmd) |
| 36 | { |
| 37 | int ret; |
| 38 | |
| 39 | #if defined(__arm__) && defined(NEON_BUILD) && !defined(SIMD_BUILD) |
| 40 | // the asm doesn't bother to save callee-save vector regs, so do it here |
| 41 | __asm__ __volatile__("":::"q4","q5","q6","q7"); |
| 42 | #endif |
| 43 | |
| 44 | if (gpu.state.enhancement_active) |
| 45 | ret = gpu_parse_enhanced(&egpu, list, count * 4, (u32 *)last_cmd); |
| 46 | else |
| 47 | ret = gpu_parse(&egpu, list, count * 4, (u32 *)last_cmd); |
| 48 | |
| 49 | #if defined(__arm__) && defined(NEON_BUILD) && !defined(SIMD_BUILD) |
| 50 | __asm__ __volatile__("":::"q4","q5","q6","q7"); |
| 51 | #endif |
| 52 | |
| 53 | ex_regs[1] &= ~0x1ff; |
| 54 | ex_regs[1] |= egpu.texture_settings & 0x1ff; |
| 55 | return ret; |
| 56 | } |
| 57 | |
| 58 | #define ENHANCEMENT_BUF_SIZE (1024 * 1024 * 2 * 4 + 4096 * 2) |
| 59 | |
| 60 | static uint16_t *get_enhancement_bufer(int *x, int *y, int *w, int *h, |
| 61 | int *vram_h) |
| 62 | { |
| 63 | uint16_t *ret = select_enhancement_buf_ptr(&egpu, *x); |
| 64 | |
| 65 | *x *= 2; |
| 66 | *y *= 2; |
| 67 | *w = *w * 2; |
| 68 | *h = *h * 2; |
| 69 | *vram_h = 1024; |
| 70 | return ret; |
| 71 | } |
| 72 | |
| 73 | static void map_enhancement_buffer(void) |
| 74 | { |
| 75 | // currently we use 4x 1024*1024 buffers instead of single 2048*1024 |
| 76 | // to be able to reuse 1024-width code better (triangle setup, |
| 77 | // dithering phase, lines). |
| 78 | egpu.enhancement_buf_ptr = gpu.mmap(ENHANCEMENT_BUF_SIZE); |
| 79 | if (egpu.enhancement_buf_ptr == NULL) { |
| 80 | fprintf(stderr, "failed to map enhancement buffer\n"); |
| 81 | gpu.get_enhancement_bufer = NULL; |
| 82 | } |
| 83 | else { |
| 84 | egpu.enhancement_buf_ptr += 4096 / 2; |
| 85 | gpu.get_enhancement_bufer = get_enhancement_bufer; |
| 86 | } |
| 87 | } |
| 88 | |
| 89 | int renderer_init(void) |
| 90 | { |
| 91 | if (gpu.vram != NULL) { |
| 92 | initialize_psx_gpu(&egpu, gpu.vram); |
| 93 | initialized = 1; |
| 94 | } |
| 95 | |
| 96 | if (gpu.mmap != NULL && egpu.enhancement_buf_ptr == NULL) |
| 97 | map_enhancement_buffer(); |
| 98 | |
| 99 | ex_regs = gpu.ex_regs; |
| 100 | return 0; |
| 101 | } |
| 102 | |
| 103 | void renderer_finish(void) |
| 104 | { |
| 105 | if (egpu.enhancement_buf_ptr != NULL) { |
| 106 | egpu.enhancement_buf_ptr -= 4096 / 2; |
| 107 | gpu.munmap(egpu.enhancement_buf_ptr, ENHANCEMENT_BUF_SIZE); |
| 108 | } |
| 109 | egpu.enhancement_buf_ptr = NULL; |
| 110 | egpu.enhancement_current_buf_ptr = NULL; |
| 111 | initialized = 0; |
| 112 | } |
| 113 | |
/*
 * Re-scale a vram rectangle into the enhancement buffers (2x upscale).
 * Walks the region left to right, batching consecutive step_x-wide
 * columns that map to the same enhancement buffer index so each batch
 * is scaled with a single scale2x_tiles8() call.
 */
static __attribute__((noinline)) void
sync_enhancement_buffers(int x, int y, int w, int h)
{
  // vram pixels covered by one enhancement_buf_by_x16[] entry
  const int step_x = 1024 / sizeof(egpu.enhancement_buf_by_x16);
  u16 *src, *dst;
  int w1, fb_index;

  // widen [x, x+w) outward to step_x-aligned boundaries
  w += x & (step_x - 1);
  x &= ~(step_x - 1);
  w = (w + step_x - 1) & ~(step_x - 1);
  // clamp the bottom edge to the 512-line vram
  // NOTE(review): if y > 512, h goes negative here — assumes callers
  // never pass y beyond vram height; confirm against gpulib callers
  if (y + h > 512)
    h = 512 - y;

  while (w > 0) {
    // extend the run while the buffer index stays the same
    fb_index = egpu.enhancement_buf_by_x16[x / step_x];
    for (w1 = 0; w > 0; w1++, w -= step_x)
      if (fb_index != egpu.enhancement_buf_by_x16[x / step_x + w1])
        break;

    src = gpu.vram + y * 1024 + x;
    dst = select_enhancement_buf_ptr(&egpu, x);
    dst += (y * 1024 + x) * 2;  // destination is 2x scaled in both axes
    scale2x_tiles8(dst, src, w1 * step_x / 8, h);

    x += w1 * step_x;
  }
}
| 141 | |
| 142 | void renderer_sync_ecmds(uint32_t *ecmds) |
| 143 | { |
| 144 | gpu_parse(&egpu, ecmds + 1, 6 * 4, NULL); |
| 145 | } |
| 146 | |
| 147 | void renderer_update_caches(int x, int y, int w, int h) |
| 148 | { |
| 149 | update_texture_cache_region(&egpu, x, y, x + w - 1, y + h - 1); |
| 150 | if (gpu.state.enhancement_active && |
| 151 | !(gpu.status & PSX_GPU_STATUS_RGB24)) |
| 152 | sync_enhancement_buffers(x, y, w, h); |
| 153 | } |
| 154 | |
| 155 | void renderer_flush_queues(void) |
| 156 | { |
| 157 | flush_render_block_buffer(&egpu); |
| 158 | } |
| 159 | |
| 160 | void renderer_set_interlace(int enable, int is_odd) |
| 161 | { |
| 162 | egpu.render_mode &= ~(RENDER_INTERLACE_ENABLED|RENDER_INTERLACE_ODD); |
| 163 | if (enable) |
| 164 | egpu.render_mode |= RENDER_INTERLACE_ENABLED; |
| 165 | if (is_odd) |
| 166 | egpu.render_mode |= RENDER_INTERLACE_ODD; |
| 167 | } |
| 168 | |
| 169 | void renderer_notify_res_change(void) |
| 170 | { |
| 171 | // note: must keep it multiple of 8 |
| 172 | if (egpu.enhancement_x_threshold != gpu.screen.hres) |
| 173 | { |
| 174 | egpu.enhancement_x_threshold = gpu.screen.hres; |
| 175 | update_enhancement_buf_table_from_hres(&egpu); |
| 176 | } |
| 177 | } |
| 178 | |
| 179 | #include "../../frontend/plugin_lib.h" |
| 180 | |
| 181 | void renderer_set_config(const struct rearmed_cbs *cbs) |
| 182 | { |
| 183 | static int enhancement_was_on; |
| 184 | |
| 185 | disable_main_render = cbs->gpu_neon.enhancement_no_main; |
| 186 | if (egpu.enhancement_buf_ptr != NULL && cbs->gpu_neon.enhancement_enable |
| 187 | && !enhancement_was_on) |
| 188 | { |
| 189 | sync_enhancement_buffers(0, 0, 1024, 512); |
| 190 | } |
| 191 | enhancement_was_on = cbs->gpu_neon.enhancement_enable; |
| 192 | |
| 193 | if (!initialized) { |
| 194 | initialize_psx_gpu(&egpu, gpu.vram); |
| 195 | initialized = 1; |
| 196 | } |
| 197 | |
| 198 | if (gpu.mmap != NULL && egpu.enhancement_buf_ptr == NULL) |
| 199 | map_enhancement_buffer(); |
| 200 | if (cbs->pl_set_gpu_caps) |
| 201 | cbs->pl_set_gpu_caps(GPU_CAP_SUPPORTS_2X); |
| 202 | |
| 203 | egpu.use_dithering = cbs->gpu_neon.allow_dithering; |
| 204 | if(!egpu.use_dithering) { |
| 205 | egpu.dither_table[0] = dither_table_row(0, 0, 0, 0); |
| 206 | egpu.dither_table[1] = dither_table_row(0, 0, 0, 0); |
| 207 | egpu.dither_table[2] = dither_table_row(0, 0, 0, 0); |
| 208 | egpu.dither_table[3] = dither_table_row(0, 0, 0, 0); |
| 209 | } else { |
| 210 | egpu.dither_table[0] = dither_table_row(-4, 0, -3, 1); |
| 211 | egpu.dither_table[1] = dither_table_row(2, -2, 3, -1); |
| 212 | egpu.dither_table[2] = dither_table_row(-3, 1, -4, 0); |
| 213 | egpu.dither_table[3] = dither_table_row(3, -1, 2, -2); |
| 214 | } |
| 215 | |
| 216 | } |
/* gpulib entry point: no-op — this renderer runs synchronously. */
void renderer_sync(void)
{
}
/* gpulib entry point: no-op here; 'updated' is intentionally unused. */
void renderer_notify_update_lace(int updated)
{
}