| 1 | /* |
| 2 | * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com> |
 * Copyright (C) 2022 Gražvydas Ignotas "notaz" <notasas@gmail.com>
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or |
| 6 | * modify it under the terms of the GNU General Public License as |
| 7 | * published by the Free Software Foundation; either version 2 of |
| 8 | * the License, or (at your option) any later version. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | * General Public License for more details. |
| 14 | */ |
| 15 | |
| 16 | #include <string.h> |
| 17 | #include "psx_gpu.h" |
| 18 | #include "psx_gpu_simd.h" |
| 19 | //#define ASM_PROTOTYPES |
| 20 | //#include "psx_gpu_simd.h" |
| 21 | #ifndef SIMD_BUILD |
| 22 | #error "please define SIMD_BUILD if you want this gpu_neon C simd implementation" |
| 23 | #endif |
| 24 | |
// GCC/Clang generic vector types standing in for SIMD registers.
// Full-width (16-byte) "gv*" types model 128-bit q registers:
typedef u8 gvu8 __attribute__((vector_size(16)));
typedef u16 gvu16 __attribute__((vector_size(16)));
typedef u32 gvu32 __attribute__((vector_size(16)));
typedef u64 gvu64 __attribute__((vector_size(16)));
typedef s8 gvs8 __attribute__((vector_size(16)));
typedef s16 gvs16 __attribute__((vector_size(16)));
typedef s32 gvs32 __attribute__((vector_size(16)));
typedef s64 gvs64 __attribute__((vector_size(16)));

// Half-width (8-byte) "gvh*" types model 64-bit d registers:
typedef u8 gvhu8 __attribute__((vector_size(8)));
typedef u16 gvhu16 __attribute__((vector_size(8)));
typedef u32 gvhu32 __attribute__((vector_size(8)));
typedef u64 gvhu64 __attribute__((vector_size(8)));
typedef s8 gvhs8 __attribute__((vector_size(8)));
typedef s16 gvhs16 __attribute__((vector_size(8)));
typedef s32 gvhs32 __attribute__((vector_size(8)));
typedef s64 gvhs64 __attribute__((vector_size(8)));
| 42 | |
// Half-width (64-bit) register overlay: lets one value be reinterpreted
// at any element width without explicit conversions, like a NEON d reg.
typedef union
{
  gvhu8 u8;
  gvhu16 u16;
  gvhu32 u32;
  gvhu64 u64;
  //u64 u64;
  //uint64x1_t u64;   // see the gcc notes near the end of the NEON section
  gvhs8 s8;
  gvhs16 s16;
  gvhs32 s32;
  gvhs64 s64;
  //s64 s64;
  //int64x1_t s64;
} gvhreg;
| 58 | |
// Full-width (128-bit) register overlay, the q-register counterpart of
// gvhreg.  All gv*() macros below read/write through one of these views.
typedef union
{
  gvu8 u8;
  gvu16 u16;
  gvu32 u32;
  gvu64 u64;
  gvs8 s8;
  gvs16 s16;
  gvs32 s32;
  gvs64 s64;
  // this may be tempting, but it causes gcc to do lots of stack spills
  //gvhreg h[2];
} gvreg;
| 72 | |
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
#include <arm_neon.h>

// gv*() wrappers over NEON intrinsics.  Conventions mirror the intrinsic
// names: destination is always the first macro argument, "q" = 128-bit
// operands, "l" = lane, "n" = immediate/scalar operand, trailing element
// type gives the width the operation works on.
#define gvaddhn_u32(d, a, b) d.u16 = vaddhn_u32(a.u32, b.u32)
#define gvaddw_s32(d, a, b) d.s64 = vaddw_s32(a.s64, b.s32)
#define gvabsq_s32(d, s) d.s32 = vabsq_s32(s.s32)
#define gvbic_n_u16(d, n) d.u16 = vbic_u16(d.u16, vmov_n_u16(n))
#define gvbifq(d, a, b) d.u8 = vbslq_u8(b.u8, d.u8, a.u8)
#define gvbit(d, a, b) d.u8 = vbsl_u8(b.u8, a.u8, d.u8)
#define gvceqq_u16(d, a, b) d.u16 = vceqq_u16(a.u16, b.u16)
#define gvcgt_s16(d, a, b) d.u16 = vcgt_s16(a.s16, b.s16)
#define gvclt_s16(d, a, b) d.u16 = vclt_s16(a.s16, b.s16)
#define gvcreate_s32(d, a, b) d.s32 = vcreate_s32((u32)(a) | ((u64)(b) << 32))
#define gvcreate_u32(d, a, b) d.u32 = vcreate_u32((u32)(a) | ((u64)(b) << 32))
#define gvcreate_s64(d, s) d.s64 = (gvhs64)vcreate_s64(s)
#define gvcreate_u64(d, s) d.u64 = (gvhu64)vcreate_u64(s)
#define gvcombine_u16(d, l, h) d.u16 = vcombine_u16(l.u16, h.u16)
#define gvcombine_u32(d, l, h) d.u32 = vcombine_u32(l.u32, h.u32)
#define gvcombine_s64(d, l, h) d.s64 = vcombine_s64((int64x1_t)l.s64, (int64x1_t)h.s64)
#define gvdup_l_u8(d, s, l) d.u8 = vdup_lane_u8(s.u8, l)
#define gvdup_l_u16(d, s, l) d.u16 = vdup_lane_u16(s.u16, l)
#define gvdup_l_u32(d, s, l) d.u32 = vdup_lane_u32(s.u32, l)
#define gvdupq_l_s64(d, s, l) d.s64 = vdupq_lane_s64((int64x1_t)s.s64, l)
#define gvdupq_l_u32(d, s, l) d.u32 = vdupq_lane_u32(s.u32, l)
#define gvdup_n_s64(d, n) d.s64 = vdup_n_s64(n)
#define gvdup_n_u8(d, n) d.u8 = vdup_n_u8(n)
#define gvdup_n_u16(d, n) d.u16 = vdup_n_u16(n)
#define gvdup_n_u32(d, n) d.u32 = vdup_n_u32(n)
#define gvdupq_n_u16(d, n) d.u16 = vdupq_n_u16(n)
#define gvdupq_n_u32(d, n) d.u32 = vdupq_n_u32(n)
#define gvdupq_n_s64(d, n) d.s64 = vdupq_n_s64(n)
#define gvhaddq_u16(d, a, b) d.u16 = vhaddq_u16(a.u16, b.u16)
#define gvmax_s16(d, a, b) d.s16 = vmax_s16(a.s16, b.s16)
#define gvmin_s16(d, a, b) d.s16 = vmin_s16(a.s16, b.s16)
#define gvminq_u8(d, a, b) d.u8 = vminq_u8(a.u8, b.u8)
#define gvminq_u16(d, a, b) d.u16 = vminq_u16(a.u16, b.u16)
#define gvmla_s32(d, a, b) d.s32 = vmla_s32(d.s32, a.s32, b.s32)
#define gvmla_u32(d, a, b) d.u32 = vmla_u32(d.u32, a.u32, b.u32)
#define gvmlaq_s32(d, a, b) d.s32 = vmlaq_s32(d.s32, a.s32, b.s32)
#define gvmlaq_u32(d, a, b) d.u32 = vmlaq_u32(d.u32, a.u32, b.u32)
#define gvmlal_s32(d, a, b) d.s64 = vmlal_s32(d.s64, a.s32, b.s32)
#define gvmlal_u8(d, a, b) d.u16 = vmlal_u8(d.u16, a.u8, b.u8)
#define gvmlsq_s32(d, a, b) d.s32 = vmlsq_s32(d.s32, a.s32, b.s32)
#define gvmlsq_l_s32(d, a, b, l) d.s32 = vmlsq_lane_s32(d.s32, a.s32, b.s32, l)
#define gvmov_l_s32(d, s, l) d.s32 = vset_lane_s32(s, d.s32, l)
#define gvmov_l_u32(d, s, l) d.u32 = vset_lane_u32(s, d.u32, l)
#define gvmovl_u8(d, s) d.u16 = vmovl_u8(s.u8)
#define gvmovl_s32(d, s) d.s64 = vmovl_s32(s.s32)
#define gvmovn_u16(d, s) d.u8 = vmovn_u16(s.u16)
#define gvmovn_u32(d, s) d.u16 = vmovn_u32(s.u32)
#define gvmovn_u64(d, s) d.u32 = vmovn_u64(s.u64)
#define gvmul_s32(d, a, b) d.s32 = vmul_s32(a.s32, b.s32)
#define gvmull_s16(d, a, b) d.s32 = vmull_s16(a.s16, b.s16)
#define gvmull_s32(d, a, b) d.s64 = vmull_s32(a.s32, b.s32)
#define gvmull_u8(d, a, b) d.u16 = vmull_u8(a.u8, b.u8)
#define gvmull_l_u32(d, a, b, l) d.u64 = vmull_lane_u32(a.u32, b.u32, l)
#define gvmlsl_s16(d, a, b) d.s32 = vmlsl_s16(d.s32, a.s16, b.s16)
#define gvneg_s32(d, s) d.s32 = vneg_s32(s.s32)
#define gvqadd_u8(d, a, b) d.u8 = vqadd_u8(a.u8, b.u8)
#define gvqsub_u8(d, a, b) d.u8 = vqsub_u8(a.u8, b.u8)
#define gvshl_u16(d, a, b) d.u16 = vshl_u16(a.u16, b.s16)
#define gvshlq_s64(d, a, b) d.s64 = vshlq_s64(a.s64, b.s64)
#define gvshlq_u32(d, a, b) d.u32 = vshlq_u32(a.u32, b.s32)
#define gvshlq_u64(d, a, b) d.u64 = vshlq_u64(a.u64, b.s64)
#define gvshrq_n_s16(d, s, n) d.s16 = vshrq_n_s16(s.s16, n)
#define gvshrq_n_u16(d, s, n) d.u16 = vshrq_n_u16(s.u16, n)
#define gvshl_n_u32(d, s, n) d.u32 = vshl_n_u32(s.u32, n)
#define gvshlq_n_u16(d, s, n) d.u16 = vshlq_n_u16(s.u16, n)
#define gvshlq_n_u32(d, s, n) d.u32 = vshlq_n_u32(s.u32, n)
#define gvshll_n_s8(d, s, n) d.s16 = vshll_n_s8(s.s8, n)
#define gvshll_n_u8(d, s, n) d.u16 = vshll_n_u8(s.u8, n)
#define gvshll_n_u16(d, s, n) d.u32 = vshll_n_u16(s.u16, n)
#define gvshr_n_u8(d, s, n) d.u8 = vshr_n_u8(s.u8, n)
#define gvshr_n_u16(d, s, n) d.u16 = vshr_n_u16(s.u16, n)
#define gvshr_n_u32(d, s, n) d.u32 = vshr_n_u32(s.u32, n)
#define gvshr_n_u64(d, s, n) d.u64 = (gvhu64)vshr_n_u64((uint64x1_t)s.u64, n)
#define gvshrn_n_s64(d, s, n) d.s32 = vshrn_n_s64(s.s64, n)
#define gvshrn_n_u16(d, s, n) d.u8 = vshrn_n_u16(s.u16, n)
#define gvshrn_n_u32(d, s, n) d.u16 = vshrn_n_u32(s.u32, n)
#define gvsli_n_u8(d, s, n) d.u8 = vsli_n_u8(d.u8, s.u8, n)
#define gvsri_n_u8(d, s, n) d.u8 = vsri_n_u8(d.u8, s.u8, n)
#define gvtstq_u16(d, a, b) d.u16 = vtstq_u16(a.u16, b.u16)
#define gvqshrun_n_s16(d, s, n) d.u8 = vqshrun_n_s16(s.s16, n)
#define gvqsubq_u8(d, a, b) d.u8 = vqsubq_u8(a.u8, b.u8)
#define gvqsubq_u16(d, a, b) d.u16 = vqsubq_u16(a.u16, b.u16)

// 64-bit half access; gvlo()/gvhi() are expression forms (statement
// expressions) so they can be used directly as arguments to other macros.
#define gvget_lo(d, s) d.u16 = vget_low_u16(s.u16)
#define gvget_hi(d, s) d.u16 = vget_high_u16(s.u16)
#define gvlo(s) ({gvhreg t_; gvget_lo(t_, s); t_;})
#define gvhi(s) ({gvhreg t_; gvget_hi(t_, s); t_;})

#define gvset_lo(d, s) d.u16 = vcombine_u16(s.u16, gvhi(d).u16)
#define gvset_hi(d, s) d.u16 = vcombine_u16(gvlo(d).u16, s.u16)

// 16-byte table lookup (b indexes into the two halves of a)
#define gvtbl2_u8(d, a, b) { \
  uint8x8x2_t v_; \
  v_.val[0] = vget_low_u8(a.u8); v_.val[1] = vget_high_u8(a.u8); \
  d.u8 = vtbl2_u8(v_, b.u8); \
}

// byte/halfword interleave helpers
#define gvzip_u8(d, a, b) { \
  uint8x8x2_t v_ = vzip_u8(a.u8, b.u8); \
  d.u8 = vcombine_u8(v_.val[0], v_.val[1]); \
}
#define gvzipq_u16(d0, d1, s0, s1) { \
  uint16x8x2_t v_ = vzipq_u16(s0.u16, s1.u16); \
  d0.u16 = v_.val[0]; d1.u16 = v_.val[1]; \
}

// loads; the "2" variants de-interleave into two registers
#define gvld1_u8(d, s) d.u8 = vld1_u8(s)
#define gvld1_u32(d, s) d.u32 = vld1_u32((const u32 *)(s))
#define gvld1q_u8(d, s) d.u8 = vld1q_u8(s)
#define gvld1q_u16(d, s) d.u16 = vld1q_u16(s)
#define gvld1q_u32(d, s) d.u32 = vld1q_u32((const u32 *)(s))
#define gvld2_dup(v0, v1, p) { \
  uint8x8x2_t v_ = vld2_dup_u8(p); \
  v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \
}
#define gvld2q_u8(v0, v1, p) { \
  uint8x16x2_t v_ = vld2q_u8(p); \
  v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \
}

// stores; "_inc" variants advance the pointer by i bytes, "_pi" variants
// post-increment by the amount stored ("pi" as in the old asm post-index)
#define gvst1_u8(v, p) \
  vst1_u8(p, v.u8)
#define gvst1q_u16(v, p) \
  vst1q_u16(p, v.u16)
#define gvst1q_inc_u32(v, p, i) { \
  vst1q_u32((u32 *)(p), v.u32); \
  p += (i) / sizeof(*p); \
}
#define gvst2_u8(v0, v1, p) { \
  uint8x8x2_t v_; \
  v_.val[0] = v0.u8; v_.val[1] = v1.u8; \
  vst2_u8(p, v_); \
}
#define gvst2_u16(v0, v1, p) { \
  uint16x4x2_t v_; \
  v_.val[0] = v0.u16; v_.val[1] = v1.u16; \
  vst2_u16(p, v_); \
}
#define gvst2q_u8(v0, v1, p) { \
  uint8x16x2_t v_; \
  v_.val[0] = v0.u8; v_.val[1] = v1.u8; \
  vst2q_u8(p, v_); \
}
#define gvst4_4_inc_u32(v0, v1, v2, v3, p, i) { \
  uint32x2x4_t v_; \
  v_.val[0] = v0.u32; v_.val[1] = v1.u32; v_.val[2] = v2.u32; v_.val[3] = v3.u32; \
  vst4_u32(p, v_); p += (i) / sizeof(*p); \
}
#define gvst4_pi_u16(v0, v1, v2, v3, p) { \
  uint16x4x4_t v_; \
  v_.val[0] = v0.u16; v_.val[1] = v1.u16; v_.val[2] = v2.u16; v_.val[3] = v3.u16; \
  vst4_u16((u16 *)(p), v_); p += sizeof(v_) / sizeof(*p); \
}
#define gvst1q_pi_u32(v, p) \
  gvst1q_inc_u32(v, p, sizeof(v))
// could use vst1q_u32_x2 but that's not always available
#define gvst1q_2_pi_u32(v0, v1, p) { \
  gvst1q_inc_u32(v0, p, sizeof(v0)); \
  gvst1q_inc_u32(v1, p, sizeof(v1)); \
}

/* notes:
 - gcc > 9: (arm32) int64x1_t type produces ops on gp regs
   (also u64 __attribute__((vector_size(8))) :( )
 - gcc <11: (arm32) handles '<vec> == 0' poorly
*/

/*
#elif defined(__SSE2__)
#include <x86intrin.h>
*/
#else
#error "arch not supported or SIMD support was not enabled by your compiler"
#endif
| 250 | |
// the below have intrinsics but they evaluate to basic operations on both gcc and clang
// (plain GCC vector-extension operators, so they work on any backend;
// the *q aliases exist only to keep the NEON-style naming consistent)
#define gvadd_s64(d, a, b) d.s64 = a.s64 + b.s64
#define gvadd_u8(d, a, b) d.u8 = a.u8 + b.u8
#define gvadd_u16(d, a, b) d.u16 = a.u16 + b.u16
#define gvadd_u32(d, a, b) d.u32 = a.u32 + b.u32
#define gvaddq_s64 gvadd_s64
#define gvaddq_u16 gvadd_u16
#define gvaddq_u32 gvadd_u32
#define gvand(d, a, b) d.u32 = a.u32 & b.u32
#define gvbic(d, a, b) d.u32 = a.u32 & ~b.u32
#define gvbicq gvbic
#define gveor(d, a, b) d.u32 = a.u32 ^ b.u32
#define gveorq gveor
#define gvceqz_u16(d, s) d.u16 = s.u16 == 0
#define gvceqzq_u16 gvceqz_u16
#define gvcltz_s16(d, s) d.s16 = s.s16 < 0
#define gvcltzq_s16 gvcltz_s16
#define gvsub_u16(d, a, b) d.u16 = a.u16 - b.u16
#define gvsub_u32(d, a, b) d.u32 = a.u32 - b.u32
#define gvsubq_u16 gvsub_u16
#define gvsubq_u32 gvsub_u32
#define gvorr(d, a, b) d.u32 = a.u32 | b.u32
#define gvorrq gvorr
| 274 | |
#if defined(__arm__)

// ARM32: use the DSP instructions directly.
// gssub16: per-halfword subtract (SSUB16);
// gsmusdx: dual signed 16x16 multiply-subtract with the second operand's
// halfwords exchanged (SMUSDX).
#define gssub16(d, a, b) asm("ssub16 %0,%1,%2" : "=r"(d) : "r"(a), "r"(b))
#define gsmusdx(d, a, b) asm("smusdx %0,%1,%2" : "=r"(d) : "r"(a), "r"(b))

#if 0
// gcc/config/arm/arm.c
#undef gvadd_s64
#define gvadd_s64(d, a, b) asm("vadd.i64 %P0,%P1,%P2" : "=w"(d.s64) : "w"(a.s64), "w"(b.s64))
#endif

#else

// Portable equivalents of the ARM DSP ops above:
// gssub16: d = { lo(a)-lo(b), hi(a)-hi(b) }, each half mod 2^16
#define gssub16(d, a, b) d = (u16)((a) - (b)) | ((((a) >> 16) - ((b) >> 16)) << 16)
// gsmusdx: d = lo(a)*hi(b) - hi(a)*lo(b), signed 16x16 -> 32
#define gsmusdx(d, a, b) d = ((s32)(s16)(a) * ((s32)(b) >> 16)) \
                           - (((s32)(a) >> 16) * (s16)(b))

#endif
| 293 | |
// for compatibility with the original psx_gpu.c code
// All 128-bit vec_* names alias gvreg and all 64-bit ones alias gvhreg;
// the name only documents the element layout the caller intends.
#define vec_2x64s gvreg
#define vec_2x64u gvreg
#define vec_4x32s gvreg
#define vec_4x32u gvreg
#define vec_8x16s gvreg
#define vec_8x16u gvreg
#define vec_16x8s gvreg
#define vec_16x8u gvreg
#define vec_1x64s gvhreg
#define vec_1x64u gvhreg
#define vec_2x32s gvhreg
#define vec_2x32u gvhreg
#define vec_4x16s gvhreg
#define vec_4x16u gvhreg
#define vec_8x8s gvhreg
#define vec_8x8u gvhreg
| 311 | |
| 312 | #if 0 |
| 313 | #include <stdio.h> |
| 314 | #include <stdlib.h> |
| 315 | #include <unistd.h> |
// number of comparison calls completed without a mismatch
static int ccount;

/* Debug helper: compare len bytes at a_ and b_ one 32-bit word at a
 * time (a final partial word is masked off), print the first differing
 * word pair and terminate the process on a mismatch. */
void cmpp(const char *name, const void *a_, const void *b_, size_t len)
{
  static const uint32_t tail_mask[] = { 0, 0xff, 0xffff, 0xffffff };
  const uint32_t *wa = a_;
  const uint32_t *wb = b_;
  size_t nwords = (len + 3) / 4;
  size_t w;

  for (w = 0; w < nwords; w++) {
    size_t bytes_left = len - w * 4;
    uint32_t m = bytes_left >= 4 ? ~0u : tail_mask[bytes_left];
    if (((wa[w] ^ wb[w]) & m) != 0) {
      printf("%s: %08x %08x [%03zx/%zu] #%d\n",
        name, wa[w] & m, wb[w] & m, w*4, w, ccount);
      exit(1);
    }
  }
  ccount++;
}
// compare a psx_gpu field / array against its "_c" reference copy
#define ccmpf(n) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c))
#define ccmpa(n,c) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c[0]) * c)
| 335 | |
/* Print the contents of a 64-bit (d) or, when is_q is set, 128-bit (q)
 * register image at 'dump' as hex words, prefixed with 'name'. */
void dump_r_(const char *name, void *dump, int is_q)
{
  const unsigned long long *words = dump;
  int count = is_q ? 2 : 1;

  printf("%10s %016llx ", name, words[0]);
  if (count > 1)
    printf("%016llx", words[1]);
  puts("");
}
// noinline/noclone keep these as real symbols callable from a debugger
void __attribute__((noinline,noclone)) dump_r_d(const char *name, void *dump)
{ dump_r_(name, dump, 0); }
void __attribute__((noinline,noclone)) dump_r_q(const char *name, void *dump)
{ dump_r_(name, dump, 1); }
// spill a gv register to memory and print it (d/q variants)
#define dumprd(n) { u8 dump_[8]; gvst1_u8(n, dump_); dump_r_d(#n, dump_); }
#define dumprq(n) { u16 dump_[8]; gvst1q_u16(n, dump_); dump_r_q(#n, dump_); }
| 351 | #endif |
| 352 | |
// Compute the per-pixel x/y gradients (interpolation steps) of the
// u, v, r, g interpolants (as SIMD vectors) and of b (as scalars) for
// the triangle (a, b, c), then store the resulting setup words into
// psx_gpu->uvrg and the area following it.  The area reciprocal is
// produced by a double-precision divide so the FPU can work on it
// concurrently with the SIMD/integer code below; the statement order
// deliberately interleaves independent work for scheduling.
void compute_all_gradients(psx_gpu_struct * __restrict__ psx_gpu,
    const vertex_struct * __restrict__ a, const vertex_struct * __restrict__ b,
    const vertex_struct * __restrict__ c)
{
  // bit-level overlays used to hand-assemble the doubles for the divide
  // NOTE(review): the { l; h } word order assumes little-endian doubles
  union { double d; struct { u32 l; u32 h; } i; } divident, divider;
  union { double d; gvhreg v; } d30;

#if 0
  compute_all_gradients_(psx_gpu, a, b, c);
  return;
#endif
  // First compute the triangle area reciprocal and shift. The division will
  // happen concurrently with much of the work which follows.

  // load exponent of 62 into upper half of double
  u32 shift = __builtin_clz(psx_gpu->triangle_area);
  u32 triangle_area_normalized = psx_gpu->triangle_area << shift;

  // load area normalized into lower half of double
  divident.i.l = triangle_area_normalized >> 10;
  divident.i.h = (62 + 1023) << 20;

  divider.i.l = triangle_area_normalized << 20;
  divider.i.h = ((1022 + 31) << 20) + (triangle_area_normalized >> 11);

  d30.d = divident.d / divider.d;        // d30 = ((1 << 62) + ta_n) / ta_n

  // ((x1 - x0) * (y2 - y1)) - ((x2 - x1) * (y1 - y0)) =
  //  ( d0     *  d1      ) - ( d2     *  d3      ) =
  //  ( m0               ) - ( m1               ) = gradient

  // This is split to do 12 elements at a time over three sets: a, b, and c.
  // Technically we only need to do 10 elements (uvrgb_x and uvrgb_y), so
  // two of the slots are unused.

  // Inputs are all 16-bit signed. The m0/m1 results are 32-bit signed, as
  // is g.

  // First type is:  uvrg bxxx xxxx
  // Second type is: yyyy ybyy uvrg
  // Since x_a and y_c are the same the same variable is used for both.

  gvreg v0;
  gvreg v1;
  gvreg v2;
  gvreg uvrg_xxxx0;
  gvreg uvrg_xxxx1;
  gvreg uvrg_xxxx2;

  gvreg y0_ab;
  gvreg y1_ab;
  gvreg y2_ab;

  gvreg d0_ab;
  gvreg d1_ab;
  gvreg d2_ab;
  gvreg d3_ab;

  gvreg ga_uvrg_x;
  gvreg ga_uvrg_y;
  gvreg gw_rg_x;
  gvreg gw_rg_y;
  gvreg w_mask;
  gvreg r_shift;
  gvreg uvrg_dx2, uvrg_dx3;
  gvreg uvrgb_phase;
  gvhreg zero, tmp_lo, tmp_hi;

  gvld1q_u8(v0, (u8 *)a);                // v0 = { uvrg0, b0, x0, y0 }
  gvld1q_u8(v1, (u8 *)b);                // v1 = { uvrg1, b1, x1, y1 }
  gvld1q_u8(v2, (u8 *)c);                // v2 = { uvrg2, b2, x2, y2 }

  // widen the u8 attributes to u16 lanes
  gvmovl_u8(uvrg_xxxx0, gvlo(v0));       // uvrg_xxxx0 = { uv0, rg0, b0-, -- }
  gvmovl_u8(uvrg_xxxx1, gvlo(v1));       // uvrg_xxxx1 = { uv1, rg1, b1-, -- }
  gvmovl_u8(uvrg_xxxx2, gvlo(v2));       // uvrg_xxxx2 = { uv2, rg2, b2-, -- }

  gvdup_l_u16(tmp_lo, gvhi(v0), 1);      // yyyy0 = { yy0, yy0 }
  gvcombine_u16(y0_ab, tmp_lo, gvlo(uvrg_xxxx0));

  gvdup_l_u16(tmp_lo, gvhi(v0), 0);      // xxxx0 = { xx0, xx0 }
  gvset_hi(uvrg_xxxx0, tmp_lo);

  u32 x1_x2 = (u16)b->x | (c->x << 16);  // x1_x2 = { x1, x2 }
  u32 x0_x1 = (u16)a->x | (b->x << 16);  // x0_x1 = { x0, x1 }

  gvdup_l_u16(tmp_lo, gvhi(v1), 1);      // yyyy1 = { yy1, yy1 }
  gvcombine_u16(y1_ab, tmp_lo, gvlo(uvrg_xxxx1));

  gvdup_l_u16(tmp_lo, gvhi(v1), 0);      // xxxx1 = { xx1, xx1 }
  gvset_hi(uvrg_xxxx1, tmp_lo);

  gvdup_l_u16(tmp_lo, gvhi(v2), 1);      // yyyy2 = { yy2, yy2 }
  gvcombine_u16(y2_ab, tmp_lo, gvlo(uvrg_xxxx2));

  gvdup_l_u16(tmp_lo, gvhi(v2), 0);      // xxxx2 = { xx2, xx2 }
  gvset_hi(uvrg_xxxx2, tmp_lo);

  u32 y0_y1 = (u16)a->y | (b->y << 16);  // y0_y1 = { y0, y1 }
  u32 y1_y2 = (u16)b->y | (c->y << 16);  // y1_y2 = { y1, y2 }

  // vertex attribute deltas (the d0..d3 of the formula above)
  gvsubq_u16(d0_ab, uvrg_xxxx1, uvrg_xxxx0);

  u32 b1_b2 = b->b | (c->b << 16);       // b1_b2 = { b1, b2 }

  gvsubq_u16(d2_ab, uvrg_xxxx2, uvrg_xxxx1);

  gvsubq_u16(d1_ab, y2_ab, y1_ab);

  u32 b0_b1 = a->b | (b->b << 16);       // b0_b1 = { b0, b1 }

  u32 dx, dy, db;
  gssub16(dx, x1_x2, x0_x1);             // dx = { x1 - x0, x2 - x1 }
  gssub16(dy, y1_y2, y0_y1);             // dy = { y1 - y0, y2 - y1 }
  gssub16(db, b1_b2, b0_b1);             // db = { b1 - b0, b2 - b1 }

  u32 ga_by, ga_bx;
  gvsubq_u16(d3_ab, y1_ab, y0_ab);
  gsmusdx(ga_by, dx, db);                // ga_by = ((x1 - x0) * (b2 - b1)) -
                                         //         ((x2 - X1) * (b1 - b0))
  gvmull_s16(ga_uvrg_x, gvlo(d0_ab), gvlo(d1_ab));
  gsmusdx(ga_bx, db, dy);                // ga_bx = ((b1 - b0) * (y2 - y1)) -
                                         //         ((b2 - b1) * (y1 - y0))
  gvmlsl_s16(ga_uvrg_x, gvlo(d2_ab), gvlo(d3_ab));
  u32 gs_bx = (s32)ga_bx >> 31;          // movs

  gvmull_s16(ga_uvrg_y, gvhi(d0_ab), gvhi(d1_ab));
  if ((s32)gs_bx < 0) ga_bx = -ga_bx;    // rsbmi

  gvmlsl_s16(ga_uvrg_y, gvhi(d2_ab), gvhi(d3_ab));
  u32 gs_by = (s32)ga_by >> 31;          // movs

  gvhreg d0;
  gvshr_n_u64(d0, d30.v, 22);            // note: on "d30 >> 22" gcc generates junk code

  gvdupq_n_u32(uvrgb_phase, psx_gpu->uvrgb_phase);
  u32 b_base = psx_gpu->uvrgb_phase + (a->b << 16);

  if ((s32)gs_by < 0) ga_by = -ga_by;    // rsbmi
  // all-ones/all-zeros sign masks of the unscaled gradients
  gvreg gs_uvrg_x, gs_uvrg_y;
  gs_uvrg_x.s32 = ga_uvrg_x.s32 < 0;     // gs_uvrg_x = ga_uvrg_x < 0
  gs_uvrg_y.s32 = ga_uvrg_y.s32 < 0;     // gs_uvrg_y = ga_uvrg_y < 0

  gvdupq_n_u32(w_mask, -psx_gpu->triangle_winding); // w_mask = { -w, -w, -w, -w }
  shift -= 62 - 12;                      // shift -= (62 - FIXED_BITS)

  gvreg uvrg_base;
  gvshll_n_u16(uvrg_base, gvlo(uvrg_xxxx0), 16); // uvrg_base = uvrg0 << 16
  gvdupq_n_u32(r_shift, shift);          // r_shift = { shift, shift, shift, shift }

  gvaddq_u32(uvrg_base, uvrg_base, uvrgb_phase);
  gvabsq_s32(ga_uvrg_x, ga_uvrg_x);      // ga_uvrg_x = abs(ga_uvrg_x)

  u32 area_r_s = d0.u32[0];              // area_r_s = triangle_reciprocal
  gvabsq_s32(ga_uvrg_y, ga_uvrg_y);      // ga_uvrg_y = abs(ga_uvrg_y)

  // scale |gradient| by the area reciprocal (lane 0 of d0), 32x32 -> 64
  gvmull_l_u32(gw_rg_x, gvhi(ga_uvrg_x), d0, 0);
  gvmull_l_u32(ga_uvrg_x, gvlo(ga_uvrg_x), d0, 0);
  gvmull_l_u32(gw_rg_y, gvhi(ga_uvrg_y), d0, 0);
  gvmull_l_u32(ga_uvrg_y, gvlo(ga_uvrg_y), d0, 0);

  gvshlq_u64(gw_rg_x, gw_rg_x, r_shift);
  gvshlq_u64(ga_uvrg_x, ga_uvrg_x, r_shift);
  gvshlq_u64(gw_rg_y, gw_rg_y, r_shift);
  gvshlq_u64(ga_uvrg_y, ga_uvrg_y, r_shift);

  // fold the triangle winding into the sign masks, narrow back to 32 bit
  gveorq(gs_uvrg_x, gs_uvrg_x, w_mask);
  gvmovn_u64(tmp_lo, ga_uvrg_x);

  gveorq(gs_uvrg_y, gs_uvrg_y, w_mask);
  gvmovn_u64(tmp_hi, gw_rg_x);

  gvcombine_u32(ga_uvrg_x, tmp_lo, tmp_hi);

  // re-apply the sign: (v ^ mask) - mask negates where mask is all-ones
  gveorq(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x);
  gvmovn_u64(tmp_lo, ga_uvrg_y);

  gvsubq_u32(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x);
  gvmovn_u64(tmp_hi, gw_rg_y);

  gvcombine_u32(ga_uvrg_y, tmp_lo, tmp_hi);

  gveorq(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y);
  ga_bx = ga_bx << 13;

  gvsubq_u32(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y);
  ga_by = ga_by << 13;

  // same reciprocal scaling for the scalar b gradients
  u32 gw_bx_h, gw_by_h;
  gw_bx_h = (u64)ga_bx * area_r_s >> 32;

  gvshlq_n_u32(ga_uvrg_x, ga_uvrg_x, 4);
  gvshlq_n_u32(ga_uvrg_y, ga_uvrg_y, 4);

  gw_by_h = (u64)ga_by * area_r_s >> 32;
  gvdup_n_u32(tmp_lo, a->x);
  gvmlsq_l_s32(uvrg_base, ga_uvrg_x, tmp_lo, 0);

  gs_bx = gs_bx ^ -psx_gpu->triangle_winding;
  gvaddq_u32(uvrg_dx2, ga_uvrg_x, ga_uvrg_x);

  gs_by = gs_by ^ -psx_gpu->triangle_winding;

  u32 r11 = -shift;                      // r11 = negative shift for scalar lsr
  u32 *store_a = psx_gpu->uvrg.e;
  r11 = r11 - (32 - 13);
  u32 *store_b = store_a + 16 / sizeof(u32);

  gvaddq_u32(uvrg_dx3, uvrg_dx2, ga_uvrg_x);
  gvst1q_inc_u32(uvrg_base, store_a, 32);

  gvst1q_inc_u32(ga_uvrg_x, store_b, 32);
  u32 g_bx = (u32)gw_bx_h >> r11;

  gvst1q_inc_u32(ga_uvrg_y, store_a, 32);
  u32 g_by = (u32)gw_by_h >> r11;

  gvdup_n_u32(zero, 0);

  // store { 0, dx, 2*dx, 3*dx } per interpolant, interleaved
  gvst4_4_inc_u32(zero, gvlo(ga_uvrg_x), gvlo(uvrg_dx2), gvlo(uvrg_dx3), store_b, 32);
  g_bx = g_bx ^ gs_bx;

  gvst4_4_inc_u32(zero, gvhi(ga_uvrg_x), gvhi(uvrg_dx2), gvhi(uvrg_dx3), store_b, 32);
  g_bx = g_bx - gs_bx;

  g_bx = g_bx << 4;
  g_by = g_by ^ gs_by;

  b_base -= g_bx * a->x;
  g_by = g_by - gs_by;

  g_by = g_by << 4;

  u32 g_bx2 = g_bx + g_bx;
  u32 g_bx3 = g_bx + g_bx2;

  // 112  (NOTE(review): 112/132 look like byte offsets from psx_gpu->uvrg,
  //       presumably matching the asm version's layout — unverified)
  store_b[0] = 0;
  store_b[1] = g_bx;
  store_b[2] = g_bx2;
  store_b[3] = g_bx3;
  store_b[4] = b_base;
  store_b[5] = g_by;                     // 132
}
| 596 | |
// no-op hook; a debug build can redefine this to validate each span
#define setup_spans_debug_check(span_edge_data_element) \

// Extra locals needed when the triangle has a "minor" third edge that is
// handled separately (the edge_*_alt scalars track that edge).
#define setup_spans_prologue_alternate_yes() \
  vec_2x64s alternate_x; \
  vec_2x64s alternate_dx_dy; \
  vec_4x32s alternate_x_32; \
  vec_2x32s alternate_x_16; \
  \
  vec_4x16u alternate_select; \
  vec_4x16s y_mid_point; \
  \
  s32 y_b = v_b->y; \
  s64 edge_alt; \
  s32 edge_dx_dy_alt; \
  u32 edge_shift_alt \

#define setup_spans_prologue_alternate_no() \

// Common local declarations + initial loads for all setup_spans_*
// variants.  Expects v_a, v_b, v_c (vertex pointers) and psx_gpu in
// scope; pulls the current uvrg interpolants and their y steps from
// psx_gpu and materializes small vector constants.
#define setup_spans_prologue(alternate_active) \
  edge_data_struct *span_edge_data; \
  vec_4x32u *span_uvrg_offset; \
  u32 *span_b_offset; \
  \
  s32 clip; \
  vec_4x32u v_clip; \
  \
  union { vec_2x64s full; vec_1x64s h[2]; } edges_xy; \
  vec_2x32s edges_dx_dy; \
  vec_2x32u edge_shifts; \
  \
  vec_2x64s left_x, right_x; \
  vec_2x64s left_dx_dy, right_dx_dy; \
  vec_4x32s left_x_32, right_x_32; \
  vec_2x32s left_x_32_lo, right_x_32_lo; \
  vec_2x32s left_x_32_hi, right_x_32_hi; \
  vec_4x16s left_right_x_16_lo, left_right_x_16_hi; \
  vec_4x16s y_x4; \
  vec_8x16s left_edge; \
  vec_8x16s right_edge; \
  vec_4x16u span_shift; \
  \
  vec_2x32u c_0x01; \
  vec_4x16u c_0x04; \
  vec_4x16u c_0xFFFE; \
  vec_4x16u c_0x07; \
  \
  vec_2x32s x_starts; \
  vec_2x32s x_ends; \
  \
  s32 x_a = v_a->x; \
  s32 x_b = v_b->x; \
  s32 x_c = v_c->x; \
  s32 y_a = v_a->y; \
  s32 y_c = v_c->y; \
  \
  vec_4x32u uvrg; \
  vec_4x32u uvrg_dy; \
  u32 b = psx_gpu->b; \
  u32 b_dy = psx_gpu->b_dy; \
  const u32 *reciprocal_table = psx_gpu->reciprocal_table_ptr; \
  \
  gvld1q_u32(uvrg, psx_gpu->uvrg.e); \
  gvld1q_u32(uvrg_dy, psx_gpu->uvrg_dy.e); \
  gvdup_n_u32(c_0x01, 0x01); \
  setup_spans_prologue_alternate_##alternate_active() \

// Second half of the prologue: pointers into the span tables plus the
// viewport edge constants used for clamping (right edge is exclusive,
// hence the +1).
#define setup_spans_prologue_b() \
  span_edge_data = psx_gpu->span_edge_data; \
  span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \
  span_b_offset = psx_gpu->span_b_offset; \
  \
  vec_8x16u c_0x0001; \
  \
  gvdupq_n_u16(c_0x0001, 0x0001); \
  gvdupq_n_u16(left_edge, psx_gpu->viewport_start_x); \
  gvdupq_n_u16(right_edge, psx_gpu->viewport_end_x); \
  gvaddq_u16(right_edge, right_edge, c_0x0001); \
  gvdup_n_u16(c_0x04, 0x04); \
  gvdup_n_u16(c_0x07, 0x07); \
  gvdup_n_u16(c_0xFFFE, 0xFFFE); \

| 678 | |
// Compute, for two edges at once, the per-scanline x step (edges_dx_dy)
// and the starting 64-bit x accumulators (edges_xy), replacing division
// by a lookup in reciprocal_table: each entry packs the reciprocal in
// the bits above 10 and a post-shift in the low 5 bits (hence ">> 10"
// and the 0xE0 mask clearing everything but those 5 bits).
// Free variables come from setup_spans_prologue (height, x_starts,
// x_ends, reciprocal_table, c_0x01, edges_xy, edges_dx_dy, edge_shifts).
#define compute_edge_delta_x2() \
{ \
  vec_2x32s heights; \
  vec_2x32s height_reciprocals; \
  vec_2x32s heights_b; \
  vec_2x32u widths; \
  \
  u32 edge_shift = reciprocal_table[height]; \
  \
  gvdup_n_u32(heights, height); \
  gvsub_u32(widths, x_ends, x_starts); \
  \
  gvdup_n_u32(edge_shifts, edge_shift); \
  gvsub_u32(heights_b, heights, c_0x01); \
  gvshr_n_u32(height_reciprocals, edge_shifts, 10); \
  \
  gvmla_s32(heights_b, x_starts, heights); \
  gvbic_n_u16(edge_shifts, 0xE0); \
  gvmul_s32(edges_dx_dy, widths, height_reciprocals); \
  gvmull_s32(edges_xy.full, heights_b, height_reciprocals); \
} \

// Same as compute_edge_delta_x2() but additionally computes a third
// ("alternate"/minor) edge in scalar registers: edge_alt,
// edge_dx_dy_alt, edge_shift_alt (declared by the alternate prologue).
#define compute_edge_delta_x3(start_c, height_a, height_b) \
{ \
  vec_2x32s heights; \
  vec_2x32s height_reciprocals; \
  vec_2x32s heights_b; \
  vec_2x32u widths; \
  \
  u32 width_alt; \
  s32 height_b_alt; \
  u32 height_reciprocal_alt; \
  \
  gvcreate_u32(heights, height_a, height_b); \
  gvcreate_u32(edge_shifts, reciprocal_table[height_a], reciprocal_table[height_b]); \
  \
  edge_shift_alt = reciprocal_table[height_minor_b]; \
  \
  gvsub_u32(widths, x_ends, x_starts); \
  width_alt = x_c - start_c; \
  \
  gvshr_n_u32(height_reciprocals, edge_shifts, 10); \
  height_reciprocal_alt = edge_shift_alt >> 10; \
  \
  gvbic_n_u16(edge_shifts, 0xE0); \
  edge_shift_alt &= 0x1F; \
  \
  gvsub_u32(heights_b, heights, c_0x01); \
  height_b_alt = height_minor_b - 1; \
  \
  gvmla_s32(heights_b, x_starts, heights); \
  height_b_alt += height_minor_b * start_c; \
  \
  gvmull_s32(edges_xy.full, heights_b, height_reciprocals); \
  edge_alt = (s64)height_b_alt * height_reciprocal_alt; \
  \
  gvmul_s32(edges_dx_dy, widths, height_reciprocals); \
  edge_dx_dy_alt = width_alt * height_reciprocal_alt; \
} \

| 738 | |
| 739 | |
// Step the per-lane y coordinates by one 4-line block.
#define setup_spans_adjust_y_up() \
  gvsub_u32(y_x4, y_x4, c_0x04) \

#define setup_spans_adjust_y_down() \
  gvadd_u32(y_x4, y_x4, c_0x04) \

// Step the uvrg vector and scalar b by one scanline in y.
#define setup_spans_adjust_interpolants_up() \
  gvsubq_u32(uvrg, uvrg, uvrg_dy); \
  b -= b_dy \

#define setup_spans_adjust_interpolants_down() \
  gvaddq_u32(uvrg, uvrg, uvrg_dy); \
  b += b_dy \

// Advance/rewind the interpolants by 'clip' scanlines in one step.
#define setup_spans_clip_interpolants_increment() \
  gvmlaq_s32(uvrg, uvrg_dy, v_clip); \
  b += b_dy * clip \

#define setup_spans_clip_interpolants_decrement() \
  gvmlsq_s32(uvrg, uvrg_dy, v_clip); \
  b -= b_dy * clip \

// ... and the same for the alternate (minor) edge, when present.
#define setup_spans_clip_alternate_yes() \
  edge_alt += edge_dx_dy_alt * (s64)(clip) \

#define setup_spans_clip_alternate_no() \

// Skip 'clip' scanlines worth of edge x and interpolant stepping at
// once (used to clip spans against the viewport in y).
#define setup_spans_clip(direction, alternate_active) \
{ \
  gvdupq_n_u32(v_clip, clip); \
  gvmlal_s32(edges_xy.full, edges_dx_dy, gvlo(v_clip)); \
  setup_spans_clip_alternate_##alternate_active(); \
  setup_spans_clip_interpolants_##direction(); \
} \

| 775 | |
| 776 | |
/* Scale both edge x accumulators into their final fixed-point position
   (left shift by the per-edge shift computed with the height reciprocal),
   then build 2-line stepping state: left_x/right_x hold x for two
   consecutive lines (high lane = low lane + dx/dy) and the dx/dy steps are
   doubled so a single add advances both lanes by two lines.
   left_index/right_index pick which packed edge is left and which right. */
#define setup_spans_adjust_edges_alternate_no(left_index, right_index)         \
{                                                                              \
  vec_2x64s edge_shifts_64;                                                    \
  union { vec_2x64s full; vec_1x64s h[2]; } edges_dx_dy_64;                    \
  vec_1x64s left_x_hi, right_x_hi;                                             \
                                                                               \
  gvmovl_s32(edge_shifts_64, edge_shifts);                                     \
  gvshlq_s64(edges_xy.full, edges_xy.full, edge_shifts_64);                    \
                                                                               \
  gvmovl_s32(edges_dx_dy_64.full, edges_dx_dy);                                \
  gvshlq_s64(edges_dx_dy_64.full, edges_dx_dy_64.full, edge_shifts_64);        \
                                                                               \
  gvdupq_l_s64(left_x, edges_xy.h[left_index], 0);                             \
  gvdupq_l_s64(right_x, edges_xy.h[right_index], 0);                           \
                                                                               \
  gvdupq_l_s64(left_dx_dy, edges_dx_dy_64.h[left_index], 0);                   \
  gvdupq_l_s64(right_dx_dy, edges_dx_dy_64.h[right_index], 0);                 \
                                                                               \
  /* high lane holds the next line's x */                                      \
  gvadd_s64(left_x_hi, gvlo(left_x), gvlo(left_dx_dy));                        \
  gvadd_s64(right_x_hi, gvlo(right_x), gvlo(right_dx_dy));                     \
                                                                               \
  gvset_hi(left_x, left_x_hi);                                                 \
  gvset_hi(right_x, right_x_hi);                                               \
                                                                               \
  /* double the steps: one add now advances each lane by two lines */          \
  gvaddq_s64(left_dx_dy, left_dx_dy, left_dx_dy);                              \
  gvaddq_s64(right_dx_dy, right_dx_dy, right_dx_dy);                           \
}                                                                              \

/* Same as above, and additionally prepare the alternate (third) edge:
   y_mid_point (broadcast y_b) marks where spans switch over to it, and
   alternate_x/alternate_dx_dy get the same two-lane stepping layout. */
#define setup_spans_adjust_edges_alternate_yes(left_index, right_index)        \
{                                                                              \
  setup_spans_adjust_edges_alternate_no(left_index, right_index);              \
  s64 edge_dx_dy_alt_64;                                                       \
  vec_1x64s alternate_x_hi;                                                    \
                                                                               \
  gvdup_n_u16(y_mid_point, y_b);                                               \
                                                                               \
  edge_alt <<= edge_shift_alt;                                                 \
  edge_dx_dy_alt_64 = (s64)edge_dx_dy_alt << edge_shift_alt;                   \
                                                                               \
  gvdupq_n_s64(alternate_x, edge_alt);                                         \
  gvdupq_n_s64(alternate_dx_dy, edge_dx_dy_alt_64);                            \
                                                                               \
  gvadd_s64(alternate_x_hi, gvlo(alternate_x), gvlo(alternate_dx_dy));         \
  gvaddq_s64(alternate_dx_dy, alternate_dx_dy, alternate_dx_dy);               \
  gvset_hi(alternate_x, alternate_x_hi);                                       \
}                                                                              \
| 823 | |
| 824 | |
/* Build a per-line mask selecting the spans that lie past the midpoint and
   should therefore use the alternate edge: above it when scanning up,
   below it when scanning down. */
#define setup_spans_y_select_up()                                              \
  gvclt_s16(alternate_select, y_x4, y_mid_point)                               \

#define setup_spans_y_select_down()                                           \
  gvcgt_s16(alternate_select, y_x4, y_mid_point)                               \

#define setup_spans_y_select_alternate_yes(direction)                          \
  setup_spans_y_select_##direction()                                           \

#define setup_spans_y_select_alternate_no(direction)                           \

/* Under the alternate_select mask, substitute the alternate edge's x into
   the left or right span boundary; `none` leaves both untouched. */
#define setup_spans_alternate_select_left()                                    \
  gvbit(left_right_x_16_lo, alternate_x_16, alternate_select);                 \

#define setup_spans_alternate_select_right()                                   \
  gvbit(left_right_x_16_hi, alternate_x_16, alternate_select);                 \

#define setup_spans_alternate_select_none()                                    \

/* Produce the alternate edge's x for 4 lines: take the integer part (top
   32 bits) of the 64-bit accumulator for lines 0/1, step by two lines,
   repeat for lines 2/3, then narrow the four values to 16 bits. */
#define setup_spans_increment_alternate_yes()                                  \
{                                                                              \
  vec_2x32s alternate_x_32_lo, alternate_x_32_hi;                              \
  gvshrn_n_s64(alternate_x_32_lo, alternate_x, 32);                            \
  gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy);                       \
  gvshrn_n_s64(alternate_x_32_hi, alternate_x, 32);                            \
  gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy);                       \
  gvcombine_u32(alternate_x_32, alternate_x_32_lo, alternate_x_32_hi);         \
  gvmovn_u32(alternate_x_16, alternate_x_32);                                  \
}                                                                              \

#define setup_spans_increment_alternate_no()                                   \
| 856 | |
/* Emit span data for 4 consecutive lines: store the per-line interpolant
   start values (uvrg vector + b), derive the 4 left/right span x boundaries
   from the stepped edge accumulators, clamp them to the viewport's
   left/right edges (set up in the prologue), and write interleaved
   {left_x, 8-pixel block count, right-edge pixel mask, y} entries to
   span_edge_data. */
#define setup_spans_set_x4(alternate, direction, alternate_active)             \
{                                                                              \
  /* store interpolants for each of the 4 lines, stepping after each */        \
  gvst1q_pi_u32(uvrg, span_uvrg_offset);                                       \
  *span_b_offset++ = b;                                                        \
  setup_spans_adjust_interpolants_##direction();                               \
                                                                               \
  gvst1q_pi_u32(uvrg, span_uvrg_offset);                                       \
  *span_b_offset++ = b;                                                        \
  setup_spans_adjust_interpolants_##direction();                               \
                                                                               \
  gvst1q_pi_u32(uvrg, span_uvrg_offset);                                       \
  *span_b_offset++ = b;                                                        \
  setup_spans_adjust_interpolants_##direction();                               \
                                                                               \
  gvst1q_pi_u32(uvrg, span_uvrg_offset);                                       \
  *span_b_offset++ = b;                                                        \
  setup_spans_adjust_interpolants_##direction();                               \
                                                                               \
  /* x for lines 0/1: integer part (top 32 bits) of the edge accumulators */   \
  gvshrn_n_s64(left_x_32_lo, left_x, 32);                                      \
  gvshrn_n_s64(right_x_32_lo, right_x, 32);                                    \
                                                                               \
  gvaddq_s64(left_x, left_x, left_dx_dy);                                      \
  gvaddq_s64(right_x, right_x, right_dx_dy);                                   \
                                                                               \
  /* x for lines 2/3 after stepping both lanes two lines forward */            \
  gvshrn_n_s64(left_x_32_hi, left_x, 32);                                      \
  gvshrn_n_s64(right_x_32_hi, right_x, 32);                                    \
                                                                               \
  gvaddq_s64(left_x, left_x, left_dx_dy);                                      \
  gvaddq_s64(right_x, right_x, right_dx_dy);                                   \
                                                                               \
  gvcombine_s64(left_x_32, left_x_32_lo, left_x_32_hi);                        \
  gvcombine_s64(right_x_32, right_x_32_lo, right_x_32_hi);                     \
                                                                               \
  gvmovn_u32(left_right_x_16_lo, left_x_32);                                   \
  gvmovn_u32(left_right_x_16_hi, right_x_32);                                  \
                                                                               \
  /* optionally substitute the alternate edge on lines past the midpoint */    \
  setup_spans_increment_alternate_##alternate_active();                        \
  setup_spans_y_select_alternate_##alternate_active(direction);                \
  setup_spans_alternate_select_##alternate();                                  \
                                                                               \
  /* clamp the span boundaries to the viewport edges */                        \
  gvmax_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(left_edge));          \
  gvmax_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(left_edge));          \
  gvmin_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(right_edge));         \
  gvmin_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(right_edge));         \
                                                                               \
  /* width -> block count ((w+7)>>3) and a 0xFFFE << ((w+7)&7) mask for */     \
  /* the partial last 8-pixel block */                                         \
  gvsub_u16(left_right_x_16_hi, left_right_x_16_hi, left_right_x_16_lo);       \
  gvadd_u16(left_right_x_16_hi, left_right_x_16_hi, c_0x07);                   \
  gvand(span_shift, left_right_x_16_hi, c_0x07);                               \
  gvshl_u16(span_shift, c_0xFFFE, span_shift);                                 \
  gvshr_n_u16(left_right_x_16_hi, left_right_x_16_hi, 3);                      \
                                                                               \
  gvst4_pi_u16(left_right_x_16_lo, left_right_x_16_hi, span_shift, y_x4,       \
   span_edge_data);                                                            \
                                                                               \
  setup_spans_adjust_y_##direction();                                          \
}                                                                              \
| 913 | |
| 914 | |
/* Step the alternate edge accumulator back by height_minor_a lines (it is
   produced relative to the minor edge by compute_edge_delta_x3 -- see
   above); no-op variant when no alternate edge is used. */
#define setup_spans_alternate_adjust_yes()                                     \
  edge_alt -= edge_dx_dy_alt * (s64)height_minor_a                             \

#define setup_spans_alternate_adjust_no()                                      \


/* Generate all spans for a downward-scanned triangle part: trim the span
   range to the viewport (advancing edge accumulators and interpolants by
   the clipped line count), pack the first four y values into y_x4,
   finalize the 2-line edge stepping state, then emit spans 4 lines at a
   time.  The loop may overshoot by up to 3 spans; psx_gpu->num_spans is
   set to the exact height beforehand. */
#define setup_spans_down(left_index, right_index, alternate, alternate_active) \
  setup_spans_alternate_adjust_##alternate_active();                           \
  if(y_c > psx_gpu->viewport_end_y)                                            \
    height -= y_c - psx_gpu->viewport_end_y - 1;                               \
                                                                               \
  clip = psx_gpu->viewport_start_y - y_a;                                      \
  if(clip > 0)                                                                 \
  {                                                                            \
    height -= clip;                                                            \
    y_a += clip;                                                               \
    setup_spans_clip(increment, alternate_active);                             \
  }                                                                            \
                                                                               \
  setup_spans_prologue_b();                                                    \
                                                                               \
  if(height > 0)                                                               \
  {                                                                            \
    /* y_x4 = { y_a, y_a+1, y_a+2, y_a+3 } packed as 16-bit lanes */           \
    u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32)           \
              | (u32)((y_a + 1) << 16) | (u16)y_a;                             \
    gvcreate_u64(y_x4, y_x4_);                                                 \
    setup_spans_adjust_edges_alternate_##alternate_active(left_index,          \
     right_index);                                                             \
                                                                               \
    psx_gpu->num_spans = height;                                               \
    do                                                                         \
    {                                                                          \
      setup_spans_set_x4(alternate, down, alternate_active);                   \
      height -= 4;                                                             \
    } while(height > 0);                                                       \
  }                                                                            \
| 951 | |
| 952 | |
/* One-line pre-step of the alternate edge before upward span generation;
   no-op when the alternate edge is inactive. */
#define setup_spans_alternate_pre_increment_yes()                              \
  edge_alt += edge_dx_dy_alt                                                   \

#define setup_spans_alternate_pre_increment_no()                               \

/* When the top line was not clipped away, the alternate-edge ("yes")
   variant trims one line off the height; the "no" variant leaves it. */
#define setup_spans_up_decrement_height_yes()                                  \
  height--                                                                     \

#define setup_spans_up_decrement_height_no()                                   \
  {}                                                                           \

/* Upward-scanning counterpart of setup_spans_down(): clip against the
   viewport (note up-scan clips the bottom via viewport_end_y first), pack
   four decreasing y values, pre-step the edges one line, then emit spans
   4 lines at a time.  num_spans is set to the exact height; the loop may
   overshoot by up to 3. */
#define setup_spans_up(left_index, right_index, alternate, alternate_active)   \
  setup_spans_alternate_adjust_##alternate_active();                           \
  y_a--;                                                                       \
                                                                               \
  if(y_c < psx_gpu->viewport_start_y)                                          \
    height -= psx_gpu->viewport_start_y - y_c;                                 \
  else                                                                         \
    setup_spans_up_decrement_height_##alternate_active();                      \
                                                                               \
  clip = y_a - psx_gpu->viewport_end_y;                                        \
  if(clip > 0)                                                                 \
  {                                                                            \
    height -= clip;                                                            \
    y_a -= clip;                                                               \
    setup_spans_clip(decrement, alternate_active);                             \
  }                                                                            \
                                                                               \
  setup_spans_prologue_b();                                                    \
                                                                               \
  if(height > 0)                                                               \
  {                                                                            \
    /* y_x4 = { y_a, y_a-1, y_a-2, y_a-3 } packed as 16-bit lanes */           \
    u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32)           \
              | (u32)((y_a - 1) << 16) | (u16)y_a;                             \
    gvcreate_u64(y_x4, y_x4_);                                                 \
    gvaddw_s32(edges_xy.full, edges_xy.full, edges_dx_dy);                     \
    setup_spans_alternate_pre_increment_##alternate_active();                  \
    setup_spans_adjust_edges_alternate_##alternate_active(left_index,          \
     right_index);                                                             \
    setup_spans_adjust_interpolants_up();                                      \
                                                                               \
    psx_gpu->num_spans = height;                                               \
    while(height > 0)                                                          \
    {                                                                          \
      setup_spans_set_x4(alternate, up, alternate_active);                     \
      height -= 4;                                                             \
    }                                                                          \
  }                                                                            \
| 1001 | |
/* Lane indices of the two packed edges within edges_xy / edges_dx_dy. */
#define index_left 0
#define index_right 1

/* Common body for triangles scanned entirely upwards with two minor edges
   on one side: heights are computed from the three vertex y values
   (a bottom, c top -- presumed from the subtractions), edge deltas are
   derived for all three edges, and spans are emitted with the alternate
   (third) edge active.  `minor`/`major` name which side the minor edges
   are on. */
#define setup_spans_up_up(minor, major)                                        \
  setup_spans_prologue(yes);                                                   \
  s32 height_minor_a = y_a - y_b;                                              \
  s32 height_minor_b = y_b - y_c;                                              \
  s32 height = y_a - y_c;                                                      \
                                                                               \
  gvdup_n_u32(x_starts, x_a);                                                  \
  gvcreate_u32(x_ends, x_c, x_b);                                              \
                                                                               \
  compute_edge_delta_x3(x_b, height, height_minor_a);                          \
  setup_spans_up(index_##major, index_##minor, minor, yes)                     \
| 1016 | |
// Span setup for an upward-scanned triangle whose minor edges are on the
// left side.  The #if 0 path is a disabled alternative implementation.
void setup_spans_up_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_left_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_up_up(left, right)
}
| 1026 | |
// Span setup for an upward-scanned triangle whose minor edges are on the
// right side.  The #if 0 path is a disabled alternative implementation.
void setup_spans_up_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_right_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_up_up(right, left)
}
| 1036 | |
/* Downward-scanning counterpart of setup_spans_up_up(): same structure,
   with heights computed for a top-to-bottom scan (a top, c bottom --
   presumed from the subtractions). */
#define setup_spans_down_down(minor, major)                                    \
  setup_spans_prologue(yes);                                                   \
  s32 height_minor_a = y_b - y_a;                                              \
  s32 height_minor_b = y_c - y_b;                                              \
  s32 height = y_c - y_a;                                                      \
                                                                               \
  gvdup_n_u32(x_starts, x_a);                                                  \
  gvcreate_u32(x_ends, x_c, x_b);                                              \
                                                                               \
  compute_edge_delta_x3(x_b, height, height_minor_a);                          \
  setup_spans_down(index_##major, index_##minor, minor, yes)                   \
| 1048 | |
// Span setup for a downward-scanned triangle whose minor edges are on the
// left side.  The #if 0 path is a disabled alternative implementation.
void setup_spans_down_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_down_left_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_down_down(left, right)
}
| 1058 | |
// Span setup for a downward-scanned triangle whose minor edges are on the
// right side.  The #if 0 path is a disabled alternative implementation.
void setup_spans_down_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_down_right_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_down_down(right, left)
}
| 1068 | |
/* Upward span setup for a triangle with a flat (horizontal -- presumed)
   edge: only two edges are needed, both spanning the full height, and no
   alternate edge is active. */
#define setup_spans_up_flat()                                                  \
  s32 height = y_a - y_c;                                                      \
                                                                               \
  compute_edge_delta_x2();                                                     \
  setup_spans_up(index_left, index_right, none, no)                            \
| 1074 | |
// Flat-edge upward span setup, variant A: edges start at x_a/x_b and both
// end at x_c.  The #if 0 path is a disabled alternative implementation.
void setup_spans_up_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_a_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvcreate_u32(x_starts, x_a, x_b);
  gvdup_n_u32(x_ends, x_c);

  setup_spans_up_flat()
}
| 1089 | |
// Flat-edge upward span setup, variant B: both edges start at x_a and end
// at x_b/x_c.  The #if 0 path is a disabled alternative implementation.
void setup_spans_up_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_b_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvdup_n_u32(x_starts, x_a);
  gvcreate_u32(x_ends, x_b, x_c);

  setup_spans_up_flat()
}
| 1104 | |
/* Downward counterpart of setup_spans_up_flat(): two full-height edges,
   no alternate edge. */
#define setup_spans_down_flat()                                                \
  s32 height = y_c - y_a;                                                      \
                                                                               \
  compute_edge_delta_x2();                                                     \
  setup_spans_down(index_left, index_right, none, no)                          \
| 1110 | |
// Flat-edge downward span setup, variant A: edges start at x_a/x_b and
// both end at x_c.  The #if 0 path is a disabled alternative implementation.
void setup_spans_down_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_down_a_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvcreate_u32(x_starts, x_a, x_b);
  gvdup_n_u32(x_ends, x_c);

  setup_spans_down_flat()
}
| 1125 | |
| 1126 | void setup_spans_down_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
| 1127 | vertex_struct *v_b, vertex_struct *v_c) |
| 1128 | { |
| 1129 | #if 0 |
| 1130 | setup_spans_down_b_(psx_gpu, v_a, v_b, v_c); |
| 1131 | return; |
| 1132 | #endif |
| 1133 | setup_spans_prologue(no) |
| 1134 | |
| 1135 | gvdup_n_u32(x_starts, x_a); |
| 1136 | gvcreate_u32(x_ends, x_b, x_c); |
| 1137 | |
| 1138 | setup_spans_down_flat() |
| 1139 | } |
| 1140 | |
// Span setup for a triangle that must be scanned in both directions:
// spans are emitted upwards from y_a towards y_b first, then downwards
// from the saved middle_y towards y_c (direction presumed from the height
// computations and the up/down set_x4 calls below).
void setup_spans_up_down(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_down_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  s32 y_b = v_b->y;
  s64 edge_alt;       // third-edge state, filled in by compute_edge_delta_x3
  s32 edge_dx_dy_alt;
  u32 edge_shift_alt;

  s32 middle_y = y_a; // y_a gets consumed by the upward pass; keep a copy
  s32 height_minor_a = y_a - y_b;  // height of the upward part
  s32 height_minor_b = y_c - y_a;  // height of the downward part
  s32 height_major = y_c - y_b;    // full height

  vec_2x64s edges_xy_b;            // edge state saved for the downward part
  vec_1x64s edges_xy_b_left;
  vec_2x32s edges_dx_dy_b;
  vec_2x32u edge_shifts_b;

  vec_2x32s height_increment;

  gvcreate_u32(x_starts, x_a, x_c);
  gvdup_n_u32(x_ends, x_b);

  compute_edge_delta_x3(x_a, height_minor_a, height_major);

  // advance the second packed edge by the downward part's height
  gvcreate_s32(height_increment, 0, height_minor_b);

  gvmlal_s32(edges_xy.full, edges_dx_dy, height_increment);

  // edge pair for the downward part: the third edge in the low half, the
  // (advanced) major edge in the high half
  gvcreate_s64(edges_xy_b_left, edge_alt);
  gvcombine_s64(edges_xy_b, edges_xy_b_left, gvhi(edges_xy.full));

  edge_shifts_b = edge_shifts;
  gvmov_l_u32(edge_shifts_b, edge_shift_alt, 0);

  // the downward part walks in the opposite y direction, so negate the
  // steps, except for the third edge which keeps its own step
  gvneg_s32(edges_dx_dy_b, edges_dx_dy);
  gvmov_l_s32(edges_dx_dy_b, edge_dx_dy_alt, 0);

  y_a--;

  // ---- upward part: clip to the viewport, then emit spans ----
  if(y_b < psx_gpu->viewport_start_y)
    height_minor_a -= psx_gpu->viewport_start_y - y_b;

  clip = y_a - psx_gpu->viewport_end_y;
  if(clip > 0)
  {
    height_minor_a -= clip;
    y_a -= clip;
    setup_spans_clip(decrement, no);
  }

  setup_spans_prologue_b();

  if(height_minor_a > 0)
  {
    // y_x4 = { y_a, y_a-1, y_a-2, y_a-3 } packed as 16-bit lanes
    u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32)
     | (u32)((y_a - 1) << 16) | (u16)y_a;
    gvcreate_u64(y_x4, y_x4_);
    gvaddw_s32(edges_xy.full, edges_xy.full, edges_dx_dy);
    setup_spans_adjust_edges_alternate_no(index_left, index_right);
    setup_spans_adjust_interpolants_up();

    psx_gpu->num_spans = height_minor_a;
    while(height_minor_a > 0)
    {
      setup_spans_set_x4(none, up, no);
      height_minor_a -= 4;
    }

    // the loop overshoots by up to 3 spans and height_minor_a is now <= 0,
    // so these additions rewind the output pointers to the exact end
    span_edge_data += height_minor_a;
    span_uvrg_offset += height_minor_a;
    span_b_offset += height_minor_a;
  }

  // ---- downward part: switch to the saved edges, reload interpolants ----
  edges_xy.full = edges_xy_b;
  edges_dx_dy = edges_dx_dy_b;
  edge_shifts = edge_shifts_b;

  gvld1q_u32(uvrg, psx_gpu->uvrg.e);
  b = psx_gpu->b;

  y_a = middle_y;

  if(y_c > psx_gpu->viewport_end_y)
    height_minor_b -= y_c - psx_gpu->viewport_end_y - 1;

  clip = psx_gpu->viewport_start_y - y_a;
  if(clip > 0)
  {
    height_minor_b -= clip;
    y_a += clip;
    setup_spans_clip(increment, no);
  }

  if(height_minor_b > 0)
  {
    // y_x4 = { y_a, y_a+1, y_a+2, y_a+3 } packed as 16-bit lanes
    u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32)
     | (u32)((y_a + 1) << 16) | (u16)y_a;
    gvcreate_u64(y_x4, y_x4_);
    setup_spans_adjust_edges_alternate_no(index_left, index_right);

    // FIXME: overflow corner case
    if(psx_gpu->num_spans + height_minor_b == MAX_SPANS)
      height_minor_b &= ~3;

    psx_gpu->num_spans += height_minor_b;
    while(height_minor_b > 0)
    {
      setup_spans_set_x4(none, down, no);
      height_minor_b -= 4;
    }
  }
}
| 1260 | |
| 1261 | |
/* Pass a dither table entry through unchanged. */
#define dither_table_entry_normal(value)                                       \
  (value)                                                                      \

/* Broadcast the mask-set MSB only for "direct" targets (the ones that
   write straight to fb_ptr); no-op for indirect targets. */
#define setup_blocks_load_msb_mask_indirect()                                  \

#define setup_blocks_load_msb_mask_direct()                                    \
  vec_8x16u msb_mask;                                                          \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb);                                   \
| 1270 | |
/* Per-triangle block state for shaded, textured rendering: u/v/r/g/b
   accumulators, the per-pixel deltas (dx) plus the same deltas pre-scaled
   by 4 and 8 pixels, and the texture coordinate wrap mask loaded as a
   broadcast byte pair starting at texture_mask_width (presumably the
   adjacent height field supplies the second byte -- confirm against the
   struct layout). */
#define setup_blocks_variables_shaded_textured(target)                         \
  vec_4x32u u_block;                                                           \
  vec_4x32u v_block;                                                           \
  vec_4x32u r_block;                                                           \
  vec_4x32u g_block;                                                           \
  vec_4x32u b_block;                                                           \
  vec_4x32u uvrg_dx;                                                           \
  vec_4x32u uvrg_dx4;                                                          \
  vec_4x32u uvrg_dx8;                                                          \
  vec_4x32u uvrg;                                                              \
  vec_16x8u texture_mask;                                                      \
  vec_8x8u texture_mask_lo, texture_mask_hi;                                   \
  u32 b_dx = psx_gpu->b_block_span.e[1];                                       \
  u32 b_dx4 = b_dx << 2;                                                       \
  u32 b_dx8 = b_dx << 3;                                                       \
  u32 b;                                                                       \
                                                                               \
  gvld1q_u32(uvrg_dx, psx_gpu->uvrg_dx.e);                                     \
  gvshlq_n_u32(uvrg_dx4, uvrg_dx, 2);                                          \
  gvshlq_n_u32(uvrg_dx8, uvrg_dx, 3);                                          \
  gvld2_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width);   \
  gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi)                \
| 1293 | |
/* Per-triangle block state for shaded, untextured rendering: r/g/b
   accumulators, the rgb per-pixel deltas (r/g deltas taken from
   uvrg_dx[2..3], b delta from b_block_span) pre-scaled by 4 and 8 pixels,
   and byte constants used later for dithering and 15bpp packing. */
#define setup_blocks_variables_shaded_untextured(target)                       \
  vec_4x32u r_block;                                                           \
  vec_4x32u g_block;                                                           \
  vec_4x32u b_block;                                                           \
  vec_4x32u rgb_dx;                                                            \
  vec_2x32u rgb_dx_lo, rgb_dx_hi;                                              \
  vec_4x32u rgb_dx4;                                                           \
  vec_4x32u rgb_dx8;                                                           \
  vec_4x32u rgb;                                                               \
  vec_2x32u rgb_lo, rgb_hi;                                                    \
                                                                               \
  vec_8x8u d64_0x07;                                                           \
  vec_8x8u d64_1;                                                              \
  vec_8x8u d64_4;                                                              \
  vec_8x8u d64_128;                                                            \
                                                                               \
  gvdup_n_u8(d64_0x07, 0x07);                                                  \
  gvdup_n_u8(d64_1, 1);                                                        \
  gvdup_n_u8(d64_4, 4);                                                        \
  gvdup_n_u8(d64_128, 128);                                                    \
                                                                               \
  /* rgb_dx = { r_dx, g_dx, b_dx, 0 } */                                       \
  gvld1_u32(rgb_dx_lo, &psx_gpu->uvrg_dx.e[2]);                                \
  gvcreate_u32(rgb_dx_hi, psx_gpu->b_block_span.e[1], 0);                      \
  gvcombine_u32(rgb_dx, rgb_dx_lo, rgb_dx_hi);                                 \
  gvshlq_n_u32(rgb_dx4, rgb_dx, 2);                                            \
  gvshlq_n_u32(rgb_dx8, rgb_dx, 3)                                             \
| 1320 | |
/* Per-triangle block state for unshaded, textured rendering: only u/v
   accumulators and deltas are needed (no color interpolation), plus the
   texture coordinate wrap mask (see the shaded_textured variant for the
   gvld2_dup caveat). */
#define setup_blocks_variables_unshaded_textured(target)                       \
  vec_4x32u u_block;                                                           \
  vec_4x32u v_block;                                                           \
  vec_2x32u uv_dx;                                                             \
  vec_2x32u uv_dx4;                                                            \
  vec_2x32u uv_dx8;                                                            \
  vec_2x32u uv;                                                                \
  vec_16x8u texture_mask;                                                      \
  vec_8x8u texture_mask_lo, texture_mask_hi;                                   \
                                                                               \
  gvld1_u32(uv_dx, psx_gpu->uvrg_dx.e);                                        \
  gvld1_u32(uv, psx_gpu->uvrg.e);                                              \
  gvshl_n_u32(uv_dx4, uv_dx, 2);                                               \
  gvshl_n_u32(uv_dx8, uv_dx, 3);                                               \
  gvld2_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width);   \
  gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi)                \
| 1337 | |
/* Direct targets bake the mask MSB into the constant color up front;
   indirect targets leave it to later processing. */
#define setup_blocks_variables_unshaded_untextured_direct()                    \
  gvorrq(colors, colors, msb_mask)                                             \

#define setup_blocks_variables_unshaded_untextured_indirect()                  \

/* Per-triangle block state for unshaded, untextured (flat color)
   rendering: convert the 8-bit-per-component triangle color to a packed
   15bpp value (r in the low 5 bits, then g, then b) and broadcast it. */
#define setup_blocks_variables_unshaded_untextured(target)                     \
  u32 color = psx_gpu->triangle_color;                                         \
  vec_8x16u colors;                                                            \
                                                                               \
  u32 color_r = color & 0xFF;                                                  \
  u32 color_g = (color >> 8) & 0xFF;                                           \
  u32 color_b = (color >> 16) & 0xFF;                                          \
                                                                               \
  color = (color_r >> 3) | ((color_g >> 3) << 5) |                             \
   ((color_b >> 3) << 10);                                                     \
  gvdupq_n_u16(colors, color);                                                 \
  setup_blocks_variables_unshaded_untextured_##target()                        \
| 1355 | |
/* Widen/bias the rotated dither bytes per texturing mode: textured mode
   shifts them left by 4 into 16-bit lanes; untextured mode adds the +4
   bias as bytes (removed again after dithering, see the store macros). */
#define setup_blocks_span_initialize_dithered_textured()                       \
  vec_8x16u dither_offsets;                                                    \
  gvshll_n_s8(dither_offsets, dither_offsets_short, 4)                         \

#define setup_blocks_span_initialize_dithered_untextured()                     \
  vec_8x8u dither_offsets;                                                     \
  gvadd_u8(dither_offsets, dither_offsets_short, d64_4)                        \

/* Per-span dither setup: pick the dither table row for y & 3 and rotate
   it right so its first byte lines up with left_x & 3, then broadcast
   (the u32 row is replicated across the 8-byte vector). */
#define setup_blocks_span_initialize_dithered(texturing)                       \
  u32 dither_row = psx_gpu->dither_table[y & 0x3];                             \
  u32 dither_shift = (span_edge_data->left_x & 0x3) * 8;                       \
  vec_8x8s dither_offsets_short;                                               \
                                                                               \
  /* rotate right by dither_shift; the `& 31` keeps the left-shift count  */   \
  /* in range when dither_shift == 0 (a shift by 32 would be undefined    */   \
  /* behavior, C11 6.5.7p3), while preserving the result for all cases    */   \
  dither_row =                                                                 \
    (dither_row >> dither_shift) |                                             \
    (dither_row << ((32 - dither_shift) & 31));                                \
  gvdup_n_u32(dither_offsets_short, dither_row);                               \
  setup_blocks_span_initialize_dithered_##texturing()                          \

#define setup_blocks_span_initialize_undithered(texturing)                     \
| 1375 | |
/* Per-span start for shaded textured blocks: load the span's interpolants,
   advance them to the span's left edge (start + dx * left_x), then
   broadcast each component and add its 4-pixel block span offsets. */
#define setup_blocks_span_initialize_shaded_textured()                         \
{                                                                              \
  u32 left_x = span_edge_data->left_x;                                         \
  vec_4x32u block_span;                                                        \
  vec_4x32u v_left_x;                                                          \
                                                                               \
  gvld1q_u32(uvrg, span_uvrg_offset);                                          \
  gvdupq_n_u32(v_left_x, left_x);                                              \
  gvmlaq_u32(uvrg, uvrg_dx, v_left_x);                                         \
  b = *span_b_offset;                                                          \
  b += b_dx * left_x;                                                          \
                                                                               \
  gvdupq_l_u32(u_block, gvlo(uvrg), 0);                                        \
  gvdupq_l_u32(v_block, gvlo(uvrg), 1);                                        \
  gvdupq_l_u32(r_block, gvhi(uvrg), 0);                                        \
  gvdupq_l_u32(g_block, gvhi(uvrg), 1);                                        \
  gvdupq_n_u32(b_block, b);                                                    \
                                                                               \
  gvld1q_u32(block_span, psx_gpu->u_block_span.e);                             \
  gvaddq_u32(u_block, u_block, block_span);                                    \
  gvld1q_u32(block_span, psx_gpu->v_block_span.e);                             \
  gvaddq_u32(v_block, v_block, block_span);                                    \
  gvld1q_u32(block_span, psx_gpu->r_block_span.e);                             \
  gvaddq_u32(r_block, r_block, block_span);                                    \
  gvld1q_u32(block_span, psx_gpu->g_block_span.e);                             \
  gvaddq_u32(g_block, g_block, block_span);                                    \
  gvld1q_u32(block_span, psx_gpu->b_block_span.e);                             \
  gvaddq_u32(b_block, b_block, block_span);                                    \
}
| 1405 | |
/* Per-span start for shaded untextured blocks: build rgb = { r, g, b, 0 }
   from the r/g half of the stored uvrg quad plus the span's b value,
   advance by dx * left_x, then broadcast each component and add its
   4-pixel block span offsets. */
#define setup_blocks_span_initialize_shaded_untextured()                       \
{                                                                              \
  u32 left_x = span_edge_data->left_x;                                         \
  u32 *span_uvrg_offset_high = (u32 *)span_uvrg_offset + 2;                    \
  vec_4x32u block_span;                                                        \
  vec_4x32u v_left_x;                                                          \
                                                                               \
  gvld1_u32(rgb_lo, span_uvrg_offset_high);                                    \
  gvcreate_u32(rgb_hi, *span_b_offset, 0);                                     \
  gvcombine_u32(rgb, rgb_lo, rgb_hi);                                          \
  gvdupq_n_u32(v_left_x, left_x);                                              \
  gvmlaq_u32(rgb, rgb_dx, v_left_x);                                           \
                                                                               \
  gvdupq_l_u32(r_block, gvlo(rgb), 0);                                         \
  gvdupq_l_u32(g_block, gvlo(rgb), 1);                                         \
  gvdupq_l_u32(b_block, gvhi(rgb), 0);                                         \
                                                                               \
  gvld1q_u32(block_span, psx_gpu->r_block_span.e);                             \
  gvaddq_u32(r_block, r_block, block_span);                                    \
  gvld1q_u32(block_span, psx_gpu->g_block_span.e);                             \
  gvaddq_u32(g_block, g_block, block_span);                                    \
  gvld1q_u32(block_span, psx_gpu->b_block_span.e);                             \
  gvaddq_u32(b_block, b_block, block_span);                                    \
}                                                                              \
| 1430 | |
/* Per-span start for unshaded textured blocks: advance the span's u/v to
   the left edge and add the 4-pixel block span offsets; the flat-color
   unshaded untextured case needs no per-span work. */
#define setup_blocks_span_initialize_unshaded_textured()                       \
{                                                                              \
  u32 left_x = span_edge_data->left_x;                                         \
  vec_4x32u block_span;                                                        \
  vec_2x32u v_left_x;                                                          \
                                                                               \
  gvld1_u32(uv, span_uvrg_offset);                                             \
  gvdup_n_u32(v_left_x, left_x);                                               \
  gvmla_u32(uv, uv_dx, v_left_x);                                              \
                                                                               \
  gvdupq_l_u32(u_block, uv, 0);                                                \
  gvdupq_l_u32(v_block, uv, 1);                                                \
                                                                               \
  gvld1q_u32(block_span, psx_gpu->u_block_span.e);                             \
  gvaddq_u32(u_block, u_block, block_span);                                    \
  gvld1q_u32(block_span, psx_gpu->v_block_span.e);                             \
  gvaddq_u32(v_block, v_block, block_span);                                    \
}                                                                              \

#define setup_blocks_span_initialize_unshaded_untextured()                     \
| 1451 | |
/* Swizzle 4-bit texture coordinates using shift-insert ops:
   u' = (v << 4) | (u & 0x0F), v' = (v & 0xF0) | (u >> 4) -- presumably
   matching a 4bpp texture cache layout.  The unswizzled variant is a
   no-op. */
#define setup_blocks_texture_swizzled()                                        \
{                                                                              \
  vec_8x8u u_saved = u;                                                        \
  gvsli_n_u8(u, v, 4);                                                         \
  gvsri_n_u8(v, u_saved, 4);                                                   \
}                                                                              \

#define setup_blocks_texture_unswizzled()                                      \
| 1460 | |
/* Emit one 8-pixel block of shaded textured state: derive 8 per-pixel
   u/v/r/g/b values -- pixels 0..3 from the current accumulators' integer
   parts, pixels 4..7 via add-high-narrow with the 4-pixel delta -- then
   advance the accumulators by 8 pixels, wrap u/v with the texture mask
   and write everything (plus the dither offsets and fb pointer) into the
   block record for later texel fetch/blending. */
#define setup_blocks_store_shaded_textured(swizzling, dithering, target,       \
 edge_type)                                                                    \
{                                                                              \
  vec_8x16u u_whole;                                                           \
  vec_8x16u v_whole;                                                           \
  vec_8x16u r_whole;                                                           \
  vec_8x16u g_whole;                                                           \
  vec_8x16u b_whole;                                                           \
  vec_4x16u u_whole_lo, u_whole_hi;                                            \
  vec_4x16u v_whole_lo, v_whole_hi;                                            \
  vec_4x16u r_whole_lo, r_whole_hi;                                            \
  vec_4x16u g_whole_lo, g_whole_hi;                                            \
  vec_4x16u b_whole_lo, b_whole_hi;                                            \
                                                                               \
  vec_8x8u u;                                                                  \
  vec_8x8u v;                                                                  \
  vec_8x8u r;                                                                  \
  vec_8x8u g;                                                                  \
  vec_8x8u b;                                                                  \
                                                                               \
  vec_4x32u dx4;                                                               \
  vec_4x32u dx8;                                                               \
                                                                               \
  /* pixels 0..3: integer part of the accumulators */                          \
  gvshrn_n_u32(u_whole_lo, u_block, 16);                                       \
  gvshrn_n_u32(v_whole_lo, v_block, 16);                                       \
  gvshrn_n_u32(r_whole_lo, r_block, 16);                                       \
  gvshrn_n_u32(g_whole_lo, g_block, 16);                                       \
  gvshrn_n_u32(b_whole_lo, b_block, 16);                                       \
                                                                               \
  /* pixels 4..7: accumulators + 4-pixel delta, high half only */              \
  gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 0);                                        \
  gvaddhn_u32(u_whole_hi, u_block, dx4);                                       \
  gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 1);                                        \
  gvaddhn_u32(v_whole_hi, v_block, dx4);                                       \
  gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 0);                                        \
  gvaddhn_u32(r_whole_hi, r_block, dx4);                                       \
  gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 1);                                        \
  gvaddhn_u32(g_whole_hi, g_block, dx4);                                       \
  gvdupq_n_u32(dx4, b_dx4);                                                    \
  gvaddhn_u32(b_whole_hi, b_block, dx4);                                       \
                                                                               \
  gvcombine_u16(u_whole, u_whole_lo, u_whole_hi);                              \
  gvcombine_u16(v_whole, v_whole_lo, v_whole_hi);                              \
  gvcombine_u16(r_whole, r_whole_lo, r_whole_hi);                              \
  gvcombine_u16(g_whole, g_whole_lo, g_whole_hi);                              \
  gvcombine_u16(b_whole, b_whole_lo, b_whole_hi);                              \
  gvmovn_u16(u, u_whole);                                                      \
  gvmovn_u16(v, v_whole);                                                      \
  gvmovn_u16(r, r_whole);                                                      \
  gvmovn_u16(g, g_whole);                                                      \
  gvmovn_u16(b, b_whole);                                                      \
                                                                               \
  /* advance all accumulators by 8 pixels */                                   \
  gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 0);                                        \
  gvaddq_u32(u_block, u_block, dx8);                                           \
  gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 1);                                        \
  gvaddq_u32(v_block, v_block, dx8);                                           \
  gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 0);                                        \
  gvaddq_u32(r_block, r_block, dx8);                                           \
  gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 1);                                        \
  gvaddq_u32(g_block, g_block, dx8);                                           \
  gvdupq_n_u32(dx8, b_dx8);                                                    \
  gvaddq_u32(b_block, b_block, dx8);                                           \
                                                                               \
  gvand(u, u, gvlo(texture_mask));                                             \
  gvand(v, v, gvhi(texture_mask));                                             \
  setup_blocks_texture_##swizzling();                                          \
                                                                               \
  gvst2_u8(u, v, (u8 *)block->uv.e);                                           \
  gvst1_u8(r, block->r.e);                                                     \
  gvst1_u8(g, block->g.e);                                                     \
  gvst1_u8(b, block->b.e);                                                     \
  gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e);                  \
  block->fb_ptr = fb_ptr;                                                      \
}                                                                              \
| 1534 | |
/* Emit one 8-pixel block of unshaded textured state: like the shaded
   variant but only u/v are interpolated (pixels 0..3 from the current
   accumulators, pixels 4..7 via add-high-narrow with the 4-pixel delta),
   the accumulators advance by 8 pixels, and u/v are wrapped with the
   texture mask before being stored in the block record. */
#define setup_blocks_store_unshaded_textured(swizzling, dithering, target,     \
 edge_type)                                                                    \
{                                                                              \
  vec_8x16u u_whole;                                                           \
  vec_8x16u v_whole;                                                           \
  vec_4x16u u_whole_lo, u_whole_hi;                                            \
  vec_4x16u v_whole_lo, v_whole_hi;                                            \
                                                                               \
  vec_8x8u u;                                                                  \
  vec_8x8u v;                                                                  \
                                                                               \
  vec_4x32u dx4;                                                               \
  vec_4x32u dx8;                                                               \
                                                                               \
  gvshrn_n_u32(u_whole_lo, u_block, 16);                                       \
  gvshrn_n_u32(v_whole_lo, v_block, 16);                                       \
                                                                               \
  gvdupq_l_u32(dx4, uv_dx4, 0);                                                \
  gvaddhn_u32(u_whole_hi, u_block, dx4);                                       \
  gvdupq_l_u32(dx4, uv_dx4, 1);                                                \
  gvaddhn_u32(v_whole_hi, v_block, dx4);                                       \
                                                                               \
  gvcombine_u16(u_whole, u_whole_lo, u_whole_hi);                              \
  gvcombine_u16(v_whole, v_whole_lo, v_whole_hi);                              \
  gvmovn_u16(u, u_whole);                                                      \
  gvmovn_u16(v, v_whole);                                                      \
                                                                               \
  gvdupq_l_u32(dx8, uv_dx8, 0);                                                \
  gvaddq_u32(u_block, u_block, dx8);                                           \
  gvdupq_l_u32(dx8, uv_dx8, 1);                                                \
  gvaddq_u32(v_block, v_block, dx8);                                           \
                                                                               \
  gvand(u, u, gvlo(texture_mask));                                             \
  gvand(v, v, gvhi(texture_mask));                                             \
  setup_blocks_texture_##swizzling();                                          \
                                                                               \
  gvst2_u8(u, v, (u8 *)block->uv.e);                                           \
  gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e);                  \
  block->fb_ptr = fb_ptr;                                                      \
}                                                                              \
| 1575 | |
/* Apply dithering to the 8 r/g/b bytes: saturating add of the +4-biased
   dither offsets (bias applied in span init), then saturating subtract of
   the bias so the net offset is signed without wrapping. */
#define setup_blocks_store_shaded_untextured_dithered()                        \
  gvqadd_u8(r, r, dither_offsets);                                             \
  gvqadd_u8(g, g, dither_offsets);                                             \
  gvqadd_u8(b, b, dither_offsets);                                             \
                                                                               \
  gvqsub_u8(r, r, d64_4);                                                      \
  gvqsub_u8(g, g, d64_4);                                                      \
  gvqsub_u8(b, b, d64_4)                                                       \

#define setup_blocks_store_shaded_untextured_undithered()                      \

/* Indirect targets stash the packed pixels in the block record for later
   processing (full and edge cases are identical here). */
#define setup_blocks_store_untextured_pixels_indirect_full(_pixels)            \
  gvst1q_u16(_pixels, block->pixels.e);                                        \
  block->fb_ptr = fb_ptr                                                       \

#define setup_blocks_store_untextured_pixels_indirect_edge(_pixels)            \
  gvst1q_u16(_pixels, block->pixels.e);                                        \
  block->fb_ptr = fb_ptr                                                       \

/* Seed the 16-bit pixel lanes with the red bytes widened via a multiply
   by 1 (g and b are merged in afterwards with further widening MACs). */
#define setup_blocks_store_shaded_untextured_seed_pixels_indirect()            \
  gvmull_u8(pixels, r, d64_1)                                                  \

#define setup_blocks_store_untextured_pixels_direct_full(_pixels)              \
  gvst1q_u16(_pixels, fb_ptr)                                                  \

/* Direct edge store: merge the new pixels into the framebuffer under a
   per-pixel mask expanded from the span's right_mask bits via test_mask. */
#define setup_blocks_store_untextured_pixels_direct_edge(_pixels)              \
{                                                                              \
  vec_8x16u fb_pixels;                                                         \
  vec_8x16u draw_mask;                                                         \
  vec_8x16u test_mask;                                                         \
                                                                               \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e);                                 \
  gvld1q_u16(fb_pixels, fb_ptr);                                               \
  gvdupq_n_u16(draw_mask, span_edge_data->right_mask);                         \
  gvtstq_u16(draw_mask, draw_mask, test_mask);                                 \
  gvbifq(fb_pixels, _pixels, draw_mask);                                       \
  gvst1q_u16(fb_pixels, fb_ptr);                                               \
}                                                                              \

/* Direct variant additionally starts from msb_mask so the mask MSB ends
   up set in every written pixel. */
#define setup_blocks_store_shaded_untextured_seed_pixels_direct()              \
  pixels = msb_mask;                                                           \
  gvmlal_u8(pixels, r, d64_1)                                                  \
| 1618 | |
/* Produce one block (8 pixels) of Gouraud-shaded, untextured pixels.
 * r_block/g_block/b_block hold per-pixel color accumulators (apparently
 * 16.16 fixed point: the integer part is taken with a narrowing shift by
 * 16 -- TODO(review): confirm format at the setup_blocks_variables_* site).
 * The low 4 pixels come straight from the accumulators; the high 4 are the
 * accumulators advanced by the 4-pixel step rgb_dx4 (add + take high half).
 * Afterwards the accumulators are advanced by the 8-pixel step rgb_dx8.
 * Colors are then optionally dithered, reduced to 5 bits per component
 * (r>>3; g,b keep their top 5 bits via bic 0x07), and packed into 15-bit
 * BGR via widening multiply-accumulate by 1/4/128 (i.e. shifts 0/2/7 on
 * byte lanes of the 16-bit accumulator). */
#define setup_blocks_store_shaded_untextured(swizzling, dithering, target, \
  edge_type) \
{ \
  vec_8x16u r_whole; \
  vec_8x16u g_whole; \
  vec_8x16u b_whole; \
  vec_4x16u r_whole_lo, r_whole_hi; \
  vec_4x16u g_whole_lo, g_whole_hi; \
  vec_4x16u b_whole_lo, b_whole_hi; \
 \
  vec_8x8u r; \
  vec_8x8u g; \
  vec_8x8u b; \
 \
  vec_4x32u dx4; \
  vec_4x32u dx8; \
 \
  vec_8x16u pixels; \
 \
  gvshrn_n_u32(r_whole_lo, r_block, 16); \
  gvshrn_n_u32(g_whole_lo, g_block, 16); \
  gvshrn_n_u32(b_whole_lo, b_block, 16); \
 \
  gvdupq_l_u32(dx4, gvlo(rgb_dx4), 0); \
  gvaddhn_u32(r_whole_hi, r_block, dx4); \
  gvdupq_l_u32(dx4, gvlo(rgb_dx4), 1); \
  gvaddhn_u32(g_whole_hi, g_block, dx4); \
  gvdupq_l_u32(dx4, gvhi(rgb_dx4), 0); \
  gvaddhn_u32(b_whole_hi, b_block, dx4); \
 \
  gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \
  gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \
  gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \
  gvmovn_u16(r, r_whole); \
  gvmovn_u16(g, g_whole); \
  gvmovn_u16(b, b_whole); \
 \
  gvdupq_l_u32(dx8, gvlo(rgb_dx8), 0); \
  gvaddq_u32(r_block, r_block, dx8); \
  gvdupq_l_u32(dx8, gvlo(rgb_dx8), 1); \
  gvaddq_u32(g_block, g_block, dx8); \
  gvdupq_l_u32(dx8, gvhi(rgb_dx8), 0); \
  gvaddq_u32(b_block, b_block, dx8); \
 \
  setup_blocks_store_shaded_untextured_##dithering(); \
 \
  gvshr_n_u8(r, r, 3); \
  gvbic(g, g, d64_0x07); \
  gvbic(b, b, d64_0x07); \
 \
  setup_blocks_store_shaded_untextured_seed_pixels_##target(); \
  gvmlal_u8(pixels, g, d64_4); \
  gvmlal_u8(pixels, b, d64_128); \
 \
  setup_blocks_store_untextured_pixels_##target##_##edge_type(pixels); \
} \
| 1675 | |
/* Unshaded untextured: every pixel is the precomputed flat color vector
 * 'colors', so storing a block is just the target-specific pixel store. */
#define setup_blocks_store_unshaded_untextured(swizzling, dithering, target, \
  edge_type) \
  setup_blocks_store_untextured_pixels_##target##_##edge_type(colors) \

/* Textured indirect blocks keep the raw edge-mask bits; they are expanded
 * into a per-lane mask during the shade pass. */
#define setup_blocks_store_draw_mask_textured_indirect(_block, bits) \
  (_block)->draw_mask_bits = bits \

/* Untextured indirect blocks store the already-expanded per-lane mask:
 * each of the 8 mask bits is broadcast/tested against test_mask to yield
 * all-ones (skip) or all-zeros (draw) 16-bit lanes. */
#define setup_blocks_store_draw_mask_untextured_indirect(_block, bits) \
{ \
  vec_8x16u bits_mask; \
  vec_8x16u test_mask; \
 \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  gvdupq_n_u16(bits_mask, bits); \
  gvtstq_u16(bits_mask, bits_mask, test_mask); \
  gvst1q_u16(bits_mask, (_block)->draw_mask.e); \
} \

/* Direct rendering applies edge masks at store time; nothing to record. */
#define setup_blocks_store_draw_mask_untextured_direct(_block, bits) \

| 1695 | |
/* Reserve span_num_blocks block slots for indirect rendering.  If the
 * buffer would overflow MAX_BLOCKS, flush what was queued so far (the
 * count excludes the current span, which has not been written yet) and
 * restart the current span at the beginning of the buffer. */
#define setup_blocks_add_blocks_indirect() \
  num_blocks += span_num_blocks; \
 \
  if(num_blocks > MAX_BLOCKS) \
  { \
    psx_gpu->num_blocks = num_blocks - span_num_blocks; \
    flush_render_block_buffer(psx_gpu); \
    num_blocks = span_num_blocks; \
    block = psx_gpu->blocks; \
  } \

/* Direct rendering writes straight to VRAM; no block queue to manage. */
#define setup_blocks_add_blocks_direct() \

| 1708 | |
/* Main span-walking skeleton shared by all setup_blocks_* entry points.
 * For each rasterized span, convert it into 8-pixel blocks: all blocks but
 * the last are "full"; the last is an "edge" block whose partial coverage
 * is described by span_edge_data->right_mask (one skip-bit per pixel).
 * The token-pasted helpers select per-variant behavior: color/uv variable
 * setup, dither setup, block-buffer management and the actual stores.
 * The framebuffer stride is 1024 halfwords (one PSX VRAM line). */
#define setup_blocks_do(shading, texturing, dithering, sw, target) \
  setup_blocks_load_msb_mask_##target(); \
  setup_blocks_variables_##shading##_##texturing(target); \
 \
  edge_data_struct *span_edge_data = psx_gpu->span_edge_data; \
  vec_4x32u *span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \
  u32 *span_b_offset = psx_gpu->span_b_offset; \
 \
  block_struct *block = psx_gpu->blocks + psx_gpu->num_blocks; \
 \
  u32 num_spans = psx_gpu->num_spans; \
 \
  u16 *fb_ptr; \
  u32 y; \
 \
  u32 num_blocks = psx_gpu->num_blocks; \
  u32 span_num_blocks; \
 \
  while(num_spans) \
  { \
    span_num_blocks = span_edge_data->num_blocks; \
    if(span_num_blocks) \
    { \
      y = span_edge_data->y; \
      fb_ptr = psx_gpu->vram_out_ptr + span_edge_data->left_x + (y * 1024); \
 \
      setup_blocks_span_initialize_##shading##_##texturing(); \
      setup_blocks_span_initialize_##dithering(texturing); \
 \
      setup_blocks_add_blocks_##target(); \
 \
      /* exact pixel count of the span; presumably consumed by some \
         setup_blocks_store_* expansions -- unused in others */ \
      s32 pixel_span = span_num_blocks * 8; \
      pixel_span -= __builtin_popcount(span_edge_data->right_mask & 0xFF); \
 \
      span_num_blocks--; \
      while(span_num_blocks) \
      { \
        setup_blocks_store_##shading##_##texturing(sw, dithering, target, \
          full); \
        setup_blocks_store_draw_mask_##texturing##_##target(block, 0x00); \
 \
        fb_ptr += 8; \
        block++; \
        span_num_blocks--; \
      } \
 \
      setup_blocks_store_##shading##_##texturing(sw, dithering, target, edge); \
      setup_blocks_store_draw_mask_##texturing##_##target(block, \
        span_edge_data->right_mask); \
 \
      block++; \
    } \
 \
    num_spans--; \
    span_edge_data++; \
    span_uvrg_offset++; \
    span_b_offset++; \
  } \
 \
  psx_gpu->num_blocks = num_blocks \
| 1769 | |
/* Concrete entry points instantiating setup_blocks_do() for each supported
 * combination of shading / texturing / dithering / swizzling / target.
 * The #if 0 bodies redirect to the underscore-suffixed alternative
 * implementations (presumably the hand-written assembly versions -- kept
 * for debugging/A-B comparison; flip to #if 1 to use them). */
void setup_blocks_shaded_textured_dithered_swizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_textured_dithered_swizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, textured, dithered, swizzled, indirect);
}

void setup_blocks_shaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_textured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, textured, dithered, unswizzled, indirect);
}

void setup_blocks_unshaded_textured_dithered_swizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_unshaded_textured_dithered_swizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, textured, dithered, swizzled, indirect);
}

void setup_blocks_unshaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_unshaded_textured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, textured, dithered, unswizzled, indirect);
}

void setup_blocks_unshaded_untextured_undithered_unswizzled_indirect(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_unshaded_untextured_undithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, untextured, undithered, unswizzled, indirect);
}

void setup_blocks_unshaded_untextured_undithered_unswizzled_direct(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_unshaded_untextured_undithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, untextured, undithered, unswizzled, direct);
}

void setup_blocks_shaded_untextured_undithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_undithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, undithered, unswizzled, indirect);
}

void setup_blocks_shaded_untextured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, dithered, unswizzled, indirect);
}

void setup_blocks_shaded_untextured_undithered_unswizzled_direct(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_undithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, undithered, unswizzled, direct);
}

void setup_blocks_shaded_untextured_dithered_unswizzled_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_dithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, dithered, unswizzled, direct);
}
| 1869 | |
/* Rebuild the 4bpp texture cache page from VRAM.
 * Walks the current texture page as 16x16 tiles; each inner iteration
 * consumes two VRAM rows of 8 bytes (16 packed 4-bit indices each) and
 * unpacks every byte 0xXY into the halfword 0x0X0Y (low nibble in the low
 * byte, high nibble in the high byte):
 *   widen (movl)      : 0xXY -> 0x00XY, bic 0x00f0 -> 0x000Y
 *   widen-shift-4     : 0xXY -> 0x0XY0, bic 0x00f0 -> 0x0X00
 *   orr               : 0x0X0Y
 * so the cache holds one byte per texel, ready for byte-indexed lookup. */
static void update_texture_4bpp_cache(psx_gpu_struct *psx_gpu)
{
  u32 current_texture_page = psx_gpu->current_texture_page;
  u8 *texture_page_ptr = psx_gpu->texture_page_base;
  const u16 *vram_ptr = psx_gpu->vram_ptr;
  u32 tile_x, tile_y;
  u32 sub_y;
  vec_8x16u c_0x00f0;

  /* texture pages are arranged 16 per VRAM row group: bits [7:4] select
     the 256-line group, bits [3:0] the 64-halfword column */
  vram_ptr += (current_texture_page >> 4) * 256 * 1024;
  vram_ptr += (current_texture_page & 0xF) * 64;

  gvdupq_n_u16(c_0x00f0, 0x00f0);

  psx_gpu->dirty_textures_4bpp_mask &= ~(psx_gpu->current_texture_mask);

  for (tile_y = 16; tile_y; tile_y--)
  {
    for (tile_x = 16; tile_x; tile_x--)
    {
      for (sub_y = 8; sub_y; sub_y--)
      {
        vec_8x8u texel_block_a, texel_block_b;
        vec_8x16u texel_block_expanded_a, texel_block_expanded_b;
        vec_8x16u texel_block_expanded_c, texel_block_expanded_d;
        vec_8x16u texel_block_expanded_ab, texel_block_expanded_cd;

        /* two adjacent VRAM lines (stride 1024 halfwords) per iteration */
        gvld1_u8(texel_block_a, (u8 *)vram_ptr); vram_ptr += 1024;
        gvld1_u8(texel_block_b, (u8 *)vram_ptr); vram_ptr += 1024;

        gvmovl_u8(texel_block_expanded_a, texel_block_a);
        gvshll_n_u8(texel_block_expanded_b, texel_block_a, 4);
        gvmovl_u8(texel_block_expanded_c, texel_block_b);
        gvshll_n_u8(texel_block_expanded_d, texel_block_b, 4);

        gvbicq(texel_block_expanded_a, texel_block_expanded_a, c_0x00f0);
        gvbicq(texel_block_expanded_b, texel_block_expanded_b, c_0x00f0);
        gvbicq(texel_block_expanded_c, texel_block_expanded_c, c_0x00f0);
        gvbicq(texel_block_expanded_d, texel_block_expanded_d, c_0x00f0);

        gvorrq(texel_block_expanded_ab, texel_block_expanded_a, texel_block_expanded_b);
        gvorrq(texel_block_expanded_cd, texel_block_expanded_c, texel_block_expanded_d);

        gvst1q_2_pi_u32(texel_block_expanded_ab, texel_block_expanded_cd, texture_page_ptr);
      }

      /* rewind the 16 lines just consumed, step 4 halfwords right */
      vram_ptr -= (1024 * 16) - 4;
    }

    /* undo the 16 tile steps, move down 16 lines */
    vram_ptr += (16 * 1024) - (4 * 16);
  }
}
| 1922 | |
/* Copy one 8bpp texture page slice from VRAM into the linear texture
 * cache.  8bpp texels are bytes in VRAM, so this is a straight block copy:
 * 16 rows of tiles, 8 tiles across, 4 iterations of 4 VRAM lines each
 * (64 bytes per line chunk).  An odd page number lands in the second half
 * of the cache page (two adjacent pages share a cache entry -- presumably
 * because an 8bpp page spans two page columns; TODO confirm). */
void update_texture_8bpp_cache_slice(psx_gpu_struct *psx_gpu,
 u32 texture_page)
{
#if 0
  update_texture_8bpp_cache_slice_(psx_gpu, texture_page);
  return;
#endif
  u16 *texture_page_ptr = psx_gpu->texture_page_base;
  u16 *vram_ptr = psx_gpu->vram_ptr;

  u32 tile_x, tile_y;
  u32 sub_y;

  /* page -> VRAM coordinates: bits [7:4] select the 256-line group,
     bits [3:0] the 64-halfword column */
  vram_ptr += (texture_page >> 4) * 256 * 1024;
  vram_ptr += (texture_page & 0xF) * 64;

  if((texture_page ^ psx_gpu->current_texture_page) & 0x1)
    texture_page_ptr += (8 * 16) * 8;

  for (tile_y = 16; tile_y; tile_y--)
  {
    for (tile_x = 8; tile_x; tile_x--)
    {
      for (sub_y = 4; sub_y; sub_y--)
      {
        /* = {} on the last vector quiets maybe-uninitialized warnings */
        vec_4x32u texels_a, texels_b, texels_c, texels_d = {};
        gvld1q_u32(texels_a, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_b, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_c, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_d, vram_ptr); vram_ptr += 1024;

        gvst1q_2_pi_u32(texels_a, texels_b, texture_page_ptr);
        gvst1q_2_pi_u32(texels_c, texels_d, texture_page_ptr);
      }

      /* rewind the 16 lines, advance 8 halfwords to the next tile */
      vram_ptr -= (1024 * 16) - 8;
    }

    /* back to the row start, then down 16 VRAM lines */
    vram_ptr -= (8 * 8);
    vram_ptr += (16 * 1024);

    texture_page_ptr += (8 * 16) * 8;
  }
}
| 1967 | |
/* Untextured primitives have no texels to fetch; intentionally a no-op so
 * the texture_blocks dispatch table has a uniform shape. */
void texture_blocks_untextured(psx_gpu_struct *psx_gpu)
{
}
| 1971 | |
/* Fetch and palette-translate texels for all queued blocks, 4bpp mode.
 * The CLUT (16 entries of u16) is loaded de-interleaved: gvld2q_u8 splits
 * it into the low bytes (clut_low) and high bytes (clut_high) so each can
 * be used as a 16-byte table for gvtbl2 lookups.  Per block, the 8 packed
 * uv values gather 8 palette indices from the nibble-expanded 4bpp cache
 * (see update_texture_4bpp_cache), the two table lookups produce the low
 * and high result bytes, and gvst2 re-interleaves them into 16-bit texels. */
void texture_blocks_4bpp(psx_gpu_struct *psx_gpu)
{
#if 0
  texture_blocks_4bpp_(psx_gpu);
  return;
#endif
  block_struct *block = psx_gpu->blocks;
  u32 num_blocks = psx_gpu->num_blocks;

  vec_8x8u texels_low;
  vec_8x8u texels_high;

  vec_16x8u clut_low;
  vec_16x8u clut_high;

  const u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr;

  gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr);

  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask)
    update_texture_4bpp_cache(psx_gpu);

  while(num_blocks)
  {
    /* scalar gather: one byte (palette index) per pixel */
    vec_8x8u texels =
    {
      .u8 =
      {
        texture_ptr_8bpp[block->uv.e[0]],
        texture_ptr_8bpp[block->uv.e[1]],
        texture_ptr_8bpp[block->uv.e[2]],
        texture_ptr_8bpp[block->uv.e[3]],
        texture_ptr_8bpp[block->uv.e[4]],
        texture_ptr_8bpp[block->uv.e[5]],
        texture_ptr_8bpp[block->uv.e[6]],
        texture_ptr_8bpp[block->uv.e[7]]
      }
    };

    gvtbl2_u8(texels_low, clut_low, texels);
    gvtbl2_u8(texels_high, clut_high, texels);

    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e);

    num_blocks--;
    block++;
  }
}
| 2020 | |
| 2021 | void texture_blocks_8bpp(psx_gpu_struct *psx_gpu) |
| 2022 | { |
| 2023 | #if 0 |
| 2024 | texture_blocks_8bpp_(psx_gpu); |
| 2025 | return; |
| 2026 | #endif |
| 2027 | u32 num_blocks = psx_gpu->num_blocks; |
| 2028 | |
| 2029 | if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) |
| 2030 | update_texture_8bpp_cache(psx_gpu); |
| 2031 | |
| 2032 | const u8 * __restrict__ texture_ptr_8bpp = psx_gpu->texture_page_ptr; |
| 2033 | const u16 * __restrict__ clut_ptr = psx_gpu->clut_ptr; |
| 2034 | block_struct * __restrict__ block = psx_gpu->blocks; |
| 2035 | |
| 2036 | while(num_blocks) |
| 2037 | { |
| 2038 | u16 offset; |
| 2039 | #define load_one(i_) \ |
| 2040 | offset = block->uv.e[i_]; u16 texel##i_ = texture_ptr_8bpp[offset] |
| 2041 | #define store_one(i_) \ |
| 2042 | block->texels.e[i_] = clut_ptr[texel##i_] |
| 2043 | load_one(0); load_one(1); load_one(2); load_one(3); |
| 2044 | load_one(4); load_one(5); load_one(6); load_one(7); |
| 2045 | store_one(0); store_one(1); store_one(2); store_one(3); |
| 2046 | store_one(4); store_one(5); store_one(6); store_one(7); |
| 2047 | #undef load_one |
| 2048 | #undef store_one |
| 2049 | |
| 2050 | num_blocks--; |
| 2051 | block++; |
| 2052 | } |
| 2053 | } |
| 2054 | |
| 2055 | void texture_blocks_16bpp(psx_gpu_struct *psx_gpu) |
| 2056 | { |
| 2057 | #if 0 |
| 2058 | texture_blocks_16bpp_(psx_gpu); |
| 2059 | return; |
| 2060 | #endif |
| 2061 | u32 num_blocks = psx_gpu->num_blocks; |
| 2062 | const u16 * __restrict__ texture_ptr_16bpp = psx_gpu->texture_page_ptr; |
| 2063 | block_struct * __restrict__ block = psx_gpu->blocks; |
| 2064 | |
| 2065 | while(num_blocks) |
| 2066 | { |
| 2067 | u32 offset; |
| 2068 | #define load_one(i_) \ |
| 2069 | offset = block->uv.e[i_]; \ |
| 2070 | offset += ((offset & 0xFF00) * 3); \ |
| 2071 | u16 texel##i_ = texture_ptr_16bpp[offset] |
| 2072 | #define store_one(i_) \ |
| 2073 | block->texels.e[i_] = texel##i_ |
| 2074 | load_one(0); load_one(1); load_one(2); load_one(3); |
| 2075 | load_one(4); load_one(5); load_one(6); load_one(7); |
| 2076 | store_one(0); store_one(1); store_one(2); store_one(3); |
| 2077 | store_one(4); store_one(5); store_one(6); store_one(7); |
| 2078 | #undef load_one |
| 2079 | #undef store_one |
| 2080 | |
| 2081 | num_blocks--; |
| 2082 | block++; |
| 2083 | } |
| 2084 | } |
| 2085 | |
/* Target helpers for the shade_blocks_* loops.
 * indirect: shaded pixels and their draw mask go back into the block list
 * for a later blend pass -- no mask-msb handling needed yet.
 * direct:   pixels are committed to the framebuffer now, so the mask msb
 * (force-set mask bit, psx_gpu->mask_msb) is OR'd in and gvbifq keeps the
 * existing framebuffer value in lanes whose draw-mask bit is set. */
#define shade_blocks_load_msb_mask_indirect() \

#define shade_blocks_load_msb_mask_direct() \
  vec_8x16u msb_mask; \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \

#define shade_blocks_store_indirect(_draw_mask, _pixels) \
  gvst1q_u16(_draw_mask, block->draw_mask.e); \
  gvst1q_u16(_pixels, block->pixels.e); \

#define shade_blocks_store_direct(_draw_mask, _pixels) \
{ \
  vec_8x16u fb_pixels; \
  gvorrq(_pixels, _pixels, msb_mask); \
  gvld1q_u16(fb_pixels, block->fb_ptr); \
  gvbifq(fb_pixels, _pixels, _draw_mask); \
  gvst1q_u16(fb_pixels, block->fb_ptr); \
} \

| 2104 | |
/* Helpers for the modulated (texel * vertex color) shading loop.
 * An unshaded primitive colored 0x808080 modulates by exactly 1.0, so the
 * undithered case short-circuits to the cheaper unmodulated path; when
 * dithering is on, the dither offsets still have to be applied, so no
 * shortcut is taken. */
#define shade_blocks_textured_false_modulated_check_dithered(target) \

#define shade_blocks_textured_false_modulated_check_undithered(target) \
  if(psx_gpu->triangle_color == 0x808080) \
  { \
    shade_blocks_textured_unmodulated_##target(psx_gpu); \
    return; \
  } \

/* shaded primitives get per-block colors (loaded inside the loop below); \
   nothing to do once per primitive */
#define shade_blocks_textured_modulated_shaded_primitive_load(dithering, \
  target) \

/* unshaded: broadcast the flat triangle color (0x00BBGGRR) once */
#define shade_blocks_textured_modulated_unshaded_primitive_load(dithering, \
  target) \
{ \
  u32 color = psx_gpu->triangle_color; \
  gvdup_n_u8(colors_r, color); \
  gvdup_n_u8(colors_g, color >> 8); \
  gvdup_n_u8(colors_b, color >> 16); \
  shade_blocks_textured_false_modulated_check_##dithering(target); \
} \

/* shaded: per-pixel interpolated colors stored with the block */
#define shade_blocks_textured_modulated_shaded_block_load() \
  gvld1_u8(colors_r, block->r.e); \
  gvld1_u8(colors_g, block->g.e); \
  gvld1_u8(colors_b, block->b.e) \

#define shade_blocks_textured_modulated_unshaded_block_load() \

/* modulate one component: widening texel*color multiply; the dithered
 * variant accumulates onto the block's preloaded dither offsets instead
 * of starting from zero */
#define shade_blocks_textured_modulate_dithered(component) \
  gvld1q_u16(pixels_##component, block->dither_offsets.e); \
  gvmlal_u8(pixels_##component, texels_##component, colors_##component) \

#define shade_blocks_textured_modulate_undithered(component) \
  gvmull_u8(pixels_##component, texels_##component, colors_##component) \

| 2140 | |
/* Core loop for modulated textured shading: for each queued block, unpack
 * the 15-bit texels into 5-bit r/g/b, multiply by the (flat or per-pixel)
 * color bytes, saturate back to 5 bits and repack, preserving texel bit 15
 * (the blend/mask flag).  Texels equal to 0x0000 are fully transparent:
 * gvceqzq folds them into the draw mask so they are never written. */
#define shade_blocks_textured_modulated_do(shading, dithering, target) \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u texels; \
 \
  vec_8x8u texels_r; \
  vec_8x8u texels_g; \
  vec_8x8u texels_b; \
 \
  vec_8x8u colors_r; \
  vec_8x8u colors_g; \
  vec_8x8u colors_b; \
 \
  vec_8x8u pixels_r_low; \
  vec_8x8u pixels_g_low; \
  vec_8x8u pixels_b_low; \
  vec_8x16u pixels; \
 \
  vec_8x16u pixels_r; \
  vec_8x16u pixels_g; \
  vec_8x16u pixels_b; \
 \
  vec_8x16u draw_mask; \
  vec_8x16u zero_mask; \
 \
  vec_8x8u d64_0x07; \
  vec_8x8u d64_0x1F; \
  vec_8x8u d64_1; \
  vec_8x8u d64_4; \
  vec_8x8u d64_128; \
 \
  vec_8x16u d128_0x8000; \
 \
  vec_8x16u test_mask; \
  u32 draw_mask_bits; \
 \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  shade_blocks_load_msb_mask_##target(); \
 \
  gvdup_n_u8(d64_0x07, 0x07); \
  gvdup_n_u8(d64_0x1F, 0x1F); \
  gvdup_n_u8(d64_1, 1); \
  gvdup_n_u8(d64_4, 4); \
  gvdup_n_u8(d64_128, 128); \
 \
  gvdupq_n_u16(d128_0x8000, 0x8000); \
 \
  shade_blocks_textured_modulated_##shading##_primitive_load(dithering, \
    target); \
 \
  while(num_blocks) \
  { \
    /* expand the block's 8 edge-mask bits into per-lane masks */ \
    draw_mask_bits = block->draw_mask_bits; \
    gvdupq_n_u16(draw_mask, draw_mask_bits); \
    gvtstq_u16(draw_mask, draw_mask, test_mask); \
 \
    shade_blocks_textured_modulated_##shading##_block_load(); \
 \
    gvld1q_u16(texels, block->texels.e); \
 \
    /* split 15-bit texels into 5-bit components (b via >>7 then >>3) */ \
    gvmovn_u16(texels_r, texels); \
    gvshrn_n_u16(texels_g, texels, 5); \
    gvshrn_n_u16(texels_b, texels, 7); \
 \
    gvand(texels_r, texels_r, d64_0x1F); \
    gvand(texels_g, texels_g, d64_0x1F); \
    gvshr_n_u8(texels_b, texels_b, 3); \
 \
    shade_blocks_textured_modulate_##dithering(r); \
    shade_blocks_textured_modulate_##dithering(g); \
    shade_blocks_textured_modulate_##dithering(b); \
 \
    /* texel == 0 means transparent; keep texel bit 15 in the result */ \
    gvceqzq_u16(zero_mask, texels); \
    gvand(pixels, texels, d128_0x8000); \
 \
    /* saturating narrow: (5bit * 8bit color) >> 4, clamped to a byte */ \
    gvqshrun_n_s16(pixels_r_low, pixels_r, 4); \
    gvqshrun_n_s16(pixels_g_low, pixels_g, 4); \
    gvqshrun_n_s16(pixels_b_low, pixels_b, 4); \
 \
    gvorrq(zero_mask, draw_mask, zero_mask); \
 \
    gvshr_n_u8(pixels_r_low, pixels_r_low, 3); \
    gvbic(pixels_g_low, pixels_g_low, d64_0x07); \
    gvbic(pixels_b_low, pixels_b_low, d64_0x07); \
 \
    /* repack to 15-bit BGR: multipliers 1/4/128 place the 5-bit fields */ \
    gvmlal_u8(pixels, pixels_r_low, d64_1); \
    gvmlal_u8(pixels, pixels_g_low, d64_4); \
    gvmlal_u8(pixels, pixels_b_low, d64_128); \
 \
    shade_blocks_store_##target(zero_mask, pixels); \
 \
    num_blocks--; \
    block++; \
  } \
| 2235 | |
/* Entry points instantiating shade_blocks_textured_modulated_do() for each
 * shading/dithering/target combination.  The #if 0 bodies redirect to the
 * underscore-suffixed alternative implementations (kept for debugging). */
void shade_blocks_shaded_textured_modulated_dithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_dithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, dithered, direct);
}

void shade_blocks_shaded_textured_modulated_undithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_undithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, undithered, direct);
}

void shade_blocks_unshaded_textured_modulated_dithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_dithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, dithered, direct);
}

void shade_blocks_unshaded_textured_modulated_undithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_undithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, undithered, direct);
}

void shade_blocks_shaded_textured_modulated_dithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_dithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, dithered, indirect);
}

void shade_blocks_shaded_textured_modulated_undithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_undithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, undithered, indirect);
}

void shade_blocks_unshaded_textured_modulated_dithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_dithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, dithered, indirect);
}

void shade_blocks_unshaded_textured_modulated_undithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_undithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, undithered, indirect);
}
| 2315 | |
/* Unmodulated textured shading: texels pass through unchanged.  Only the
 * draw mask needs computing: the block's 8 edge-mask bits are expanded to
 * per-lane masks and OR'd with the zero-texel (transparent) mask before
 * the target-specific store. */
#define shade_blocks_textured_unmodulated_do(target) \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u draw_mask; \
  vec_8x16u test_mask; \
  u32 draw_mask_bits; \
 \
  vec_8x16u pixels; \
 \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  shade_blocks_load_msb_mask_##target(); \
 \
  while(num_blocks) \
  { \
    vec_8x16u zero_mask; \
 \
    draw_mask_bits = block->draw_mask_bits; \
    gvdupq_n_u16(draw_mask, draw_mask_bits); \
    gvtstq_u16(draw_mask, draw_mask, test_mask); \
 \
    gvld1q_u16(pixels, block->texels.e); \
 \
    /* texel value 0x0000 is transparent: fold into the skip mask */ \
    gvceqzq_u16(zero_mask, pixels); \
    gvorrq(zero_mask, draw_mask, zero_mask); \
 \
    shade_blocks_store_##target(zero_mask, pixels); \
 \
    num_blocks--; \
    block++; \
  } \
| 2346 | |
/* Entry points for the unmodulated textured path. */
void shade_blocks_textured_unmodulated_indirect(psx_gpu_struct *psx_gpu)
{
#if 0
  shade_blocks_textured_unmodulated_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_unmodulated_do(indirect)
}

void shade_blocks_textured_unmodulated_direct(psx_gpu_struct *psx_gpu)
{
#if 0
  shade_blocks_textured_unmodulated_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_unmodulated_do(direct)
}

/* Unshaded untextured indirect blocks were already fully produced by the
 * setup pass (pixels and draw mask stored there); intentionally a no-op. */
void shade_blocks_unshaded_untextured_indirect(psx_gpu_struct *psx_gpu)
{
}
| 2368 | |
/* Direct write-out of unshaded untextured blocks: apply each block's draw
 * mask and store to the framebuffer.  'pixels' is loaded once from the
 * first block only -- for a flat-colored primitive every block carries the
 * same pixel values (presumed invariant established by the setup pass --
 * TODO(review): confirm against setup_blocks_store_unshaded_untextured). */
void shade_blocks_unshaded_untextured_direct(psx_gpu_struct *psx_gpu)
{
#if 0
  shade_blocks_unshaded_untextured_direct_(psx_gpu);
  return;
#endif
  block_struct *block = psx_gpu->blocks;
  u32 num_blocks = psx_gpu->num_blocks;

  vec_8x16u pixels;
  gvld1q_u16(pixels, block->texels.e);
  shade_blocks_load_msb_mask_direct();

  while(num_blocks)
  {
    vec_8x16u draw_mask;
    gvld1q_u16(draw_mask, block->draw_mask.e);
    shade_blocks_store_direct(draw_mask, pixels);

    num_blocks--;
    block++;
  }
}
| 2392 | |
/* Mask-bit evaluation: when enabled, framebuffer pixels whose bit 15 is
 * set (the mask bit) must not be overwritten; gvcltzq on the s16 view
 * yields all-ones lanes exactly where bit 15 is set, which are OR'd into
 * the draw mask. */
#define blend_blocks_mask_evaluate_on() \
  vec_8x16u mask_pixels; \
  gvcltzq_s16(mask_pixels, framebuffer_pixels); \
  gvorrq(draw_mask, draw_mask, mask_pixels) \

#define blend_blocks_mask_evaluate_off() \

| 2399 | |
/* 50% blend: per-component (fb + src) / 2 on packed 5:5:5 pixels without
 * unpacking.  0x0421 selects the least-significant bit of each 5-bit
 * component; subtracting ((src ^ fb) & 0x0421) from src before the
 * halving add (gvhaddq) cancels the per-component rounding carry, so no
 * carries cross component boundaries.  Bit 15 is stripped from both
 * inputs first. */
#define blend_blocks_average() \
{ \
  vec_8x16u pixels_no_msb; \
  vec_8x16u fb_pixels_no_msb; \
 \
  vec_8x16u d128_0x0421; \
 \
  gvdupq_n_u16(d128_0x0421, 0x0421); \
 \
  gveorq(blend_pixels, pixels, framebuffer_pixels); \
  gvbicq(pixels_no_msb, pixels, d128_0x8000); \
  gvand(blend_pixels, blend_pixels, d128_0x0421); \
  gvsubq_u16(blend_pixels, pixels_no_msb, blend_pixels); \
  gvbicq(fb_pixels_no_msb, framebuffer_pixels, d128_0x8000); \
  gvhaddq_u16(blend_pixels, fb_pixels_no_msb, blend_pixels); \
} \

| 2416 | |
/* Additive blend (fb + src) with per-component saturation on packed
 * 5:5:5.  Red+blue (mask 0x7C1F) and green (0x03E0) are isolated so one
 * component's overflow cannot carry into the next.  The r/b clamp uses a
 * bytewise u8 min: red lives entirely in the low byte (max sum 0x3E, min
 * vs 0x1F) and blue's field in the high byte (min vs 0x7C), so the
 * byte-lane min saturates both at once; green straddles a byte boundary
 * and needs the u16 min. */
#define blend_blocks_add() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u fb_rb, fb_g; \
 \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x03E0; \
 \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
 \
  gvand(pixels_rb, pixels, d128_0x7C1F); \
  gvand(pixels_g, pixels, d128_0x03E0); \
 \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
 \
  gvaddq_u16(fb_rb, fb_rb, pixels_rb); \
  gvaddq_u16(fb_g, fb_g, pixels_g); \
 \
  gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \
  gvminq_u16(fb_g, fb_g, d128_0x03E0); \
 \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

| 2442 | |
/* Subtractive blend (fb - src) with per-component saturation at zero.
 * Same field-splitting trick as blend_blocks_add: r/b use a bytewise
 * saturating u8 subtract (each field confined to one byte of its mask),
 * green uses the u16 saturating subtract because it straddles bytes. */
#define blend_blocks_subtract() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u fb_rb, fb_g; \
 \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x03E0; \
 \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
 \
  gvand(pixels_rb, pixels, d128_0x7C1F); \
  gvand(pixels_g, pixels, d128_0x03E0); \
 \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
 \
  gvqsubq_u8(fb_rb, fb_rb, pixels_rb); \
  gvqsubq_u16(fb_g, fb_g, pixels_g); \
 \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

| 2465 | |
/* Quarter-additive blend (fb + src/4) with saturation.  The source is
 * shifted right by 2 first, so its component masks are the normal masks
 * shifted down: 0x1C07 (r/b) and 0x00E0 (g); this drops the low 2 bits of
 * each source component before the add.  Saturation mirrors
 * blend_blocks_add (bytewise min for r/b, u16 min for g). */
#define blend_blocks_add_fourth() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u pixels_fourth; \
  vec_8x16u fb_rb, fb_g; \
 \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x1C07; \
  vec_8x16u d128_0x03E0; \
  vec_8x16u d128_0x00E0; \
 \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x1C07, 0x1C07); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
  gvdupq_n_u16(d128_0x00E0, 0x00E0); \
 \
  gvshrq_n_u16(pixels_fourth, pixels, 2); \
 \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
 \
  gvand(pixels_rb, pixels_fourth, d128_0x1C07); \
  gvand(pixels_g, pixels_fourth, d128_0x00E0); \
 \
  gvaddq_u16(fb_rb, fb_rb, pixels_rb); \
  gvaddq_u16(fb_g, fb_g, pixels_g); \
 \
  gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \
  gvminq_u16(fb_g, fb_g, d128_0x03E0); \
 \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

| 2498 | |
/* Textured blending is per-texel: only texels with bit 15 (the
   semi-transparency flag) set actually blend; others pass through raw. */
#define blend_blocks_blended_combine_textured() \
{ \
  vec_8x16u blend_mask; \
  /* Lanes become all-ones where the texel's sign bit (bit 15) is set. */ \
  gvcltzq_s16(blend_mask, pixels); \
 \
  /* Keep bit 15 set on blended output, then re-insert the unmodified texel
     in lanes where blend_mask is zero (flag clear -> no blending). */ \
  gvorrq(blend_pixels, blend_pixels, d128_0x8000); \
  gvbifq(blend_pixels, pixels, blend_mask); \
} \

| 2507 | |
/* Untextured pixels carry no per-texel blend flag: every pixel blends,
   so there is nothing to combine here. */
#define blend_blocks_blended_combine_untextured() \

/* Run the selected blend arithmetic, then the texturing-specific combine
   step (per-texel flag handling for textured, no-op for untextured). */
#define blend_blocks_body_blend(blend_mode, texturing) \
{ \
  blend_blocks_##blend_mode(); \
  blend_blocks_blended_combine_##texturing(); \
} \

| 2515 | |
/* Token-pasting shims mapping blend_mode names used by blend_blocks_do()
   onto blend_blocks_body_blend(); "unblended" just forwards the source
   pixels untouched. */
#define blend_blocks_body_average(texturing) \
  blend_blocks_body_blend(average, texturing) \

#define blend_blocks_body_add(texturing) \
  blend_blocks_body_blend(add, texturing) \

#define blend_blocks_body_subtract(texturing) \
  blend_blocks_body_blend(subtract, texturing) \

#define blend_blocks_body_add_fourth(texturing) \
  blend_blocks_body_blend(add_fourth, texturing) \

#define blend_blocks_body_unblended(texturing) \
  blend_pixels = pixels \

| 2530 | |
/* Body of every blend_blocks_* function: walk the queued render blocks
   (8 pixels each), blend them against VRAM and store the result.
   Expands directly inside the calling function (note: declarations +
   loop only, no enclosing braces of its own).
   texturing:     textured / untextured (per-texel blend-flag handling)
   blend_mode:    average / add / subtract / add_fourth / unblended
   mask_evaluate: on / off (framebuffer mask-bit test) */
#define blend_blocks_do(texturing, blend_mode, mask_evaluate) \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u draw_mask; \
  vec_8x16u pixels; \
  vec_8x16u blend_pixels; \
  vec_8x16u framebuffer_pixels; \
  vec_8x16u msb_mask; \
  vec_8x16u d128_0x8000; \
 \
  u16 *fb_ptr; \
 \
  gvdupq_n_u16(d128_0x8000, 0x8000); \
  /* mask_msb sets bit 15 on everything written when mask-set is enabled. */ \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \
  (void)d128_0x8000; /* sometimes unused */ \
 \
  while(num_blocks) \
  { \
    gvld1q_u16(pixels, block->pixels.e); \
    gvld1q_u16(draw_mask, block->draw_mask.e); \
    fb_ptr = block->fb_ptr; \
 \
    gvld1q_u16(framebuffer_pixels, fb_ptr); \
 \
    blend_blocks_mask_evaluate_##mask_evaluate(); \
    blend_blocks_body_##blend_mode(texturing); \
 \
    gvorrq(blend_pixels, blend_pixels, msb_mask); \
    /* Insert blended pixels only where draw_mask is clear; masked lanes
       keep the original framebuffer contents. */ \
    gvbifq(framebuffer_pixels, blend_pixels, draw_mask); \
    gvst1q_u16(framebuffer_pixels, fb_ptr); \
 \
    num_blocks--; \
    block++; \
  } \

| 2566 | |
// Textured, average (B/2+F/2) blend, mask test off.  The #if 0 block (here
// and in the siblings below) switches to the reference "_"-suffixed
// implementation for debugging.
void blend_blocks_textured_average_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_average_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, average, off);
}
| 2575 | |
// Untextured, average blend, mask test off.
void blend_blocks_untextured_average_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_average_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, average, off);
}
| 2584 | |
// Textured, average blend, mask test on.
void blend_blocks_textured_average_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_average_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, average, on);
}
| 2593 | |
// Untextured, average blend, mask test on.
void blend_blocks_untextured_average_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_average_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, average, on);
}
| 2602 | |
// Textured, additive (B+F) blend, mask test off.
void blend_blocks_textured_add_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add, off);
}
| 2611 | |
// Textured, additive blend, mask test on.
void blend_blocks_textured_add_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add, on);
}
| 2620 | |
// Untextured, additive blend, mask test off.
void blend_blocks_untextured_add_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add, off);
}
| 2629 | |
// Untextured, additive blend, mask test on.
void blend_blocks_untextured_add_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add, on);
}
| 2638 | |
// Textured, subtractive (B-F) blend, mask test off.
void blend_blocks_textured_subtract_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_subtract_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, subtract, off);
}
| 2647 | |
// Textured, subtractive blend, mask test on.
void blend_blocks_textured_subtract_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_subtract_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, subtract, on);
}
| 2656 | |
// Untextured, subtractive blend, mask test off.
void blend_blocks_untextured_subtract_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_subtract_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, subtract, off);
}
| 2665 | |
// Untextured, subtractive blend, mask test on.
void blend_blocks_untextured_subtract_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_subtract_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, subtract, on);
}
| 2674 | |
// Textured, B/4+F blend, mask test off.
void blend_blocks_textured_add_fourth_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_fourth_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add_fourth, off);
}
| 2683 | |
// Textured, B/4+F blend, mask test on.
void blend_blocks_textured_add_fourth_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_fourth_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add_fourth, on);
}
| 2692 | |
// Untextured, B/4+F blend, mask test off.
void blend_blocks_untextured_add_fourth_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_fourth_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add_fourth, off);
}
| 2701 | |
// Untextured, B/4+F blend, mask test on.
void blend_blocks_untextured_add_fourth_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_fourth_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add_fourth, on);
}
| 2710 | |
// Textured, no blending, mask test on (pixels still need the mask test and
// msb handling, so the generic loop is used with the pass-through body).
void blend_blocks_textured_unblended_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_unblended_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, unblended, on);
}
| 2719 | |
// Intentionally empty: with no blending and no mask test there is nothing
// to post-process. NOTE(review): presumably kept only so the function-table
// slot exists -- confirm against the dispatch table in the caller.
void blend_blocks_textured_unblended_off(psx_gpu_struct *psx_gpu)
{
}
| 2723 | |
/* Queue render blocks for an untextured (flat color) sprite/rectangle of
   width x height at (x, y).  u/v are unused for untextured sprites.
   color is 24-bit R8G8B8; it is packed down to bgr555 and replicated into
   8-pixel blocks.  When no mask evaluation, texel modulation, blending or
   interlacing is active, a direct-to-VRAM fast path is taken instead. */
void setup_sprite_untextured(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u,
 s32 v, s32 width, s32 height, u32 color)
{
  if((psx_gpu->render_state & (RENDER_STATE_MASK_EVALUATE |
   RENDER_FLAGS_MODULATE_TEXELS | RENDER_FLAGS_BLEND)) == 0 &&
   (psx_gpu->render_mode & RENDER_INTERLACE_ENABLED) == 0)
  {
    setup_sprite_untextured_simple(psx_gpu, x, y, u, v, width, height, color);
    return;
  }

#if 0
  setup_sprite_untextured_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  /* Pixels used in the last (possibly partial) 8-wide block of each row;
     right_mask_bits has a 1 for each lane past the sprite's right edge. */
  u32 right_width = ((width - 1) & 0x7) + 1;
  u32 right_mask_bits = (0xFF << right_width);
  u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + x;
  u32 block_width = (width + 7) / 8;
  /* Step from the end of one row's blocks to the start of the next row. */
  u32 fb_ptr_pitch = 1024 - ((block_width - 1) * 8);
  u32 blocks_remaining;
  u32 num_blocks = psx_gpu->num_blocks;
  block_struct *block = psx_gpu->blocks + num_blocks;

  u32 color_r = color & 0xFF;
  u32 color_g = (color >> 8) & 0xFF;
  u32 color_b = (color >> 16) & 0xFF;
  vec_8x16u colors;
  vec_8x16u right_mask;
  vec_8x16u test_mask;
  vec_8x16u zero_mask;

  /* test_mask holds per-lane bit flags (from psx_gpu state); gvtstq below
     expands right_mask_bits into a full per-lane mask. */
  gvld1q_u16(test_mask, psx_gpu->test_mask.e);
  /* Pack R8G8B8 down to bgr555. */
  color = (color_r >> 3) | ((color_g >> 3) << 5) | ((color_b >> 3) << 10);

  gvdupq_n_u16(colors, color);
  gvdupq_n_u16(zero_mask, 0x00);
  gvdupq_n_u16(right_mask, right_mask_bits);
  gvtstq_u16(right_mask, right_mask, test_mask);

  while(height)
  {
    blocks_remaining = block_width - 1;
    num_blocks += block_width;

    /* Flush if this row would overflow the block queue. */
    if(num_blocks > MAX_BLOCKS)
    {
      flush_render_block_buffer(psx_gpu);
      num_blocks = block_width;
      block = psx_gpu->blocks;
    }

    /* Fully covered blocks: draw mask all-zero (every pixel drawn). */
    while(blocks_remaining)
    {
      gvst1q_u16(colors, block->pixels.e);
      gvst1q_u16(zero_mask, block->draw_mask.e);
      block->fb_ptr = fb_ptr;

      fb_ptr += 8;
      block++;
      blocks_remaining--;
    }

    /* Last block of the row: mask off lanes past the right edge. */
    gvst1q_u16(colors, block->pixels.e);
    gvst1q_u16(right_mask, block->draw_mask.e);
    block->fb_ptr = fb_ptr;

    block++;
    fb_ptr += fb_ptr_pitch;

    height--;
    psx_gpu->num_blocks = num_blocks;
  }
}
| 2798 | |
/* Per-format setup for tiled sprite rendering.  4bpp loads the 16-entry
   CLUT de-interleaved into low/high byte vectors (for vtbl lookup) and
   refreshes the 4bpp texture cache if dirty; 8bpp only refreshes its
   cache. */
#define setup_sprite_tiled_initialize_4bpp_clut() \
  vec_16x8u clut_low, clut_high; \
 \
  gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr) \

#define setup_sprite_tiled_initialize_4bpp() \
  setup_sprite_tiled_initialize_4bpp_clut(); \
 \
  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask) \
    update_texture_4bpp_cache(psx_gpu) \

#define setup_sprite_tiled_initialize_8bpp() \
  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) \
    update_texture_8bpp_cache(psx_gpu) \

| 2813 | |
/* Fetch 8 texel bytes from the texture cache at texture_offset + offset
   (wrapped by texture_mask) into `texels`. */
#define setup_sprite_tile_fetch_texel_block_8bpp(offset) \
  texture_block_ptr = psx_gpu->texture_page_ptr + \
   ((texture_offset + offset) & texture_mask); \
 \
  gvld1_u8(texels, (u8 *)texture_block_ptr) \

/* Reserve tile_num_blocks queue entries, flushing first if the queue
   would overflow MAX_BLOCKS. */
#define setup_sprite_tile_add_blocks(tile_num_blocks) \
  num_blocks += tile_num_blocks; \
 \
  if(num_blocks > MAX_BLOCKS) \
  { \
    flush_render_block_buffer(psx_gpu); \
    num_blocks = tile_num_blocks; \
    block = psx_gpu->blocks; \
  } \

| 2829 | |
/* Emit both 8-pixel halves of a 16-wide 4bpp tile column, one row per
   iteration.  Texels are CLUT-expanded via two vtbl lookups (low/high
   bytes) and interleaved back on store.  `edge` is unused here (both
   halves always emitted). */
#define setup_sprite_tile_full_4bpp(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  setup_sprite_tile_add_blocks(sub_tile_height * 2); \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
 \
    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
    block->draw_mask_bits = left_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
 \
    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
    block->draw_mask_bits = right_mask_bits; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    /* Next VRAM row; texture rows are 0x10 apart within a tile. */ \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  /* Skip to the next 16-high tile below (0x10 * 16 = 0x100 rows total). */ \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

| 2862 | |
/* Emit only one 8-pixel half of a 4bpp tile column per row; `edge`
   (left/right) selects which precomputed mask bits apply. */
#define setup_sprite_tile_half_4bpp(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  setup_sprite_tile_add_blocks(sub_tile_height); \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
 \
    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

| 2886 | |
/* 8bpp variant of the full-tile column: raw texel bytes are queued (CLUT
   lookup happens later in the pipeline via block->r). */
#define setup_sprite_tile_full_8bpp(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 2); \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvst1_u8(texels, block->r.e); \
    block->draw_mask_bits = left_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvst1_u8(texels, block->r.e); \
    block->draw_mask_bits = right_mask_bits; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

| 2912 | |
/* 8bpp variant of the half-tile column; `edge` selects left/right mask. */
#define setup_sprite_tile_half_8bpp(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height); \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvst1_u8(texels, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

| 2932 | |
/* Pre/post fb_ptr and texture_offset adjustments around a tile column.
   A "half right" column skips the left 8 texels, so it starts 8 into the
   tile (and the fb advance is undone afterwards); "half left" and "full"
   start at the column base with no post-adjustment. */
#define setup_sprite_tile_column_edge_pre_adjust_half_right() \
  texture_offset = texture_offset_base + 8; \
  fb_ptr += 8 \

#define setup_sprite_tile_column_edge_pre_adjust_half_left() \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_pre_adjust_half(edge) \
  setup_sprite_tile_column_edge_pre_adjust_half_##edge() \

#define setup_sprite_tile_column_edge_pre_adjust_full(edge) \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_post_adjust_half_right() \
  fb_ptr -= 8 \

#define setup_sprite_tile_column_edge_post_adjust_half_left() \

#define setup_sprite_tile_column_edge_post_adjust_half(edge) \
  setup_sprite_tile_column_edge_post_adjust_half_##edge() \

#define setup_sprite_tile_column_edge_post_adjust_full(edge) \

| 2955 | |
| 2956 | |
/* Render one column that spans a single vertical tile: column_data holds
   the row count directly (see setup_sprite_column_data_single). */
#define setup_sprite_tile_column_height_single(edge_mode, edge, texture_mode, \
 x4mode) \
do \
{ \
  sub_tile_height = column_data; \
  setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \
  setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
  setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \
} while(0) \

| 2966 | |
/* Render one column spanning several vertical tiles.  column_data is
   packed (see setup_sprite_column_data_multi): bits 0-7 rows in the first
   tile, bits 8-15 rows in the last tile, bits 16+ tile count - 1.  Middle
   tiles are always full 16 rows. */
#define setup_sprite_tile_column_height_multi(edge_mode, edge, texture_mode, \
 x4mode) \
do \
{ \
  u32 tiles_remaining = column_data >> 16; \
  sub_tile_height = column_data & 0xFF; \
  setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \
  setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
  tiles_remaining -= 1; \
 \
  while(tiles_remaining) \
  { \
    sub_tile_height = 16; \
    setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
    tiles_remaining--; \
  } \
 \
  sub_tile_height = (column_data >> 8) & 0xFF; \
  setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
  setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \
} while(0) \

| 2988 | |
| 2989 | |
/* Pack the per-column row-count description consumed by the
   column_height_single/multi macros above.  Multi layout:
   bits 0-7 = rows in the first tile, 8-15 = rows in the last tile,
   16+ = number of tiles - 1. */
#define setup_sprite_column_data_single() \
  column_data = height \

#define setup_sprite_column_data_multi() \
  column_data = 16 - offset_v; \
  column_data |= ((height_rounded & 0xF) + 1) << 8; \
  column_data |= (tile_height - 1) << 16 \

/* Shift that turns the left block's mask bits into the right block's
   (8 lanes normally, 16 in 4x-scaled mode). */
#define RIGHT_MASK_BIT_SHIFT 8
#define RIGHT_MASK_BIT_SHIFT_4x 16
| 3000 | |
/* Sprite that fits in a single horizontal tile: both edge masks apply to
   the same column, so they are OR-combined before rendering it. */
#define setup_sprite_tile_column_width_single(texture_mode, multi_height, \
 edge_mode, edge, x4mode) \
{ \
  setup_sprite_column_data_##multi_height(); \
  left_mask_bits = left_block_mask | right_block_mask; \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
 \
  setup_sprite_tile_column_height_##multi_height(edge_mode, edge, \
   texture_mode, x4mode); \
} \

| 3011 | |
/* Advance texture_offset_base one 16-texel tile to the right; when the
   column bits (0x100..0xF00) wrap, step into the next tile row group. */
#define setup_sprite_tiled_advance_column() \
  texture_offset_base += 0x100; \
  if((texture_offset_base & 0xF00) == 0) \
    texture_offset_base -= (0x100 + 0xF00) \

/* Framebuffer stride multiplier: 4x-scaled mode covers twice the output
   width per tile. */
#define FB_PTR_MULTIPLIER 1
#define FB_PTR_MULTIPLIER_4x 2
| 3019 | |
/* Sprite spanning several horizontal tiles: render the left edge column
   with the left mask, the interior columns fully unmasked, then the right
   edge column with the right mask.  fb_ptr_advance_column rewinds to the
   top of the next column after each one. */
#define setup_sprite_tile_column_width_multi(texture_mode, multi_height, \
 left_mode, right_mode, x4mode) \
{ \
  setup_sprite_column_data_##multi_height(); \
  s32 fb_ptr_advance_column = (16 - (1024 * height)) \
   * FB_PTR_MULTIPLIER##x4mode; \
 \
  tile_width -= 2; /* columns left after removing the two edge columns */ \
  left_mask_bits = left_block_mask; \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
 \
  setup_sprite_tile_column_height_##multi_height(left_mode, right, \
   texture_mode, x4mode); \
  fb_ptr += fb_ptr_advance_column; \
 \
  /* Interior columns draw every pixel. */ \
  left_mask_bits = 0x00; \
  right_mask_bits = 0x00; \
 \
  while(tile_width) \
  { \
    setup_sprite_tiled_advance_column(); \
    setup_sprite_tile_column_height_##multi_height(full, none, \
     texture_mode, x4mode); \
    fb_ptr += fb_ptr_advance_column; \
    tile_width--; \
  } \
 \
  left_mask_bits = right_block_mask; \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
 \
  setup_sprite_tiled_advance_column(); \
  setup_sprite_tile_column_height_##multi_height(right_mode, left, \
   texture_mode, x4mode); \
} \

| 3054 | |
| 3055 | |
/* 4x stuff: 2x2-upscaled sprite rendering variants of the macros above. */
/* 4bpp needs only the CLUT (texture caches are handled by the non-4x
   path); 8bpp needs no extra setup. */
#define setup_sprite_tiled_initialize_4bpp_4x() \
  setup_sprite_tiled_initialize_4bpp_clut() \

#define setup_sprite_tiled_initialize_8bpp_4x() \

| 3061 | |
/* 4x-scaled full 4bpp tile column: each source row yields 4 blocks (2x
   horizontal doubling via gvst2 pair-store, 2x vertical by emitting the
   same pixels at fb_ptr and fb_ptr + 1024); both tile halves -> 8 blocks
   per source row.  Mask bits are split per 8-lane output block. */
#define setup_sprite_tile_full_4bpp_4x(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  vec_8x16u pixels; \
  vec_4x16u pixels_half; \
  setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4); \
  u32 left_mask_bits_a = left_mask_bits & 0xFF; \
  u32 left_mask_bits_b = left_mask_bits >> 8; \
  u32 right_mask_bits_a = right_mask_bits & 0xFF; \
  u32 right_mask_bits_b = right_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    /* Left half of the tile: CLUT-expand 8 texels to 8 u16 pixels. */ \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    /* Store each 4-pixel half twice-interleaved (horizontal 2x), on two
       adjacent VRAM rows (vertical 2x). */ \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    /* Right half of the tile, output 16 pixels further right. */ \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 16; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024 + 16; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 24; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 24; \
    block++; \
 \
    /* Advance two VRAM rows per source row (vertical 2x). */ \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

| 3136 | |
/* 4x-scaled half 4bpp tile column: one 8-texel fetch expands to 4 output
   blocks (2x horizontal, 2x vertical); `edge` selects the mask pair. */
#define setup_sprite_tile_half_4bpp_4x(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  vec_8x16u pixels; \
  vec_4x16u pixels_half; \
  setup_sprite_tile_add_blocks(sub_tile_height * 4); \
  u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF; \
  u32 edge##_mask_bits_b = edge##_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

| 3182 | |
/* 4x-scaled full 8bpp tile column: raw texel bytes are duplicated via
   gvzip (horizontal 2x) and each half stored on two adjacent VRAM rows
   (vertical 2x); 8 blocks per source row. */
#define setup_sprite_tile_full_8bpp_4x(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4); \
  vec_8x16u texels_wide; \
  vec_4x16u texels_half; \
  u32 left_mask_bits_a = left_mask_bits & 0xFF; \
  u32 left_mask_bits_b = left_mask_bits >> 8; \
  u32 right_mask_bits_a = right_mask_bits & 0xFF; \
  u32 right_mask_bits_b = right_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvzip_u8(texels_wide, texels, texels); /* duplicate each texel */ \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvzip_u8(texels_wide, texels, texels); \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 16; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024 + 16; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 24; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 24 + 1024; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

| 3250 | |
/* 4x-scaled half 8bpp tile column: 4 blocks per source row; `edge`
   selects the mask pair. */
#define setup_sprite_tile_half_8bpp_4x(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 4); \
  vec_8x16u texels_wide; \
  vec_4x16u texels_half; \
  u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF; \
  u32 edge##_mask_bits_b = edge##_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvzip_u8(texels_wide, texels, texels); \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8 + 1024; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

| 3292 | |
/* 4x counterparts of the edge pre/post adjustments (fb steps doubled to
   16), plus helpers used by setup_sprite_tiled_do: offset_u scaling for
   the doubled lane count and edge-mask "fully masked?" tests used to pick
   half vs full columns.  ("comapre" spelling kept -- referenced by the
   dispatcher below.) */
#define setup_sprite_tile_column_edge_pre_adjust_half_right_4x() \
  texture_offset = texture_offset_base + 8; \
  fb_ptr += 16 \

#define setup_sprite_tile_column_edge_pre_adjust_half_left_4x() \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_pre_adjust_half_4x(edge) \
  setup_sprite_tile_column_edge_pre_adjust_half_##edge##_4x() \

#define setup_sprite_tile_column_edge_pre_adjust_full_4x(edge) \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_post_adjust_half_right_4x() \
  fb_ptr -= 16 \

#define setup_sprite_tile_column_edge_post_adjust_half_left_4x() \

#define setup_sprite_tile_column_edge_post_adjust_half_4x(edge) \
  setup_sprite_tile_column_edge_post_adjust_half_##edge##_4x() \

#define setup_sprite_tile_column_edge_post_adjust_full_4x(edge) \

#define setup_sprite_offset_u_adjust() \

#define setup_sprite_comapre_left_block_mask() \
  ((left_block_mask & 0xFF) == 0xFF) \

#define setup_sprite_comapre_right_block_mask() \
  (((right_block_mask >> 8) & 0xFF) == 0xFF) \

#define setup_sprite_offset_u_adjust_4x() \
  offset_u *= 2; \
  offset_u_right = offset_u_right * 2 + 1 \

#define setup_sprite_comapre_left_block_mask_4x() \
  ((left_block_mask & 0xFFFF) == 0xFFFF) \

#define setup_sprite_comapre_right_block_mask_4x() \
  (((right_block_mask >> 16) & 0xFFFF) == 0xFFFF) \

| 3333 | |
| 3334 | |
/*
 * Shared body of the setup_sprite_{4bpp,8bpp}[_4x]() entry points below.
 *
 * Walks the sprite as 16x16-texel texture tiles and emits render blocks for
 * the block rasterizer (psx_gpu->blocks).  Because the sprite may start and
 * end mid-tile, the leftmost and rightmost block columns can be partially
 * covered; left_block_mask/right_block_mask describe that partial coverage,
 * derived from offset_u (texels to skip on the left) and offset_u_right
 * (last covered texel position in the rightmost tile).
 *
 * Parameters (token-pasted, not evaluated):
 *   texture_mode - 4bpp or 8bpp; selects the tiled initialize/fetch helpers.
 *   x4mode       - empty for native resolution, _4x for the 2x-horizontal-
 *                  scale variant (doubled u offsets, 16-bit coverage masks).
 *
 * Relies on x, y, u, v, width, height and psx_gpu being in scope in the
 * enclosing function.
 *
 * control_mask selects one of the specialized column renderers:
 *   bit 0: sprite spans a single tile column horizontally
 *   bit 1: sprite spans a single tile row vertically
 *   bit 2: leftmost block fully covered (no left clipping needed)
 *   bit 3: rightmost block fully covered (no right clipping needed)
 * Cases 0xD and 0xF appear unreachable given the mask arithmetic (a fully
 * covered left edge forces offset_u >= 8, which with a single tile column
 * prevents a fully covered right edge), hence the shared default - TODO
 * confirm.
 *
 * texture_mask packs the 4-bit intra-tile and tile-index mask components of
 * texture_mask_width/height into one word; texture_offset is the starting
 * position in the swizzled 16bpp texture cache layout (v low nibble, then
 * u high nibble, then v high nibble).
 */
#define setup_sprite_tiled_do(texture_mode, x4mode) \
  s32 offset_u = u & 0xF; \
  s32 offset_v = v & 0xF; \
  \
  s32 width_rounded = offset_u + width + 15; \
  s32 height_rounded = offset_v + height + 15; \
  s32 tile_height = height_rounded / 16; \
  s32 tile_width = width_rounded / 16; \
  u32 offset_u_right = width_rounded & 0xF; \
  \
  setup_sprite_offset_u_adjust##x4mode(); \
  \
  u32 left_block_mask = ~(0xFFFFFFFF << offset_u); \
  u32 right_block_mask = 0xFFFFFFFE << offset_u_right; \
  \
  u32 left_mask_bits; \
  u32 right_mask_bits; \
  \
  u32 sub_tile_height; \
  u32 column_data; \
  \
  u32 texture_mask = (psx_gpu->texture_mask_width & 0xF) | \
   ((psx_gpu->texture_mask_height & 0xF) << 4) | \
   ((psx_gpu->texture_mask_width >> 4) << 8) | \
   ((psx_gpu->texture_mask_height >> 4) << 12); \
  u32 texture_offset = ((v & 0xF) << 4) | ((u & 0xF0) << 4) | \
   ((v & 0xF0) << 8); \
  u32 texture_offset_base = texture_offset; \
  u32 control_mask; \
  \
  u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + (x - offset_u); \
  u32 num_blocks = psx_gpu->num_blocks; \
  block_struct *block = psx_gpu->blocks + num_blocks; \
  \
  u16 *texture_block_ptr; \
  vec_8x8u texels; \
  \
  setup_sprite_tiled_initialize_##texture_mode##x4mode(); \
  \
  control_mask = tile_width == 1; \
  control_mask |= (tile_height == 1) << 1; \
  control_mask |= setup_sprite_comapre_left_block_mask##x4mode() << 2; \
  control_mask |= setup_sprite_comapre_right_block_mask##x4mode() << 3; \
  \
  switch(control_mask) \
  { \
    default: \
    case 0x0: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, full, \
       x4mode); \
      break; \
  \
    case 0x1: \
      setup_sprite_tile_column_width_single(texture_mode, multi, full, none, \
       x4mode); \
      break; \
  \
    case 0x2: \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, full, \
       x4mode); \
      break; \
  \
    case 0x3: \
      setup_sprite_tile_column_width_single(texture_mode, single, full, none, \
       x4mode); \
      break; \
  \
    case 0x4: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, full, \
       x4mode); \
      break; \
  \
    case 0x5: \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, right, \
       x4mode); \
      break; \
  \
    case 0x6: \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, full, \
       x4mode); \
      break; \
  \
    case 0x7: \
      setup_sprite_tile_column_width_single(texture_mode, single, half, right, \
       x4mode); \
      break; \
  \
    case 0x8: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, half, \
       x4mode); \
      break; \
  \
    case 0x9: \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, left, \
       x4mode); \
      break; \
  \
    case 0xA: \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, half, \
       x4mode); \
      break; \
  \
    case 0xB: \
      setup_sprite_tile_column_width_single(texture_mode, single, half, left, \
       x4mode); \
      break; \
  \
    case 0xC: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, half, \
       x4mode); \
      break; \
  \
    case 0xE: \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, half, \
       x4mode); \
      break; \
  } \
| 3452 | |
/* Set up render blocks for a 4bpp (16-color) sprite at native resolution.
 * (x, y): destination position in VRAM; (u, v): texture coordinates within
 * the current texture page; width/height: sprite size in pixels.
 * color is passed through to the tiled setup helpers - presumably the flat
 * modulation color; it is not referenced directly here (TODO confirm).
 * The #if 0 path is a disabled alternative implementation kept for
 * cross-checking. */
void setup_sprite_4bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0
  setup_sprite_4bpp_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(4bpp,)
}
| 3462 | |
/* Set up render blocks for an 8bpp (256-color) sprite at native resolution.
 * Same contract as setup_sprite_4bpp(); only the texture fetch helpers
 * selected by the token-pasted mode differ. */
void setup_sprite_8bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0
  setup_sprite_8bpp_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(8bpp,)
}
| 3472 | |
/* NOTE(review): these helpers are presumably defined earlier in the file for
   the native-resolution sprite path; undefine them so the _4x variants below
   can (re)define their own versions - confirm against the earlier section. */
#undef draw_mask_fb_ptr_left
#undef draw_mask_fb_ptr_right
| 3475 | |
/* Set up render blocks for a 4bpp sprite in the 2x-horizontal-scale ("4x")
 * enhanced mode: the _4x token selects doubled u offsets and 16-bit edge
 * coverage masks inside setup_sprite_tiled_do. Parameters as in
 * setup_sprite_4bpp(). */
void setup_sprite_4bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0
  setup_sprite_4bpp_4x_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(4bpp, _4x)
}
| 3485 | |
/* Set up render blocks for an 8bpp sprite in the 2x-horizontal-scale ("4x")
 * enhanced mode. Parameters as in setup_sprite_8bpp(). */
void setup_sprite_8bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0
  setup_sprite_8bpp_4x_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(8bpp, _4x)
}
| 3495 | |
| 3496 | |
| 3497 | void scale2x_tiles8(void * __restrict__ dst_, const void * __restrict__ src_, int w8, int h) |
| 3498 | { |
| 3499 | #if 0 |
| 3500 | scale2x_tiles8_(dst_, src_, w8, h); |
| 3501 | return; |
| 3502 | #endif |
| 3503 | const u16 * __restrict__ src = src_; |
| 3504 | const u16 * __restrict__ src1; |
| 3505 | u16 * __restrict__ dst = dst_; |
| 3506 | u16 * __restrict__ dst1; |
| 3507 | gvreg a, b; |
| 3508 | int w; |
| 3509 | for (; h > 0; h--, src += 1024, dst += 1024*2) |
| 3510 | { |
| 3511 | src1 = src; |
| 3512 | dst1 = dst; |
| 3513 | for (w = w8; w > 0; w--, src1 += 8, dst1 += 8*2) |
| 3514 | { |
| 3515 | gvld1q_u16(a, src1); |
| 3516 | gvzipq_u16(a, b, a, a); |
| 3517 | gvst1q_u16(a, dst1); |
| 3518 | gvst1q_u16(b, dst1 + 8); |
| 3519 | gvst1q_u16(a, dst1 + 1024); |
| 3520 | gvst1q_u16(b, dst1 + 1024 + 8); |
| 3521 | } |
| 3522 | } |
| 3523 | } |
| 3524 | |
| 3525 | // vim:ts=2:sw=2:expandtab |