| 1 | /* |
| 2 | * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com> |
 * Copyright (C) 2022 Gražvydas Ignotas "notaz" <notasas@gmail.com>
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or |
| 6 | * modify it under the terms of the GNU General Public License as |
| 7 | * published by the Free Software Foundation; either version 2 of |
| 8 | * the License, or (at your option) any later version. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | * General Public License for more details. |
| 14 | */ |
| 15 | |
| 16 | #include <string.h> |
| 17 | #include "psx_gpu.h" |
| 18 | #include "psx_gpu_simd.h" |
| 19 | //#define ASM_PROTOTYPES |
| 20 | //#include "psx_gpu_simd.h" |
| 21 | #ifdef __SSE2__ |
| 22 | #include <x86intrin.h> |
| 23 | #endif |
| 24 | #ifndef SIMD_BUILD |
| 25 | #error "please define SIMD_BUILD if you want this gpu_neon C simd implementation" |
| 26 | #endif |
| 27 | |
// 128-bit ("q") vector types based on the gcc/clang generic vector
// extension; plain C operators (+ - << == ...) act element-wise on these.
typedef u8 gvu8 __attribute__((vector_size(16)));
typedef u16 gvu16 __attribute__((vector_size(16)));
typedef u32 gvu32 __attribute__((vector_size(16)));
typedef u64 gvu64 __attribute__((vector_size(16)));
typedef s8 gvs8 __attribute__((vector_size(16)));
typedef s16 gvs16 __attribute__((vector_size(16)));
typedef s32 gvs32 __attribute__((vector_size(16)));
typedef s64 gvs64 __attribute__((vector_size(16)));

// 64-bit "half" vector types (NEON d-register sized).
typedef u8 gvhu8 __attribute__((vector_size(8)));
typedef u16 gvhu16 __attribute__((vector_size(8)));
typedef u32 gvhu32 __attribute__((vector_size(8)));
typedef u64 gvhu64 __attribute__((vector_size(8)));
typedef s8 gvhs8 __attribute__((vector_size(8)));
typedef s16 gvhs16 __attribute__((vector_size(8)));
typedef s32 gvhs32 __attribute__((vector_size(8)));
typedef s64 gvhs64 __attribute__((vector_size(8)));
| 45 | |
// Generic 128-bit vector register: a union of all element-type views
// (plus the native __m128i view on x86) so each gv* macro can pick the
// view that matches its element width.
typedef union
{
  gvu8 u8;
  gvu16 u16;
  gvu32 u32;
  gvu64 u64;
  gvs8 s8;
  gvs16 s16;
  gvs32 s32;
  gvs64 s64;
#ifdef __SSE2__
  __m128i m;
#endif
  // this may be tempting, but it causes gcc to do lots of stack spills
  //gvhreg h[2];
} gvreg;

// aligned(1) variants used by load/store fallbacks that go through a
// plain pointer dereference, so unaligned addresses stay legal
typedef gvreg gvreg_ua __attribute__((aligned(1)));
typedef uint64_t uint64_t_ua __attribute__((aligned(1)));
typedef gvu8 gvu8_ua __attribute__((aligned(1)));
typedef gvu16 gvu16_ua __attribute__((aligned(1)));
| 67 | |
| 68 | #if defined(__ARM_NEON) || defined(__ARM_NEON__) |
| 69 | #include <arm_neon.h> |
| 70 | |
// 64-bit half-register for the NEON backend, again a union of views.
// The commented-out u64/s64 alternatives were tried and rejected; see
// the notes at the end of this section about gcc putting such types in
// general-purpose registers on arm32.
typedef union
{
  gvhu8 u8;
  gvhu16 u16;
  gvhu32 u32;
  gvhu64 u64;
  //u64 u64;
  //uint64x1_t u64;
  gvhs8 s8;
  gvhs16 s16;
  gvhs32 s32;
  gvhs64 s64;
  //s64 s64;
  //int64x1_t s64;
} gvhreg;
| 86 | |
| 87 | #define gvaddhn_u32(d, a, b) d.u16 = vaddhn_u32(a.u32, b.u32) |
| 88 | #define gvaddw_s32(d, a, b) d.s64 = vaddw_s32(a.s64, b.s32) |
| 89 | #define gvabsq_s32(d, s) d.s32 = vabsq_s32(s.s32) |
| 90 | #define gvbic_n_u16(d, n) d.u16 = vbic_u16(d.u16, vmov_n_u16(n)) |
| 91 | #define gvbifq(d, a, b) d.u8 = vbslq_u8(b.u8, d.u8, a.u8) |
| 92 | #define gvbit(d, a, b) d.u8 = vbsl_u8(b.u8, a.u8, d.u8) |
| 93 | #define gvceqq_u16(d, a, b) d.u16 = vceqq_u16(a.u16, b.u16) |
| 94 | #define gvcgt_s16(d, a, b) d.u16 = vcgt_s16(a.s16, b.s16) |
| 95 | #define gvclt_s16(d, a, b) d.u16 = vclt_s16(a.s16, b.s16) |
| 96 | #define gvcreate_s32(d, a, b) d.s32 = vcreate_s32((u32)(a) | ((u64)(b) << 32)) |
| 97 | #define gvcreate_u32(d, a, b) d.u32 = vcreate_u32((u32)(a) | ((u64)(b) << 32)) |
| 98 | #define gvcreate_s64(d, s) d.s64 = (gvhs64)vcreate_s64(s) |
| 99 | #define gvcreate_u64(d, s) d.u64 = (gvhu64)vcreate_u64(s) |
| 100 | #define gvcombine_u16(d, l, h) d.u16 = vcombine_u16(l.u16, h.u16) |
| 101 | #define gvcombine_u32(d, l, h) d.u32 = vcombine_u32(l.u32, h.u32) |
| 102 | #define gvcombine_s64(d, l, h) d.s64 = vcombine_s64((int64x1_t)l.s64, (int64x1_t)h.s64) |
| 103 | #define gvdup_l_u8(d, s, l) d.u8 = vdup_lane_u8(s.u8, l) |
| 104 | #define gvdup_l_u16(d, s, l) d.u16 = vdup_lane_u16(s.u16, l) |
| 105 | #define gvdup_l_u32(d, s, l) d.u32 = vdup_lane_u32(s.u32, l) |
| 106 | #define gvdupq_l_s64(d, s, l) d.s64 = vdupq_lane_s64((int64x1_t)s.s64, l) |
| 107 | #define gvdupq_l_u32(d, s, l) d.u32 = vdupq_lane_u32(s.u32, l) |
| 108 | #define gvdup_n_s64(d, n) d.s64 = vdup_n_s64(n) |
| 109 | #define gvdup_n_u8(d, n) d.u8 = vdup_n_u8(n) |
| 110 | #define gvdup_n_u16(d, n) d.u16 = vdup_n_u16(n) |
| 111 | #define gvdup_n_u32(d, n) d.u32 = vdup_n_u32(n) |
| 112 | #define gvdupq_n_u16(d, n) d.u16 = vdupq_n_u16(n) |
| 113 | #define gvdupq_n_u32(d, n) d.u32 = vdupq_n_u32(n) |
| 114 | #define gvdupq_n_s64(d, n) d.s64 = vdupq_n_s64(n) |
| 115 | #define gvhaddq_u16(d, a, b) d.u16 = vhaddq_u16(a.u16, b.u16) |
| 116 | #define gvmax_s16(d, a, b) d.s16 = vmax_s16(a.s16, b.s16) |
| 117 | #define gvmin_s16(d, a, b) d.s16 = vmin_s16(a.s16, b.s16) |
| 118 | #define gvminq_u8(d, a, b) d.u8 = vminq_u8(a.u8, b.u8) |
| 119 | #define gvminq_u16(d, a, b) d.u16 = vminq_u16(a.u16, b.u16) |
| 120 | #define gvmla_s32(d, a, b) d.s32 = vmla_s32(d.s32, a.s32, b.s32) |
| 121 | #define gvmla_u32(d, a, b) d.u32 = vmla_u32(d.u32, a.u32, b.u32) |
| 122 | #define gvmlaq_s32(d, a, b) d.s32 = vmlaq_s32(d.s32, a.s32, b.s32) |
| 123 | #define gvmlaq_u32(d, a, b) d.u32 = vmlaq_u32(d.u32, a.u32, b.u32) |
| 124 | #define gvmlal_s32(d, a, b) d.s64 = vmlal_s32(d.s64, a.s32, b.s32) |
| 125 | #define gvmlal_u8(d, a, b) d.u16 = vmlal_u8(d.u16, a.u8, b.u8) |
| 126 | #define gvmlsq_s32(d, a, b) d.s32 = vmlsq_s32(d.s32, a.s32, b.s32) |
| 127 | #define gvmlsq_l_s32(d, a, b, l) d.s32 = vmlsq_lane_s32(d.s32, a.s32, b.s32, l) |
| 128 | #define gvmov_l_s32(d, s, l) d.s32 = vset_lane_s32(s, d.s32, l) |
| 129 | #define gvmov_l_u32(d, s, l) d.u32 = vset_lane_u32(s, d.u32, l) |
| 130 | #define gvmovl_u8(d, s) d.u16 = vmovl_u8(s.u8) |
| 131 | #define gvmovl_s32(d, s) d.s64 = vmovl_s32(s.s32) |
| 132 | #define gvmovn_u16(d, s) d.u8 = vmovn_u16(s.u16) |
| 133 | #define gvmovn_u32(d, s) d.u16 = vmovn_u32(s.u32) |
| 134 | #define gvmovn_u64(d, s) d.u32 = vmovn_u64(s.u64) |
| 135 | #define gvmul_s32(d, a, b) d.s32 = vmul_s32(a.s32, b.s32) |
| 136 | #define gvmull_s16(d, a, b) d.s32 = vmull_s16(a.s16, b.s16) |
| 137 | #define gvmull_s32(d, a, b) d.s64 = vmull_s32(a.s32, b.s32) |
| 138 | #define gvmull_u8(d, a, b) d.u16 = vmull_u8(a.u8, b.u8) |
| 139 | #define gvmull_l_u32(d, a, b, l) d.u64 = vmull_lane_u32(a.u32, b.u32, l) |
| 140 | #define gvmlsl_s16(d, a, b) d.s32 = vmlsl_s16(d.s32, a.s16, b.s16) |
| 141 | #define gvneg_s32(d, s) d.s32 = vneg_s32(s.s32) |
| 142 | #define gvqadd_u8(d, a, b) d.u8 = vqadd_u8(a.u8, b.u8) |
| 143 | #define gvqsub_u8(d, a, b) d.u8 = vqsub_u8(a.u8, b.u8) |
| 144 | #define gvshl_u16(d, a, b) d.u16 = vshl_u16(a.u16, b.s16) |
| 145 | #define gvshlq_u64(d, a, b) d.u64 = vshlq_u64(a.u64, b.s64) |
| 146 | #define gvshrq_n_s16(d, s, n) d.s16 = vshrq_n_s16(s.s16, n) |
| 147 | #define gvshrq_n_u16(d, s, n) d.u16 = vshrq_n_u16(s.u16, n) |
| 148 | #define gvshl_n_u32(d, s, n) d.u32 = vshl_n_u32(s.u32, n) |
| 149 | #define gvshlq_n_u16(d, s, n) d.u16 = vshlq_n_u16(s.u16, n) |
| 150 | #define gvshlq_n_u32(d, s, n) d.u32 = vshlq_n_u32(s.u32, n) |
| 151 | #define gvshll_n_s8(d, s, n) d.s16 = vshll_n_s8(s.s8, n) |
| 152 | #define gvshll_n_u8(d, s, n) d.u16 = vshll_n_u8(s.u8, n) |
| 153 | #define gvshll_n_u16(d, s, n) d.u32 = vshll_n_u16(s.u16, n) |
| 154 | #define gvshr_n_u8(d, s, n) d.u8 = vshr_n_u8(s.u8, n) |
| 155 | #define gvshr_n_u16(d, s, n) d.u16 = vshr_n_u16(s.u16, n) |
| 156 | #define gvshr_n_u32(d, s, n) d.u32 = vshr_n_u32(s.u32, n) |
| 157 | #define gvshr_n_u64(d, s, n) d.u64 = (gvhu64)vshr_n_u64((uint64x1_t)s.u64, n) |
| 158 | #define gvshrn_n_u16(d, s, n) d.u8 = vshrn_n_u16(s.u16, n) |
| 159 | #define gvshrn_n_u32(d, s, n) d.u16 = vshrn_n_u32(s.u32, n) |
| 160 | #define gvsli_n_u8(d, s, n) d.u8 = vsli_n_u8(d.u8, s.u8, n) |
| 161 | #define gvsri_n_u8(d, s, n) d.u8 = vsri_n_u8(d.u8, s.u8, n) |
| 162 | #define gvtstq_u16(d, a, b) d.u16 = vtstq_u16(a.u16, b.u16) |
| 163 | #define gvqshrun_n_s16(d, s, n) d.u8 = vqshrun_n_s16(s.s16, n) |
| 164 | #define gvqsubq_u8(d, a, b) d.u8 = vqsubq_u8(a.u8, b.u8) |
| 165 | #define gvqsubq_u16(d, a, b) d.u16 = vqsubq_u16(a.u16, b.u16) |
| 166 | |
| 167 | #define gvmovn_top_u64(d, s) d.u32 = vshrn_n_u64(s.u64, 32) |
| 168 | |
| 169 | #define gvget_lo(d, s) d.u16 = vget_low_u16(s.u16) |
| 170 | #define gvget_hi(d, s) d.u16 = vget_high_u16(s.u16) |
| 171 | #define gvlo(s) ({gvhreg t_; gvget_lo(t_, s); t_;}) |
| 172 | #define gvhi(s) ({gvhreg t_; gvget_hi(t_, s); t_;}) |
| 173 | |
| 174 | #define gvset_lo(d, s) d.u16 = vcombine_u16(s.u16, gvhi(d).u16) |
| 175 | #define gvset_hi(d, s) d.u16 = vcombine_u16(gvlo(d).u16, s.u16) |
| 176 | |
// table lookup: d.u8[i] = a.u8[b.u8[i]] over a's full 16 bytes
// (a's halves feed vtbl2's two-register table form)
#define gvtbl2_u8(d, a, b) { \
  uint8x8x2_t v_; \
  v_.val[0] = vget_low_u8(a.u8); v_.val[1] = vget_high_u8(a.u8); \
  d.u8 = vtbl2_u8(v_, b.u8); \
}

// interleave the 8 bytes of a and b into one 16-byte result
#define gvzip_u8(d, a, b) { \
  uint8x8x2_t v_ = vzip_u8(a.u8, b.u8); \
  d.u8 = vcombine_u8(v_.val[0], v_.val[1]); \
}
// interleave s0/s1 halfwords; low halves go to d0, high halves to d1
#define gvzipq_u16(d0, d1, s0, s1) { \
  uint16x8x2_t v_ = vzipq_u16(s0.u16, s1.u16); \
  d0.u16 = v_.val[0]; d1.u16 = v_.val[1]; \
}

// loads: gvld1* = plain load, gvld2* = 2-way deinterleaving load
#define gvld1_u8(d, s) d.u8 = vld1_u8(s)
#define gvld1_u32(d, s) d.u32 = vld1_u32((const u32 *)(s))
#define gvld1q_u8(d, s) d.u8 = vld1q_u8(s)
#define gvld1q_u16(d, s) d.u16 = vld1q_u16(s)
#define gvld1q_u32(d, s) d.u32 = vld1q_u32((const u32 *)(s))
// load 2 bytes and broadcast: v0 = p[0] repeated, v1 = p[1] repeated
#define gvld2_u8_dup(v0, v1, p) { \
  uint8x8x2_t v_ = vld2_dup_u8(p); \
  v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \
}
#define gvld2q_u8(v0, v1, p) { \
  uint8x16x2_t v_ = vld2q_u8(p); \
  v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \
}
| 205 | |
| 206 | #define gvst1_u8(v, p) \ |
| 207 | vst1_u8(p, v.u8) |
| 208 | #define gvst1q_u16(v, p) \ |
| 209 | vst1q_u16(p, v.u16) |
| 210 | #define gvst1q_inc_u32(v, p, i) { \ |
| 211 | vst1q_u32((u32 *)(p), v.u32); \ |
| 212 | p += (i) / sizeof(*p); \ |
| 213 | } |
| 214 | #define gvst2_u8(v0, v1, p) { \ |
| 215 | uint8x8x2_t v_; \ |
| 216 | v_.val[0] = v0.u8; v_.val[1] = v1.u8; \ |
| 217 | vst2_u8(p, v_); \ |
| 218 | } |
| 219 | #define gvst2_u16(v0, v1, p) { \ |
| 220 | uint16x4x2_t v_; \ |
| 221 | v_.val[0] = v0.u16; v_.val[1] = v1.u16; \ |
| 222 | vst2_u16(p, v_); \ |
| 223 | } |
| 224 | #define gvst2q_u8(v0, v1, p) { \ |
| 225 | uint8x16x2_t v_; \ |
| 226 | v_.val[0] = v0.u8; v_.val[1] = v1.u8; \ |
| 227 | vst2q_u8(p, v_); \ |
| 228 | } |
| 229 | #define gvst4_4_inc_u32(v0, v1, v2, v3, p, i) { \ |
| 230 | uint32x2x4_t v_; \ |
| 231 | v_.val[0] = v0.u32; v_.val[1] = v1.u32; v_.val[2] = v2.u32; v_.val[3] = v3.u32; \ |
| 232 | vst4_u32(p, v_); p += (i) / sizeof(*p); \ |
| 233 | } |
| 234 | #define gvst4_pi_u16(v0, v1, v2, v3, p) { \ |
| 235 | uint16x4x4_t v_; \ |
| 236 | v_.val[0] = v0.u16; v_.val[1] = v1.u16; v_.val[2] = v2.u16; v_.val[3] = v3.u16; \ |
| 237 | vst4_u16((u16 *)(p), v_); p += sizeof(v_) / sizeof(*p); \ |
| 238 | } |
| 239 | #define gvst1q_pi_u32(v, p) \ |
| 240 | gvst1q_inc_u32(v, p, sizeof(v)) |
| 241 | // could use vst1q_u32_x2 but that's not always available |
| 242 | #define gvst1q_2_pi_u32(v0, v1, p) { \ |
| 243 | gvst1q_inc_u32(v0, p, sizeof(v0)); \ |
| 244 | gvst1q_inc_u32(v1, p, sizeof(v1)); \ |
| 245 | } |
| 246 | |
| 247 | /* notes: |
| 248 | - gcc > 9: (arm32) int64x1_t type produces ops on gp regs |
| 249 | (also u64 __attribute__((vector_size(8))) :( ) |
| 250 | - gcc <11: (arm32) handles '<vec> == 0' poorly |
| 251 | */ |
| 252 | |
| 253 | #elif defined(__SSE2__) |
| 254 | |
| 255 | // use a full reg and discard the upper half |
| 256 | #define gvhreg gvreg |
| 257 | |
| 258 | #define gv0() _mm_setzero_si128() |
| 259 | |
| 260 | #ifdef __x86_64__ |
| 261 | #define gvcreate_s32(d, a, b) d.m = _mm_cvtsi64_si128((u32)(a) | ((u64)(b) << 32)) |
| 262 | #define gvcreate_s64(d, s) d.m = _mm_cvtsi64_si128(s) |
| 263 | #else |
| 264 | #define gvcreate_s32(d, a, b) d.m = _mm_set_epi32(0, 0, b, a) |
| 265 | #define gvcreate_s64(d, s) d.m = _mm_loadu_si64(&(s)) |
| 266 | #endif |
| 267 | |
// basic SSE2 translations of the NEON-style gv* ops; d/a/b/s are gvreg
// unions and .m is the native __m128i view
#define gvbic_n_u16(d, n) d.m = _mm_andnot_si128(_mm_set1_epi16(n), d.m)
// fixed: previously expanded to the NEON intrinsic vceqq_u16, which does
// not exist in an SSE2 build
#define gvceqq_u16(d, a, b) d.m = _mm_cmpeq_epi16(a.m, b.m)
#define gvcgt_s16(d, a, b) d.m = _mm_cmpgt_epi16(a.m, b.m)
#define gvclt_s16(d, a, b) d.m = _mm_cmpgt_epi16(b.m, a.m)
#define gvcreate_u32 gvcreate_s32
#define gvcreate_u64 gvcreate_s64
#define gvcombine_u16(d, l, h) d.m = _mm_unpacklo_epi64(l.m, h.m)
#define gvcombine_u32 gvcombine_u16
#define gvcombine_s64 gvcombine_u16
// fixed: previously expanded to NEON vdup_lane_u8; broadcast byte lane l
// via element access on the vector view instead
#define gvdup_l_u8(d, s, l) d.m = _mm_set1_epi8(s.u8[l])
#define gvdup_l_u16(d, s, l) d.m = _mm_shufflelo_epi16(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6))
// fixed: previously expanded to NEON vdup_lane_u32; the full-width
// shuffle is fine since a "half" reg only uses the low 64 bits
#define gvdup_l_u32(d, s, l) d.m = _mm_shuffle_epi32(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6))
// NOTE(review): ignores l and always duplicates lane 0 — looks like only
// lane 0 is ever requested by callers; confirm against psx_gpu.c
#define gvdupq_l_s64(d, s, l) d.m = _mm_unpacklo_epi64(s.m, s.m)
#define gvdupq_l_u32(d, s, l) d.m = _mm_shuffle_epi32(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6))
#define gvdup_n_s64(d, n) d.m = _mm_set1_epi64x(n)
#define gvdup_n_u8(d, n) d.m = _mm_set1_epi8(n)
#define gvdup_n_u16(d, n) d.m = _mm_set1_epi16(n)
#define gvdup_n_u32(d, n) d.m = _mm_set1_epi32(n)
#define gvdupq_n_u16(d, n) d.m = _mm_set1_epi16(n)
#define gvdupq_n_u32(d, n) d.m = _mm_set1_epi32(n)
#define gvdupq_n_s64(d, n) d.m = _mm_set1_epi64x(n)
#define gvmax_s16(d, a, b) d.m = _mm_max_epi16(a.m, b.m)
#define gvmin_s16(d, a, b) d.m = _mm_min_epi16(a.m, b.m)
#define gvminq_u8(d, a, b) d.m = _mm_min_epu8(a.m, b.m)
// narrow 2x64 -> 2x32: keep the low (gvmovn) / high (gvmovn_top) 32 bits
// of each 64-bit lane
#define gvmovn_u64(d, s) d.m = _mm_shuffle_epi32(s.m, 0 | (2 << 2))
#define gvmovn_top_u64(d, s) d.m = _mm_shuffle_epi32(s.m, 1 | (3 << 2))
// widening 16x16->32 multiply of the low 4 lanes: low/high product
// halves are computed separately, then interleaved
#define gvmull_s16(d, a, b) { \
  __m128i lo_ = _mm_mullo_epi16(a.m, b.m); \
  __m128i hi_ = _mm_mulhi_epi16(a.m, b.m); \
  d.m = _mm_unpacklo_epi16(lo_, hi_); \
}
// widening 32x32->64 multiply of a's lanes 0,1 by b's lane l
#define gvmull_l_u32(d, a, b, l) { \
  __m128i a_ = _mm_unpacklo_epi32(a.m, a.m); /* lanes 0,1 -> 0,2 */ \
  __m128i b_ = _mm_shuffle_epi32(b.m, (l) | ((l) << 4)); \
  d.m = _mm_mul_epu32(a_, b_); \
}
// widening multiply-subtract: d.s32 -= a.s16 * b.s16 (low 4 lanes)
#define gvmlsl_s16(d, a, b) { \
  gvreg tmp_; \
  gvmull_s16(tmp_, a, b); \
  d.m = _mm_sub_epi32(d.m, tmp_.m); \
}
#define gvqadd_u8(d, a, b) d.m = _mm_adds_epu8(a.m, b.m)
#define gvqsub_u8(d, a, b) d.m = _mm_subs_epu8(a.m, b.m)
#define gvshrq_n_s16(d, s, n) d.m = _mm_srai_epi16(s.m, n)
#define gvshrq_n_u16(d, s, n) d.m = _mm_srli_epi16(s.m, n)
#define gvshrq_n_u32(d, s, n) d.m = _mm_srli_epi32(s.m, n)
#define gvshl_n_u32(d, s, n) d.m = _mm_slli_epi32(s.m, n)
#define gvshlq_n_u16(d, s, n) d.m = _mm_slli_epi16(s.m, n)
#define gvshlq_n_u32(d, s, n) d.m = _mm_slli_epi32(s.m, n)
// widening shift: zero-extend the low 4 u16 lanes to u32, then shift
#define gvshll_n_u16(d, s, n) d.m = _mm_slli_epi32(_mm_unpacklo_epi16(s.m, gv0()), n)
#define gvshr_n_u16(d, s, n) d.m = _mm_srli_epi16(s.m, n)
#define gvshr_n_u32(d, s, n) d.m = _mm_srli_epi32(s.m, n)
#define gvshr_n_u64(d, s, n) d.m = _mm_srli_epi64(s.m, n)
// narrowing shift: arithmetic-shift each s64 lane, keep its low 32 bits
#define gvshrn_n_s64(d, s, n) { \
  gvreg tmp_; \
  gvshrq_n_s64(tmp_, s, n); \
  d.m = _mm_shuffle_epi32(tmp_.m, 0 | (2 << 2)); \
}
// saturating shift-right + unsigned narrow: (s16 >> n) clamped to 0..255
#define gvqshrun_n_s16(d, s, n) { \
  __m128i t_ = _mm_srai_epi16(s.m, n); \
  d.m = _mm_packus_epi16(t_, t_); \
}
#define gvqsubq_u8(d, a, b) d.m = _mm_subs_epu8(a.m, b.m)
#define gvqsubq_u16(d, a, b) d.m = _mm_subs_epu16(a.m, b.m)
| 332 | |
| 333 | #ifdef __SSSE3__ |
| 334 | #define gvabsq_s32(d, s) d.m = _mm_abs_epi32(s.m) |
| 335 | #define gvtbl2_u8(d, a, b) d.m = _mm_shuffle_epi8(a.m, b.m) |
| 336 | #else |
| 337 | // must supply these here or else gcc will produce something terrible with __builtin_shuffle |
| 338 | #define gvmovn_u16(d, s) { \ |
| 339 | __m128i t2_ = _mm_and_si128(s.m, _mm_set1_epi16(0xff)); \ |
| 340 | d.m = _mm_packus_epi16(t2_, t2_); \ |
| 341 | } |
| 342 | #define gvmovn_u32(d, s) { \ |
| 343 | __m128i t2_; \ |
| 344 | t2_ = _mm_shufflelo_epi16(s.m, (0 << 0) | (2 << 2)); \ |
| 345 | t2_ = _mm_shufflehi_epi16(t2_, (0 << 0) | (2 << 2)); \ |
| 346 | d.m = _mm_shuffle_epi32(t2_, (0 << 0) | (2 << 2)); \ |
| 347 | } |
| 348 | #define gvmovn_top_u32(d, s) { \ |
| 349 | __m128i t2_; \ |
| 350 | t2_ = _mm_shufflelo_epi16(s.m, (1 << 0) | (3 << 2)); \ |
| 351 | t2_ = _mm_shufflehi_epi16(t2_, (1 << 0) | (3 << 2)); \ |
| 352 | d.m = _mm_shuffle_epi32(t2_, (0 << 0) | (2 << 2)); \ |
| 353 | } |
| 354 | #endif // !__SSSE3__ |
| 355 | #ifdef __SSE4_1__ |
| 356 | #define gvminq_u16(d, a, b) d.m = _mm_min_epu16(a.m, b.m) |
| 357 | #define gvmovl_u8(d, s) d.m = _mm_cvtepu8_epi16(s.m) |
| 358 | #define gvmovl_s8(d, s) d.m = _mm_cvtepi8_epi16(s.m) |
| 359 | #define gvmovl_s32(d, s) d.m = _mm_cvtepi32_epi64(s.m) |
| 360 | #define gvmull_s32(d, a, b) { \ |
| 361 | __m128i a_ = _mm_unpacklo_epi32(a.m, a.m); /* lanes 0,1 -> 0,2 */ \ |
| 362 | __m128i b_ = _mm_unpacklo_epi32(b.m, b.m); \ |
| 363 | d.m = _mm_mul_epi32(a_, b_); \ |
| 364 | } |
| 365 | #else |
| 366 | #define gvmovl_u8(d, s) d.m = _mm_unpacklo_epi8(s.m, gv0()) |
| 367 | #define gvmovl_s8(d, s) d.m = _mm_unpacklo_epi8(s.m, _mm_cmpgt_epi8(gv0(), s.m)) |
| 368 | #define gvmovl_s32(d, s) d.m = _mm_unpacklo_epi32(s.m, _mm_srai_epi32(s.m, 31)) |
| 369 | #endif // !__SSE4_1__ |
| 370 | #ifndef __AVX2__ |
| 371 | #define gvshlq_u64(d, a, b) { \ |
| 372 | gvreg t1_, t2_; \ |
| 373 | t1_.m = _mm_sll_epi64(a.m, b.m); \ |
| 374 | t2_.m = _mm_sll_epi64(a.m, _mm_shuffle_epi32(b.m, (2 << 0) | (3 << 2))); \ |
| 375 | d.u64 = (gvu64){ t1_.u64[0], t2_.u64[1] }; \ |
| 376 | } |
| 377 | #endif // __AVX2__ |
| 378 | |
| 379 | #define gvlo(s) s |
| 380 | #define gvhi(s) ((gvreg)_mm_shuffle_epi32(s.m, (2 << 0) | (3 << 2))) |
| 381 | #define gvget_lo(d, s) d = gvlo(s) |
| 382 | #define gvget_hi(d, s) d = gvhi(s) |
| 383 | |
| 384 | #define gvset_lo(d, s) d.m = _mm_unpacklo_epi64(s.m, gvhi(d).m) |
| 385 | #define gvset_hi(d, s) d.m = _mm_unpacklo_epi64(d.m, s.m) |
| 386 | |
| 387 | #define gvld1_u8(d, s) d.m = _mm_loadu_si64(s) |
| 388 | #define gvld1_u32 gvld1_u8 |
| 389 | #define gvld1q_u8(d, s) d.m = _mm_loadu_si128((__m128i *)(s)) |
| 390 | #define gvld1q_u16 gvld1q_u8 |
| 391 | #define gvld1q_u32 gvld1q_u8 |
| 392 | |
| 393 | #define gvst4_4_inc_u32(v0, v1, v2, v3, p, i) { \ |
| 394 | __m128i t0 = _mm_unpacklo_epi32(v0.m, v1.m); \ |
| 395 | __m128i t1 = _mm_unpacklo_epi32(v2.m, v3.m); \ |
| 396 | _mm_storeu_si128(((__m128i *)(p)) + 0, _mm_unpacklo_epi64(t0, t1)); \ |
| 397 | _mm_storeu_si128(((__m128i *)(p)) + 1, _mm_unpackhi_epi64(t0, t1)); \ |
| 398 | p += (i) / sizeof(*p); \ |
| 399 | } |
| 400 | #define gvst4_pi_u16(v0, v1, v2, v3, p) { \ |
| 401 | __m128i t0 = _mm_unpacklo_epi16(v0.m, v1.m); \ |
| 402 | __m128i t1 = _mm_unpacklo_epi16(v2.m, v3.m); \ |
| 403 | _mm_storeu_si128(((__m128i *)(p)) + 0, _mm_unpacklo_epi32(t0, t1)); \ |
| 404 | _mm_storeu_si128(((__m128i *)(p)) + 1, _mm_unpackhi_epi32(t0, t1)); \ |
| 405 | p += sizeof(t0) * 2 / sizeof(*p); \ |
| 406 | } |
| 407 | |
| 408 | #else |
| 409 | #error "arch not supported or SIMD support was not enabled by your compiler" |
| 410 | #endif |
| 411 | |
| 412 | // the below have intrinsics but they evaluate to basic operations on both gcc and clang |
| 413 | #define gvadd_s64(d, a, b) d.s64 = a.s64 + b.s64 |
| 414 | #define gvadd_u8(d, a, b) d.u8 = a.u8 + b.u8 |
| 415 | #define gvadd_u16(d, a, b) d.u16 = a.u16 + b.u16 |
| 416 | #define gvadd_u32(d, a, b) d.u32 = a.u32 + b.u32 |
| 417 | #define gvaddq_s64 gvadd_s64 |
| 418 | #define gvaddq_u16 gvadd_u16 |
| 419 | #define gvaddq_u32 gvadd_u32 |
| 420 | #define gvand(d, a, b) d.u32 = a.u32 & b.u32 |
| 421 | #define gvand_n_u32(d, n) d.u32 &= n |
| 422 | #define gvbic(d, a, b) d.u32 = a.u32 & ~b.u32 |
| 423 | #define gvbicq gvbic |
| 424 | #define gveor(d, a, b) d.u32 = a.u32 ^ b.u32 |
| 425 | #define gveorq gveor |
| 426 | #define gvceqz_u16(d, s) d.u16 = s.u16 == 0 |
| 427 | #define gvceqzq_u16 gvceqz_u16 |
| 428 | #define gvcltz_s16(d, s) d.s16 = s.s16 < 0 |
| 429 | #define gvcltzq_s16 gvcltz_s16 |
| 430 | #define gvsub_u16(d, a, b) d.u16 = a.u16 - b.u16 |
| 431 | #define gvsub_u32(d, a, b) d.u32 = a.u32 - b.u32 |
| 432 | #define gvsubq_u16 gvsub_u16 |
| 433 | #define gvsubq_u32 gvsub_u32 |
| 434 | #define gvorr(d, a, b) d.u32 = a.u32 | b.u32 |
| 435 | #define gvorrq gvorr |
| 436 | #define gvorr_n_u16(d, n) d.u16 |= n |
| 437 | |
| 438 | // fallbacks |
| 439 | #if 1 |
| 440 | |
| 441 | #ifndef gvaddhn_u32 |
| 442 | #define gvaddhn_u32(d, a, b) { \ |
| 443 | gvreg tmp1_ = { .u32 = a.u32 + b.u32 }; \ |
| 444 | gvmovn_top_u32(d, tmp1_); \ |
| 445 | } |
| 446 | #endif |
| 447 | #ifndef gvabsq_s32 |
| 448 | #define gvabsq_s32(d, s) { \ |
| 449 | gvreg tmp1_ = { .s32 = (gvs32){} - s.s32 }; \ |
| 450 | gvreg mask_ = { .s32 = s.s32 >> 31 }; \ |
| 451 | gvbslq_(d, mask_, tmp1_, s); \ |
| 452 | } |
| 453 | #endif |
| 454 | #ifndef gvbit |
| 455 | #define gvbslq_(d, s, a, b) d.u32 = (a.u32 & s.u32) | (b.u32 & ~s.u32) |
| 456 | #define gvbifq(d, a, b) gvbslq_(d, b, d, a) |
| 457 | #define gvbit(d, a, b) gvbslq_(d, b, a, d) |
| 458 | #endif |
| 459 | #ifndef gvaddw_s32 |
| 460 | #define gvaddw_s32(d, a, b) {gvreg t_; gvmovl_s32(t_, b); d.s64 += t_.s64;} |
| 461 | #endif |
| 462 | #ifndef gvhaddq_u16 |
| 463 | // can do this because the caller needs the msb clear |
| 464 | #define gvhaddq_u16(d, a, b) d.u16 = (a.u16 + b.u16) >> 1 |
| 465 | #endif |
| 466 | #ifndef gvminq_u16 |
| 467 | #define gvminq_u16(d, a, b) { \ |
| 468 | gvu16 t_ = a.u16 < b.u16; \ |
| 469 | d.u16 = (a.u16 & t_) | (b.u16 & ~t_); \ |
| 470 | } |
| 471 | #endif |
| 472 | #ifndef gvmlsq_s32 |
| 473 | #define gvmlsq_s32(d, a, b) d.s32 -= a.s32 * b.s32 |
| 474 | #endif |
| 475 | #ifndef gvmlsq_l_s32 |
| 476 | #define gvmlsq_l_s32(d, a, b, l){gvreg t_; gvdupq_l_u32(t_, b, l); d.s32 -= a.s32 * t_.s32;} |
| 477 | #endif |
| 478 | #ifndef gvmla_s32 |
| 479 | #define gvmla_s32(d, a, b) d.s32 += a.s32 * b.s32 |
| 480 | #endif |
| 481 | #ifndef gvmla_u32 |
| 482 | #define gvmla_u32 gvmla_s32 |
| 483 | #endif |
| 484 | #ifndef gvmlaq_s32 |
| 485 | #define gvmlaq_s32(d, a, b) d.s32 += a.s32 * b.s32 |
| 486 | #endif |
| 487 | #ifndef gvmlaq_u32 |
| 488 | #define gvmlaq_u32 gvmlaq_s32 |
| 489 | #endif |
| 490 | #ifndef gvmlal_u8 |
| 491 | #define gvmlal_u8(d, a, b) {gvreg t_; gvmull_u8(t_, a, b); d.u16 += t_.u16;} |
| 492 | #endif |
| 493 | #ifndef gvmlal_s32 |
| 494 | #define gvmlal_s32(d, a, b) {gvreg t_; gvmull_s32(t_, a, b); d.s64 += t_.s64;} |
| 495 | #endif |
| 496 | #ifndef gvmov_l_s32 |
| 497 | #define gvmov_l_s32(d, s, l) d.s32[l] = s |
| 498 | #endif |
| 499 | #ifndef gvmov_l_u32 |
| 500 | #define gvmov_l_u32(d, s, l) d.u32[l] = s |
| 501 | #endif |
| 502 | #ifndef gvmul_s32 |
| 503 | #define gvmul_s32(d, a, b) d.s32 = a.s32 * b.s32 |
| 504 | #endif |
| 505 | #ifndef gvmull_u8 |
| 506 | #define gvmull_u8(d, a, b) { \ |
| 507 | gvreg t1_, t2_; \ |
| 508 | gvmovl_u8(t1_, a); \ |
| 509 | gvmovl_u8(t2_, b); \ |
| 510 | d.u16 = t1_.u16 * t2_.u16; \ |
| 511 | } |
| 512 | #endif |
| 513 | #ifndef gvmull_s32 |
| 514 | // note: compilers tend to use int regs here |
| 515 | #define gvmull_s32(d, a, b) { \ |
| 516 | d.s64[0] = (s64)a.s32[0] * b.s32[0]; \ |
| 517 | d.s64[1] = (s64)a.s32[1] * b.s32[1]; \ |
| 518 | } |
| 519 | #endif |
| 520 | #ifndef gvneg_s32 |
| 521 | #define gvneg_s32(d, s) d.s32 = -s.s32 |
| 522 | #endif |
| 523 | // x86 note: needs _mm_sllv_epi16 (avx512), else this sucks terribly |
| 524 | #ifndef gvshl_u16 |
| 525 | #define gvshl_u16(d, a, b) d.u16 = a.u16 << b.u16 |
| 526 | #endif |
| 527 | // x86 note: needs _mm_sllv_* (avx2) |
| 528 | #ifndef gvshlq_u64 |
| 529 | #define gvshlq_u64(d, a, b) d.u64 = a.u64 << b.u64 |
| 530 | #endif |
| 531 | #ifndef gvshll_n_s8 |
| 532 | #define gvshll_n_s8(d, s, n) {gvreg t_; gvmovl_s8(t_, s); gvshlq_n_u16(d, t_, n);} |
| 533 | #endif |
| 534 | #ifndef gvshll_n_u8 |
| 535 | #define gvshll_n_u8(d, s, n) {gvreg t_; gvmovl_u8(t_, s); gvshlq_n_u16(d, t_, n);} |
| 536 | #endif |
| 537 | #ifndef gvshr_n_u8 |
| 538 | #define gvshr_n_u8(d, s, n) d.u8 = s.u8 >> (n) |
| 539 | #endif |
| 540 | #ifndef gvshrq_n_s64 |
| 541 | #define gvshrq_n_s64(d, s, n) d.s64 = s.s64 >> (n) |
| 542 | #endif |
| 543 | #ifndef gvshrn_n_u16 |
| 544 | #define gvshrn_n_u16(d, s, n) {gvreg t_; gvshrq_n_u16(t_, s, n); gvmovn_u16(d, t_);} |
| 545 | #endif |
| 546 | #ifndef gvshrn_n_u32 |
| 547 | #define gvshrn_n_u32(d, s, n) {gvreg t_; gvshrq_n_u32(t_, s, n); gvmovn_u32(d, t_);} |
| 548 | #endif |
| 549 | #ifndef gvsli_n_u8 |
| 550 | #define gvsli_n_u8(d, s, n) d.u8 = (s.u8 << (n)) | (d.u8 & ((1u << (n)) - 1u)) |
| 551 | #endif |
| 552 | #ifndef gvsri_n_u8 |
| 553 | #define gvsri_n_u8(d, s, n) d.u8 = (s.u8 >> (n)) | (d.u8 & ((0xff00u >> (n)) & 0xffu)) |
| 554 | #endif |
| 555 | #ifndef gvtstq_u16 |
| 556 | #define gvtstq_u16(d, a, b) d.u16 = (a.u16 & b.u16) != 0 |
| 557 | #endif |
| 558 | |
| 559 | #ifndef gvld2_u8_dup |
| 560 | #define gvld2_u8_dup(v0, v1, p) { \ |
| 561 | gvdup_n_u8(v0, ((const u8 *)(p))[0]); \ |
| 562 | gvdup_n_u8(v1, ((const u8 *)(p))[1]); \ |
| 563 | } |
| 564 | #endif |
| 565 | #ifndef gvst1_u8 |
| 566 | #define gvst1_u8(v, p) *(uint64_t_ua *)(p) = v.u64[0] |
| 567 | #endif |
| 568 | #ifndef gvst1q_u16 |
| 569 | #define gvst1q_u16(v, p) *(gvreg_ua *)(p) = v |
| 570 | #endif |
| 571 | #ifndef gvst1q_inc_u32 |
| 572 | #define gvst1q_inc_u32(v, p, i) {*(gvreg_ua *)(p) = v; p += (i) / sizeof(*p);} |
| 573 | #endif |
| 574 | #ifndef gvst1q_pi_u32 |
| 575 | #define gvst1q_pi_u32(v, p) gvst1q_inc_u32(v, p, sizeof(v)) |
| 576 | #endif |
| 577 | #ifndef gvst1q_2_pi_u32 |
| 578 | #define gvst1q_2_pi_u32(v0, v1, p) { \ |
| 579 | gvst1q_inc_u32(v0, p, sizeof(v0)); \ |
| 580 | gvst1q_inc_u32(v1, p, sizeof(v1)); \ |
| 581 | } |
| 582 | #endif |
| 583 | #ifndef gvst2_u8 |
| 584 | #define gvst2_u8(v0, v1, p) {gvreg t_; gvzip_u8(t_, v0, v1); *(gvu8_ua *)(p) = t_.u8;} |
| 585 | #endif |
| 586 | #ifndef gvst2_u16 |
| 587 | #define gvst2_u16(v0, v1, p) {gvreg t_; gvzip_u16(t_, v0, v1); *(gvu16_ua *)(p) = t_.u16;} |
| 588 | #endif |
| 589 | |
| 590 | // note: these shuffles assume sizeof(gvhreg) == 16 && sizeof(gvreg) == 16 |
| 591 | #ifndef __has_builtin |
| 592 | #define __has_builtin(x) 0 |
| 593 | #endif |
| 594 | |
| 595 | // prefer __builtin_shuffle on gcc as it handles -1 poorly |
| 596 | #if __has_builtin(__builtin_shufflevector) && !__has_builtin(__builtin_shuffle) |
| 597 | |
| 598 | #ifndef gvld2q_u8 |
| 599 | #define gvld2q_u8(v0, v1, p) { \ |
| 600 | gvu8 v0_ = ((gvu8_ua *)(p))[0]; \ |
| 601 | gvu8 v1_ = ((gvu8_ua *)(p))[1]; \ |
| 602 | v0.u8 = __builtin_shufflevector(v0_, v1_, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30); \ |
| 603 | v1.u8 = __builtin_shufflevector(v0_, v1_, 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31); \ |
| 604 | } |
| 605 | #endif |
| 606 | #ifndef gvmovn_u16 |
| 607 | #define gvmovn_u16(d, s) \ |
| 608 | d.u8 = __builtin_shufflevector(s.u8, s.u8, 0,2,4,6,8,10,12,14,-1,-1,-1,-1,-1,-1,-1,-1) |
| 609 | #endif |
| 610 | #ifndef gvmovn_u32 |
| 611 | #define gvmovn_u32(d, s) \ |
| 612 | d.u16 = __builtin_shufflevector(s.u16, s.u16, 0,2,4,6,-1,-1,-1,-1) |
| 613 | #endif |
| 614 | #ifndef gvmovn_top_u32 |
| 615 | #define gvmovn_top_u32(d, s) \ |
| 616 | d.u16 = __builtin_shufflevector(s.u16, s.u16, 1,3,5,7,-1,-1,-1,-1) |
| 617 | #endif |
| 618 | #ifndef gvzip_u8 |
| 619 | #define gvzip_u8(d, a, b) \ |
| 620 | d.u8 = __builtin_shufflevector(a.u8, b.u8, 0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23) |
| 621 | #endif |
| 622 | #ifndef gvzip_u16 |
| 623 | #define gvzip_u16(d, a, b) \ |
| 624 | d.u16 = __builtin_shufflevector(a.u16, b.u16, 0,8,1,9,2,10,3,11) |
| 625 | #endif |
| 626 | #ifndef gvzipq_u16 |
| 627 | #define gvzipq_u16(d0, d1, s0, s1) { \ |
| 628 | gvu16 t_ = __builtin_shufflevector(s0.u16, s1.u16, 0, 8, 1, 9, 2, 10, 3, 11); \ |
| 629 | d1.u16 = __builtin_shufflevector(s0.u16, s1.u16, 4,12, 5,13, 6, 14, 7, 15); \ |
| 630 | d0.u16 = t_; \ |
| 631 | } |
| 632 | #endif |
| 633 | |
| 634 | #else // !__has_builtin(__builtin_shufflevector) |
| 635 | |
| 636 | #ifndef gvld2q_u8 |
| 637 | #define gvld2q_u8(v0, v1, p) { \ |
| 638 | gvu8 v0_ = ((gvu8_ua *)(p))[0]; \ |
| 639 | gvu8 v1_ = ((gvu8_ua *)(p))[1]; \ |
| 640 | v0.u8 = __builtin_shuffle(v0_, v1_, (gvu8){0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30}); \ |
| 641 | v1.u8 = __builtin_shuffle(v0_, v1_, (gvu8){1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31}); \ |
| 642 | } |
| 643 | #endif |
| 644 | #ifndef gvmovn_u16 |
| 645 | #define gvmovn_u16(d, s) \ |
| 646 | d.u8 = __builtin_shuffle(s.u8, (gvu8){0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14}) |
| 647 | #endif |
| 648 | #ifndef gvmovn_u32 |
| 649 | #define gvmovn_u32(d, s) \ |
| 650 | d.u16 = __builtin_shuffle(s.u16, (gvu16){0,2,4,6,0,2,4,6}) |
| 651 | #endif |
| 652 | #ifndef gvmovn_top_u32 |
| 653 | #define gvmovn_top_u32(d, s) \ |
| 654 | d.u16 = __builtin_shuffle(s.u16, (gvu16){1,3,5,7,1,3,5,7}) |
| 655 | #endif |
| 656 | #ifndef gvtbl2_u8 |
| 657 | #define gvtbl2_u8(d, a, b) d.u8 = __builtin_shuffle(a.u8, b.u8) |
| 658 | #endif |
| 659 | #ifndef gvzip_u8 |
| 660 | #define gvzip_u8(d, a, b) \ |
| 661 | d.u8 = __builtin_shuffle(a.u8, b.u8, (gvu8){0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23}) |
| 662 | #endif |
| 663 | #ifndef gvzip_u16 |
| 664 | #define gvzip_u16(d, a, b) \ |
| 665 | d.u16 = __builtin_shuffle(a.u16, b.u16, (gvu16){0,8,1,9,2,10,3,11}) |
| 666 | #endif |
| 667 | #ifndef gvzipq_u16 |
| 668 | #define gvzipq_u16(d0, d1, s0, s1) { \ |
| 669 | gvu16 t_ = __builtin_shuffle(s0.u16, s1.u16, (gvu16){0, 8, 1, 9, 2, 10, 3, 11}); \ |
| 670 | d1.u16 = __builtin_shuffle(s0.u16, s1.u16, (gvu16){4,12, 5,13, 6, 14, 7, 15}); \ |
| 671 | d0.u16 = t_; \ |
| 672 | } |
| 673 | #endif |
| 674 | |
| 675 | #endif // __builtin_shufflevector || __builtin_shuffle |
| 676 | |
| 677 | #ifndef gvtbl2_u8 |
| 678 | #define gvtbl2_u8(d, a, b) { \ |
| 679 | int i_; \ |
| 680 | for (i_ = 0; i_ < 16; i_++) \ |
| 681 | d.u8[i_] = a.u8[b.u8[i_]]; \ |
| 682 | } |
| 683 | #endif |
| 684 | |
| 685 | #endif // fallbacks |
| 686 | |
#if defined(__arm__)

// ARMv6 DSP ops on 32-bit gp registers:
//   ssub16:  per-halfword subtract
//   smusdx:  cross multiply-subtract, a.lo*b.hi - a.hi*b.lo
#define gssub16(d, a, b) asm("ssub16 %0,%1,%2" : "=r"(d) : "r"(a), "r"(b))
#define gsmusdx(d, a, b) asm("smusdx %0,%1,%2" : "=r"(d) : "r"(a), "r"(b))

#if 0
// gcc/config/arm/arm.c
#undef gvadd_s64
#define gvadd_s64(d, a, b) asm("vadd.i64 %P0,%P1,%P2" : "=w"(d.s64) : "w"(a.s64), "w"(b.s64))
#endif

#else

// portable equivalents of the arm asm above
#define gssub16(d, a, b) d = (u16)((a) - (b)) | ((((a) >> 16) - ((b) >> 16)) << 16)
#define gsmusdx(d, a, b) d = ((s32)(s16)(a) * ((s32)(b) >> 16)) \
                             - (((s32)(a) >> 16) * (s16)(b))

#endif
| 705 | |
// for compatibility with the original psx_gpu.c code
// All 128-bit vector types map to the full-width gvreg, all 64-bit types to
// the half-width gvhreg; the element interpretation is carried by the name
// at the use site only.
#define vec_2x64s gvreg
#define vec_2x64u gvreg
#define vec_4x32s gvreg
#define vec_4x32u gvreg
#define vec_8x16s gvreg
#define vec_8x16u gvreg
#define vec_16x8s gvreg
#define vec_16x8u gvreg
#define vec_1x64s gvhreg
#define vec_1x64u gvhreg
#define vec_2x32s gvhreg
#define vec_2x32u gvhreg
#define vec_4x16s gvhreg
#define vec_4x16u gvhreg
#define vec_8x8s gvhreg
#define vec_8x8u gvhreg
| 723 | |
#if 0
// Debug-only helpers (disabled): compare this implementation's output
// against a reference ("_c") copy and dump vector registers.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
static int ccount, dump_enabled;
// Compare len bytes of a_ and b_ word by word; the final partial word is
// masked via masks[]. Prints the first mismatch and exits.
void cmpp(const char *name, const void *a_, const void *b_, size_t len)
{
  const uint32_t *a = a_, *b = b_, masks[] = { 0, 0xff, 0xffff, 0xffffff };
  size_t i, left;
  uint32_t mask;
  for (i = 0; i < (len + 3)/4; i++) {
    left = len - i*4;
    mask = left >= 4 ? ~0u : masks[left];
    if ((a[i] ^ b[i]) & mask) {
      printf("%s: %08x %08x [%03zx/%zu] #%d\n",
        name, a[i] & mask, b[i] & mask, i*4, i, ccount);
      exit(1);
    }
  }
  ccount++;
}
// Compare a psx_gpu field (or array of c elements) against its "_c" shadow.
#define ccmpf(n) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c))
#define ccmpa(n,c) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c[0]) * c)

// Print a 64-bit (is_q == 0) or 128-bit (is_q != 0) register image.
void dump_r_(const char *name, void *dump, int is_q)
{
  unsigned long long *u = dump;
  if (!dump_enabled) return;
  //if (ccount > 1) return;
  printf("%20s %016llx ", name, u[0]);
  if (is_q)
    printf("%016llx", u[1]);
  puts("");
}
// noinline/noclone keep these callable from a debugger.
void __attribute__((noinline,noclone)) dump_r_d(const char *name, void *dump)
{ dump_r_(name, dump, 0); }
void __attribute__((noinline,noclone)) dump_r_q(const char *name, void *dump)
{ dump_r_(name, dump, 1); }
#define dumprd(n) { u8 dump_[8]; gvst1_u8(n, dump_); dump_r_d(#n, dump_); }
#define dumprq(n) { u16 dump_[8]; gvst1q_u16(n, dump_); dump_r_q(#n, dump_); }
#endif
| 765 | |
// Compute the per-triangle interpolant gradients (u, v, r, g and b, each in
// x and y) from vertices a, b, c and store the interpolation setup into
// psx_gpu (uvrg tables, b terms). The code is hand-scheduled: scalar and
// vector statements are deliberately interleaved so the FP division and the
// integer/SIMD work overlap — do not reorder casually.
void compute_all_gradients(psx_gpu_struct * __restrict__ psx_gpu,
 const vertex_struct * __restrict__ a, const vertex_struct * __restrict__ b,
 const vertex_struct * __restrict__ c)
{
  // "divident" (sic, dividend) and divider are doubles assembled bitwise
  // from integer halves to get a cheap fixed-point reciprocal via FP divide.
  union { double d; struct { u32 l; u32 h; } i; } divident, divider;
  union { double d; gvhreg v; } d30;

#if 0
  // reference (assembly) implementation, for debugging
  compute_all_gradients_(psx_gpu, a, b, c);
  return;
#endif
  // First compute the triangle area reciprocal and shift. The division will
  // happen concurrently with much of the work which follows.

  // load exponent of 62 into upper half of double
  u32 shift = __builtin_clz(psx_gpu->triangle_area);
  u32 triangle_area_normalized = psx_gpu->triangle_area << shift;

  // load area normalized into lower half of double
  divident.i.l = triangle_area_normalized >> 10;
  divident.i.h = (62 + 1023) << 20;

  divider.i.l = triangle_area_normalized << 20;
  divider.i.h = ((1022 + 31) << 20) + (triangle_area_normalized >> 11);

  d30.d = divident.d / divider.d;       // d30 = ((1 << 62) + ta_n) / ta_n

  // ((x1 - x0) * (y2 - y1)) - ((x2 - x1) * (y1 - y0)) =
  // ( d0    *   d1  ) - ( d2   *   d3  ) =
  // ( m0           ) - (  m1          ) = gradient

  // This is split to do 12 elements at a time over three sets: a, b, and c.
  // Technically we only need to do 10 elements (uvrgb_x and uvrgb_y), so
  // two of the slots are unused.

  // Inputs are all 16-bit signed. The m0/m1 results are 32-bit signed, as
  // is g.

  // First type is:  uvrg bxxx xxxx
  // Second type is: yyyy ybyy uvrg
  // Since x_a and y_c are the same, the same variable is used for both.

  gvreg v0;
  gvreg v1;
  gvreg v2;
  gvreg uvrg_xxxx0;
  gvreg uvrg_xxxx1;
  gvreg uvrg_xxxx2;

  gvreg y0_ab;
  gvreg y1_ab;
  gvreg y2_ab;

  gvreg d0_ab;
  gvreg d1_ab;
  gvreg d2_ab;
  gvreg d3_ab;

  gvreg ga_uvrg_x;
  gvreg ga_uvrg_y;
  gvreg gw_rg_x;
  gvreg gw_rg_y;
  gvreg w_mask;
  gvreg r_shift;
  gvreg uvrg_dx2, uvrg_dx3;
  gvreg uvrgb_phase;
  gvhreg zero, tmp_lo, tmp_hi;

  gvld1q_u8(v0, (u8 *)a);               // v0 = { uvrg0, b0, x0, y0 }
  gvld1q_u8(v1, (u8 *)b);               // v1 = { uvrg1, b1, x1, y1 }
  gvld1q_u8(v2, (u8 *)c);               // v2 = { uvrg2, b2, x2, y2 }

  // widen the u8 color/coord bytes to u16 lanes
  gvmovl_u8(uvrg_xxxx0, gvlo(v0));      // uvrg_xxxx0 = { uv0, rg0, b0-, -- }
  gvmovl_u8(uvrg_xxxx1, gvlo(v1));      // uvrg_xxxx1 = { uv1, rg1, b1-, -- }
  gvmovl_u8(uvrg_xxxx2, gvlo(v2));      // uvrg_xxxx2 = { uv2, rg2, b2-, -- }

  gvdup_l_u16(tmp_lo, gvhi(v0), 1);     // yyyy0 = { yy0, yy0 }
  gvcombine_u16(y0_ab, tmp_lo, gvlo(uvrg_xxxx0));

  gvdup_l_u16(tmp_lo, gvhi(v0), 0);     // xxxx0 = { xx0, xx0 }
  gvset_hi(uvrg_xxxx0, tmp_lo);

  u32 x1_x2 = (u16)b->x | (c->x << 16); // x1_x2 = { x1, x2 }
  u32 x0_x1 = (u16)a->x | (b->x << 16); // x0_x1 = { x0, x1 }

  gvdup_l_u16(tmp_lo, gvhi(v1), 1);     // yyyy1 = { yy1, yy1 }
  gvcombine_u16(y1_ab, tmp_lo, gvlo(uvrg_xxxx1));

  gvdup_l_u16(tmp_lo, gvhi(v1), 0);     // xxxx1 = { xx1, xx1 }
  gvset_hi(uvrg_xxxx1, tmp_lo);

  gvdup_l_u16(tmp_lo, gvhi(v2), 1);     // yyyy2 = { yy2, yy2 }
  gvcombine_u16(y2_ab, tmp_lo, gvlo(uvrg_xxxx2));

  gvdup_l_u16(tmp_lo, gvhi(v2), 0);     // xxxx2 = { xx2, xx2 }
  gvset_hi(uvrg_xxxx2, tmp_lo);

  u32 y0_y1 = (u16)a->y | (b->y << 16); // y0_y1 = { y0, y1 }
  u32 y1_y2 = (u16)b->y | (c->y << 16); // y1_y2 = { y1, y2 }

  gvsubq_u16(d0_ab, uvrg_xxxx1, uvrg_xxxx0);

  u32 b1_b2 = b->b | (c->b << 16);      // b1_b2 = { b1, b2 }

  gvsubq_u16(d2_ab, uvrg_xxxx2, uvrg_xxxx1);

  gvsubq_u16(d1_ab, y2_ab, y1_ab);

  u32 b0_b1 = a->b | (b->b << 16);      // b0_b1 = { b0, b1 }

  // blue is interpolated with scalar packed-halfword math
  u32 dx, dy, db;
  gssub16(dx, x1_x2, x0_x1);            // dx = { x1 - x0, x2 - x1 }
  gssub16(dy, y1_y2, y0_y1);            // dy = { y1 - y0, y2 - y1 }
  gssub16(db, b1_b2, b0_b1);            // db = { b1 - b0, b2 - b1 }

  u32 ga_by, ga_bx;
  gvsubq_u16(d3_ab, y1_ab, y0_ab);
  gsmusdx(ga_by, dx, db);               // ga_by = ((x1 - x0) * (b2 - b1)) -
                                        //         ((x2 - X1) * (b1 - b0))
  gvmull_s16(ga_uvrg_x, gvlo(d0_ab), gvlo(d1_ab));
  gsmusdx(ga_bx, db, dy);               // ga_bx = ((b1 - b0) * (y2 - y1)) -
                                        //         ((b2 - b1) * (y1 - y0))
  gvmlsl_s16(ga_uvrg_x, gvlo(d2_ab), gvlo(d3_ab));
  u32 gs_bx = (s32)ga_bx >> 31;         // movs

  gvmull_s16(ga_uvrg_y, gvhi(d0_ab), gvhi(d1_ab));
  if ((s32)gs_bx < 0) ga_bx = -ga_bx;   // rsbmi

  gvmlsl_s16(ga_uvrg_y, gvhi(d2_ab), gvhi(d3_ab));
  u32 gs_by = (s32)ga_by >> 31;         // movs

  gvhreg d0;
  gvshr_n_u64(d0, d30.v, 22);           // note: on "d30 >> 22" gcc generates junk code

  gvdupq_n_u32(uvrgb_phase, psx_gpu->uvrgb_phase);
  u32 b_base = psx_gpu->uvrgb_phase + (a->b << 16);

  if ((s32)gs_by < 0) ga_by = -ga_by;   // rsbmi
  // sign masks, then absolute values: gradients are computed unsigned and
  // the sign (combined with the winding) is re-applied afterwards
  gvreg gs_uvrg_x, gs_uvrg_y;
  gs_uvrg_x.s32 = ga_uvrg_x.s32 < 0;    // gs_uvrg_x = ga_uvrg_x < 0
  gs_uvrg_y.s32 = ga_uvrg_y.s32 < 0;    // gs_uvrg_y = ga_uvrg_y < 0

  gvdupq_n_u32(w_mask, -psx_gpu->triangle_winding); // w_mask = { -w, -w, -w, -w }
  shift -= 62 - 12;                     // shift -= (62 - FIXED_BITS)

  gvreg uvrg_base;
  gvshll_n_u16(uvrg_base, gvlo(uvrg_xxxx0), 16); // uvrg_base = uvrg0 << 16

  gvaddq_u32(uvrg_base, uvrg_base, uvrgb_phase);
  gvabsq_s32(ga_uvrg_x, ga_uvrg_x);     // ga_uvrg_x = abs(ga_uvrg_x)

  u32 area_r_s = d0.u32[0];             // area_r_s = triangle reciprocal
  gvabsq_s32(ga_uvrg_y, ga_uvrg_y);     // ga_uvrg_y = abs(ga_uvrg_y)

  // scale the four 32-bit gradients by the area reciprocal (64-bit products)
  gvmull_l_u32(gw_rg_x, gvhi(ga_uvrg_x), d0, 0);
  gvmull_l_u32(ga_uvrg_x, gvlo(ga_uvrg_x), d0, 0);
  gvmull_l_u32(gw_rg_y, gvhi(ga_uvrg_y), d0, 0);
  gvmull_l_u32(ga_uvrg_y, gvlo(ga_uvrg_y), d0, 0);

#if defined(__ARM_NEON) || defined(__ARM_NEON__)
  // NEON vshl with a negative count shifts right
  gvdupq_n_s64(r_shift, shift);         // r_shift = { shift, shift }
  gvshlq_u64(gw_rg_x, gw_rg_x, r_shift);
  gvshlq_u64(ga_uvrg_x, ga_uvrg_x, r_shift);
  gvshlq_u64(gw_rg_y, gw_rg_y, r_shift);
  gvshlq_u64(ga_uvrg_y, ga_uvrg_y, r_shift);
#elif defined(__SSE2__)
  // SSE2 has no per-element variable shift here; use the shared-count form
  r_shift.m = _mm_cvtsi32_si128(-shift);
  gw_rg_x.m = _mm_srl_epi64(gw_rg_x.m, r_shift.m);
  ga_uvrg_x.m = _mm_srl_epi64(ga_uvrg_x.m, r_shift.m);
  gw_rg_y.m = _mm_srl_epi64(gw_rg_y.m, r_shift.m);
  ga_uvrg_y.m = _mm_srl_epi64(ga_uvrg_y.m, r_shift.m);
#else
  gvdupq_n_s64(r_shift, -shift);        // r_shift = { shift, shift }
  gvshrq_u64(gw_rg_x, gw_rg_x, r_shift);
  gvshrq_u64(ga_uvrg_x, ga_uvrg_x, r_shift);
  gvshrq_u64(gw_rg_y, gw_rg_y, r_shift);
  gvshrq_u64(ga_uvrg_y, ga_uvrg_y, r_shift);
#endif

  // combine the computed sign with the winding mask (xor/sub = conditional
  // negate) and narrow the 64-bit results back to 32 bits
  gveorq(gs_uvrg_x, gs_uvrg_x, w_mask);
  gvmovn_u64(tmp_lo, ga_uvrg_x);

  gveorq(gs_uvrg_y, gs_uvrg_y, w_mask);
  gvmovn_u64(tmp_hi, gw_rg_x);

  gvcombine_u32(ga_uvrg_x, tmp_lo, tmp_hi);

  gveorq(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x);
  gvmovn_u64(tmp_lo, ga_uvrg_y);

  gvsubq_u32(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x);
  gvmovn_u64(tmp_hi, gw_rg_y);

  gvcombine_u32(ga_uvrg_y, tmp_lo, tmp_hi);

  gveorq(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y);
  ga_bx = ga_bx << 13;

  gvsubq_u32(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y);
  ga_by = ga_by << 13;

  u32 gw_bx_h, gw_by_h;
  gw_bx_h = (u64)ga_bx * area_r_s >> 32;

  gvshlq_n_u32(ga_uvrg_x, ga_uvrg_x, 4);
  gvshlq_n_u32(ga_uvrg_y, ga_uvrg_y, 4);

  gw_by_h = (u64)ga_by * area_r_s >> 32;
  gvdup_n_u32(tmp_lo, a->x);
  gvmlsq_l_s32(uvrg_base, ga_uvrg_x, tmp_lo, 0); // base -= gradient * a->x

  gs_bx = gs_bx ^ -psx_gpu->triangle_winding;
  gvaddq_u32(uvrg_dx2, ga_uvrg_x, ga_uvrg_x);    // 2 * d/dx

  gs_by = gs_by ^ -psx_gpu->triangle_winding;

  u32 r11 = -shift;                     // r11 = negative shift for scalar lsr
  u32 *store_a = psx_gpu->uvrg.e;
  r11 = r11 - (32 - 13);
  u32 *store_b = store_a + 16 / sizeof(u32);

  gvaddq_u32(uvrg_dx3, uvrg_dx2, ga_uvrg_x);     // 3 * d/dx
  gvst1q_inc_u32(uvrg_base, store_a, 32);

  gvst1q_inc_u32(ga_uvrg_x, store_b, 32);
  u32 g_bx = (u32)gw_bx_h >> r11;

  gvst1q_inc_u32(ga_uvrg_y, store_a, 32);
  u32 g_by = (u32)gw_by_h >> r11;

  gvdup_n_u32(zero, 0);

  // store interleaved { 0, dx, 2*dx, 3*dx } tables for the span loop
  gvst4_4_inc_u32(zero, gvlo(ga_uvrg_x), gvlo(uvrg_dx2), gvlo(uvrg_dx3), store_b, 32);
  g_bx = g_bx ^ gs_bx;

  gvst4_4_inc_u32(zero, gvhi(ga_uvrg_x), gvhi(uvrg_dx2), gvhi(uvrg_dx3), store_b, 32);
  g_bx = g_bx - gs_bx;

  g_bx = g_bx << 4;
  g_by = g_by ^ gs_by;

  b_base -= g_bx * a->x;
  g_by = g_by - gs_by;

  g_by = g_by << 4;

  u32 g_bx2 = g_bx + g_bx;
  u32 g_bx3 = g_bx + g_bx2;

  // 112
  store_b[0] = 0;
  store_b[1] = g_bx;
  store_b[2] = g_bx2;
  store_b[3] = g_bx3;
  store_b[4] = b_base;
  store_b[5] = g_by;                    // 132
}
| 1023 | |
// No-op hook for per-span debug checking (intentionally empty).
#define setup_spans_debug_check(span_edge_data_element) \

// Extra locals for triangles that have a separate "alternate" (minor) edge
// that takes over at y_mid_point.
#define setup_spans_prologue_alternate_yes() \
  vec_2x64s alternate_x; \
  vec_2x64s alternate_dx_dy; \
  vec_4x32s alternate_x_32; \
  vec_4x16u alternate_x_16; \
 \
  vec_4x16u alternate_select; \
  vec_4x16s y_mid_point; \
 \
  s32 y_b = v_b->y; \
  s64 edge_alt; \
  s32 edge_dx_dy_alt; \
  u32 edge_shift_alt \

#define setup_spans_prologue_alternate_no() \

// Common locals for all setup_spans_* variants: edge positions/steps in
// 48.16-style fixed point, clamped span extents, interpolant state, and the
// vertex coordinates pulled out of v_a/v_b/v_c.
#define setup_spans_prologue(alternate_active) \
  edge_data_struct *span_edge_data; \
  vec_4x32u *span_uvrg_offset; \
  u32 *span_b_offset; \
 \
  s32 clip; \
  vec_4x32u v_clip; \
 \
  vec_2x64s edges_xy; \
  vec_2x32s edges_dx_dy; \
  vec_2x32u edge_shifts; \
 \
  vec_2x64s left_x, right_x; \
  vec_2x64s left_dx_dy, right_dx_dy; \
  vec_4x32s left_x_32, right_x_32; \
  vec_2x32s left_x_32_lo, right_x_32_lo; \
  vec_2x32s left_x_32_hi, right_x_32_hi; \
  vec_4x16s left_right_x_16_lo, left_right_x_16_hi; \
  vec_4x16s y_x4; \
  vec_8x16s left_edge; \
  vec_8x16s right_edge; \
  vec_4x16u span_shift; \
 \
  vec_2x32u c_0x01; \
  vec_4x16u c_0x04; \
  vec_4x16u c_0xFFFE; \
  vec_4x16u c_0x07; \
 \
  vec_2x32s x_starts; \
  vec_2x32s x_ends; \
 \
  s32 x_a = v_a->x; \
  s32 x_b = v_b->x; \
  s32 x_c = v_c->x; \
  s32 y_a = v_a->y; \
  s32 y_c = v_c->y; \
 \
  vec_4x32u uvrg; \
  vec_4x32u uvrg_dy; \
  u32 b = psx_gpu->b; \
  u32 b_dy = psx_gpu->b_dy; \
  const u32 *reciprocal_table = psx_gpu->reciprocal_table_ptr; \
 \
  gvld1q_u32(uvrg, psx_gpu->uvrg.e); \
  gvld1q_u32(uvrg_dy, psx_gpu->uvrg_dy.e); \
  gvdup_n_u32(c_0x01, 0x01); \
  setup_spans_prologue_alternate_##alternate_active() \

// Second half of the prologue, run after clipping: output pointers and the
// viewport clamp constants (right edge is end_x + 1 so it can be used as an
// exclusive bound).
#define setup_spans_prologue_b() \
  span_edge_data = psx_gpu->span_edge_data; \
  span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \
  span_b_offset = psx_gpu->span_b_offset; \
 \
  vec_8x16u c_0x0001; \
 \
  gvdupq_n_u16(c_0x0001, 0x0001); \
  gvdupq_n_u16(left_edge, psx_gpu->viewport_start_x); \
  gvdupq_n_u16(right_edge, psx_gpu->viewport_end_x); \
  gvaddq_u16(right_edge, right_edge, c_0x0001); \
  gvdup_n_u16(c_0x04, 0x04); \
  gvdup_n_u16(c_0x07, 0x07); \
  gvdup_n_u16(c_0xFFFE, 0xFFFE); \

| 1104 | |
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
// better encoding, remaining bits are unused anyway
#define mask_edge_shifts(edge_shifts) \
  gvbic_n_u16(edge_shifts, 0xE0)
#else
// keep only the shift amount (low 5 bits) of the reciprocal table entry
#define mask_edge_shifts(edge_shifts) \
  gvand_n_u32(edge_shifts, 0x1F)
#endif

// Two-edge case (flat triangle): derive both edges' fixed-point x and dx/dy
// from the shared height. reciprocal_table packs a reciprocal (bits 10+)
// and a shift amount (low bits) so the divide becomes multiply + shift.
#define compute_edge_delta_x2() \
{ \
  vec_2x32s heights; \
  vec_2x32s height_reciprocals; \
  vec_2x32s heights_b; \
  vec_2x32u widths; \
 \
  u32 edge_shift = reciprocal_table[height]; \
 \
  gvdup_n_u32(heights, height); \
  gvsub_u32(widths, x_ends, x_starts); \
 \
  gvdup_n_u32(edge_shifts, edge_shift); \
  gvsub_u32(heights_b, heights, c_0x01); \
  gvshr_n_u32(height_reciprocals, edge_shifts, 10); \
 \
  gvmla_s32(heights_b, x_starts, heights); \
  mask_edge_shifts(edge_shifts); \
  gvmul_s32(edges_dx_dy, widths, height_reciprocals); \
  gvmull_s32(edges_xy, heights_b, height_reciprocals); \
} \

| 1135 | |
// Three-edge case: like compute_edge_delta_x2 for the two vectorized edges
// (height_a, height_b), plus a scalar "alt" computation for the minor edge
// of height_minor_b starting at start_c. Results land in edges_xy /
// edges_dx_dy / edge_shifts and edge_alt / edge_dx_dy_alt / edge_shift_alt.
#define compute_edge_delta_x3(start_c, height_a, height_b) \
{ \
  vec_2x32s heights; \
  vec_2x32s height_reciprocals; \
  vec_2x32s heights_b; \
  vec_2x32u widths; \
 \
  u32 width_alt; \
  s32 height_b_alt; \
  u32 height_reciprocal_alt; \
 \
  gvcreate_u32(heights, height_a, height_b); \
  gvcreate_u32(edge_shifts, reciprocal_table[height_a], reciprocal_table[height_b]); \
 \
  edge_shift_alt = reciprocal_table[height_minor_b]; \
 \
  gvsub_u32(widths, x_ends, x_starts); \
  width_alt = x_c - start_c; \
 \
  gvshr_n_u32(height_reciprocals, edge_shifts, 10); \
  height_reciprocal_alt = edge_shift_alt >> 10; \
 \
  mask_edge_shifts(edge_shifts); \
  edge_shift_alt &= 0x1F; \
 \
  gvsub_u32(heights_b, heights, c_0x01); \
  height_b_alt = height_minor_b - 1; \
 \
  gvmla_s32(heights_b, x_starts, heights); \
  height_b_alt += height_minor_b * start_c; \
 \
  gvmull_s32(edges_xy, heights_b, height_reciprocals); \
  edge_alt = (s64)height_b_alt * height_reciprocal_alt; \
 \
  gvmul_s32(edges_dx_dy, widths, height_reciprocals); \
  edge_dx_dy_alt = width_alt * height_reciprocal_alt; \
} \

| 1173 | |
| 1174 | |
// Step the packed 4x y coordinates by -4 / +4 per iteration.
#define setup_spans_adjust_y_up() \
  gvsub_u32(y_x4, y_x4, c_0x04) \

#define setup_spans_adjust_y_down() \
  gvadd_u32(y_x4, y_x4, c_0x04) \

// Step the uvrg vector and scalar b by one scanline (up or down).
#define setup_spans_adjust_interpolants_up() \
  gvsubq_u32(uvrg, uvrg, uvrg_dy); \
  b -= b_dy \

#define setup_spans_adjust_interpolants_down() \
  gvaddq_u32(uvrg, uvrg, uvrg_dy); \
  b += b_dy \


// Advance interpolants by `clip` scanlines at once (viewport clipping).
#define setup_spans_clip_interpolants_increment() \
  gvmlaq_s32(uvrg, uvrg_dy, v_clip); \
  b += b_dy * clip \

#define setup_spans_clip_interpolants_decrement() \
  gvmlsq_s32(uvrg, uvrg_dy, v_clip); \
  b -= b_dy * clip \

#define setup_spans_clip_alternate_yes() \
  edge_alt += edge_dx_dy_alt * (s64)(clip) \

#define setup_spans_clip_alternate_no() \

// Skip `clip` scanlines of edge and interpolant state in one step.
#define setup_spans_clip(direction, alternate_active) \
{ \
  gvdupq_n_u32(v_clip, clip); \
  gvmlal_s32(edges_xy, edges_dx_dy, gvlo(v_clip)); \
  setup_spans_clip_alternate_##alternate_active(); \
  setup_spans_clip_interpolants_##direction(); \
} \

| 1210 | |
| 1211 | |
// Normalize both edges by their per-edge shift, then spread them into
// per-scanline pairs: lane 0 = current line, lane 1 = next line, and double
// the steps so each loop iteration advances two lines per lane.
#define setup_spans_adjust_edges_alternate_no(left_half, right_half) \
{ \
  vec_2x64s edge_shifts_64; \
  vec_2x64s edges_dx_dy_64; \
  vec_1x64s left_x_hi, right_x_hi; \
 \
  gvmovl_s32(edge_shifts_64, edge_shifts); \
  gvshlq_u64(edges_xy, edges_xy, edge_shifts_64); \
 \
  gvmovl_s32(edges_dx_dy_64, edges_dx_dy); \
  gvshlq_u64(edges_dx_dy_64, edges_dx_dy_64, edge_shifts_64); \
 \
  gvdupq_l_s64(left_x, gv##left_half(edges_xy), 0); \
  gvdupq_l_s64(right_x, gv##right_half(edges_xy), 0); \
 \
  gvdupq_l_s64(left_dx_dy, gv##left_half(edges_dx_dy_64), 0); \
  gvdupq_l_s64(right_dx_dy, gv##right_half(edges_dx_dy_64), 0); \
 \
  gvadd_s64(left_x_hi, gvlo(left_x), gvlo(left_dx_dy)); \
  gvadd_s64(right_x_hi, gvlo(right_x), gvlo(right_dx_dy)); \
 \
  gvset_hi(left_x, left_x_hi); \
  gvset_hi(right_x, right_x_hi); \
 \
  gvaddq_s64(left_dx_dy, left_dx_dy, left_dx_dy); \
  gvaddq_s64(right_dx_dy, right_dx_dy, right_dx_dy); \
} \

// Same, plus the alternate (minor) edge state and the y midpoint where the
// alternate edge takes over.
#define setup_spans_adjust_edges_alternate_yes(left_half, right_half) \
{ \
  setup_spans_adjust_edges_alternate_no(left_half, right_half); \
  s64 edge_dx_dy_alt_64; \
  vec_1x64s alternate_x_hi; \
 \
  gvdup_n_u16(y_mid_point, y_b); \
 \
  edge_alt <<= edge_shift_alt; \
  edge_dx_dy_alt_64 = (s64)edge_dx_dy_alt << edge_shift_alt; \
 \
  gvdupq_n_s64(alternate_x, edge_alt); \
  gvdupq_n_s64(alternate_dx_dy, edge_dx_dy_alt_64); \
 \
  gvadd_s64(alternate_x_hi, gvlo(alternate_x), gvlo(alternate_dx_dy)); \
  gvaddq_s64(alternate_dx_dy, alternate_dx_dy, alternate_dx_dy); \
  gvset_hi(alternate_x, alternate_x_hi); \
} \

| 1258 | |
| 1259 | |
// Per-lane mask of which of the 4 scanlines are past the midpoint and
// should use the alternate edge.
#define setup_spans_y_select_up() \
  gvclt_s16(alternate_select, y_x4, y_mid_point) \

#define setup_spans_y_select_down() \
  gvcgt_s16(alternate_select, y_x4, y_mid_point) \

#define setup_spans_y_select_alternate_yes(direction) \
  setup_spans_y_select_##direction() \

#define setup_spans_y_select_alternate_no(direction) \

// Substitute the alternate edge x into the left or right span bound where
// alternate_select is set.
#define setup_spans_alternate_select_left() \
  gvbit(left_right_x_16_lo, alternate_x_16, alternate_select); \

#define setup_spans_alternate_select_right() \
  gvbit(left_right_x_16_hi, alternate_x_16, alternate_select); \

#define setup_spans_alternate_select_none() \

// Advance the alternate edge 4 scanlines and narrow its x to u16 lanes.
#define setup_spans_increment_alternate_yes() \
{ \
  vec_2x32s alternate_x_32_lo, alternate_x_32_hi; \
  gvmovn_top_u64(alternate_x_32_lo, alternate_x); \
  gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy); \
  gvmovn_top_u64(alternate_x_32_hi, alternate_x); \
  gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy); \
  gvcombine_u32(alternate_x_32, alternate_x_32_lo, alternate_x_32_hi); \
  gvmovn_u32(alternate_x_16, alternate_x_32); \
} \

#define setup_spans_increment_alternate_no() \

#if defined(__SSE2__) && !(defined(__AVX512BW__) && defined(__AVX512VL__))
// SSE2 has no per-lane variable shift, so build the partial-block pixel
// mask ((0xFFFE << n) per lane) with a byte table lookup instead.
#define setup_spans_make_span_shift(span_shift) { \
  gvreg tab1_ = { .u8 = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00 } }; \
  gvtbl2_u8(span_shift, tab1_, span_shift); \
  gvorr_n_u16(span_shift, 0xff00); \
  (void)c_0xFFFE; \
}
#else
#define setup_spans_make_span_shift(span_shift) \
  gvshl_u16(span_shift, c_0xFFFE, span_shift)
#endif
| 1303 | |
// Emit 4 spans: store the interpolant start values for 4 scanlines, advance
// the left/right (and optionally alternate) edges, clamp to the viewport,
// and write the packed edge_data (left x, block count, pixel mask, y).
#define setup_spans_set_x4(alternate, direction, alternate_active) \
{ \
  gvst1q_pi_u32(uvrg, span_uvrg_offset); \
  *span_b_offset++ = b; \
  setup_spans_adjust_interpolants_##direction(); \
 \
  gvst1q_pi_u32(uvrg, span_uvrg_offset); \
  *span_b_offset++ = b; \
  setup_spans_adjust_interpolants_##direction(); \
 \
  gvst1q_pi_u32(uvrg, span_uvrg_offset); \
  *span_b_offset++ = b; \
  setup_spans_adjust_interpolants_##direction(); \
 \
  gvst1q_pi_u32(uvrg, span_uvrg_offset); \
  *span_b_offset++ = b; \
  setup_spans_adjust_interpolants_##direction(); \
 \
  gvmovn_top_u64(left_x_32_lo, left_x); \
  gvmovn_top_u64(right_x_32_lo, right_x); \
 \
  gvaddq_s64(left_x, left_x, left_dx_dy); \
  gvaddq_s64(right_x, right_x, right_dx_dy); \
 \
  gvmovn_top_u64(left_x_32_hi, left_x); \
  gvmovn_top_u64(right_x_32_hi, right_x); \
 \
  gvaddq_s64(left_x, left_x, left_dx_dy); \
  gvaddq_s64(right_x, right_x, right_dx_dy); \
 \
  gvcombine_s64(left_x_32, left_x_32_lo, left_x_32_hi); \
  gvcombine_s64(right_x_32, right_x_32_lo, right_x_32_hi); \
 \
  gvmovn_u32(left_right_x_16_lo, left_x_32); \
  gvmovn_u32(left_right_x_16_hi, right_x_32); \
 \
  setup_spans_increment_alternate_##alternate_active(); \
  setup_spans_y_select_alternate_##alternate_active(direction); \
  setup_spans_alternate_select_##alternate(); \
 \
  gvmax_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(left_edge)); \
  gvmax_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(left_edge)); \
  gvmin_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(right_edge)); \
  gvmin_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(right_edge)); \
 \
  gvsub_u16(left_right_x_16_hi, left_right_x_16_hi, left_right_x_16_lo); \
  gvadd_u16(left_right_x_16_hi, left_right_x_16_hi, c_0x07); \
  gvand(span_shift, left_right_x_16_hi, c_0x07); \
  setup_spans_make_span_shift(span_shift); \
  gvshr_n_u16(left_right_x_16_hi, left_right_x_16_hi, 3); \
 \
  gvst4_pi_u16(left_right_x_16_lo, left_right_x_16_hi, span_shift, y_x4, \
   span_edge_data); \
 \
  setup_spans_adjust_y_##direction(); \
} \

| 1360 | |
| 1361 | |
// Rebase the alternate edge to the top of the minor-b segment.
#define setup_spans_alternate_adjust_yes() \
  edge_alt -= edge_dx_dy_alt * (s64)height_minor_a \

#define setup_spans_alternate_adjust_no() \


// Downward-scanning span setup: clip against the viewport top/bottom, then
// emit spans 4 scanlines at a time until `height` is exhausted.
#define setup_spans_down(left_half, right_half, alternate, alternate_active) \
  setup_spans_alternate_adjust_##alternate_active(); \
  if(y_c > psx_gpu->viewport_end_y) \
    height -= y_c - psx_gpu->viewport_end_y - 1; \
 \
  clip = psx_gpu->viewport_start_y - y_a; \
  if(clip > 0) \
  { \
    height -= clip; \
    y_a += clip; \
    setup_spans_clip(increment, alternate_active); \
  } \
 \
  setup_spans_prologue_b(); \
 \
  if(height > 0) \
  { \
    u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32) \
              | (u32)((y_a + 1) << 16) | (u16)y_a; \
    gvcreate_u64(y_x4, y_x4_); \
    setup_spans_adjust_edges_alternate_##alternate_active(left_half, right_half); \
 \
    psx_gpu->num_spans = height; \
    do \
    { \
      setup_spans_set_x4(alternate, down, alternate_active); \
      height -= 4; \
    } while(height > 0); \
  } \

| 1397 | |
| 1398 | |
#define setup_spans_alternate_pre_increment_yes() \
  edge_alt += edge_dx_dy_alt \

#define setup_spans_alternate_pre_increment_no() \

#define setup_spans_up_decrement_height_yes() \
  height-- \

#define setup_spans_up_decrement_height_no() \
  {} \

// Upward-scanning span setup: mirror of setup_spans_down, walking y from
// y_a - 1 upward and stepping interpolants/edges negatively.
#define setup_spans_up(left_half, right_half, alternate, alternate_active) \
  setup_spans_alternate_adjust_##alternate_active(); \
  y_a--; \
 \
  if(y_c < psx_gpu->viewport_start_y) \
    height -= psx_gpu->viewport_start_y - y_c; \
  else \
    setup_spans_up_decrement_height_##alternate_active(); \
 \
  clip = y_a - psx_gpu->viewport_end_y; \
  if(clip > 0) \
  { \
    height -= clip; \
    y_a -= clip; \
    setup_spans_clip(decrement, alternate_active); \
  } \
 \
  setup_spans_prologue_b(); \
 \
  if(height > 0) \
  { \
    u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32) \
              | (u32)((y_a - 1) << 16) | (u16)y_a; \
    gvcreate_u64(y_x4, y_x4_); \
    gvaddw_s32(edges_xy, edges_xy, edges_dx_dy); \
    setup_spans_alternate_pre_increment_##alternate_active(); \
    setup_spans_adjust_edges_alternate_##alternate_active(left_half, right_half); \
    setup_spans_adjust_interpolants_up(); \
 \
    psx_gpu->num_spans = height; \
    while(height > 0) \
    { \
      setup_spans_set_x4(alternate, up, alternate_active); \
      height -= 4; \
    } \
  } \

| 1446 | |
// Map logical left/right to the lo/hi halves of the packed edge vectors.
#define half_left  lo
#define half_right hi

// Both edges rise from a: minor edge a->b, major edge a->c; the alternate
// (b->c) edge takes over on the `minor` side past y_b.
#define setup_spans_up_up(minor, major) \
  setup_spans_prologue(yes); \
  s32 height_minor_a = y_a - y_b; \
  s32 height_minor_b = y_b - y_c; \
  s32 height = y_a - y_c; \
 \
  gvdup_n_u32(x_starts, x_a); \
  gvcreate_u32(x_ends, x_c, x_b); \
 \
  compute_edge_delta_x3(x_b, height, height_minor_a); \
  setup_spans_up(half_##major, half_##minor, minor, yes) \

| 1461 | |
// Upward triangle with the minor (alternate) edge on the left side.
void setup_spans_up_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  // reference (assembly) implementation, for debugging
  setup_spans_up_left_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_up_up(left, right)
}
| 1471 | |
// Upward triangle with the minor (alternate) edge on the right side.
void setup_spans_up_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  // reference (assembly) implementation, for debugging
  setup_spans_up_right_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_up_up(right, left)
}
| 1481 | |
// Both edges descend from a: minor edge a->b, major edge a->c; the
// alternate (b->c) edge takes over on the `minor` side past y_b.
#define setup_spans_down_down(minor, major) \
  setup_spans_prologue(yes); \
  s32 height_minor_a = y_b - y_a; \
  s32 height_minor_b = y_c - y_b; \
  s32 height = y_c - y_a; \
 \
  gvdup_n_u32(x_starts, x_a); \
  gvcreate_u32(x_ends, x_c, x_b); \
 \
  compute_edge_delta_x3(x_b, height, height_minor_a); \
  setup_spans_down(half_##major, half_##minor, minor, yes) \

| 1493 | |
// Downward triangle with the minor (alternate) edge on the left side.
void setup_spans_down_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  // reference (assembly) implementation, for debugging
  setup_spans_down_left_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_down_down(left, right)
}
| 1503 | |
// Downward triangle with the minor (alternate) edge on the right side.
void setup_spans_down_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  // reference (assembly) implementation, for debugging
  setup_spans_down_right_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_down_down(right, left)
}
| 1513 | |
// Flat-edged upward triangle: only two edges, no alternate edge needed.
#define setup_spans_up_flat() \
  s32 height = y_a - y_c; \
 \
  compute_edge_delta_x2(); \
  setup_spans_up(half_left, half_right, none, no) \

| 1519 | |
// Flat-bottom upward triangle, variant A: edges start at x_a and x_b and
// meet at x_c.
void setup_spans_up_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  // reference (assembly) implementation, for debugging
  setup_spans_up_a_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvcreate_u32(x_starts, x_a, x_b);
  gvdup_n_u32(x_ends, x_c);

  setup_spans_up_flat()
}
| 1534 | |
// Flat-top upward triangle, variant B: both edges start at x_a and end at
// x_b and x_c.
void setup_spans_up_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  // reference (assembly) implementation, for debugging
  setup_spans_up_b_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvdup_n_u32(x_starts, x_a);
  gvcreate_u32(x_ends, x_b, x_c);

  setup_spans_up_flat()
}
| 1549 | |
// Downward-walk counterpart of setup_spans_up_flat(): single height,
// shared flat-edge path, no mid-triangle edge switch.
#define setup_spans_down_flat() \
  s32 height = y_c - y_a; \
 \
  compute_edge_delta_x2(); \
  setup_spans_down(half_left, half_right, none, no) \
| 1555 | |
// Flat-edge downward span setup, variant "a": x_starts holds (x_a, x_b) and
// x_ends is broadcast from x_c.
void setup_spans_down_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  // Reference implementation, kept disabled for debugging comparisons.
  setup_spans_down_a_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvcreate_u32(x_starts, x_a, x_b);
  gvdup_n_u32(x_ends, x_c);

  setup_spans_down_flat()
}
| 1570 | |
| 1571 | void setup_spans_down_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
| 1572 | vertex_struct *v_b, vertex_struct *v_c) |
| 1573 | { |
| 1574 | #if 0 |
| 1575 | setup_spans_down_b_(psx_gpu, v_a, v_b, v_c); |
| 1576 | return; |
| 1577 | #endif |
| 1578 | setup_spans_prologue(no) |
| 1579 | |
| 1580 | gvdup_n_u32(x_starts, x_a); |
| 1581 | gvcreate_u32(x_ends, x_b, x_c); |
| 1582 | |
| 1583 | setup_spans_down_flat() |
| 1584 | } |
| 1585 | |
// Span setup for a triangle whose vertex v_a lies vertically between v_b
// (above, y_b <= y_a) and v_c (below, y_c >= y_a): spans are generated in
// two passes, walking up from y_a toward y_b, then down from y_a toward y_c
// along the alternate edge.
void setup_spans_up_down(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  // Reference implementation, kept disabled for debugging comparisons.
  setup_spans_up_down_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  s32 y_b = v_b->y;
  // edge_alt / edge_dx_dy_alt / edge_shift_alt are written by the
  // compute_edge_delta_x3 macro below (captured by name).
  s64 edge_alt;
  s32 edge_dx_dy_alt;
  u32 edge_shift_alt;

  s32 middle_y = y_a;              // y where the up and down passes meet
  s32 height_minor_a = y_a - y_b;  // height of the upper (up-walked) part
  s32 height_minor_b = y_c - y_a;  // height of the lower (down-walked) part
  s32 height_major = y_c - y_b;    // full triangle height

  // Edge state for the second (down) pass.
  vec_2x64s edges_xy_b;
  vec_1x64s edges_xy_b_left;
  vec_2x32s edges_dx_dy_b;
  vec_2x32u edge_shifts_b;

  vec_2x32s height_increment;

  gvcreate_u32(x_starts, x_a, x_c);
  gvdup_n_u32(x_ends, x_b);

  compute_edge_delta_x3(x_a, height_minor_a, height_major);

  // Advance the major edge by the lower height so its position corresponds
  // to the middle row when the down pass starts.
  gvcreate_s32(height_increment, 0, height_minor_b);

  gvmlal_s32(edges_xy, edges_dx_dy, height_increment);

  // Build the down-pass edge pair: alternate edge on the left, advanced
  // major edge on the right.
  gvcreate_s64(edges_xy_b_left, edge_alt);
  gvcombine_s64(edges_xy_b, edges_xy_b_left, gvhi(edges_xy));

  edge_shifts_b = edge_shifts;
  gvmov_l_u32(edge_shifts_b, edge_shift_alt, 0);

  // The up pass steps edges with negated deltas; restore the sign (and the
  // alternate edge's own delta) for the down pass.
  gvneg_s32(edges_dx_dy_b, edges_dx_dy);
  gvmov_l_s32(edges_dx_dy_b, edge_dx_dy_alt, 0);

  y_a--;

  // Clip the upper part against the viewport.
  if(y_b < psx_gpu->viewport_start_y)
    height_minor_a -= psx_gpu->viewport_start_y - y_b;

  clip = y_a - psx_gpu->viewport_end_y;
  if(clip > 0)
  {
    height_minor_a -= clip;
    y_a -= clip;
    setup_spans_clip(decrement, no);
  }

  setup_spans_prologue_b();

  // Pass 1: emit the upper spans, 4 rows per iteration.
  if(height_minor_a > 0)
  {
    u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32)
              | (u32)((y_a - 1) << 16) | (u16)y_a;
    gvcreate_u64(y_x4, y_x4_);
    gvaddw_s32(edges_xy, edges_xy, edges_dx_dy);
    setup_spans_adjust_edges_alternate_no(lo, hi);
    setup_spans_adjust_interpolants_up();

    psx_gpu->num_spans = height_minor_a;
    while(height_minor_a > 0)
    {
      setup_spans_set_x4(none, up, no);
      height_minor_a -= 4;
    }

    // height_minor_a is now <= 0 (overshoot from the 4-row steps); adding
    // it rewinds the span pointers to where the up pass actually ended.
    span_edge_data += height_minor_a;
    span_uvrg_offset += height_minor_a;
    span_b_offset += height_minor_a;
  }

  // Switch to the down-pass edge state and reload the interpolants at the
  // middle row.
  edges_xy = edges_xy_b;
  edges_dx_dy = edges_dx_dy_b;
  edge_shifts = edge_shifts_b;

  gvld1q_u32(uvrg, psx_gpu->uvrg.e);
  b = psx_gpu->b;

  y_a = middle_y;

  // Clip the lower part against the viewport.
  if(y_c > psx_gpu->viewport_end_y)
    height_minor_b -= y_c - psx_gpu->viewport_end_y - 1;

  clip = psx_gpu->viewport_start_y - y_a;
  if(clip > 0)
  {
    height_minor_b -= clip;
    y_a += clip;
    setup_spans_clip(increment, no);
  }

  // Pass 2: emit the lower spans, 4 rows per iteration.
  if(height_minor_b > 0)
  {
    u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32)
              | (u32)((y_a + 1) << 16) | (u16)y_a;
    gvcreate_u64(y_x4, y_x4_);
    setup_spans_adjust_edges_alternate_no(lo, hi);

    // FIXME: overflow corner case
    if(psx_gpu->num_spans + height_minor_b == MAX_SPANS)
      height_minor_b &= ~3;

    psx_gpu->num_spans += height_minor_b;
    while(height_minor_b > 0)
    {
      setup_spans_set_x4(none, down, no);
      height_minor_b -= 4;
    }
  }
}
| 1705 | |
| 1706 | |
// Non-dithered path: a dither table entry is used as-is.
#define dither_table_entry_normal(value) \
  (value) \

// "indirect" targets buffer pixels in block records and do not need the
// mask bit here; expands to nothing.
#define setup_blocks_load_msb_mask_indirect() \

// "direct" targets write straight to the framebuffer and OR the mask bit
// into every pixel, so broadcast psx_gpu->mask_msb to a vector up front.
#define setup_blocks_load_msb_mask_direct() \
  vec_8x16u msb_mask; \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \
| 1715 | |
// Per-call working set for shaded + textured block setup: u/v/r/g/b
// interpolator vectors, the per-pixel uvrg deltas plus precomputed 4x/8x
// strides, the scalar b channel and its strides, and the texture wrap mask
// (loaded as duplicated width/height bytes and combined into one vector).
#define setup_blocks_variables_shaded_textured(target) \
  vec_4x32u u_block; \
  vec_4x32u v_block; \
  vec_4x32u r_block; \
  vec_4x32u g_block; \
  vec_4x32u b_block; \
  vec_4x32u uvrg_dx; \
  vec_4x32u uvrg_dx4; \
  vec_4x32u uvrg_dx8; \
  vec_4x32u uvrg; \
  vec_16x8u texture_mask; \
  vec_8x8u texture_mask_lo, texture_mask_hi; \
  u32 b_dx = psx_gpu->b_block_span.e[1]; \
  u32 b_dx4 = b_dx << 2; \
  u32 b_dx8 = b_dx << 3; \
  u32 b; \
 \
  gvld1q_u32(uvrg_dx, psx_gpu->uvrg_dx.e); \
  gvshlq_n_u32(uvrg_dx4, uvrg_dx, 2); \
  gvshlq_n_u32(uvrg_dx8, uvrg_dx, 3); \
  gvld2_u8_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width); \
  gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi) \
| 1738 | |
// Per-call working set for shaded + untextured block setup: r/g/b packed in
// one rgb vector (r,g from uvrg_dx[2..3], b from b_block_span) with 4x/8x
// strides, plus byte-vector constants used later by dithering and 5:5:5
// packing (0x07 masks low bits; 1, 4, 128 place r/g/b into their fields).
#define setup_blocks_variables_shaded_untextured(target) \
  vec_4x32u r_block; \
  vec_4x32u g_block; \
  vec_4x32u b_block; \
  vec_4x32u rgb_dx; \
  vec_2x32u rgb_dx_lo, rgb_dx_hi; \
  vec_4x32u rgb_dx4; \
  vec_4x32u rgb_dx8; \
  vec_4x32u rgb; \
  vec_2x32u rgb_lo, rgb_hi; \
 \
  vec_8x8u d64_0x07; \
  vec_8x8u d64_1; \
  vec_8x8u d64_4; \
  vec_8x8u d64_128; \
 \
  gvdup_n_u8(d64_0x07, 0x07); \
  gvdup_n_u8(d64_1, 1); \
  gvdup_n_u8(d64_4, 4); \
  gvdup_n_u8(d64_128, 128u); \
 \
  gvld1_u32(rgb_dx_lo, &psx_gpu->uvrg_dx.e[2]); \
  gvcreate_u32(rgb_dx_hi, psx_gpu->b_block_span.e[1], 0); \
  gvcombine_u32(rgb_dx, rgb_dx_lo, rgb_dx_hi); \
  gvshlq_n_u32(rgb_dx4, rgb_dx, 2); \
  gvshlq_n_u32(rgb_dx8, rgb_dx, 3) \
| 1765 | |
// Per-call working set for unshaded + textured block setup: only the u/v
// interpolators are needed (color is flat), with uv deltas, 4x/8x strides
// and the texture wrap mask.
#define setup_blocks_variables_unshaded_textured(target) \
  vec_4x32u u_block; \
  vec_4x32u v_block; \
  vec_2x32u uv_dx; \
  vec_2x32u uv_dx4; \
  vec_2x32u uv_dx8; \
  vec_2x32u uv; \
  vec_16x8u texture_mask; \
  vec_8x8u texture_mask_lo, texture_mask_hi; \
 \
  gvld1_u32(uv_dx, psx_gpu->uvrg_dx.e); \
  gvld1_u32(uv, psx_gpu->uvrg.e); \
  gvshl_n_u32(uv_dx4, uv_dx, 2); \
  gvshl_n_u32(uv_dx8, uv_dx, 3); \
  gvld2_u8_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width); \
  gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi) \
| 1782 | |
// Direct target: pixels go straight to VRAM, so fold the mask bit into the
// precomputed flat color now.
#define setup_blocks_variables_unshaded_untextured_direct() \
  gvorrq(colors, colors, msb_mask) \

// Indirect target: nothing extra to do; expands to nothing.
#define setup_blocks_variables_unshaded_untextured_indirect() \

// Unshaded + untextured: the whole triangle is one flat color.  Convert the
// 8:8:8 triangle_color to packed 5:5:5 once and broadcast it to an 8-pixel
// vector; the target-specific hook above optionally adds the mask bit.
#define setup_blocks_variables_unshaded_untextured(target) \
  u32 color = psx_gpu->triangle_color; \
  vec_8x16u colors; \
 \
  u32 color_r = color & 0xFF; \
  u32 color_g = (color >> 8) & 0xFF; \
  u32 color_b = (color >> 16) & 0xFF; \
 \
  color = (color_r >> 3) | ((color_g >> 3) << 5) | \
   ((color_b >> 3) << 10); \
  gvdupq_n_u16(colors, color); \
  setup_blocks_variables_unshaded_untextured_##target() \
| 1800 | |
// Textured dithering: widen the 8 signed dither bytes to 16 bits, scaled by
// 16 (applied later in the texturing stage).
#define setup_blocks_span_initialize_dithered_textured() \
  vec_8x16u dither_offsets; \
  gvshll_n_s8(dither_offsets, dither_offsets_short, 4) \

// Untextured dithering: bias the dither bytes by +4 so they can be applied
// with unsigned saturating ops (the -4 is taken back in the store macro).
#define setup_blocks_span_initialize_dithered_untextured() \
  vec_8x8u dither_offsets; \
  gvadd_u8(dither_offsets, dither_offsets_short, d64_4) \

// Per-span dither setup: pick the dither table row for this y and rotate it
// so entry 0 lines up with the span's left_x, then expand it according to
// the texturing mode.
#define setup_blocks_span_initialize_dithered(texturing) \
  u32 dither_row = psx_gpu->dither_table[y & 0x3]; \
  u32 dither_shift = (span_edge_data->left_x & 0x3) * 8; \
  vec_8x8s dither_offsets_short; \
 \
  dither_row = \
   (dither_row >> dither_shift) | (dither_row << (32 - dither_shift)); \
  gvdup_n_u32(dither_offsets_short, dither_row); \
  setup_blocks_span_initialize_dithered_##texturing() \

// Undithered: no per-span dither state; expands to nothing.
#define setup_blocks_span_initialize_undithered(texturing) \
| 1820 | |
// Start-of-span init (shaded, textured): load the span's uvrg and b values,
// step them right by left_x via multiply-accumulate, then splat each channel
// across a 4-lane vector and add its per-lane block span so the lanes hold
// consecutive pixel values.
#define setup_blocks_span_initialize_shaded_textured() \
{ \
  u32 left_x = span_edge_data->left_x; \
  vec_4x32u block_span; \
  vec_4x32u v_left_x; \
 \
  gvld1q_u32(uvrg, span_uvrg_offset); \
  gvdupq_n_u32(v_left_x, left_x); \
  gvmlaq_u32(uvrg, uvrg_dx, v_left_x); \
  b = *span_b_offset; \
  b += b_dx * left_x; \
 \
  gvdupq_l_u32(u_block, gvlo(uvrg), 0); \
  gvdupq_l_u32(v_block, gvlo(uvrg), 1); \
  gvdupq_l_u32(r_block, gvhi(uvrg), 0); \
  gvdupq_l_u32(g_block, gvhi(uvrg), 1); \
  gvdupq_n_u32(b_block, b); \
 \
  gvld1q_u32(block_span, psx_gpu->u_block_span.e); \
  gvaddq_u32(u_block, u_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->v_block_span.e); \
  gvaddq_u32(v_block, v_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->r_block_span.e); \
  gvaddq_u32(r_block, r_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->g_block_span.e); \
  gvaddq_u32(g_block, g_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->b_block_span.e); \
  gvaddq_u32(b_block, b_block, block_span); \
}
| 1850 | |
// Start-of-span init (shaded, untextured): r and g live in the upper half of
// the span's uvrg record, b comes from span_b_offset; pack them into one rgb
// vector, step right by left_x, splat per channel and add the block spans.
#define setup_blocks_span_initialize_shaded_untextured() \
{ \
  u32 left_x = span_edge_data->left_x; \
  u32 *span_uvrg_offset_high = (u32 *)span_uvrg_offset + 2; \
  vec_4x32u block_span; \
  vec_4x32u v_left_x; \
 \
  gvld1_u32(rgb_lo, span_uvrg_offset_high); \
  gvcreate_u32(rgb_hi, *span_b_offset, 0); \
  gvcombine_u32(rgb, rgb_lo, rgb_hi); \
  gvdupq_n_u32(v_left_x, left_x); \
  gvmlaq_u32(rgb, rgb_dx, v_left_x); \
 \
  gvdupq_l_u32(r_block, gvlo(rgb), 0); \
  gvdupq_l_u32(g_block, gvlo(rgb), 1); \
  gvdupq_l_u32(b_block, gvhi(rgb), 0); \
 \
  gvld1q_u32(block_span, psx_gpu->r_block_span.e); \
  gvaddq_u32(r_block, r_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->g_block_span.e); \
  gvaddq_u32(g_block, g_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->b_block_span.e); \
  gvaddq_u32(b_block, b_block, block_span); \
} \
| 1875 | |
// Start-of-span init (unshaded, textured): only u/v need interpolating; step
// them right by left_x, splat and add the u/v block spans.
#define setup_blocks_span_initialize_unshaded_textured() \
{ \
  u32 left_x = span_edge_data->left_x; \
  vec_4x32u block_span; \
  vec_2x32u v_left_x; \
 \
  gvld1_u32(uv, span_uvrg_offset); \
  gvdup_n_u32(v_left_x, left_x); \
  gvmla_u32(uv, uv_dx, v_left_x); \
 \
  gvdupq_l_u32(u_block, uv, 0); \
  gvdupq_l_u32(v_block, uv, 1); \
 \
  gvld1q_u32(block_span, psx_gpu->u_block_span.e); \
  gvaddq_u32(u_block, u_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->v_block_span.e); \
  gvaddq_u32(v_block, v_block, block_span); \
} \

// Unshaded + untextured spans are a flat color; nothing to initialize.
#define setup_blocks_span_initialize_unshaded_untextured() \
| 1896 | |
// Swizzled texture layout: recombine the u/v nibbles via shift-insert so the
// byte pair addresses the swizzled cache layout (u gets v's low nibble in
// its high bits; v gets u's high nibble in its low bits).
#define setup_blocks_texture_swizzled() \
{ \
  vec_8x8u u_saved = u; \
  gvsli_n_u8(u, v, 4); \
  gvsri_n_u8(v, u_saved, 4); \
} \

// Unswizzled layout: u/v are used as-is; expands to nothing.
#define setup_blocks_texture_unswizzled() \
| 1905 | |
// Emit one 8-pixel block (shaded, textured): take the integer part of each
// 4-lane interpolator (low 4 pixels by narrowing-shift, high 4 by add-and-
// narrow with the 4-pixel stride), advance all interpolators by 8 pixels,
// wrap u/v with the texture mask, optionally swizzle, and store u/v, r/g/b,
// dither offsets and the fb pointer into the block record for the later
// texturing/shading pass.
#define setup_blocks_store_shaded_textured(swizzling, dithering, target, \
 edge_type) \
{ \
  vec_8x16u u_whole; \
  vec_8x16u v_whole; \
  vec_8x16u r_whole; \
  vec_8x16u g_whole; \
  vec_8x16u b_whole; \
  vec_4x16u u_whole_lo, u_whole_hi; \
  vec_4x16u v_whole_lo, v_whole_hi; \
  vec_4x16u r_whole_lo, r_whole_hi; \
  vec_4x16u g_whole_lo, g_whole_hi; \
  vec_4x16u b_whole_lo, b_whole_hi; \
 \
  vec_8x8u u; \
  vec_8x8u v; \
  vec_8x8u r; \
  vec_8x8u g; \
  vec_8x8u b; \
 \
  vec_4x32u dx4; \
  vec_4x32u dx8; \
 \
  gvshrn_n_u32(u_whole_lo, u_block, 16); \
  gvshrn_n_u32(v_whole_lo, v_block, 16); \
  gvshrn_n_u32(r_whole_lo, r_block, 16); \
  gvshrn_n_u32(g_whole_lo, g_block, 16); \
  gvshrn_n_u32(b_whole_lo, b_block, 16); \
 \
  gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 0); \
  gvaddhn_u32(u_whole_hi, u_block, dx4); \
  gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 1); \
  gvaddhn_u32(v_whole_hi, v_block, dx4); \
  gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 0); \
  gvaddhn_u32(r_whole_hi, r_block, dx4); \
  gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 1); \
  gvaddhn_u32(g_whole_hi, g_block, dx4); \
  gvdupq_n_u32(dx4, b_dx4); \
  gvaddhn_u32(b_whole_hi, b_block, dx4); \
 \
  gvcombine_u16(u_whole, u_whole_lo, u_whole_hi); \
  gvcombine_u16(v_whole, v_whole_lo, v_whole_hi); \
  gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \
  gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \
  gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \
  gvmovn_u16(u, u_whole); \
  gvmovn_u16(v, v_whole); \
  gvmovn_u16(r, r_whole); \
  gvmovn_u16(g, g_whole); \
  gvmovn_u16(b, b_whole); \
 \
  gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 0); \
  gvaddq_u32(u_block, u_block, dx8); \
  gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 1); \
  gvaddq_u32(v_block, v_block, dx8); \
  gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 0); \
  gvaddq_u32(r_block, r_block, dx8); \
  gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 1); \
  gvaddq_u32(g_block, g_block, dx8); \
  gvdupq_n_u32(dx8, b_dx8); \
  gvaddq_u32(b_block, b_block, dx8); \
 \
  gvand(u, u, gvlo(texture_mask)); \
  gvand(v, v, gvhi(texture_mask)); \
  setup_blocks_texture_##swizzling(); \
 \
  gvst2_u8(u, v, (u8 *)block->uv.e); \
  gvst1_u8(r, block->r.e); \
  gvst1_u8(g, block->g.e); \
  gvst1_u8(b, block->b.e); \
  gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e); \
  block->fb_ptr = fb_ptr; \
} \
| 1979 | |
// Emit one 8-pixel block (unshaded, textured): same narrowing/advance scheme
// as the shaded variant but only for u/v; color is flat so no r/g/b are
// stored, just u/v, dither offsets and the fb pointer.
#define setup_blocks_store_unshaded_textured(swizzling, dithering, target, \
 edge_type) \
{ \
  vec_8x16u u_whole; \
  vec_8x16u v_whole; \
  vec_4x16u u_whole_lo, u_whole_hi; \
  vec_4x16u v_whole_lo, v_whole_hi; \
 \
  vec_8x8u u; \
  vec_8x8u v; \
 \
  vec_4x32u dx4; \
  vec_4x32u dx8; \
 \
  gvshrn_n_u32(u_whole_lo, u_block, 16); \
  gvshrn_n_u32(v_whole_lo, v_block, 16); \
 \
  gvdupq_l_u32(dx4, uv_dx4, 0); \
  gvaddhn_u32(u_whole_hi, u_block, dx4); \
  gvdupq_l_u32(dx4, uv_dx4, 1); \
  gvaddhn_u32(v_whole_hi, v_block, dx4); \
 \
  gvcombine_u16(u_whole, u_whole_lo, u_whole_hi); \
  gvcombine_u16(v_whole, v_whole_lo, v_whole_hi); \
  gvmovn_u16(u, u_whole); \
  gvmovn_u16(v, v_whole); \
 \
  gvdupq_l_u32(dx8, uv_dx8, 0); \
  gvaddq_u32(u_block, u_block, dx8); \
  gvdupq_l_u32(dx8, uv_dx8, 1); \
  gvaddq_u32(v_block, v_block, dx8); \
 \
  gvand(u, u, gvlo(texture_mask)); \
  gvand(v, v, gvhi(texture_mask)); \
  setup_blocks_texture_##swizzling(); \
 \
  gvst2_u8(u, v, (u8 *)block->uv.e); \
  gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e); \
  block->fb_ptr = fb_ptr; \
} \
| 2020 | |
// Apply dithering to r/g/b bytes: saturating add of the +4-biased offsets,
// then saturating subtract of 4 to remove the bias (net effect: signed
// dither with clamping at 0/255).
#define setup_blocks_store_shaded_untextured_dithered() \
  gvqadd_u8(r, r, dither_offsets); \
  gvqadd_u8(g, g, dither_offsets); \
  gvqadd_u8(b, b, dither_offsets); \
 \
  gvqsub_u8(r, r, d64_4); \
  gvqsub_u8(g, g, d64_4); \
  gvqsub_u8(b, b, d64_4) \

// Undithered: nothing to apply.
#define setup_blocks_store_shaded_untextured_undithered() \

// Indirect targets buffer the packed pixels into the block record; full and
// edge blocks are handled identically (masking happens at flush time).
#define setup_blocks_store_untextured_pixels_indirect_full(_pixels) \
  gvst1q_u16(_pixels, block->pixels.e); \
  block->fb_ptr = fb_ptr \

#define setup_blocks_store_untextured_pixels_indirect_edge(_pixels) \
  gvst1q_u16(_pixels, block->pixels.e); \
  block->fb_ptr = fb_ptr \

// Seed the 16-bit pixel accumulator from r (indirect: no mask bit).
#define setup_blocks_store_shaded_untextured_seed_pixels_indirect() \
  gvmull_u8(pixels, r, d64_1) \

// Direct full block: write all 8 pixels straight to the framebuffer.
#define setup_blocks_store_untextured_pixels_direct_full(_pixels) \
  gvst1q_u16(_pixels, fb_ptr) \

// Direct edge block: blend with the existing framebuffer contents using the
// span's right_mask (expanded to per-lane masks via test_mask) so pixels
// past the right edge are left untouched.
#define setup_blocks_store_untextured_pixels_direct_edge(_pixels) \
{ \
  vec_8x16u fb_pixels; \
  vec_8x16u draw_mask; \
  vec_8x16u test_mask; \
 \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  gvld1q_u16(fb_pixels, fb_ptr); \
  gvdupq_n_u16(draw_mask, span_edge_data->right_mask); \
  gvtstq_u16(draw_mask, draw_mask, test_mask); \
  gvbifq(fb_pixels, _pixels, draw_mask); \
  gvst1q_u16(fb_pixels, fb_ptr); \
} \

// Seed the pixel accumulator with the mask bit already set (direct writes
// bypass the flush stage that would otherwise apply it).
#define setup_blocks_store_shaded_untextured_seed_pixels_direct() \
  pixels = msb_mask; \
  gvmlal_u8(pixels, r, d64_1) \
| 2063 | |
// Emit one 8-pixel block (shaded, untextured): narrow r/g/b to bytes (low 4
// pixels by shift, high 4 by add-and-narrow with the 4-pixel stride),
// advance by 8 pixels, optionally dither, then pack to 5:5:5 with widening
// multiply-accumulate: r>>3 contributes bits 0..4, g (low 3 bits cleared,
// *4) bits 5..9, b (*128) bits 10..14.  The target/edge-specific macro then
// stores or blends the packed pixels.
#define setup_blocks_store_shaded_untextured(swizzling, dithering, target, \
 edge_type) \
{ \
  vec_8x16u r_whole; \
  vec_8x16u g_whole; \
  vec_8x16u b_whole; \
  vec_4x16u r_whole_lo, r_whole_hi; \
  vec_4x16u g_whole_lo, g_whole_hi; \
  vec_4x16u b_whole_lo, b_whole_hi; \
 \
  vec_8x8u r; \
  vec_8x8u g; \
  vec_8x8u b; \
 \
  vec_4x32u dx4; \
  vec_4x32u dx8; \
 \
  vec_8x16u pixels; \
 \
  gvshrn_n_u32(r_whole_lo, r_block, 16); \
  gvshrn_n_u32(g_whole_lo, g_block, 16); \
  gvshrn_n_u32(b_whole_lo, b_block, 16); \
 \
  gvdupq_l_u32(dx4, gvlo(rgb_dx4), 0); \
  gvaddhn_u32(r_whole_hi, r_block, dx4); \
  gvdupq_l_u32(dx4, gvlo(rgb_dx4), 1); \
  gvaddhn_u32(g_whole_hi, g_block, dx4); \
  gvdupq_l_u32(dx4, gvhi(rgb_dx4), 0); \
  gvaddhn_u32(b_whole_hi, b_block, dx4); \
 \
  gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \
  gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \
  gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \
  gvmovn_u16(r, r_whole); \
  gvmovn_u16(g, g_whole); \
  gvmovn_u16(b, b_whole); \
 \
  gvdupq_l_u32(dx8, gvlo(rgb_dx8), 0); \
  gvaddq_u32(r_block, r_block, dx8); \
  gvdupq_l_u32(dx8, gvlo(rgb_dx8), 1); \
  gvaddq_u32(g_block, g_block, dx8); \
  gvdupq_l_u32(dx8, gvhi(rgb_dx8), 0); \
  gvaddq_u32(b_block, b_block, dx8); \
 \
  setup_blocks_store_shaded_untextured_##dithering(); \
 \
  gvshr_n_u8(r, r, 3); \
  gvbic(g, g, d64_0x07); \
  gvbic(b, b, d64_0x07); \
 \
  setup_blocks_store_shaded_untextured_seed_pixels_##target(); \
  gvmlal_u8(pixels, g, d64_4); \
  gvmlal_u8(pixels, b, d64_128); \
 \
  setup_blocks_store_untextured_pixels_##target##_##edge_type(pixels); \
} \
| 2120 | |
// Unshaded + untextured store: the flat color vector was precomputed, so a
// block is just a pixel store/blend via the target/edge macro.
#define setup_blocks_store_unshaded_untextured(swizzling, dithering, target, \
 edge_type) \
  setup_blocks_store_untextured_pixels_##target##_##edge_type(colors) \

// Textured indirect blocks keep the raw mask bits; they are expanded during
// the texturing/shading pass.
#define setup_blocks_store_draw_mask_textured_indirect(_block, bits) \
  (_block)->draw_mask_bits = bits \

// Untextured indirect blocks expand the per-pixel mask bits to full 16-bit
// lane masks now (via test_mask) and store them in the block record.
#define setup_blocks_store_draw_mask_untextured_indirect(_block, bits) \
{ \
  vec_8x16u bits_mask; \
  vec_8x16u test_mask; \
 \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  gvdupq_n_u16(bits_mask, bits); \
  gvtstq_u16(bits_mask, bits_mask, test_mask); \
  gvst1q_u16(bits_mask, (_block)->draw_mask.e); \
} \

// Direct targets apply masking at store time; nothing recorded here.
#define setup_blocks_store_draw_mask_untextured_direct(_block, bits) \

// Reserve room for this span's blocks; if the block buffer would overflow,
// flush what was queued so far and restart the count at this span.
#define setup_blocks_add_blocks_indirect() \
  num_blocks += span_num_blocks; \
 \
  if(num_blocks > MAX_BLOCKS) \
  { \
    psx_gpu->num_blocks = num_blocks - span_num_blocks; \
    flush_render_block_buffer(psx_gpu); \
    num_blocks = span_num_blocks; \
    block = psx_gpu->blocks; \
  } \

// Direct targets don't buffer blocks; expands to nothing.
#define setup_blocks_add_blocks_direct() \
// Main block-emission loop, instantiated once per shading/texturing/
// dithering/swizzle/target combination by the functions below.  Walks every
// span produced by the setup_spans pass, converts each into 8-pixel blocks,
// and writes them either into psx_gpu->blocks (indirect) or straight to
// VRAM (direct).  The last block of a span is stored with the "edge" variant
// carrying the span's right_mask so the partial block clips correctly.
#define setup_blocks_do(shading, texturing, dithering, sw, target) \
  setup_blocks_load_msb_mask_##target(); \
  setup_blocks_variables_##shading##_##texturing(target); \
 \
  edge_data_struct *span_edge_data = psx_gpu->span_edge_data; \
  vec_4x32u *span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \
  u32 *span_b_offset = psx_gpu->span_b_offset; \
 \
  block_struct *block = psx_gpu->blocks + psx_gpu->num_blocks; \
 \
  u32 num_spans = psx_gpu->num_spans; \
 \
  u16 * __restrict__ fb_ptr; \
  u32 y; \
 \
  u32 num_blocks = psx_gpu->num_blocks; \
  u32 span_num_blocks; \
 \
  while(num_spans) \
  { \
    span_num_blocks = span_edge_data->num_blocks; \
    if(span_num_blocks) \
    { \
      y = span_edge_data->y; \
      /* 1024 u16s per VRAM line */ \
      fb_ptr = psx_gpu->vram_out_ptr + span_edge_data->left_x + (y * 1024); \
 \
      setup_blocks_span_initialize_##shading##_##texturing(); \
      setup_blocks_span_initialize_##dithering(texturing); \
 \
      setup_blocks_add_blocks_##target(); \
 \
      /* NOTE(review): pixel_span is computed but not read below -- */ \
      /* presumably kept to mirror the asm version; confirm. */ \
      s32 pixel_span = span_num_blocks * 8; \
      pixel_span -= __builtin_popcount(span_edge_data->right_mask & 0xFF); \
 \
      span_num_blocks--; \
      while(span_num_blocks) \
      { \
        setup_blocks_store_##shading##_##texturing(sw, dithering, target, \
         full); \
        setup_blocks_store_draw_mask_##texturing##_##target(block, 0x00); \
 \
        fb_ptr += 8; \
        block++; \
        span_num_blocks--; \
      } \
 \
      setup_blocks_store_##shading##_##texturing(sw, dithering, target, edge); \
      setup_blocks_store_draw_mask_##texturing##_##target(block, \
       span_edge_data->right_mask); \
 \
      block++; \
    } \
 \
    num_spans--; \
    span_edge_data++; \
    span_uvrg_offset++; \
    span_b_offset++; \
  } \
 \
  psx_gpu->num_blocks = num_blocks \
| 2214 | |
// setup_blocks_do instantiation: shaded, textured, dithered, swizzled
// texture layout, indirect (block-buffered) output.
void setup_blocks_shaded_textured_dithered_swizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  // Reference implementation, kept disabled for debugging comparisons.
  setup_blocks_shaded_textured_dithered_swizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, textured, dithered, swizzled, indirect);
}
| 2224 | |
// setup_blocks_do instantiation: shaded, textured, dithered, unswizzled,
// indirect output.
void setup_blocks_shaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_textured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, textured, dithered, unswizzled, indirect);
}
| 2234 | |
// setup_blocks_do instantiation: unshaded, textured, dithered, swizzled,
// indirect output.
void setup_blocks_unshaded_textured_dithered_swizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_unshaded_textured_dithered_swizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, textured, dithered, swizzled, indirect);
}
| 2244 | |
// setup_blocks_do instantiation: unshaded, textured, dithered, unswizzled,
// indirect output.
void setup_blocks_unshaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_unshaded_textured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, textured, dithered, unswizzled, indirect);
}
| 2254 | |
// setup_blocks_do instantiation: unshaded flat color, no texture/dither,
// indirect output.
void setup_blocks_unshaded_untextured_undithered_unswizzled_indirect(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_unshaded_untextured_undithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, untextured, undithered, unswizzled, indirect);
}
| 2264 | |
// setup_blocks_do instantiation: unshaded flat color, no texture/dither,
// direct-to-VRAM output.
void setup_blocks_unshaded_untextured_undithered_unswizzled_direct(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_unshaded_untextured_undithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, untextured, undithered, unswizzled, direct);
}
| 2274 | |
// setup_blocks_do instantiation: shaded, untextured, undithered, indirect
// output.
void setup_blocks_shaded_untextured_undithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_undithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, undithered, unswizzled, indirect);
}
| 2284 | |
// setup_blocks_do instantiation: shaded, untextured, dithered, indirect
// output.
void setup_blocks_shaded_untextured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, dithered, unswizzled, indirect);
}
| 2294 | |
// setup_blocks_do instantiation: shaded, untextured, undithered, direct
// output.
void setup_blocks_shaded_untextured_undithered_unswizzled_direct(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_undithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, undithered, unswizzled, direct);
}
| 2304 | |
// setup_blocks_do instantiation: shaded, untextured, dithered, direct
// output.
void setup_blocks_shaded_untextured_dithered_unswizzled_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_dithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, dithered, unswizzled, direct);
}
| 2314 | |
// Rebuilds the 4bpp texture cache for the current texture page: walks the
// page as 16x16 tiles, reading two VRAM rows (1024 u16s apart = one VRAM
// line) per iteration and expanding each packed 4-bit texel pair into two
// 8-bit palette indices (low nibble -> even byte, high nibble -> odd byte)
// before storing them into the linear texture_page_base cache.
static void update_texture_4bpp_cache(psx_gpu_struct *psx_gpu)
{
  u32 current_texture_page = psx_gpu->current_texture_page;
  u8 *texture_page_ptr = psx_gpu->texture_page_base;
  const u16 *vram_ptr = psx_gpu->vram_ptr;
  u32 tile_x, tile_y;
  u32 sub_y;
  vec_8x16u c_0x00f0;

  // Locate the page in VRAM: pages are arranged 16 across, 64 u16s wide.
  vram_ptr += (current_texture_page >> 4) * 256 * 1024;
  vram_ptr += (current_texture_page & 0xF) * 64;

  gvdupq_n_u16(c_0x00f0, 0x00f0);

  // This page is now clean.
  psx_gpu->dirty_textures_4bpp_mask &= ~(psx_gpu->current_texture_mask);

  for (tile_y = 16; tile_y; tile_y--)
  {
    for (tile_x = 16; tile_x; tile_x--)
    {
      for (sub_y = 8; sub_y; sub_y--)
      {
        vec_8x8u texel_block_a, texel_block_b;
        vec_8x16u texel_block_expanded_a, texel_block_expanded_b;
        vec_8x16u texel_block_expanded_c, texel_block_expanded_d;
        vec_8x16u texel_block_expanded_ab, texel_block_expanded_cd;

        gvld1_u8(texel_block_a, (u8 *)vram_ptr); vram_ptr += 1024;
        gvld1_u8(texel_block_b, (u8 *)vram_ptr); vram_ptr += 1024;

        // Widen bytes to 16 bits twice: once plain (low nibble target) and
        // once shifted left 4 (high nibble target), then clear bits 4..7 in
        // each 16-bit lane and OR -- producing two 8-bit indices per lane.
        gvmovl_u8(texel_block_expanded_a, texel_block_a);
        gvshll_n_u8(texel_block_expanded_b, texel_block_a, 4);
        gvmovl_u8(texel_block_expanded_c, texel_block_b);
        gvshll_n_u8(texel_block_expanded_d, texel_block_b, 4);

        gvbicq(texel_block_expanded_a, texel_block_expanded_a, c_0x00f0);
        gvbicq(texel_block_expanded_b, texel_block_expanded_b, c_0x00f0);
        gvbicq(texel_block_expanded_c, texel_block_expanded_c, c_0x00f0);
        gvbicq(texel_block_expanded_d, texel_block_expanded_d, c_0x00f0);

        gvorrq(texel_block_expanded_ab, texel_block_expanded_a, texel_block_expanded_b);
        gvorrq(texel_block_expanded_cd, texel_block_expanded_c, texel_block_expanded_d);

        gvst1q_2_pi_u32(texel_block_expanded_ab, texel_block_expanded_cd, texture_page_ptr);
      }

      // Rewind the 16 rows just consumed and step 4 u16s right to the
      // next tile column.
      vram_ptr -= (1024 * 16) - 4;
    }

    // Step down one tile row (16 VRAM lines), back to the first column.
    vram_ptr += (16 * 1024) - (4 * 16);
  }
}
| 2367 | |
// Copies one 8bpp texture page slice from VRAM into the tiled texture cache:
// 16 rows of 8 tiles, 16 VRAM lines per tile, moving 4 lines of 16 bytes per
// inner iteration.  If the requested page is the odd neighbour of the
// current page, the destination starts at the second half of the cache.
void update_texture_8bpp_cache_slice(psx_gpu_struct *psx_gpu,
 u32 texture_page)
{
#if 0
  // Reference implementation, kept disabled for debugging comparisons.
  update_texture_8bpp_cache_slice_(psx_gpu, texture_page);
  return;
#endif
  u16 *texture_page_ptr = psx_gpu->texture_page_base;
  u16 *vram_ptr = psx_gpu->vram_ptr;

  u32 tile_x, tile_y;
  u32 sub_y;

  // Locate the page in VRAM (pages are 16 across, 64 u16s wide).
  vram_ptr += (texture_page >> 4) * 256 * 1024;
  vram_ptr += (texture_page & 0xF) * 64;

  if((texture_page ^ psx_gpu->current_texture_page) & 0x1)
    texture_page_ptr += (8 * 16) * 8;

  for (tile_y = 16; tile_y; tile_y--)
  {
    for (tile_x = 8; tile_x; tile_x--)
    {
      for (sub_y = 4; sub_y; sub_y--)
      {
        // = {} on texels_d presumably silences an uninitialized-use warning;
        // all four are overwritten by the loads below.
        vec_4x32u texels_a, texels_b, texels_c, texels_d = {};
        gvld1q_u32(texels_a, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_b, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_c, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_d, vram_ptr); vram_ptr += 1024;

        gvst1q_2_pi_u32(texels_a, texels_b, texture_page_ptr);
        gvst1q_2_pi_u32(texels_c, texels_d, texture_page_ptr);
      }

      // Rewind the 16 lines consumed, step 8 u16s right to the next tile.
      vram_ptr -= (1024 * 16) - 8;
    }

    // Back to the first tile column, down 16 VRAM lines.
    vram_ptr -= (8 * 8);
    vram_ptr += (16 * 1024);

    texture_page_ptr += (8 * 16) * 8;
  }
}
| 2412 | |
// Intentional no-op: untextured blocks have no texels to fetch; their
// pixels are produced entirely by the shade/blend stages.
void texture_blocks_untextured(psx_gpu_struct *psx_gpu)
{
}
| 2416 | |
/*
 * Fetches and CLUT-translates texels for all queued 4bpp-textured blocks.
 * The 16-color CLUT is preloaded deinterleaved into two byte vectors
 * (low and high bytes of each 15bpp entry) so a single byte table lookup
 * (gvtbl2_u8) can translate 8 texel indexes per block at once.
 */
void texture_blocks_4bpp(psx_gpu_struct *psx_gpu)
{
#if 0
  texture_blocks_4bpp_(psx_gpu);
  return;
#endif
  block_struct *block = psx_gpu->blocks;
  u32 num_blocks = psx_gpu->num_blocks;

  vec_8x8u texels_low;
  vec_8x8u texels_high;

  vec_16x8u clut_low;
  vec_16x8u clut_high;

  // 4bpp texels are cached one-index-per-byte, so the page can be read as
  // an 8bpp byte array here.
  const u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr;

  // Deinterleaving load: even bytes -> clut_low, odd bytes -> clut_high.
  gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr);

  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask)
    update_texture_4bpp_cache(psx_gpu);

  while(num_blocks)
  {
    // Gather the 8 palette indexes for this block's u/v coordinates.
    vec_8x8u texels =
    {
      .u8 =
      {
        texture_ptr_8bpp[block->uv.e[0]],
        texture_ptr_8bpp[block->uv.e[1]],
        texture_ptr_8bpp[block->uv.e[2]],
        texture_ptr_8bpp[block->uv.e[3]],
        texture_ptr_8bpp[block->uv.e[4]],
        texture_ptr_8bpp[block->uv.e[5]],
        texture_ptr_8bpp[block->uv.e[6]],
        texture_ptr_8bpp[block->uv.e[7]]
      }
    };

    // Translate indexes to the low/high bytes of the 15bpp colors.
    gvtbl2_u8(texels_low, clut_low, texels);
    gvtbl2_u8(texels_high, clut_high, texels);

    // Interleaving store reassembles the 8 u16 texels.
    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e);

    num_blocks--;
    block++;
  }
}
| 2465 | |
| 2466 | void texture_blocks_8bpp(psx_gpu_struct *psx_gpu) |
| 2467 | { |
| 2468 | #if 0 |
| 2469 | texture_blocks_8bpp_(psx_gpu); |
| 2470 | return; |
| 2471 | #endif |
| 2472 | u32 num_blocks = psx_gpu->num_blocks; |
| 2473 | |
| 2474 | if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) |
| 2475 | update_texture_8bpp_cache(psx_gpu); |
| 2476 | |
| 2477 | const u8 * __restrict__ texture_ptr_8bpp = psx_gpu->texture_page_ptr; |
| 2478 | const u16 * __restrict__ clut_ptr = psx_gpu->clut_ptr; |
| 2479 | block_struct * __restrict__ block = psx_gpu->blocks; |
| 2480 | |
| 2481 | while(num_blocks) |
| 2482 | { |
| 2483 | u16 offset; |
| 2484 | #define load_one(i_) \ |
| 2485 | offset = block->uv.e[i_]; u16 texel##i_ = texture_ptr_8bpp[offset] |
| 2486 | #define store_one(i_) \ |
| 2487 | block->texels.e[i_] = clut_ptr[texel##i_] |
| 2488 | load_one(0); load_one(1); load_one(2); load_one(3); |
| 2489 | load_one(4); load_one(5); load_one(6); load_one(7); |
| 2490 | store_one(0); store_one(1); store_one(2); store_one(3); |
| 2491 | store_one(4); store_one(5); store_one(6); store_one(7); |
| 2492 | #undef load_one |
| 2493 | #undef store_one |
| 2494 | |
| 2495 | num_blocks--; |
| 2496 | block++; |
| 2497 | } |
| 2498 | } |
| 2499 | |
| 2500 | void texture_blocks_16bpp(psx_gpu_struct *psx_gpu) |
| 2501 | { |
| 2502 | #if 0 |
| 2503 | texture_blocks_16bpp_(psx_gpu); |
| 2504 | return; |
| 2505 | #endif |
| 2506 | u32 num_blocks = psx_gpu->num_blocks; |
| 2507 | const u16 * __restrict__ texture_ptr_16bpp = psx_gpu->texture_page_ptr; |
| 2508 | block_struct * __restrict__ block = psx_gpu->blocks; |
| 2509 | |
| 2510 | while(num_blocks) |
| 2511 | { |
| 2512 | u32 offset; |
| 2513 | #define load_one(i_) \ |
| 2514 | offset = block->uv.e[i_]; \ |
| 2515 | offset += ((offset & 0xFF00) * 3); \ |
| 2516 | u16 texel##i_ = texture_ptr_16bpp[offset] |
| 2517 | #define store_one(i_) \ |
| 2518 | block->texels.e[i_] = texel##i_ |
| 2519 | load_one(0); load_one(1); load_one(2); load_one(3); |
| 2520 | load_one(4); load_one(5); load_one(6); load_one(7); |
| 2521 | store_one(0); store_one(1); store_one(2); store_one(3); |
| 2522 | store_one(4); store_one(5); store_one(6); store_one(7); |
| 2523 | #undef load_one |
| 2524 | #undef store_one |
| 2525 | |
| 2526 | num_blocks--; |
| 2527 | block++; |
| 2528 | } |
| 2529 | } |
| 2530 | |
/* Indirect shaders leave pixels in the block list for a later blend pass,
   so no mask-MSB vector is needed. */
#define shade_blocks_load_msb_mask_indirect() \

/* Direct shaders write straight to the framebuffer and must OR in the
   mask (STP) bit according to the current mask setting. */
#define shade_blocks_load_msb_mask_direct() \
  vec_8x16u msb_mask; \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \

/* Indirect store: stash draw mask and pixels back into the block for the
   blend stage. */
#define shade_blocks_store_indirect(_draw_mask, _pixels) \
  gvst1q_u16(_draw_mask, block->draw_mask.e); \
  gvst1q_u16(_pixels, block->pixels.e); \

/* Direct store: read-modify-write the framebuffer, inserting new pixels
   (with the mask MSB forced) only in lanes NOT set in _draw_mask. */
#define shade_blocks_store_direct(_draw_mask, _pixels) \
{ \
  u16 * __restrict__ fb_ptr = block->fb_ptr; \
  vec_8x16u fb_pixels; \
  gvld1q_u16(fb_pixels, fb_ptr); \
  gvorrq(_pixels, _pixels, msb_mask); \
  gvbifq(fb_pixels, _pixels, _draw_mask); \
  gvst1q_u16(fb_pixels, fb_ptr); \
} \

/* When dithering, the "modulation by neutral 0x808080 is an identity"
   shortcut below does not hold (dither offsets still apply), so the
   dithered variant is empty. */
#define shade_blocks_textured_false_modulated_check_dithered(target) \

/* Undithered: a neutral modulation color degenerates to the (cheaper)
   unmodulated shader. */
#define shade_blocks_textured_false_modulated_check_undithered(target) \
  if(psx_gpu->triangle_color == 0x808080) \
  { \
    shade_blocks_textured_unmodulated_##target(psx_gpu); \
    return; \
  } \

/* Shaded primitives get their colors per-block (see the block_load
   variant below), so there is no one-time primitive load. */
#define shade_blocks_textured_modulated_shaded_primitive_load(dithering, \
 target) \

/* Unshaded primitives use one flat color for the whole primitive:
   broadcast its r/g/b once, and possibly take the unmodulated shortcut. */
#define shade_blocks_textured_modulated_unshaded_primitive_load(dithering, \
 target) \
{ \
  u32 color = psx_gpu->triangle_color; \
  gvdup_n_u8(colors_r, color); \
  gvdup_n_u8(colors_g, color >> 8); \
  gvdup_n_u8(colors_b, color >> 16); \
  shade_blocks_textured_false_modulated_check_##dithering(target); \
} \

/* Shaded: reload interpolated per-pixel colors from each block. */
#define shade_blocks_textured_modulated_shaded_block_load() \
  gvld1_u8(colors_r, block->r.e); \
  gvld1_u8(colors_g, block->g.e); \
  gvld1_u8(colors_b, block->b.e) \

/* Unshaded: colors were broadcast once at primitive load; nothing to do. */
#define shade_blocks_textured_modulated_unshaded_block_load() \

/* Dithered modulate: widening multiply-accumulate texel*color on top of
   the per-block dither offsets. */
#define shade_blocks_textured_modulate_dithered(component) \
  gvld1q_u16(pixels_##component, block->dither_offsets.e); \
  gvmlal_u8(pixels_##component, texels_##component, colors_##component) \

/* Undithered modulate: plain widening multiply texel*color. */
#define shade_blocks_textured_modulate_undithered(component) \
  gvmull_u8(pixels_##component, texels_##component, colors_##component) \
| 2586 | |
/*
 * Shared body for all modulated-texture shaders.  Per block:
 *   1. expand draw_mask_bits to a per-lane mask via test_mask;
 *   2. unpack each 5:5:5 texel into 5-bit r/g/b byte vectors;
 *   3. widening-multiply by the modulation color (plus dither offsets for
 *      the dithered variants), then narrow back with saturation (>>4 drops
 *      the 0x80 "neutral" scale of the color);
 *   4. repack to 15bpp with multiply-accumulate (x1 / x4 / x128 place the
 *      fields at bits 0 / 5 / 10), keeping the texel's original mask MSB;
 *   5. suppress lanes whose texel was fully transparent (== 0) or masked;
 *   6. store via the direct (framebuffer) or indirect (block) path.
 * Parameters select expansion of the helper macros above:
 *   shading:   shaded | unshaded
 *   dithering: dithered | undithered
 *   target:    direct | indirect
 */
#define shade_blocks_textured_modulated_do(shading, dithering, target) \
  block_struct * __restrict__ block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u texels; \
  \
  vec_8x8u texels_r; \
  vec_8x8u texels_g; \
  vec_8x8u texels_b; \
  \
  vec_8x8u colors_r; \
  vec_8x8u colors_g; \
  vec_8x8u colors_b; \
  \
  vec_8x8u pixels_r_low; \
  vec_8x8u pixels_g_low; \
  vec_8x8u pixels_b_low; \
  vec_8x16u pixels; \
  \
  vec_8x16u pixels_r; \
  vec_8x16u pixels_g; \
  vec_8x16u pixels_b; \
  \
  vec_8x16u draw_mask; \
  vec_8x16u zero_mask; \
  \
  vec_8x8u d64_0x07; \
  vec_8x8u d64_0x1F; \
  vec_8x8u d64_1; \
  vec_8x8u d64_4; \
  vec_8x8u d64_128; \
  \
  vec_8x16u d128_0x8000; \
  \
  vec_8x16u test_mask; \
  u32 draw_mask_bits; \
  \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  shade_blocks_load_msb_mask_##target(); \
  \
  gvdup_n_u8(d64_0x07, 0x07); \
  gvdup_n_u8(d64_0x1F, 0x1F); \
  gvdup_n_u8(d64_1, 1); \
  gvdup_n_u8(d64_4, 4); \
  gvdup_n_u8(d64_128, 128u); \
  \
  gvdupq_n_u16(d128_0x8000, 0x8000); \
  \
  shade_blocks_textured_modulated_##shading##_primitive_load(dithering, \
   target); \
  \
  while(num_blocks) \
  { \
    draw_mask_bits = block->draw_mask_bits; \
    gvdupq_n_u16(draw_mask, draw_mask_bits); \
    gvtstq_u16(draw_mask, draw_mask, test_mask); \
    \
    shade_blocks_textured_modulated_##shading##_block_load(); \
    \
    gvld1q_u16(texels, block->texels.e); \
    \
    gvmovn_u16(texels_r, texels); \
    gvshrn_n_u16(texels_g, texels, 5); \
    gvshrn_n_u16(texels_b, texels, 7); \
    \
    gvand(texels_r, texels_r, d64_0x1F); \
    gvand(texels_g, texels_g, d64_0x1F); \
    gvshr_n_u8(texels_b, texels_b, 3); \
    \
    shade_blocks_textured_modulate_##dithering(r); \
    shade_blocks_textured_modulate_##dithering(g); \
    shade_blocks_textured_modulate_##dithering(b); \
    \
    gvceqzq_u16(zero_mask, texels); \
    gvand(pixels, texels, d128_0x8000); \
    \
    gvqshrun_n_s16(pixels_r_low, pixels_r, 4); \
    gvqshrun_n_s16(pixels_g_low, pixels_g, 4); \
    gvqshrun_n_s16(pixels_b_low, pixels_b, 4); \
    \
    gvorrq(zero_mask, draw_mask, zero_mask); \
    \
    gvshr_n_u8(pixels_r_low, pixels_r_low, 3); \
    gvbic(pixels_g_low, pixels_g_low, d64_0x07); \
    gvbic(pixels_b_low, pixels_b_low, d64_0x07); \
    \
    gvmlal_u8(pixels, pixels_r_low, d64_1); \
    gvmlal_u8(pixels, pixels_g_low, d64_4); \
    gvmlal_u8(pixels, pixels_b_low, d64_128); \
    \
    shade_blocks_store_##target(zero_mask, pixels); \
    \
    num_blocks--; \
    block++; \
  } \
/*
 * Instantiations of shade_blocks_textured_modulated_do for every
 * shading/dithering/target combination.  The disabled #if 0 bodies call
 * hand-written assembly equivalents (same name with a '_' suffix) and are
 * kept for A/B testing against this C SIMD implementation.
 */
void shade_blocks_shaded_textured_modulated_dithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_dithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, dithered, direct);
}

void shade_blocks_shaded_textured_modulated_undithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_undithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, undithered, direct);
}

void shade_blocks_unshaded_textured_modulated_dithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_dithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, dithered, direct);
}

void shade_blocks_unshaded_textured_modulated_undithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_undithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, undithered, direct);
}

void shade_blocks_shaded_textured_modulated_dithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_dithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, dithered, indirect);
}

void shade_blocks_shaded_textured_modulated_undithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_undithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, undithered, indirect);
}

void shade_blocks_unshaded_textured_modulated_dithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_dithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, dithered, indirect);
}

void shade_blocks_unshaded_textured_modulated_undithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_undithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, undithered, indirect);
}
| 2761 | |
/*
 * Shared body for unmodulated texturing: texels pass through unchanged,
 * so per block only the draw mask is expanded and fully transparent
 * texels (== 0) are suppressed before the direct/indirect store.
 */
#define shade_blocks_textured_unmodulated_do(target) \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u draw_mask; \
  vec_8x16u test_mask; \
  u32 draw_mask_bits; \
  \
  vec_8x16u pixels; \
  \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  shade_blocks_load_msb_mask_##target(); \
  \
  while(num_blocks) \
  { \
    vec_8x16u zero_mask; \
    \
    draw_mask_bits = block->draw_mask_bits; \
    gvdupq_n_u16(draw_mask, draw_mask_bits); \
    gvtstq_u16(draw_mask, draw_mask, test_mask); \
    \
    gvld1q_u16(pixels, block->texels.e); \
    \
    gvceqzq_u16(zero_mask, pixels); \
    gvorrq(zero_mask, draw_mask, zero_mask); \
    \
    shade_blocks_store_##target(zero_mask, pixels); \
    \
    num_blocks--; \
    block++; \
  } \
| 2792 | |
void shade_blocks_textured_unmodulated_indirect(psx_gpu_struct *psx_gpu)
{
#if 0
  shade_blocks_textured_unmodulated_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_unmodulated_do(indirect)
}

void shade_blocks_textured_unmodulated_direct(psx_gpu_struct *psx_gpu)
{
#if 0
  shade_blocks_textured_unmodulated_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_unmodulated_do(direct)
}

// Intentional no-op: unshaded untextured blocks already have pixels and
// draw_mask stored by the setup stage (see setup_sprite_untextured), so
// the indirect path has nothing to compute before blending.
void shade_blocks_unshaded_untextured_indirect(psx_gpu_struct *psx_gpu)
{
}
| 2814 | |
/*
 * Writes flat-colored (unshaded, untextured) blocks straight to the
 * framebuffer.  All such blocks carry the same solid color (see
 * setup_sprite_untextured, which stores one broadcast color vector into
 * every block), so the pixel vector is loaded once from the first block
 * and only the per-block draw mask and fb pointer vary.
 */
void shade_blocks_unshaded_untextured_direct(psx_gpu_struct *psx_gpu)
{
#if 0
  shade_blocks_unshaded_untextured_direct_(psx_gpu);
  return;
#endif
  block_struct *block = psx_gpu->blocks;
  u32 num_blocks = psx_gpu->num_blocks;

  vec_8x16u pixels;
  // NOTE(review): reads blocks[0].pixels even when num_blocks == 0 —
  // presumed harmless because the block array storage always exists;
  // confirm blocks[] is a fixed-size member of psx_gpu_struct.
  gvld1q_u16(pixels, block->pixels.e);
  shade_blocks_load_msb_mask_direct();

  while(num_blocks)
  {
    vec_8x16u draw_mask;
    gvld1q_u16(draw_mask, block->draw_mask.e);
    shade_blocks_store_direct(draw_mask, pixels);

    num_blocks--;
    block++;
  }
}
| 2838 | |
/* Mask evaluation ON: also skip lanes whose framebuffer pixel has the
   mask (STP) bit set — tested via the sign bit of the s16 lanes. */
#define blend_blocks_mask_evaluate_on() \
  vec_8x16u mask_pixels; \
  gvcltzq_s16(mask_pixels, framebuffer_pixels); \
  gvorrq(draw_mask, draw_mask, mask_pixels) \

#define blend_blocks_mask_evaluate_off() \

/* B = (F + P) / 2 per 5-bit component, without cross-field carries:
   the halving add gvhaddq_u16 computes (a + b) >> 1 across the whole
   lane; subtracting the XOR's lowest field bits (mask 0x0421) first
   cancels the bit that would otherwise leak into the next component. */
#define blend_blocks_average() \
{ \
  vec_8x16u pixels_no_msb; \
  vec_8x16u fb_pixels_no_msb; \
  \
  vec_8x16u d128_0x0421; \
  \
  gvdupq_n_u16(d128_0x0421, 0x0421); \
  \
  gveorq(blend_pixels, pixels, framebuffer_pixels); \
  gvbicq(pixels_no_msb, pixels, d128_0x8000); \
  gvand(blend_pixels, blend_pixels, d128_0x0421); \
  gvsubq_u16(blend_pixels, pixels_no_msb, blend_pixels); \
  gvbicq(fb_pixels_no_msb, framebuffer_pixels, d128_0x8000); \
  gvhaddq_u16(blend_pixels, fb_pixels_no_msb, blend_pixels); \
} \

/* B = min(F + P, max) per component.  Red+blue are isolated together
   (0x7C1F) and green separately (0x03E0) so the two sums cannot carry
   into each other; the results are clamped and recombined. */
#define blend_blocks_add() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u fb_rb, fb_g; \
  \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x03E0; \
  \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
  \
  gvand(pixels_rb, pixels, d128_0x7C1F); \
  gvand(pixels_g, pixels, d128_0x03E0); \
  \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
  \
  gvaddq_u16(fb_rb, fb_rb, pixels_rb); \
  gvaddq_u16(fb_g, fb_g, pixels_g); \
  \
  gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \
  gvminq_u16(fb_g, fb_g, d128_0x03E0); \
  \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

/* B = max(F - P, 0) per component, via saturating subtracts (u8 lanes
   cover red+blue, u16 lanes cover green). */
#define blend_blocks_subtract() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u fb_rb, fb_g; \
  \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x03E0; \
  \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
  \
  gvand(pixels_rb, pixels, d128_0x7C1F); \
  gvand(pixels_g, pixels, d128_0x03E0); \
  \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
  \
  gvqsubq_u8(fb_rb, fb_rb, pixels_rb); \
  gvqsubq_u16(fb_g, fb_g, pixels_g); \
  \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

/* B = min(F + P/4, max): pixels are pre-shifted right by 2, and the
   field masks are shifted accordingly (0x1C07 / 0x00E0) before the same
   clamped add as blend_blocks_add(). */
#define blend_blocks_add_fourth() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u pixels_fourth; \
  vec_8x16u fb_rb, fb_g; \
  \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x1C07; \
  vec_8x16u d128_0x03E0; \
  vec_8x16u d128_0x00E0; \
  \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x1C07, 0x1C07); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
  gvdupq_n_u16(d128_0x00E0, 0x00E0); \
  \
  gvshrq_n_u16(pixels_fourth, pixels, 2); \
  \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
  \
  gvand(pixels_rb, pixels_fourth, d128_0x1C07); \
  gvand(pixels_g, pixels_fourth, d128_0x00E0); \
  \
  gvaddq_u16(fb_rb, fb_rb, pixels_rb); \
  gvaddq_u16(fb_g, fb_g, pixels_g); \
  \
  gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \
  gvminq_u16(fb_g, fb_g, d128_0x03E0); \
  \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

/* Textured blending is selective: only texels with the MSB (semi-
   transparency) bit set keep the blended value; the rest revert to the
   unblended texel. */
#define blend_blocks_blended_combine_textured() \
{ \
  vec_8x16u blend_mask; \
  gvcltzq_s16(blend_mask, pixels); \
  \
  gvorrq(blend_pixels, blend_pixels, d128_0x8000); \
  gvbifq(blend_pixels, pixels, blend_mask); \
} \

/* Untextured pixels are always blended; nothing to combine. */
#define blend_blocks_blended_combine_untextured() \

#define blend_blocks_body_blend(blend_mode, texturing) \
{ \
  blend_blocks_##blend_mode(); \
  blend_blocks_blended_combine_##texturing(); \
} \

#define blend_blocks_body_average(texturing) \
  blend_blocks_body_blend(average, texturing) \

#define blend_blocks_body_add(texturing) \
  blend_blocks_body_blend(add, texturing) \

#define blend_blocks_body_subtract(texturing) \
  blend_blocks_body_blend(subtract, texturing) \

#define blend_blocks_body_add_fourth(texturing) \
  blend_blocks_body_blend(add_fourth, texturing) \

/* Unblended: pass the shaded pixels through untouched. */
#define blend_blocks_body_unblended(texturing) \
  blend_pixels = pixels \
| 2976 | |
/*
 * Shared body for all blenders: for each queued block, load its shaded
 * pixels and draw mask, fetch the framebuffer run, optionally extend the
 * mask with framebuffer mask bits (mask_evaluate), apply the selected
 * blend_mode/texturing combination, force the mask MSB, and write back
 * only the unmasked lanes.
 */
#define blend_blocks_do(texturing, blend_mode, mask_evaluate) \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u draw_mask; \
  vec_8x16u pixels; \
  vec_8x16u blend_pixels; \
  vec_8x16u framebuffer_pixels; \
  vec_8x16u msb_mask; \
  vec_8x16u d128_0x8000; \
  \
  u16 *fb_ptr; \
  \
  gvdupq_n_u16(d128_0x8000, 0x8000); \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \
  (void)d128_0x8000; /* sometimes unused */ \
  \
  while(num_blocks) \
  { \
    gvld1q_u16(pixels, block->pixels.e); \
    gvld1q_u16(draw_mask, block->draw_mask.e); \
    fb_ptr = block->fb_ptr; \
    \
    gvld1q_u16(framebuffer_pixels, fb_ptr); \
    \
    blend_blocks_mask_evaluate_##mask_evaluate(); \
    blend_blocks_body_##blend_mode(texturing); \
    \
    gvorrq(blend_pixels, blend_pixels, msb_mask); \
    gvbifq(framebuffer_pixels, blend_pixels, draw_mask); \
    gvst1q_u16(framebuffer_pixels, fb_ptr); \
    \
    num_blocks--; \
    block++; \
  } \
| 3011 | |
| 3012 | |
/*
 * Instantiations of blend_blocks_do for every texturing/blend-mode/
 * mask-evaluate combination.  The disabled #if 0 bodies call hand-written
 * assembly equivalents (same name with a '_' suffix), kept for A/B
 * testing against this C SIMD implementation.
 */
void blend_blocks_textured_average_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_average_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, average, off);
}

void blend_blocks_untextured_average_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_average_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, average, off);
}

void blend_blocks_textured_average_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_average_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, average, on);
}

void blend_blocks_untextured_average_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_average_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, average, on);
}

void blend_blocks_textured_add_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add, off);
}

void blend_blocks_textured_add_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add, on);
}

void blend_blocks_untextured_add_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add, off);
}

void blend_blocks_untextured_add_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add, on);
}

void blend_blocks_textured_subtract_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_subtract_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, subtract, off);
}

void blend_blocks_textured_subtract_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_subtract_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, subtract, on);
}

void blend_blocks_untextured_subtract_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_subtract_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, subtract, off);
}

void blend_blocks_untextured_subtract_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_subtract_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, subtract, on);
}

void blend_blocks_textured_add_fourth_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_fourth_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add_fourth, off);
}

void blend_blocks_textured_add_fourth_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_fourth_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add_fourth, on);
}

void blend_blocks_untextured_add_fourth_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_fourth_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add_fourth, off);
}

void blend_blocks_untextured_add_fourth_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_fourth_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add_fourth, on);
}

void blend_blocks_textured_unblended_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_unblended_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, unblended, on);
}

// Intentional no-op: unblended with mask evaluation off means nothing
// would change in the framebuffer beyond what the shade stage already
// wrote, so there is no work for the blend pass.
void blend_blocks_textured_unblended_off(psx_gpu_struct *psx_gpu)
{
}
| 3169 | |
/*
 * Queues render blocks for an untextured (flat color) sprite/rectangle.
 * When no mask evaluation, modulation, blending or interlacing is active,
 * defers to the simple direct-fill path instead.
 *
 * x, y:          top-left corner in the framebuffer
 * u, v:          unused for untextured sprites (kept for signature parity)
 * width, height: size in pixels
 * color:         0x00BBGGRR, converted below to 15bpp
 */
void setup_sprite_untextured(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u,
 s32 v, s32 width, s32 height, u32 color)
{
  if((psx_gpu->render_state & (RENDER_STATE_MASK_EVALUATE |
   RENDER_FLAGS_MODULATE_TEXELS | RENDER_FLAGS_BLEND)) == 0 &&
   (psx_gpu->render_mode & RENDER_INTERLACE_ENABLED) == 0)
  {
    setup_sprite_untextured_simple(psx_gpu, x, y, u, v, width, height, color);
    return;
  }

#if 0
  setup_sprite_untextured_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  // Width of the last (possibly partial) 8-pixel block, 1..8.
  u32 right_width = ((width - 1) & 0x7) + 1;
  // Bits set for the lanes beyond the sprite's right edge.
  u32 right_mask_bits = (0xFF << right_width);
  u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + x;
  u32 block_width = (width + 7) / 8;
  // Step from the end of one row of blocks to the start of the next.
  u32 fb_ptr_pitch = 1024 - ((block_width - 1) * 8);
  u32 blocks_remaining;
  u32 num_blocks = psx_gpu->num_blocks;
  block_struct *block = psx_gpu->blocks + num_blocks;

  u32 color_r = color & 0xFF;
  u32 color_g = (color >> 8) & 0xFF;
  u32 color_b = (color >> 16) & 0xFF;
  vec_8x16u colors;
  vec_8x16u right_mask;
  vec_8x16u test_mask;
  vec_8x16u zero_mask;

  gvld1q_u16(test_mask, psx_gpu->test_mask.e);
  // Repack 8:8:8 color into 5:5:5.
  color = (color_r >> 3) | ((color_g >> 3) << 5) | ((color_b >> 3) << 10);

  gvdupq_n_u16(colors, color);
  gvdupq_n_u16(zero_mask, 0x00);
  // Expand right_mask_bits into a per-lane mask for the edge block.
  gvdupq_n_u16(right_mask, right_mask_bits);
  gvtstq_u16(right_mask, right_mask, test_mask);

  while(height)
  {
    blocks_remaining = block_width - 1;
    num_blocks += block_width;

    // Flush the queue if this row would overflow the block buffer.
    if(num_blocks > MAX_BLOCKS)
    {
      flush_render_block_buffer(psx_gpu);
      num_blocks = block_width;
      block = psx_gpu->blocks;
    }

    // Interior blocks: fully covered, draw mask all zero.
    while(blocks_remaining)
    {
      gvst1q_u16(colors, block->pixels.e);
      gvst1q_u16(zero_mask, block->draw_mask.e);
      block->fb_ptr = fb_ptr;

      fb_ptr += 8;
      block++;
      blocks_remaining--;
    }

    // Rightmost block of the row: mask off lanes past the sprite edge.
    gvst1q_u16(colors, block->pixels.e);
    gvst1q_u16(right_mask, block->draw_mask.e);
    block->fb_ptr = fb_ptr;

    block++;
    fb_ptr += fb_ptr_pitch;

    height--;
    psx_gpu->num_blocks = num_blocks;
  }
}
| 3244 | |
/* Preload the 16-color CLUT deinterleaved into low/high byte vectors so
   that gvtbl2_u8 can translate 8 texel indexes at a time. */
#define setup_sprite_tiled_initialize_4bpp_clut() \
  vec_16x8u clut_low, clut_high; \
  \
  gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr) \

#define setup_sprite_tiled_initialize_4bpp() \
  setup_sprite_tiled_initialize_4bpp_clut(); \
  \
  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask) \
    update_texture_4bpp_cache(psx_gpu) \

#define setup_sprite_tiled_initialize_8bpp() \
  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) \
    update_texture_8bpp_cache(psx_gpu) \

/* Load 8 cached texel bytes at (texture_offset + offset); texture_mask
   wraps the offset within the cached page. */
#define setup_sprite_tile_fetch_texel_block_8bpp(offset) \
  texture_block_ptr = psx_gpu->texture_page_ptr + \
   ((texture_offset + offset) & texture_mask); \
  \
  gvld1_u8(texels, (u8 *)texture_block_ptr) \

/* Reserve tile_num_blocks entries in the block queue, flushing it first
   when the new total would overflow. */
#define setup_sprite_tile_add_blocks(tile_num_blocks) \
  num_blocks += tile_num_blocks; \
  \
  if(num_blocks > MAX_BLOCKS) \
  { \
    flush_render_block_buffer(psx_gpu); \
    num_blocks = tile_num_blocks; \
    block = psx_gpu->blocks; \
  } \
| 3275 | |
/* Emit both 8-pixel halves of a 16-pixel-wide 4bpp tile column for
   sub_tile_height rows: fetch cached indexes, CLUT-translate, and queue
   one block per half with the left/right edge masks.  texture_offset
   advances 0x10 per row, then skips to the next tile column (0xF00). */
#define setup_sprite_tile_full_4bpp(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  setup_sprite_tile_add_blocks(sub_tile_height * 2); \
  \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    \
    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
    block->draw_mask_bits = left_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
    \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    \
    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
    block->draw_mask_bits = right_mask_bits; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
    \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

/* Same as the full variant, but the tile contributes only one 8-pixel
   block per row; `edge` (left/right) picks which mask applies. */
#define setup_sprite_tile_half_4bpp(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  setup_sprite_tile_add_blocks(sub_tile_height); \
  \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    \
    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
    \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
| 3332 | |
/* Full-width tile column, 8bpp mode: texels are raw 8-bit CLUT indices,
 * stored directly into block->r.e (CLUT lookup happens later in the
 * block pipeline).  "edge" is unused in the full variants. */
#define setup_sprite_tile_full_8bpp(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 2); \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvst1_u8(texels, block->r.e); \
    block->draw_mask_bits = left_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvst1_u8(texels, block->r.e); \
    block->draw_mask_bits = right_mask_bits; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
| 3358 | |
/* Half-width tile column, 8bpp mode: one 8-texel block per texture row,
 * masked with the selected edge's mask bits. */
#define setup_sprite_tile_half_8bpp(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height); \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvst1_u8(texels, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
| 3378 | |
/* Pre/post adjustments applied around rendering an edge tile column.
 * "half" columns only cover one 8-pixel block of the tile: the right
 * half starts 8 texels/pixels in and rewinds fb_ptr afterwards; the
 * left half needs no fb_ptr adjustment.  "full" columns just reset
 * texture_offset to the column base. */
#define setup_sprite_tile_column_edge_pre_adjust_half_right() \
  texture_offset = texture_offset_base + 8; \
  fb_ptr += 8 \

#define setup_sprite_tile_column_edge_pre_adjust_half_left() \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_pre_adjust_half(edge) \
  setup_sprite_tile_column_edge_pre_adjust_half_##edge() \

#define setup_sprite_tile_column_edge_pre_adjust_full(edge) \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_post_adjust_half_right() \
  fb_ptr -= 8 \

/* left-half and full columns need no post adjustment (empty expansions) */
#define setup_sprite_tile_column_edge_post_adjust_half_left() \

#define setup_sprite_tile_column_edge_post_adjust_half(edge) \
  setup_sprite_tile_column_edge_post_adjust_half_##edge() \

#define setup_sprite_tile_column_edge_post_adjust_full(edge) \
| 3401 | |
| 3402 | |
/* Render one tile column when the sprite spans a single tile vertically:
 * column_data holds the row count directly (see setup_sprite_column_data
 * packing below). */
#define setup_sprite_tile_column_height_single(edge_mode, edge, texture_mode, \
 x4mode) \
do \
{ \
  sub_tile_height = column_data; \
  setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \
  setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
  setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \
} while(0) \

/* Render one tile column spanning multiple tiles vertically: a possibly
 * clipped first tile (bits 0-7 of column_data), full 16-row middle tiles
 * (count in bits 16+), then a possibly clipped last tile (bits 8-15). */
#define setup_sprite_tile_column_height_multi(edge_mode, edge, texture_mode, \
 x4mode) \
do \
{ \
  u32 tiles_remaining = column_data >> 16; \
  sub_tile_height = column_data & 0xFF; \
  setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \
  setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
  tiles_remaining -= 1; \
 \
  while(tiles_remaining) \
  { \
    sub_tile_height = 16; \
    setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
    tiles_remaining--; \
  } \
 \
  sub_tile_height = (column_data >> 8) & 0xFF; \
  setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
  setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \
} while(0) \
| 3434 | |
| 3435 | |
/* column_data packs per-column row counts:
 *   bits 0-7  : texel rows in the first (possibly clipped) tile
 *   bits 8-15 : texel rows in the last tile (multi-tile case only)
 *   bits 16+  : number of tiles after the first (multi-tile case only) */
#define setup_sprite_column_data_single() \
  column_data = height \

#define setup_sprite_column_data_multi() \
  column_data = 16 - offset_v; \
  column_data |= ((height_rounded & 0xF) + 1) << 8; \
  column_data |= (tile_height - 1) << 16 \

/* the right block's mask bits sit 8 bits (16 at 4x scale) above the
 * left block's in the combined edge mask */
#define RIGHT_MASK_BIT_SHIFT 8
#define RIGHT_MASK_BIT_SHIFT_4x 16
| 3446 | |
/* Sprite fits in a single tile column: both edges land in the same tile,
 * so the combined left|right mask drives the left 8-pixel block and its
 * upper half (shifted down) drives the right block. */
#define setup_sprite_tile_column_width_single(texture_mode, multi_height, \
 edge_mode, edge, x4mode) \
{ \
  setup_sprite_column_data_##multi_height(); \
  left_mask_bits = left_block_mask | right_block_mask; \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
 \
  setup_sprite_tile_column_height_##multi_height(edge_mode, edge, \
   texture_mode, x4mode); \
} \

/* Advance texture_offset_base to the next 16x16 tile column in the
 * swizzled texture layout, wrapping within the 16-tile row. */
#define setup_sprite_tiled_advance_column() \
  texture_offset_base += 0x100; \
  if((texture_offset_base & 0xF00) == 0) \
    texture_offset_base -= (0x100 + 0xF00) \

/* at 4x scale each texel covers two framebuffer pixels horizontally */
#define FB_PTR_MULTIPLIER 1
#define FB_PTR_MULTIPLIER_4x 2
| 3465 | |
/* Sprite spans multiple tile columns: render the left edge column with
 * left_block_mask, then the fully covered middle columns with mask 0
 * (no pixels masked), then the right edge column with right_block_mask.
 * fb_ptr_advance_column rewinds fb_ptr to the top of the next column
 * (16 pixels right, height rows up). */
#define setup_sprite_tile_column_width_multi(texture_mode, multi_height, \
 left_mode, right_mode, x4mode) \
{ \
  setup_sprite_column_data_##multi_height(); \
  s32 fb_ptr_advance_column = (16 - (1024 * height)) \
   * FB_PTR_MULTIPLIER##x4mode; \
 \
  tile_width -= 2; \
  left_mask_bits = left_block_mask; \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
 \
  setup_sprite_tile_column_height_##multi_height(left_mode, right, \
   texture_mode, x4mode); \
  fb_ptr += fb_ptr_advance_column; \
 \
  left_mask_bits = 0x00; \
  right_mask_bits = 0x00; \
 \
  while(tile_width) \
  { \
    setup_sprite_tiled_advance_column(); \
    setup_sprite_tile_column_height_##multi_height(full, none, \
     texture_mode, x4mode); \
    fb_ptr += fb_ptr_advance_column; \
    tile_width--; \
  } \
 \
  left_mask_bits = right_block_mask; \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
 \
  setup_sprite_tiled_advance_column(); \
  setup_sprite_tile_column_height_##multi_height(right_mode, left, \
   texture_mode, x4mode); \
} \
| 3500 | |
| 3501 | |
/* 4x stuff */
/* 4x-scale initializers: 4bpp still needs the CLUT split into byte
 * tables; 8bpp needs no setup (empty expansion). */
#define setup_sprite_tiled_initialize_4bpp_4x() \
  setup_sprite_tiled_initialize_4bpp_clut() \

#define setup_sprite_tiled_initialize_8bpp_4x() \
| 3507 | |
/* 4x-scaled full-width 4bpp tile column: each 8-texel half is CLUT
 * expanded into 16-bit pixels, doubled horizontally (gvst2 of the same
 * half vector) and vertically (each block also emitted for fb_ptr+1024),
 * so one texture row yields 8 render blocks covering 32x2 pixels.  The
 * 16-bit edge masks are split into low/high byte halves (_a/_b) for the
 * two 8-pixel blocks produced by each doubled half. */
#define setup_sprite_tile_full_4bpp_4x(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  vec_8x16u pixels; \
  vec_4x16u pixels_half; \
  setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4); \
  u32 left_mask_bits_a = left_mask_bits & 0xFF; \
  u32 left_mask_bits_b = left_mask_bits >> 8; \
  u32 right_mask_bits_a = right_mask_bits & 0xFF; \
  u32 right_mask_bits_b = right_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 16; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024 + 16; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 24; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 24; \
    block++; \
 \
    fb_ptr += 2048;                   /* two VRAM rows per texture row */ \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
| 3582 | |
/* 4x-scaled half-width 4bpp tile column: one CLUT-expanded 8-texel block
 * per texture row, doubled horizontally and vertically into 4 render
 * blocks; edge mask split into byte halves (_a/_b) as in the full 4x
 * variant. */
#define setup_sprite_tile_half_4bpp_4x(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  vec_8x16u pixels; \
  vec_4x16u pixels_half; \
  setup_sprite_tile_add_blocks(sub_tile_height * 4); \
  u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF; \
  u32 edge##_mask_bits_b = edge##_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
| 3628 | |
/* 4x-scaled full-width 8bpp tile column: raw 8-bit CLUT indices are
 * doubled horizontally by zipping the texel vector with itself, and
 * vertically by emitting each block for two fb rows; 8 render blocks per
 * texture row.  Edge masks split into byte halves (_a/_b). */
#define setup_sprite_tile_full_8bpp_4x(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4); \
  vec_8x16u texels_wide; \
  vec_4x16u texels_half; \
  u32 left_mask_bits_a = left_mask_bits & 0xFF; \
  u32 left_mask_bits_b = left_mask_bits >> 8; \
  u32 right_mask_bits_a = right_mask_bits & 0xFF; \
  u32 right_mask_bits_b = right_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvzip_u8(texels_wide, texels, texels); \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvzip_u8(texels_wide, texels, texels); \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 16; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024 + 16; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 24; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 24 + 1024; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
| 3696 | |
/* 4x-scaled half-width 8bpp tile column: one doubled 8-texel block per
 * texture row emitted as 4 render blocks (2 wide x 2 rows); edge mask
 * split into byte halves (_a/_b). */
#define setup_sprite_tile_half_8bpp_4x(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 4); \
  vec_8x16u texels_wide; \
  vec_4x16u texels_half; \
  u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF; \
  u32 edge##_mask_bits_b = edge##_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvzip_u8(texels_wide, texels, texels); \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8 + 1024; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
| 3738 | |
/* 4x-scale variants of the edge column adjustments (fb offsets doubled)
 * plus the u-coordinate and edge-mask classification helpers selected by
 * the x4mode token at expansion time.
 * NOTE(review): "comapre" (sic) is the spelling used consistently at the
 * expansion sites in setup_sprite_tiled_do; renaming would have to touch
 * both places at once. */
#define setup_sprite_tile_column_edge_pre_adjust_half_right_4x() \
  texture_offset = texture_offset_base + 8; \
  fb_ptr += 16 \

#define setup_sprite_tile_column_edge_pre_adjust_half_left_4x() \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_pre_adjust_half_4x(edge) \
  setup_sprite_tile_column_edge_pre_adjust_half_##edge##_4x() \

#define setup_sprite_tile_column_edge_pre_adjust_full_4x(edge) \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_post_adjust_half_right_4x() \
  fb_ptr -= 16 \

#define setup_sprite_tile_column_edge_post_adjust_half_left_4x() \

#define setup_sprite_tile_column_edge_post_adjust_half_4x(edge) \
  setup_sprite_tile_column_edge_post_adjust_half_##edge##_4x() \

#define setup_sprite_tile_column_edge_post_adjust_full_4x(edge) \

#define setup_sprite_offset_u_adjust() \

#define setup_sprite_comapre_left_block_mask() \
  ((left_block_mask & 0xFF) == 0xFF) \

#define setup_sprite_comapre_right_block_mask() \
  (((right_block_mask >> 8) & 0xFF) == 0xFF) \

#define setup_sprite_offset_u_adjust_4x() \
  offset_u *= 2; \
  offset_u_right = offset_u_right * 2 + 1 \

#define setup_sprite_comapre_left_block_mask_4x() \
  ((left_block_mask & 0xFFFF) == 0xFFFF) \

#define setup_sprite_comapre_right_block_mask_4x() \
  (((right_block_mask >> 16) & 0xFFFF) == 0xFFFF) \
| 3779 | |
| 3780 | |
/* Common driver for tiled sprite rendering, expanded once per texture
 * mode (4bpp/8bpp) and optional _4x scale.  Splits the sprite into 16x16
 * texel tiles, derives per-block edge masks from the u extent, then
 * dispatches to a column-width/height specialization via a 4-bit control
 * mask:
 *   bit 0: sprite covers a single tile column  (tile_width == 1)
 *   bit 1: sprite covers a single tile row     (tile_height == 1)
 *   bit 2: left edge mask covers a whole block (left half not drawn)
 *   bit 3: right edge mask covers a whole block
 * Missing combinations (0xD, 0xF) fall back to the default/0x0 handler.
 * texture_offset/texture_mask address the swizzled (tiled) texture
 * layout: low nibbles select the texel within a 16x16 tile, high bits
 * the tile itself. */
#define setup_sprite_tiled_do(texture_mode, x4mode) \
  s32 offset_u = u & 0xF; \
  s32 offset_v = v & 0xF; \
 \
  s32 width_rounded = offset_u + width + 15; \
  s32 height_rounded = offset_v + height + 15; \
  s32 tile_height = height_rounded / 16; \
  s32 tile_width = width_rounded / 16; \
  u32 offset_u_right = width_rounded & 0xF; \
 \
  setup_sprite_offset_u_adjust##x4mode(); \
 \
  u32 left_block_mask = ~(0xFFFFFFFF << offset_u); \
  u32 right_block_mask = 0xFFFFFFFE << offset_u_right; \
 \
  u32 left_mask_bits; \
  u32 right_mask_bits; \
 \
  u32 sub_tile_height; \
  u32 column_data; \
 \
  u32 texture_mask = (psx_gpu->texture_mask_width & 0xF) | \
   ((psx_gpu->texture_mask_height & 0xF) << 4) | \
   ((psx_gpu->texture_mask_width >> 4) << 8) | \
   ((psx_gpu->texture_mask_height >> 4) << 12); \
  u32 texture_offset = ((v & 0xF) << 4) | ((u & 0xF0) << 4) | \
   ((v & 0xF0) << 8); \
  u32 texture_offset_base = texture_offset; \
  u32 control_mask; \
 \
  u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + (x - offset_u); \
  u32 num_blocks = psx_gpu->num_blocks; \
  block_struct *block = psx_gpu->blocks + num_blocks; \
 \
  u16 *texture_block_ptr; \
  vec_8x8u texels; \
 \
  setup_sprite_tiled_initialize_##texture_mode##x4mode(); \
 \
  control_mask = tile_width == 1; \
  control_mask |= (tile_height == 1) << 1; \
  control_mask |= setup_sprite_comapre_left_block_mask##x4mode() << 2; \
  control_mask |= setup_sprite_comapre_right_block_mask##x4mode() << 3; \
 \
  switch(control_mask) \
  { \
    default: \
    case 0x0: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, full, \
       x4mode); \
      break; \
 \
    case 0x1: \
      setup_sprite_tile_column_width_single(texture_mode, multi, full, none, \
       x4mode); \
      break; \
 \
    case 0x2: \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, full, \
       x4mode); \
      break; \
 \
    case 0x3: \
      setup_sprite_tile_column_width_single(texture_mode, single, full, none, \
       x4mode); \
      break; \
 \
    case 0x4: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, full, \
       x4mode); \
      break; \
 \
    case 0x5: \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, right, \
       x4mode); \
      break; \
 \
    case 0x6: \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, full, \
       x4mode); \
      break; \
 \
    case 0x7: \
      setup_sprite_tile_column_width_single(texture_mode, single, half, right, \
       x4mode); \
      break; \
 \
    case 0x8: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, half, \
       x4mode); \
      break; \
 \
    case 0x9: \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, left, \
       x4mode); \
      break; \
 \
    case 0xA: \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, half, \
       x4mode); \
      break; \
 \
    case 0xB: \
      setup_sprite_tile_column_width_single(texture_mode, single, half, left, \
       x4mode); \
      break; \
 \
    case 0xC: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, half, \
       x4mode); \
      break; \
 \
    case 0xE: \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, half, \
       x4mode); \
      break; \
  } \
| 3898 | |
/* Render a textured 4bpp (16-colour CLUT) sprite.  x/y: destination in
 * VRAM, u/v: texture coordinates, width/height in texels; "color" is not
 * used by this path.  Expands setup_sprite_tiled_do, which tiles the
 * sprite and queues render blocks. */
void setup_sprite_4bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0
  /* disabled call to the reference implementation, kept for debugging */
  setup_sprite_4bpp_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(4bpp,)
}
| 3908 | |
/* Render a textured 8bpp (256-colour CLUT) sprite; see setup_sprite_4bpp
 * for parameter meanings.  "color" is not used by this path. */
void setup_sprite_8bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0
  /* disabled call to the reference implementation, kept for debugging */
  setup_sprite_8bpp_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(8bpp,)
}
| 3918 | |
| 3919 | #undef draw_mask_fb_ptr_left |
| 3920 | #undef draw_mask_fb_ptr_right |
| 3921 | |
/* 4bpp sprite at 2x2 ("4x") enhanced resolution: each texel is expanded
 * to a 2x2 pixel block in the output.  "color" is not used by this path. */
void setup_sprite_4bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0
  /* disabled call to the reference implementation, kept for debugging */
  setup_sprite_4bpp_4x_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(4bpp, _4x)
}
| 3931 | |
/* 8bpp sprite at 2x2 ("4x") enhanced resolution; see setup_sprite_4bpp_4x.
 * "color" is not used by this path. */
void setup_sprite_8bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0
  /* disabled call to the reference implementation, kept for debugging */
  setup_sprite_8bpp_4x_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(8bpp, _4x)
}
| 3941 | |
| 3942 | |
| 3943 | void scale2x_tiles8(void * __restrict__ dst_, const void * __restrict__ src_, int w8, int h) |
| 3944 | { |
| 3945 | #if 0 |
| 3946 | scale2x_tiles8_(dst_, src_, w8, h); |
| 3947 | return; |
| 3948 | #endif |
| 3949 | const u16 * __restrict__ src = src_; |
| 3950 | const u16 * __restrict__ src1; |
| 3951 | u16 * __restrict__ dst = dst_; |
| 3952 | u16 * __restrict__ dst1; |
| 3953 | gvreg a, b; |
| 3954 | int w; |
| 3955 | for (; h > 0; h--, src += 1024, dst += 1024*2) |
| 3956 | { |
| 3957 | src1 = src; |
| 3958 | dst1 = dst; |
| 3959 | for (w = w8; w > 0; w--, src1 += 8, dst1 += 8*2) |
| 3960 | { |
| 3961 | gvld1q_u16(a, src1); |
| 3962 | gvzipq_u16(a, b, a, a); |
| 3963 | gvst1q_u16(a, dst1); |
| 3964 | gvst1q_u16(b, dst1 + 8); |
| 3965 | gvst1q_u16(a, dst1 + 1024); |
| 3966 | gvst1q_u16(b, dst1 + 1024 + 8); |
| 3967 | } |
| 3968 | } |
| 3969 | } |
| 3970 | |
| 3971 | // vim:ts=2:sw=2:expandtab |