1 | /* |
2 | * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com> |
3 | * Copyright (C) 2022 Gražvydas Ignotas "notaz" <notasas@gmail.com>
4 | * |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as |
7 | * published by the Free Software Foundation; either version 2 of |
8 | * the License, or (at your option) any later version. |
9 | * |
10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * General Public License for more details. |
14 | */ |
15 | |
16 | #include <string.h> |
17 | #include "psx_gpu.h" |
18 | #include "psx_gpu_simd.h" |
19 | //#define ASM_PROTOTYPES |
20 | //#include "psx_gpu_simd.h" |
21 | #ifdef __SSE2__ |
22 | #include <x86intrin.h> |
23 | #endif |
24 | #ifndef SIMD_BUILD |
25 | #error "please define SIMD_BUILD if you want this gpu_neon C simd implementation" |
26 | #endif |
27 | |
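// Full-width (128-bit, NEON "q") and half-width (64-bit, NEON "d") element
// vector types, built on the GCC/clang vector_size extension so the same
// macro layer below can target NEON, SSE2, or the plain C fallbacks.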
28 | typedef u8 gvu8 __attribute__((vector_size(16))); |
29 | typedef u16 gvu16 __attribute__((vector_size(16))); |
30 | typedef u32 gvu32 __attribute__((vector_size(16))); |
31 | typedef u64 gvu64 __attribute__((vector_size(16))); |
32 | typedef s8 gvs8 __attribute__((vector_size(16))); |
33 | typedef s16 gvs16 __attribute__((vector_size(16))); |
34 | typedef s32 gvs32 __attribute__((vector_size(16))); |
35 | typedef s64 gvs64 __attribute__((vector_size(16))); |
36 | |
37 | typedef u8 gvhu8 __attribute__((vector_size(8))); |
38 | typedef u16 gvhu16 __attribute__((vector_size(8))); |
39 | typedef u32 gvhu32 __attribute__((vector_size(8))); |
40 | typedef u64 gvhu64 __attribute__((vector_size(8))); |
41 | typedef s8 gvhs8 __attribute__((vector_size(8))); |
42 | typedef s16 gvhs16 __attribute__((vector_size(8))); |
43 | typedef s32 gvhs32 __attribute__((vector_size(8))); |
44 | typedef s64 gvhs64 __attribute__((vector_size(8))); |
45 | |
46 | typedef union |
47 | { |
48 | gvu8 u8; |
49 | gvu16 u16; |
50 | gvu32 u32; |
51 | gvu64 u64; |
52 | gvs8 s8; |
53 | gvs16 s16; |
54 | gvs32 s32; |
55 | gvs64 s64; |
56 | #ifdef __SSE2__ |
57 | __m128i m; |
58 | #endif |
59 | // this may be tempting, but it causes gcc to do lots of stack spills |
60 | //gvhreg h[2]; |
61 | } gvreg; |
62 | |
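// "_ua" variants relax the natural alignment requirement; the generic
// load/store fallbacks cast pointers to these so the compiler emits
// unaligned accesses instead of assuming alignment.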
63 | typedef gvreg gvreg_ua __attribute__((aligned(1))); |
64 | typedef uint64_t uint64_t_ua __attribute__((aligned(1))); |
65 | typedef gvu8 gvu8_ua __attribute__((aligned(1))); |
66 | typedef gvu16 gvu16_ua __attribute__((aligned(1))); |
67 | |
68 | #if defined(__ARM_NEON) || defined(__ARM_NEON__) |
69 | #include <arm_neon.h> |
70 | |
71 | typedef union |
72 | { |
73 | gvhu8 u8; |
74 | gvhu16 u16; |
75 | gvhu32 u32; |
76 | gvhu64 u64; |
77 | //u64 u64; |
78 | //uint64x1_t u64; |
79 | gvhs8 s8; |
80 | gvhs16 s16; |
81 | gvhs32 s32; |
82 | gvhs64 s64; |
83 | //s64 s64; |
84 | //int64x1_t s64; |
85 | } gvhreg; |
86 | |
87 | #define gvaddhn_u32(d, a, b) d.u16 = vaddhn_u32(a.u32, b.u32) |
88 | #define gvaddw_s32(d, a, b) d.s64 = vaddw_s32(a.s64, b.s32) |
89 | #define gvabsq_s32(d, s) d.s32 = vabsq_s32(s.s32) |
90 | #define gvbic_n_u16(d, n) d.u16 = vbic_u16(d.u16, vmov_n_u16(n)) |
91 | #define gvbifq(d, a, b) d.u8 = vbslq_u8(b.u8, d.u8, a.u8) |
92 | #define gvbit(d, a, b) d.u8 = vbsl_u8(b.u8, a.u8, d.u8) |
93 | #define gvceqq_u16(d, a, b) d.u16 = vceqq_u16(a.u16, b.u16) |
94 | #define gvcgt_s16(d, a, b) d.u16 = vcgt_s16(a.s16, b.s16) |
95 | #define gvclt_s16(d, a, b) d.u16 = vclt_s16(a.s16, b.s16) |
96 | #define gvcreate_s32(d, a, b) d.s32 = vcreate_s32((u32)(a) | ((u64)(b) << 32)) |
97 | #define gvcreate_u32(d, a, b) d.u32 = vcreate_u32((u32)(a) | ((u64)(b) << 32)) |
98 | #define gvcreate_s64(d, s) d.s64 = (gvhs64)vcreate_s64(s) |
99 | #define gvcreate_u64(d, s) d.u64 = (gvhu64)vcreate_u64(s) |
100 | #define gvcombine_u16(d, l, h) d.u16 = vcombine_u16(l.u16, h.u16) |
101 | #define gvcombine_u32(d, l, h) d.u32 = vcombine_u32(l.u32, h.u32) |
102 | #define gvcombine_s64(d, l, h) d.s64 = vcombine_s64((int64x1_t)l.s64, (int64x1_t)h.s64) |
103 | #define gvdup_l_u8(d, s, l) d.u8 = vdup_lane_u8(s.u8, l) |
104 | #define gvdup_l_u16(d, s, l) d.u16 = vdup_lane_u16(s.u16, l) |
105 | #define gvdup_l_u32(d, s, l) d.u32 = vdup_lane_u32(s.u32, l) |
106 | #define gvdupq_l_s64(d, s, l) d.s64 = vdupq_lane_s64((int64x1_t)s.s64, l) |
107 | #define gvdupq_l_u32(d, s, l) d.u32 = vdupq_lane_u32(s.u32, l) |
108 | #define gvdup_n_s64(d, n) d.s64 = vdup_n_s64(n) |
109 | #define gvdup_n_u8(d, n) d.u8 = vdup_n_u8(n) |
110 | #define gvdup_n_u16(d, n) d.u16 = vdup_n_u16(n) |
111 | #define gvdup_n_u32(d, n) d.u32 = vdup_n_u32(n) |
112 | #define gvdupq_n_u16(d, n) d.u16 = vdupq_n_u16(n) |
113 | #define gvdupq_n_u32(d, n) d.u32 = vdupq_n_u32(n) |
114 | #define gvdupq_n_s64(d, n) d.s64 = vdupq_n_s64(n) |
115 | #define gvhaddq_u16(d, a, b) d.u16 = vhaddq_u16(a.u16, b.u16) |
116 | #define gvmax_s16(d, a, b) d.s16 = vmax_s16(a.s16, b.s16) |
117 | #define gvmin_s16(d, a, b) d.s16 = vmin_s16(a.s16, b.s16) |
118 | #define gvmin_u16(d, a, b) d.u16 = vmin_u16(a.u16, b.u16) |
119 | #define gvminq_u8(d, a, b) d.u8 = vminq_u8(a.u8, b.u8) |
120 | #define gvminq_u16(d, a, b) d.u16 = vminq_u16(a.u16, b.u16) |
121 | #define gvmla_s32(d, a, b) d.s32 = vmla_s32(d.s32, a.s32, b.s32) |
122 | #define gvmla_u32(d, a, b) d.u32 = vmla_u32(d.u32, a.u32, b.u32) |
123 | #define gvmlaq_s32(d, a, b) d.s32 = vmlaq_s32(d.s32, a.s32, b.s32) |
124 | #define gvmlaq_u32(d, a, b) d.u32 = vmlaq_u32(d.u32, a.u32, b.u32) |
125 | #define gvmlal_s32(d, a, b) d.s64 = vmlal_s32(d.s64, a.s32, b.s32) |
126 | #define gvmlal_u8(d, a, b) d.u16 = vmlal_u8(d.u16, a.u8, b.u8) |
127 | #define gvmlsq_s32(d, a, b) d.s32 = vmlsq_s32(d.s32, a.s32, b.s32) |
128 | #define gvmlsq_l_s32(d, a, b, l) d.s32 = vmlsq_lane_s32(d.s32, a.s32, b.s32, l) |
129 | #define gvmov_l_s32(d, s, l) d.s32 = vset_lane_s32(s, d.s32, l) |
130 | #define gvmov_l_u32(d, s, l) d.u32 = vset_lane_u32(s, d.u32, l) |
131 | #define gvmovl_u8(d, s) d.u16 = vmovl_u8(s.u8) |
132 | #define gvmovl_s32(d, s) d.s64 = vmovl_s32(s.s32) |
133 | #define gvmovn_u16(d, s) d.u8 = vmovn_u16(s.u16) |
134 | #define gvmovn_u32(d, s) d.u16 = vmovn_u32(s.u32) |
135 | #define gvmovn_u64(d, s) d.u32 = vmovn_u64(s.u64) |
136 | #define gvmul_s32(d, a, b) d.s32 = vmul_s32(a.s32, b.s32) |
137 | #define gvmull_s16(d, a, b) d.s32 = vmull_s16(a.s16, b.s16) |
138 | #define gvmull_s32(d, a, b) d.s64 = vmull_s32(a.s32, b.s32) |
139 | #define gvmull_u8(d, a, b) d.u16 = vmull_u8(a.u8, b.u8) |
140 | #define gvmull_l_u32(d, a, b, l) d.u64 = vmull_lane_u32(a.u32, b.u32, l) |
141 | #define gvmlsl_s16(d, a, b) d.s32 = vmlsl_s16(d.s32, a.s16, b.s16) |
142 | #define gvneg_s32(d, s) d.s32 = vneg_s32(s.s32) |
143 | #define gvqadd_u8(d, a, b) d.u8 = vqadd_u8(a.u8, b.u8) |
144 | #define gvqsub_u8(d, a, b) d.u8 = vqsub_u8(a.u8, b.u8) |
145 | #define gvshl_u16(d, a, b) d.u16 = vshl_u16(a.u16, b.s16) |
146 | #define gvshlq_u64(d, a, b) d.u64 = vshlq_u64(a.u64, b.s64) |
147 | #define gvshrq_n_s16(d, s, n) d.s16 = vshrq_n_s16(s.s16, n) |
148 | #define gvshrq_n_u16(d, s, n) d.u16 = vshrq_n_u16(s.u16, n) |
149 | #define gvshl_n_u32(d, s, n) d.u32 = vshl_n_u32(s.u32, n) |
150 | #define gvshlq_n_u16(d, s, n) d.u16 = vshlq_n_u16(s.u16, n) |
151 | #define gvshlq_n_u32(d, s, n) d.u32 = vshlq_n_u32(s.u32, n) |
152 | #define gvshll_n_s8(d, s, n) d.s16 = vshll_n_s8(s.s8, n) |
153 | #define gvshll_n_u8(d, s, n) d.u16 = vshll_n_u8(s.u8, n) |
154 | #define gvshll_n_u16(d, s, n) d.u32 = vshll_n_u16(s.u16, n) |
155 | #define gvshr_n_u8(d, s, n) d.u8 = vshr_n_u8(s.u8, n) |
156 | #define gvshr_n_u16(d, s, n) d.u16 = vshr_n_u16(s.u16, n) |
157 | #define gvshr_n_u32(d, s, n) d.u32 = vshr_n_u32(s.u32, n) |
158 | #define gvshr_n_u64(d, s, n) d.u64 = (gvhu64)vshr_n_u64((uint64x1_t)s.u64, n) |
159 | #define gvshrn_n_u16(d, s, n) d.u8 = vshrn_n_u16(s.u16, n) |
160 | #define gvshrn_n_u32(d, s, n) d.u16 = vshrn_n_u32(s.u32, n) |
161 | #define gvsli_n_u8(d, s, n) d.u8 = vsli_n_u8(d.u8, s.u8, n) |
162 | #define gvsri_n_u8(d, s, n) d.u8 = vsri_n_u8(d.u8, s.u8, n) |
163 | #define gvtstq_u16(d, a, b) d.u16 = vtstq_u16(a.u16, b.u16) |
164 | #define gvqshrun_n_s16(d, s, n) d.u8 = vqshrun_n_s16(s.s16, n) |
165 | #define gvqsubq_u8(d, a, b) d.u8 = vqsubq_u8(a.u8, b.u8) |
166 | #define gvqsubq_u16(d, a, b) d.u16 = vqsubq_u16(a.u16, b.u16) |
167 | |
168 | #define gvmovn_top_u64(d, s) d.u32 = vshrn_n_u64(s.u64, 32) |
169 | |
170 | #define gvget_lo(d, s) d.u16 = vget_low_u16(s.u16) |
171 | #define gvget_hi(d, s) d.u16 = vget_high_u16(s.u16) |
172 | #define gvlo(s) ({gvhreg t_; gvget_lo(t_, s); t_;}) |
173 | #define gvhi(s) ({gvhreg t_; gvget_hi(t_, s); t_;}) |
174 | |
175 | #define gvset_lo(d, s) d.u16 = vcombine_u16(s.u16, gvhi(d).u16) |
176 | #define gvset_hi(d, s) d.u16 = vcombine_u16(gvlo(d).u16, s.u16) |
177 | |
178 | #define gvtbl2_u8(d, a, b) { \ |
179 | uint8x8x2_t v_; \ |
180 | v_.val[0] = vget_low_u8(a.u8); v_.val[1] = vget_high_u8(a.u8); \ |
181 | d.u8 = vtbl2_u8(v_, b.u8); \ |
182 | } |
183 | |
184 | #define gvzip_u8(d, a, b) { \ |
185 | uint8x8x2_t v_ = vzip_u8(a.u8, b.u8); \ |
186 | d.u8 = vcombine_u8(v_.val[0], v_.val[1]); \ |
187 | } |
188 | #define gvzipq_u16(d0, d1, s0, s1) { \ |
189 | uint16x8x2_t v_ = vzipq_u16(s0.u16, s1.u16); \ |
190 | d0.u16 = v_.val[0]; d1.u16 = v_.val[1]; \ |
191 | } |
192 | |
193 | #define gvld1_u8(d, s) d.u8 = vld1_u8(s) |
194 | #define gvld1_u32(d, s) d.u32 = vld1_u32((const u32 *)(s)) |
195 | #define gvld1_u64(d, s) d.u64 = vld1_u64((const u64 *)(s)) |
196 | #define gvld1q_u8(d, s) d.u8 = vld1q_u8(s) |
197 | #define gvld1q_u16(d, s) d.u16 = vld1q_u16(s) |
198 | #define gvld1q_u32(d, s) d.u32 = vld1q_u32((const u32 *)(s)) |
199 | #define gvld2_u8_dup(v0, v1, p) { \ |
200 | uint8x8x2_t v_ = vld2_dup_u8(p); \ |
201 | v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \ |
202 | } |
203 | #define gvld2q_u8(v0, v1, p) { \ |
204 | uint8x16x2_t v_ = vld2q_u8(p); \ |
205 | v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \ |
206 | } |
207 | |
208 | #define gvst1_u8(v, p) \ |
209 | vst1_u8(p, v.u8) |
210 | #define gvst1_u64(v, p) \ |
211 | vst1_u64((u64 *)(p), v.u64) |
212 | #define gvst1q_u16(v, p) \ |
213 | vst1q_u16(p, v.u16) |
214 | #define gvst1q_inc_u32(v, p, i) { \ |
215 | vst1q_u32((u32 *)(p), v.u32); \ |
216 | p += (i) / sizeof(*p); \ |
217 | } |
218 | #define gvst2_u8(v0, v1, p) { \ |
219 | uint8x8x2_t v_; \ |
220 | v_.val[0] = v0.u8; v_.val[1] = v1.u8; \ |
221 | vst2_u8(p, v_); \ |
222 | } |
223 | #define gvst2_u16(v0, v1, p) { \ |
224 | uint16x4x2_t v_; \ |
225 | v_.val[0] = v0.u16; v_.val[1] = v1.u16; \ |
226 | vst2_u16(p, v_); \ |
227 | } |
228 | #define gvst2q_u8(v0, v1, p) { \ |
229 | uint8x16x2_t v_; \ |
230 | v_.val[0] = v0.u8; v_.val[1] = v1.u8; \ |
231 | vst2q_u8(p, v_); \ |
232 | } |
233 | #define gvst4_4_inc_u32(v0, v1, v2, v3, p, i) { \ |
234 | uint32x2x4_t v_; \ |
235 | v_.val[0] = v0.u32; v_.val[1] = v1.u32; v_.val[2] = v2.u32; v_.val[3] = v3.u32; \ |
236 | vst4_u32(p, v_); p += (i) / sizeof(*p); \ |
237 | } |
238 | #define gvst4_pi_u16(v0, v1, v2, v3, p) { \ |
239 | uint16x4x4_t v_; \ |
240 | v_.val[0] = v0.u16; v_.val[1] = v1.u16; v_.val[2] = v2.u16; v_.val[3] = v3.u16; \ |
241 | vst4_u16((u16 *)(p), v_); p += sizeof(v_) / sizeof(*p); \ |
242 | } |
243 | #define gvst1q_pi_u32(v, p) \ |
244 | gvst1q_inc_u32(v, p, sizeof(v)) |
245 | // could use vst1q_u32_x2 but that's not always available |
246 | #define gvst1q_2_pi_u32(v0, v1, p) { \ |
247 | gvst1q_inc_u32(v0, p, sizeof(v0)); \ |
248 | gvst1q_inc_u32(v1, p, sizeof(v1)); \ |
249 | } |
250 | |
251 | /* notes: |
252 | - gcc > 9: (arm32) int64x1_t type produces ops on gp regs |
253 | (also u64 __attribute__((vector_size(8))) :( ) |
254 | - gcc <11: (arm32) handles '<vec> == 0' poorly |
255 | */ |
256 | |
257 | #elif defined(__SSE2__) |
258 | |
259 | // use a full reg and discard the upper half |
260 | #define gvhreg gvreg |
261 | |
262 | #define gv0() _mm_setzero_si128() |
263 | |
264 | #ifdef __x86_64__ |
265 | #define gvcreate_s32(d, a, b) d.m = _mm_cvtsi64_si128((u32)(a) | ((u64)(b) << 32)) |
266 | #define gvcreate_s64(d, s) d.m = _mm_cvtsi64_si128(s) |
267 | #else |
268 | #define gvcreate_s32(d, a, b) d.m = _mm_set_epi32(0, 0, b, a) |
269 | #define gvcreate_s64(d, s) d.m = _mm_loadu_si64(&(s)) |
270 | #endif |
271 | |
272 | #define gvbic_n_u16(d, n) d.m = _mm_andnot_si128(_mm_set1_epi16(n), d.m) |
273 | #define gvceqq_u16(d, a, b) d.m = _mm_cmpeq_epi16(a.m, b.m)
274 | #define gvcgt_s16(d, a, b) d.m = _mm_cmpgt_epi16(a.m, b.m) |
275 | #define gvclt_s16(d, a, b) d.m = _mm_cmpgt_epi16(b.m, a.m) |
276 | #define gvcreate_u32 gvcreate_s32 |
277 | #define gvcreate_u64 gvcreate_s64 |
278 | #define gvcombine_u16(d, l, h) d.m = _mm_unpacklo_epi64(l.m, h.m) |
279 | #define gvcombine_u32 gvcombine_u16 |
280 | #define gvcombine_s64 gvcombine_u16 |
281 | #define gvdup_l_u8(d, s, l) d.m = _mm_set1_epi8(s.u8[l])
282 | #define gvdup_l_u16(d, s, l) d.m = _mm_shufflelo_epi16(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6)) |
283 | #define gvdup_l_u32(d, s, l) d.m = _mm_shuffle_epi32(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6))
284 | #define gvdupq_l_s64(d, s, l) d.m = _mm_unpacklo_epi64(s.m, s.m) |
285 | #define gvdupq_l_u32(d, s, l) d.m = _mm_shuffle_epi32(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6)) |
286 | #define gvdup_n_s64(d, n) d.m = _mm_set1_epi64x(n) |
287 | #define gvdup_n_u8(d, n) d.m = _mm_set1_epi8(n) |
288 | #define gvdup_n_u16(d, n) d.m = _mm_set1_epi16(n) |
289 | #define gvdup_n_u32(d, n) d.m = _mm_set1_epi32(n) |
290 | #define gvdupq_n_u16(d, n) d.m = _mm_set1_epi16(n) |
291 | #define gvdupq_n_u32(d, n) d.m = _mm_set1_epi32(n) |
292 | #define gvdupq_n_s64(d, n) d.m = _mm_set1_epi64x(n) |
293 | #define gvmax_s16(d, a, b) d.m = _mm_max_epi16(a.m, b.m) |
294 | #define gvmin_s16(d, a, b) d.m = _mm_min_epi16(a.m, b.m) |
295 | #define gvminq_u8(d, a, b) d.m = _mm_min_epu8(a.m, b.m) |
296 | #define gvmovn_u64(d, s) d.m = _mm_shuffle_epi32(s.m, 0 | (2 << 2)) |
297 | #define gvmovn_top_u64(d, s) d.m = _mm_shuffle_epi32(s.m, 1 | (3 << 2)) |
298 | #define gvmull_s16(d, a, b) { \ |
299 | __m128i lo_ = _mm_mullo_epi16(a.m, b.m); \ |
300 | __m128i hi_ = _mm_mulhi_epi16(a.m, b.m); \ |
301 | d.m = _mm_unpacklo_epi16(lo_, hi_); \ |
302 | } |
303 | #define gvmull_l_u32(d, a, b, l) { \ |
304 | __m128i a_ = _mm_unpacklo_epi32(a.m, a.m); /* lanes 0,1 -> 0,2 */ \ |
305 | __m128i b_ = _mm_shuffle_epi32(b.m, (l) | ((l) << 4)); \ |
306 | d.m = _mm_mul_epu32(a_, b_); \ |
307 | } |
308 | #define gvmlsl_s16(d, a, b) { \ |
309 | gvreg tmp_; \ |
310 | gvmull_s16(tmp_, a, b); \ |
311 | d.m = _mm_sub_epi32(d.m, tmp_.m); \ |
312 | } |
313 | #define gvqadd_u8(d, a, b) d.m = _mm_adds_epu8(a.m, b.m) |
314 | #define gvqsub_u8(d, a, b) d.m = _mm_subs_epu8(a.m, b.m) |
315 | #define gvshrq_n_s16(d, s, n) d.m = _mm_srai_epi16(s.m, n) |
316 | #define gvshrq_n_u16(d, s, n) d.m = _mm_srli_epi16(s.m, n) |
317 | #define gvshrq_n_u32(d, s, n) d.m = _mm_srli_epi32(s.m, n) |
318 | #define gvshl_n_u32(d, s, n) d.m = _mm_slli_epi32(s.m, n) |
319 | #define gvshlq_n_u16(d, s, n) d.m = _mm_slli_epi16(s.m, n) |
320 | #define gvshlq_n_u32(d, s, n) d.m = _mm_slli_epi32(s.m, n) |
321 | #define gvshll_n_u16(d, s, n) d.m = _mm_slli_epi32(_mm_unpacklo_epi16(s.m, gv0()), n) |
322 | #define gvshr_n_u16(d, s, n) d.m = _mm_srli_epi16(s.m, n) |
323 | #define gvshr_n_u32(d, s, n) d.m = _mm_srli_epi32(s.m, n) |
324 | #define gvshr_n_u64(d, s, n) d.m = _mm_srli_epi64(s.m, n) |
325 | #define gvshrn_n_s64(d, s, n) { \ |
326 | gvreg tmp_; \ |
327 | gvshrq_n_s64(tmp_, s, n); \ |
328 | d.m = _mm_shuffle_epi32(tmp_.m, 0 | (2 << 2)); \ |
329 | } |
330 | #define gvqshrun_n_s16(d, s, n) { \ |
331 | __m128i t_ = _mm_srai_epi16(s.m, n); \ |
332 | d.m = _mm_packus_epi16(t_, t_); \ |
333 | } |
334 | #define gvqsubq_u8(d, a, b) d.m = _mm_subs_epu8(a.m, b.m) |
335 | #define gvqsubq_u16(d, a, b) d.m = _mm_subs_epu16(a.m, b.m) |
336 | |
337 | #ifdef __SSSE3__ |
338 | #define gvabsq_s32(d, s) d.m = _mm_abs_epi32(s.m) |
339 | #define gvtbl2_u8(d, a, b) d.m = _mm_shuffle_epi8(a.m, b.m) |
340 | #else |
341 | // must supply these here or else gcc will produce something terrible with __builtin_shuffle |
342 | #define gvmovn_u16(d, s) { \ |
343 | __m128i t2_ = _mm_and_si128(s.m, _mm_set1_epi16(0xff)); \ |
344 | d.m = _mm_packus_epi16(t2_, t2_); \ |
345 | } |
346 | #define gvmovn_u32(d, s) { \ |
347 | __m128i t2_; \ |
348 | t2_ = _mm_shufflelo_epi16(s.m, (0 << 0) | (2 << 2)); \ |
349 | t2_ = _mm_shufflehi_epi16(t2_, (0 << 0) | (2 << 2)); \ |
350 | d.m = _mm_shuffle_epi32(t2_, (0 << 0) | (2 << 2)); \ |
351 | } |
352 | #define gvmovn_top_u32(d, s) { \ |
353 | __m128i t2_; \ |
354 | t2_ = _mm_shufflelo_epi16(s.m, (1 << 0) | (3 << 2)); \ |
355 | t2_ = _mm_shufflehi_epi16(t2_, (1 << 0) | (3 << 2)); \ |
356 | d.m = _mm_shuffle_epi32(t2_, (0 << 0) | (2 << 2)); \ |
357 | } |
358 | #endif // !__SSSE3__ |
359 | #ifdef __SSE4_1__ |
360 | #define gvmin_u16(d, a, b) d.m = _mm_min_epu16(a.m, b.m) |
361 | #define gvminq_u16 gvmin_u16 |
362 | #define gvmovl_u8(d, s) d.m = _mm_cvtepu8_epi16(s.m) |
363 | #define gvmovl_s8(d, s) d.m = _mm_cvtepi8_epi16(s.m) |
364 | #define gvmovl_s32(d, s) d.m = _mm_cvtepi32_epi64(s.m) |
365 | #define gvmull_s32(d, a, b) { \ |
366 | __m128i a_ = _mm_unpacklo_epi32(a.m, a.m); /* lanes 0,1 -> 0,2 */ \ |
367 | __m128i b_ = _mm_unpacklo_epi32(b.m, b.m); \ |
368 | d.m = _mm_mul_epi32(a_, b_); \ |
369 | } |
370 | #else |
371 | #define gvmovl_u8(d, s) d.m = _mm_unpacklo_epi8(s.m, gv0()) |
372 | #define gvmovl_s8(d, s) d.m = _mm_unpacklo_epi8(s.m, _mm_cmpgt_epi8(gv0(), s.m)) |
373 | #define gvmovl_s32(d, s) d.m = _mm_unpacklo_epi32(s.m, _mm_srai_epi32(s.m, 31)) |
374 | #endif // !__SSE4_1__ |
375 | #ifndef __AVX2__ |
376 | #define gvshlq_u64(d, a, b) { \ |
377 | gvreg t1_, t2_; \ |
378 | t1_.m = _mm_sll_epi64(a.m, b.m); \ |
379 | t2_.m = _mm_sll_epi64(a.m, _mm_shuffle_epi32(b.m, (2 << 0) | (3 << 2))); \ |
380 | d.u64 = (gvu64){ t1_.u64[0], t2_.u64[1] }; \ |
381 | } |
382 | #endif // !__AVX2__
383 | |
384 | #define gvlo(s) s |
385 | #define gvhi(s) ((gvreg)_mm_shuffle_epi32(s.m, (2 << 0) | (3 << 2))) |
386 | #define gvget_lo(d, s) d = gvlo(s) |
387 | #define gvget_hi(d, s) d = gvhi(s) |
388 | |
389 | #define gvset_lo(d, s) d.m = _mm_unpacklo_epi64(s.m, gvhi(d).m) |
390 | #define gvset_hi(d, s) d.m = _mm_unpacklo_epi64(d.m, s.m) |
391 | |
392 | #define gvld1_u8(d, s) d.m = _mm_loadu_si64(s) |
393 | #define gvld1_u32 gvld1_u8 |
394 | #define gvld1_u64 gvld1_u8 |
395 | #define gvld1q_u8(d, s) d.m = _mm_loadu_si128((__m128i *)(s)) |
396 | #define gvld1q_u16 gvld1q_u8 |
397 | #define gvld1q_u32 gvld1q_u8 |
398 | |
399 | #define gvst1_u8(v, p) _mm_storeu_si64(p, v.m) |
400 | #define gvst1_u64 gvst1_u8 |
401 | |
402 | #define gvst4_4_inc_u32(v0, v1, v2, v3, p, i) { \ |
403 | __m128i t0 = _mm_unpacklo_epi32(v0.m, v1.m); \ |
404 | __m128i t1 = _mm_unpacklo_epi32(v2.m, v3.m); \ |
405 | _mm_storeu_si128(((__m128i *)(p)) + 0, _mm_unpacklo_epi64(t0, t1)); \ |
406 | _mm_storeu_si128(((__m128i *)(p)) + 1, _mm_unpackhi_epi64(t0, t1)); \ |
407 | p += (i) / sizeof(*p); \ |
408 | } |
409 | #define gvst4_pi_u16(v0, v1, v2, v3, p) { \ |
410 | __m128i t0 = _mm_unpacklo_epi16(v0.m, v1.m); \ |
411 | __m128i t1 = _mm_unpacklo_epi16(v2.m, v3.m); \ |
412 | _mm_storeu_si128(((__m128i *)(p)) + 0, _mm_unpacklo_epi32(t0, t1)); \ |
413 | _mm_storeu_si128(((__m128i *)(p)) + 1, _mm_unpackhi_epi32(t0, t1)); \ |
414 | p += sizeof(t0) * 2 / sizeof(*p); \ |
415 | } |
416 | |
417 | #else |
418 | #error "arch not supported or SIMD support was not enabled by your compiler" |
419 | #endif |
420 | |
421 | // the below have intrinsics but they evaluate to basic operations on both gcc and clang |
422 | #define gvadd_s64(d, a, b) d.s64 = a.s64 + b.s64 |
423 | #define gvadd_u8(d, a, b) d.u8 = a.u8 + b.u8 |
424 | #define gvadd_u16(d, a, b) d.u16 = a.u16 + b.u16 |
425 | #define gvadd_u32(d, a, b) d.u32 = a.u32 + b.u32 |
426 | #define gvaddq_s64 gvadd_s64 |
427 | #define gvaddq_u16 gvadd_u16 |
428 | #define gvaddq_u32 gvadd_u32 |
429 | #define gvand(d, a, b) d.u32 = a.u32 & b.u32 |
430 | #define gvand_n_u32(d, n) d.u32 &= n |
431 | #define gvbic(d, a, b) d.u32 = a.u32 & ~b.u32 |
432 | #define gvbicq gvbic |
433 | #define gveor(d, a, b) d.u32 = a.u32 ^ b.u32 |
434 | #define gveorq gveor |
435 | #define gvceqz_u16(d, s) d.u16 = s.u16 == 0 |
436 | #define gvceqzq_u16 gvceqz_u16 |
437 | #define gvcltz_s16(d, s) d.s16 = s.s16 < 0 |
438 | #define gvcltzq_s16 gvcltz_s16 |
439 | #define gvsub_u16(d, a, b) d.u16 = a.u16 - b.u16 |
440 | #define gvsub_u32(d, a, b) d.u32 = a.u32 - b.u32 |
441 | #define gvsubq_u16 gvsub_u16 |
442 | #define gvsubq_u32 gvsub_u32 |
443 | #define gvorr(d, a, b) d.u32 = a.u32 | b.u32 |
444 | #define gvorrq gvorr |
445 | #define gvorr_n_u16(d, n) d.u16 |= n |
446 | |
447 | // fallbacks |
448 | #if 1 |
449 | |
450 | #ifndef gvaddhn_u32 |
451 | #define gvaddhn_u32(d, a, b) { \ |
452 | gvreg tmp1_ = { .u32 = a.u32 + b.u32 }; \ |
453 | gvmovn_top_u32(d, tmp1_); \ |
454 | } |
455 | #endif |
456 | #ifndef gvabsq_s32 |
457 | #define gvabsq_s32(d, s) { \ |
458 | gvreg tmp1_ = { .s32 = (gvs32){} - s.s32 }; \ |
459 | gvreg mask_ = { .s32 = s.s32 >> 31 }; \ |
460 | gvbslq_(d, mask_, tmp1_, s); \ |
461 | } |
462 | #endif |
463 | #ifndef gvbit |
464 | #define gvbslq_(d, s, a, b) d.u32 = (a.u32 & s.u32) | (b.u32 & ~s.u32) |
465 | #define gvbifq(d, a, b) gvbslq_(d, b, d, a) |
466 | #define gvbit(d, a, b) gvbslq_(d, b, a, d) |
467 | #endif |
468 | #ifndef gvaddw_s32 |
469 | #define gvaddw_s32(d, a, b) {gvreg t_; gvmovl_s32(t_, b); d.s64 += t_.s64;} |
470 | #endif |
471 | #ifndef gvhaddq_u16 |
472 | // can do this because the caller needs the msb clear |
473 | #define gvhaddq_u16(d, a, b) d.u16 = (a.u16 + b.u16) >> 1 |
474 | #endif |
475 | #ifndef gvmin_u16 |
476 | #define gvmin_u16(d, a, b) { \ |
477 | gvu16 t_ = a.u16 < b.u16; \ |
478 | d.u16 = (a.u16 & t_) | (b.u16 & ~t_); \ |
479 | } |
480 | #define gvminq_u16 gvmin_u16 |
481 | #endif |
482 | #ifndef gvmlsq_s32 |
483 | #define gvmlsq_s32(d, a, b) d.s32 -= a.s32 * b.s32 |
484 | #endif |
485 | #ifndef gvmlsq_l_s32 |
486 | #define gvmlsq_l_s32(d, a, b, l){gvreg t_; gvdupq_l_u32(t_, b, l); d.s32 -= a.s32 * t_.s32;} |
487 | #endif |
488 | #ifndef gvmla_s32 |
489 | #define gvmla_s32(d, a, b) d.s32 += a.s32 * b.s32 |
490 | #endif |
491 | #ifndef gvmla_u32 |
492 | #define gvmla_u32 gvmla_s32 |
493 | #endif |
494 | #ifndef gvmlaq_s32 |
495 | #define gvmlaq_s32(d, a, b) d.s32 += a.s32 * b.s32 |
496 | #endif |
497 | #ifndef gvmlaq_u32 |
498 | #define gvmlaq_u32 gvmlaq_s32 |
499 | #endif |
500 | #ifndef gvmlal_u8 |
501 | #define gvmlal_u8(d, a, b) {gvreg t_; gvmull_u8(t_, a, b); d.u16 += t_.u16;} |
502 | #endif |
503 | #ifndef gvmlal_s32 |
504 | #define gvmlal_s32(d, a, b) {gvreg t_; gvmull_s32(t_, a, b); d.s64 += t_.s64;} |
505 | #endif |
506 | #ifndef gvmov_l_s32 |
507 | #define gvmov_l_s32(d, s, l) d.s32[l] = s |
508 | #endif |
509 | #ifndef gvmov_l_u32 |
510 | #define gvmov_l_u32(d, s, l) d.u32[l] = s |
511 | #endif |
512 | #ifndef gvmul_s32 |
513 | #define gvmul_s32(d, a, b) d.s32 = a.s32 * b.s32 |
514 | #endif |
515 | #ifndef gvmull_u8 |
516 | #define gvmull_u8(d, a, b) { \ |
517 | gvreg t1_, t2_; \ |
518 | gvmovl_u8(t1_, a); \ |
519 | gvmovl_u8(t2_, b); \ |
520 | d.u16 = t1_.u16 * t2_.u16; \ |
521 | } |
522 | #endif |
523 | #ifndef gvmull_s32 |
524 | // note: compilers tend to use int regs here |
525 | #define gvmull_s32(d, a, b) { \ |
526 | d.s64[0] = (s64)a.s32[0] * b.s32[0]; \ |
527 | d.s64[1] = (s64)a.s32[1] * b.s32[1]; \ |
528 | } |
529 | #endif |
530 | #ifndef gvneg_s32 |
531 | #define gvneg_s32(d, s) d.s32 = -s.s32 |
532 | #endif |
533 | // x86 note: needs _mm_sllv_epi16 (avx512), else this sucks terribly |
534 | #ifndef gvshl_u16 |
535 | #define gvshl_u16(d, a, b) d.u16 = a.u16 << b.u16 |
536 | #endif |
537 | // x86 note: needs _mm_sllv_* (avx2) |
538 | #ifndef gvshlq_u64 |
539 | #define gvshlq_u64(d, a, b) d.u64 = a.u64 << b.u64 |
540 | #endif |
541 | #ifndef gvshll_n_s8 |
542 | #define gvshll_n_s8(d, s, n) {gvreg t_; gvmovl_s8(t_, s); gvshlq_n_u16(d, t_, n);} |
543 | #endif |
544 | #ifndef gvshll_n_u8 |
545 | #define gvshll_n_u8(d, s, n) {gvreg t_; gvmovl_u8(t_, s); gvshlq_n_u16(d, t_, n);} |
546 | #endif |
547 | #ifndef gvshr_n_u8 |
548 | #define gvshr_n_u8(d, s, n) d.u8 = s.u8 >> (n) |
549 | #endif |
550 | #ifndef gvshrq_n_s64 |
551 | #define gvshrq_n_s64(d, s, n) d.s64 = s.s64 >> (n) |
552 | #endif |
553 | #ifndef gvshrn_n_u16 |
554 | #define gvshrn_n_u16(d, s, n) {gvreg t_; gvshrq_n_u16(t_, s, n); gvmovn_u16(d, t_);} |
555 | #endif |
556 | #ifndef gvshrn_n_u32 |
557 | #define gvshrn_n_u32(d, s, n) {gvreg t_; gvshrq_n_u32(t_, s, n); gvmovn_u32(d, t_);} |
558 | #endif |
559 | #ifndef gvsli_n_u8 |
560 | #define gvsli_n_u8(d, s, n) d.u8 = (s.u8 << (n)) | (d.u8 & ((1u << (n)) - 1u)) |
561 | #endif |
562 | #ifndef gvsri_n_u8 |
563 | #define gvsri_n_u8(d, s, n) d.u8 = (s.u8 >> (n)) | (d.u8 & ((0xff00u >> (n)) & 0xffu)) |
564 | #endif |
565 | #ifndef gvtstq_u16 |
566 | #define gvtstq_u16(d, a, b) d.u16 = (a.u16 & b.u16) != 0 |
567 | #endif |
568 | |
569 | #ifndef gvld2_u8_dup |
570 | #define gvld2_u8_dup(v0, v1, p) { \ |
571 | gvdup_n_u8(v0, ((const u8 *)(p))[0]); \ |
572 | gvdup_n_u8(v1, ((const u8 *)(p))[1]); \ |
573 | } |
574 | #endif |
575 | #ifndef gvst1_u8 |
576 | #define gvst1_u8(v, p) *(uint64_t_ua *)(p) = v.u64[0] |
577 | #endif |
578 | #ifndef gvst1q_u16 |
579 | #define gvst1q_u16(v, p) *(gvreg_ua *)(p) = v |
580 | #endif |
581 | #ifndef gvst1q_inc_u32 |
582 | #define gvst1q_inc_u32(v, p, i) {*(gvreg_ua *)(p) = v; p += (i) / sizeof(*p);} |
583 | #endif |
584 | #ifndef gvst1q_pi_u32 |
585 | #define gvst1q_pi_u32(v, p) gvst1q_inc_u32(v, p, sizeof(v)) |
586 | #endif |
587 | #ifndef gvst1q_2_pi_u32 |
588 | #define gvst1q_2_pi_u32(v0, v1, p) { \ |
589 | gvst1q_inc_u32(v0, p, sizeof(v0)); \ |
590 | gvst1q_inc_u32(v1, p, sizeof(v1)); \ |
591 | } |
592 | #endif |
593 | #ifndef gvst2_u8 |
594 | #define gvst2_u8(v0, v1, p) {gvreg t_; gvzip_u8(t_, v0, v1); *(gvu8_ua *)(p) = t_.u8;} |
595 | #endif |
596 | #ifndef gvst2_u16 |
597 | #define gvst2_u16(v0, v1, p) {gvreg t_; gvzip_u16(t_, v0, v1); *(gvu16_ua *)(p) = t_.u16;} |
598 | #endif |
599 | |
600 | // note: these shuffles assume sizeof(gvhreg) == 16 && sizeof(gvreg) == 16 |
601 | #ifndef __has_builtin |
602 | #define __has_builtin(x) 0 |
603 | #endif |
604 | |
605 | // prefer __builtin_shuffle on gcc, as gcc's __builtin_shufflevector handles -1 (don't-care) indices poorly
606 | #if __has_builtin(__builtin_shufflevector) && !__has_builtin(__builtin_shuffle) |
607 | |
608 | #ifndef gvld2q_u8 |
609 | #define gvld2q_u8(v0, v1, p) { \ |
610 | gvu8 v0_ = ((gvu8_ua *)(p))[0]; \ |
611 | gvu8 v1_ = ((gvu8_ua *)(p))[1]; \ |
612 | v0.u8 = __builtin_shufflevector(v0_, v1_, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30); \ |
613 | v1.u8 = __builtin_shufflevector(v0_, v1_, 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31); \ |
614 | } |
615 | #endif |
616 | #ifndef gvmovn_u16 |
617 | #define gvmovn_u16(d, s) \ |
618 | d.u8 = __builtin_shufflevector(s.u8, s.u8, 0,2,4,6,8,10,12,14,-1,-1,-1,-1,-1,-1,-1,-1) |
619 | #endif |
620 | #ifndef gvmovn_u32 |
621 | #define gvmovn_u32(d, s) \ |
622 | d.u16 = __builtin_shufflevector(s.u16, s.u16, 0,2,4,6,-1,-1,-1,-1) |
623 | #endif |
624 | #ifndef gvmovn_top_u32 |
625 | #define gvmovn_top_u32(d, s) \ |
626 | d.u16 = __builtin_shufflevector(s.u16, s.u16, 1,3,5,7,-1,-1,-1,-1) |
627 | #endif |
628 | #ifndef gvzip_u8 |
629 | #define gvzip_u8(d, a, b) \ |
630 | d.u8 = __builtin_shufflevector(a.u8, b.u8, 0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23) |
631 | #endif |
632 | #ifndef gvzip_u16 |
633 | #define gvzip_u16(d, a, b) \ |
634 | d.u16 = __builtin_shufflevector(a.u16, b.u16, 0,8,1,9,2,10,3,11) |
635 | #endif |
636 | #ifndef gvzipq_u16 |
637 | #define gvzipq_u16(d0, d1, s0, s1) { \ |
638 | gvu16 t_ = __builtin_shufflevector(s0.u16, s1.u16, 0, 8, 1, 9, 2, 10, 3, 11); \ |
639 | d1.u16 = __builtin_shufflevector(s0.u16, s1.u16, 4,12, 5,13, 6, 14, 7, 15); \ |
640 | d0.u16 = t_; \ |
641 | } |
642 | #endif |
643 | |
644 | #else // !__has_builtin(__builtin_shufflevector) |
645 | |
646 | #ifndef gvld2q_u8 |
647 | #define gvld2q_u8(v0, v1, p) { \ |
648 | gvu8 v0_ = ((gvu8_ua *)(p))[0]; \ |
649 | gvu8 v1_ = ((gvu8_ua *)(p))[1]; \ |
650 | v0.u8 = __builtin_shuffle(v0_, v1_, (gvu8){0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30}); \ |
651 | v1.u8 = __builtin_shuffle(v0_, v1_, (gvu8){1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31}); \ |
652 | } |
653 | #endif |
654 | #ifndef gvmovn_u16 |
655 | #define gvmovn_u16(d, s) \ |
656 | d.u8 = __builtin_shuffle(s.u8, (gvu8){0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14}) |
657 | #endif |
658 | #ifndef gvmovn_u32 |
659 | #define gvmovn_u32(d, s) \ |
660 | d.u16 = __builtin_shuffle(s.u16, (gvu16){0,2,4,6,0,2,4,6}) |
661 | #endif |
662 | #ifndef gvmovn_top_u32 |
663 | #define gvmovn_top_u32(d, s) \ |
664 | d.u16 = __builtin_shuffle(s.u16, (gvu16){1,3,5,7,1,3,5,7}) |
665 | #endif |
666 | #ifndef gvtbl2_u8 |
667 | #define gvtbl2_u8(d, a, b) d.u8 = __builtin_shuffle(a.u8, b.u8) |
668 | #endif |
669 | #ifndef gvzip_u8 |
670 | #define gvzip_u8(d, a, b) \ |
671 | d.u8 = __builtin_shuffle(a.u8, b.u8, (gvu8){0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23}) |
672 | #endif |
673 | #ifndef gvzip_u16 |
674 | #define gvzip_u16(d, a, b) \ |
675 | d.u16 = __builtin_shuffle(a.u16, b.u16, (gvu16){0,8,1,9,2,10,3,11}) |
676 | #endif |
677 | #ifndef gvzipq_u16 |
678 | #define gvzipq_u16(d0, d1, s0, s1) { \ |
679 | gvu16 t_ = __builtin_shuffle(s0.u16, s1.u16, (gvu16){0, 8, 1, 9, 2, 10, 3, 11}); \ |
680 | d1.u16 = __builtin_shuffle(s0.u16, s1.u16, (gvu16){4,12, 5,13, 6, 14, 7, 15}); \ |
681 | d0.u16 = t_; \ |
682 | } |
683 | #endif |
684 | |
685 | #endif // __builtin_shufflevector || __builtin_shuffle |
686 | |
687 | #ifndef gvtbl2_u8 |
688 | #define gvtbl2_u8(d, a, b) { \ |
689 | int i_; \ |
690 | for (i_ = 0; i_ < 16; i_++) \ |
691 | d.u8[i_] = a.u8[b.u8[i_]]; \ |
692 | } |
693 | #endif |
694 | |
695 | #endif // fallbacks |
696 | |
697 | #if defined(__arm__) |
698 | |
699 | #define gssub16(d, a, b) asm("ssub16 %0,%1,%2" : "=r"(d) : "r"(a), "r"(b)) |
700 | #define gsmusdx(d, a, b) asm("smusdx %0,%1,%2" : "=r"(d) : "r"(a), "r"(b)) |
701 | |
702 | #if 0 |
703 | // gcc/config/arm/arm.c |
704 | #undef gvadd_s64 |
705 | #define gvadd_s64(d, a, b) asm("vadd.i64 %P0,%P1,%P2" : "=w"(d.s64) : "w"(a.s64), "w"(b.s64)) |
706 | #endif |
707 | |
708 | #else |
709 | |
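// Scalar equivalents of the ARMv6 ssub16/smusdx instructions used above:
// ssub16 performs two independent 16-bit subtractions within a 32-bit pair,
// smusdx computes (a.lo * b.hi) - (a.hi * b.lo).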
710 | #define gssub16(d, a, b) d = (u16)((a) - (b)) | ((((a) >> 16) - ((b) >> 16)) << 16) |
711 | #define gsmusdx(d, a, b) d = ((s32)(s16)(a) * ((s32)(b) >> 16)) \ |
712 | - (((s32)(a) >> 16) * (s16)(b)) |
713 | |
714 | #endif |
715 | |
716 | // for compatibility with the original psx_gpu.c code |
717 | #define vec_2x64s gvreg |
718 | #define vec_2x64u gvreg |
719 | #define vec_4x32s gvreg |
720 | #define vec_4x32u gvreg |
721 | #define vec_8x16s gvreg |
722 | #define vec_8x16u gvreg |
723 | #define vec_16x8s gvreg |
724 | #define vec_16x8u gvreg |
725 | #define vec_1x64s gvhreg |
726 | #define vec_1x64u gvhreg |
727 | #define vec_2x32s gvhreg |
728 | #define vec_2x32u gvhreg |
729 | #define vec_4x16s gvhreg |
730 | #define vec_4x16u gvhreg |
731 | #define vec_8x8s gvhreg |
732 | #define vec_8x8u gvhreg |
733 | |
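/* Illustrative sketch only (not part of the original code): shows how the gv*
 * macros compose NEON-style regardless of the backend selected above. The
 * function name and the row-averaging example are assumptions made for this
 * comment; note the generic gvhaddq_u16 fallback expects the msb of each lane
 * to be clear.
 */
#if 0
static void gv_example_average_rows(const u16 *row_a, const u16 *row_b, u16 *dst)
{
  gvreg va, vb;
  gvld1q_u16(va, row_a);         // load 8 u16 texels from each row
  gvld1q_u16(vb, row_b);
  gvhaddq_u16(va, va, vb);       // per-lane halving add: (a + b) >> 1
  gvst1q_u16(va, dst);           // store the 8 averaged values
}
#endif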
734 | #if 0 |
735 | #include <stdio.h> |
736 | #include <stdlib.h> |
737 | #include <unistd.h> |
738 | static int ccount, dump_enabled; |
739 | void cmpp(const char *name, const void *a_, const void *b_, size_t len) |
740 | { |
741 | const uint32_t *a = a_, *b = b_, masks[] = { 0, 0xff, 0xffff, 0xffffff }; |
742 | size_t i, left; |
743 | uint32_t mask; |
744 | for (i = 0; i < (len + 3)/4; i++) { |
745 | left = len - i*4; |
746 | mask = left >= 4 ? ~0u : masks[left]; |
747 | if ((a[i] ^ b[i]) & mask) { |
748 | printf("%s: %08x %08x [%03zx/%zu] #%d\n", |
749 | name, a[i] & mask, b[i] & mask, i*4, i, ccount); |
750 | exit(1); |
751 | } |
752 | } |
753 | ccount++; |
754 | } |
755 | #define ccmpf(n) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c)) |
756 | #define ccmpa(n,c) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c[0]) * c) |
757 | |
758 | void dump_r_(const char *name, void *dump, int is_q) |
759 | { |
760 | unsigned long long *u = dump; |
761 | if (!dump_enabled) return; |
762 | //if (ccount > 1) return; |
763 | printf("%20s %016llx ", name, u[0]); |
764 | if (is_q) |
765 | printf("%016llx", u[1]); |
766 | puts(""); |
767 | } |
768 | void __attribute__((noinline,noclone)) dump_r_d(const char *name, void *dump) |
769 | { dump_r_(name, dump, 0); } |
770 | void __attribute__((noinline,noclone)) dump_r_q(const char *name, void *dump) |
771 | { dump_r_(name, dump, 1); } |
772 | #define dumprd(n) { u8 dump_[8]; gvst1_u8(n, dump_); dump_r_d(#n, dump_); } |
773 | #define dumprq(n) { u16 dump_[8]; gvst1q_u16(n, dump_); dump_r_q(#n, dump_); } |
774 | #endif |
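// Debug-only scaffolding: flip the #if above to 1 to use it; ccmpf()/ccmpa()
// compare psx_gpu fields against a *_c reference copy (assumed to exist in the
// calling code), and dumprd()/dumprq() print vector registers once
// dump_enabled is set.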
775 | |
776 | void compute_all_gradients(psx_gpu_struct * __restrict__ psx_gpu, |
777 | const vertex_struct * __restrict__ a, const vertex_struct * __restrict__ b, |
778 | const vertex_struct * __restrict__ c) |
779 | { |
780 | union { double d; struct { u32 l; u32 h; } i; } divident, divider; |
781 | union { double d; gvhreg v; } d30; |
782 | |
783 | #if 0 |
784 | compute_all_gradients_(psx_gpu, a, b, c); |
785 | return; |
786 | #endif |
787 | // First compute the triangle area reciprocal and shift. The division will |
788 | // happen concurrently with much of the work which follows. |
789 | |
790 | // load exponent of 62 into upper half of double |
791 | u32 shift = __builtin_clz(psx_gpu->triangle_area); |
792 | u32 triangle_area_normalized = psx_gpu->triangle_area << shift; |
793 | |
794 | // load area normalized into lower half of double |
795 | divident.i.l = triangle_area_normalized >> 10; |
796 | divident.i.h = (62 + 1023) << 20; |
797 | |
798 | divider.i.l = triangle_area_normalized << 20; |
799 | divider.i.h = ((1022 + 31) << 20) + (triangle_area_normalized >> 11); |
800 | |
801 | d30.d = divident.d / divider.d; // d30 = ((1 << 62) + ta_n) / ta_n |
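// A reading of the bit patterns above (informational, assumption about intent):
// divident is assembled with exponent 62 and ta_n's top bits as its mantissa,
// i.e. approximately (1 << 62) + ta_n, while divider encodes ta_n exactly
// (exponent 31). The quotient is therefore ~2^62 / ta_n, a roughly 31-bit
// value, and shifting its raw double bits right by 22 (gvshr_n_u64 below)
// recovers the integer reciprocal directly, so no float-to-int conversion is
// needed.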
802 | |
803 | // ((x1 - x0) * (y2 - y1)) - ((x2 - x1) * (y1 - y0)) = |
804 | // ( d0 * d1 ) - ( d2 * d3 ) = |
805 | // ( m0 ) - ( m1 ) = gradient |
806 | |
807 | // This is split to do 12 elements at a time over three sets: a, b, and c. |
808 | // Technically we only need to do 10 elements (uvrgb_x and uvrgb_y), so |
809 | // two of the slots are unused. |
810 | |
811 | // Inputs are all 16-bit signed. The m0/m1 results are 32-bit signed, as |
812 | // is g. |
813 | |
814 | // First type is: uvrg bxxx xxxx |
815 | // Second type is: yyyy ybyy uvrg |
816 | // Since x_a and y_c are the same, the same variable is used for both.
817 | |
818 | gvreg v0; |
819 | gvreg v1; |
820 | gvreg v2; |
821 | gvreg uvrg_xxxx0; |
822 | gvreg uvrg_xxxx1; |
823 | gvreg uvrg_xxxx2; |
824 | |
825 | gvreg y0_ab; |
826 | gvreg y1_ab; |
827 | gvreg y2_ab; |
828 | |
829 | gvreg d0_ab; |
830 | gvreg d1_ab; |
831 | gvreg d2_ab; |
832 | gvreg d3_ab; |
833 | |
834 | gvreg ga_uvrg_x; |
835 | gvreg ga_uvrg_y; |
836 | gvreg gw_rg_x; |
837 | gvreg gw_rg_y; |
838 | gvreg w_mask; |
839 | gvreg r_shift; |
840 | gvreg uvrg_dx2, uvrg_dx3; |
841 | gvreg uvrgb_phase; |
842 | gvhreg zero, tmp_lo, tmp_hi; |
843 | |
844 | gvld1q_u8(v0, (u8 *)a); // v0 = { uvrg0, b0, x0, y0 } |
845 | gvld1q_u8(v1, (u8 *)b); // v1 = { uvrg1, b1, x1, y1 } |
846 | gvld1q_u8(v2, (u8 *)c); // v2 = { uvrg2, b2, x2, y2 } |
847 | |
848 | gvmovl_u8(uvrg_xxxx0, gvlo(v0)); // uvrg_xxxx0 = { uv0, rg0, b0-, -- } |
849 | gvmovl_u8(uvrg_xxxx1, gvlo(v1)); // uvrg_xxxx1 = { uv1, rg1, b1-, -- } |
850 | gvmovl_u8(uvrg_xxxx2, gvlo(v2)); // uvrg_xxxx2 = { uv2, rg2, b2-, -- } |
851 | |
852 | gvdup_l_u16(tmp_lo, gvhi(v0), 1); // yyyy0 = { yy0, yy0 } |
853 | gvcombine_u16(y0_ab, tmp_lo, gvlo(uvrg_xxxx0)); |
854 | |
855 | gvdup_l_u16(tmp_lo, gvhi(v0), 0); // xxxx0 = { xx0, xx0 } |
856 | gvset_hi(uvrg_xxxx0, tmp_lo); |
857 | |
858 | u32 x1_x2 = (u16)b->x | (c->x << 16); // x1_x2 = { x1, x2 } |
859 | u32 x0_x1 = (u16)a->x | (b->x << 16); // x0_x1 = { x0, x1 } |
860 | |
861 | gvdup_l_u16(tmp_lo, gvhi(v1), 1); // yyyy1 = { yy1, yy1 } |
862 | gvcombine_u16(y1_ab, tmp_lo, gvlo(uvrg_xxxx1)); |
863 | |
864 | gvdup_l_u16(tmp_lo, gvhi(v1), 0); // xxxx1 = { xx1, xx1 } |
865 | gvset_hi(uvrg_xxxx1, tmp_lo); |
866 | |
867 | gvdup_l_u16(tmp_lo, gvhi(v2), 1); // yyyy2 = { yy2, yy2 } |
868 | gvcombine_u16(y2_ab, tmp_lo, gvlo(uvrg_xxxx2)); |
869 | |
870 | gvdup_l_u16(tmp_lo, gvhi(v2), 0); // xxxx2 = { xx2, xx2 } |
871 | gvset_hi(uvrg_xxxx2, tmp_lo); |
872 | |
873 | u32 y0_y1 = (u16)a->y | (b->y << 16); // y0_y1 = { y0, y1 } |
874 | u32 y1_y2 = (u16)b->y | (c->y << 16); // y1_y2 = { y1, y2 } |
875 | |
876 | gvsubq_u16(d0_ab, uvrg_xxxx1, uvrg_xxxx0); |
877 | |
878 | u32 b1_b2 = b->b | (c->b << 16); // b1_b2 = { b1, b2 } |
879 | |
880 | gvsubq_u16(d2_ab, uvrg_xxxx2, uvrg_xxxx1); |
881 | |
882 | gvsubq_u16(d1_ab, y2_ab, y1_ab); |
883 | |
884 | u32 b0_b1 = a->b | (b->b << 16); // b0_b1 = { b0, b1 } |
885 | |
886 | u32 dx, dy, db; |
887 | gssub16(dx, x1_x2, x0_x1); // dx = { x1 - x0, x2 - x1 } |
888 | gssub16(dy, y1_y2, y0_y1); // dy = { y1 - y0, y2 - y1 } |
889 | gssub16(db, b1_b2, b0_b1); // db = { b1 - b0, b2 - b1 } |
890 | |
891 | u32 ga_by, ga_bx; |
892 | gvsubq_u16(d3_ab, y1_ab, y0_ab); |
893 | gsmusdx(ga_by, dx, db); // ga_by = ((x1 - x0) * (b2 - b1)) - |
894 | // ((x2 - x1) * (b1 - b0))
895 | gvmull_s16(ga_uvrg_x, gvlo(d0_ab), gvlo(d1_ab)); |
896 | gsmusdx(ga_bx, db, dy); // ga_bx = ((b1 - b0) * (y2 - y1)) - |
897 | // ((b2 - b1) * (y1 - y0)) |
898 | gvmlsl_s16(ga_uvrg_x, gvlo(d2_ab), gvlo(d3_ab)); |
899 | u32 gs_bx = (s32)ga_bx >> 31; // movs |
900 | |
901 | gvmull_s16(ga_uvrg_y, gvhi(d0_ab), gvhi(d1_ab)); |
902 | if ((s32)gs_bx < 0) ga_bx = -ga_bx; // rsbmi |
903 | |
904 | gvmlsl_s16(ga_uvrg_y, gvhi(d2_ab), gvhi(d3_ab)); |
905 | u32 gs_by = (s32)ga_by >> 31; // movs |
906 | |
907 | gvhreg d0; |
908 | gvshr_n_u64(d0, d30.v, 22); // note: on "d30 >> 22" gcc generates junk code |
909 | |
910 | gvdupq_n_u32(uvrgb_phase, psx_gpu->uvrgb_phase); |
911 | u32 b_base = psx_gpu->uvrgb_phase + (a->b << 16); |
912 | |
913 | if ((s32)gs_by < 0) ga_by = -ga_by; // rsbmi |
914 | gvreg gs_uvrg_x, gs_uvrg_y; |
915 | gs_uvrg_x.s32 = ga_uvrg_x.s32 < 0; // gs_uvrg_x = ga_uvrg_x < 0 |
916 | gs_uvrg_y.s32 = ga_uvrg_y.s32 < 0; // gs_uvrg_y = ga_uvrg_y < 0 |
917 | |
918 | gvdupq_n_u32(w_mask, -psx_gpu->triangle_winding); // w_mask = { -w, -w, -w, -w } |
919 | shift -= 62 - 12; // shift -= (62 - FIXED_BITS) |
920 | |
921 | gvreg uvrg_base; |
922 | gvshll_n_u16(uvrg_base, gvlo(uvrg_xxxx0), 16); // uvrg_base = uvrg0 << 16 |
923 | |
924 | gvaddq_u32(uvrg_base, uvrg_base, uvrgb_phase); |
925 | gvabsq_s32(ga_uvrg_x, ga_uvrg_x); // ga_uvrg_x = abs(ga_uvrg_x) |
926 | |
927 | u32 area_r_s = d0.u32[0]; // area_r_s = triangle_reciprocal |
928 | gvabsq_s32(ga_uvrg_y, ga_uvrg_y); // ga_uvrg_y = abs(ga_uvrg_y) |
929 | |
930 | gvmull_l_u32(gw_rg_x, gvhi(ga_uvrg_x), d0, 0); |
931 | gvmull_l_u32(ga_uvrg_x, gvlo(ga_uvrg_x), d0, 0); |
932 | gvmull_l_u32(gw_rg_y, gvhi(ga_uvrg_y), d0, 0); |
933 | gvmull_l_u32(ga_uvrg_y, gvlo(ga_uvrg_y), d0, 0); |
934 | |
935 | #if defined(__ARM_NEON) || defined(__ARM_NEON__) |
936 | gvdupq_n_s64(r_shift, shift); // r_shift = { shift, shift } |
937 | gvshlq_u64(gw_rg_x, gw_rg_x, r_shift); |
938 | gvshlq_u64(ga_uvrg_x, ga_uvrg_x, r_shift); |
939 | gvshlq_u64(gw_rg_y, gw_rg_y, r_shift); |
940 | gvshlq_u64(ga_uvrg_y, ga_uvrg_y, r_shift); |
941 | #elif defined(__SSE2__) |
942 | r_shift.m = _mm_cvtsi32_si128(-shift); |
943 | gw_rg_x.m = _mm_srl_epi64(gw_rg_x.m, r_shift.m); |
944 | ga_uvrg_x.m = _mm_srl_epi64(ga_uvrg_x.m, r_shift.m); |
945 | gw_rg_y.m = _mm_srl_epi64(gw_rg_y.m, r_shift.m); |
946 | ga_uvrg_y.m = _mm_srl_epi64(ga_uvrg_y.m, r_shift.m); |
947 | #else |
948 | gvdupq_n_s64(r_shift, -shift); // r_shift = { shift, shift } |
949 | gvshrq_u64(gw_rg_x, gw_rg_x, r_shift); |
950 | gvshrq_u64(ga_uvrg_x, ga_uvrg_x, r_shift); |
951 | gvshrq_u64(gw_rg_y, gw_rg_y, r_shift); |
952 | gvshrq_u64(ga_uvrg_y, ga_uvrg_y, r_shift); |
953 | #endif |
954 | |
955 | gveorq(gs_uvrg_x, gs_uvrg_x, w_mask); |
956 | gvmovn_u64(tmp_lo, ga_uvrg_x); |
957 | |
958 | gveorq(gs_uvrg_y, gs_uvrg_y, w_mask); |
959 | gvmovn_u64(tmp_hi, gw_rg_x); |
960 | |
961 | gvcombine_u32(ga_uvrg_x, tmp_lo, tmp_hi); |
962 | |
963 | gveorq(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x); |
964 | gvmovn_u64(tmp_lo, ga_uvrg_y); |
965 | |
966 | gvsubq_u32(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x); |
967 | gvmovn_u64(tmp_hi, gw_rg_y); |
968 | |
969 | gvcombine_u32(ga_uvrg_y, tmp_lo, tmp_hi); |
970 | |
971 | gveorq(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y); |
972 | ga_bx = ga_bx << 13; |
973 | |
974 | gvsubq_u32(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y); |
975 | ga_by = ga_by << 13; |
976 | |
977 | u32 gw_bx_h, gw_by_h; |
978 | gw_bx_h = (u64)ga_bx * area_r_s >> 32; |
979 | |
980 | gvshlq_n_u32(ga_uvrg_x, ga_uvrg_x, 4); |
981 | gvshlq_n_u32(ga_uvrg_y, ga_uvrg_y, 4); |
982 | |
983 | gw_by_h = (u64)ga_by * area_r_s >> 32; |
984 | gvdup_n_u32(tmp_lo, a->x); |
985 | gvmlsq_l_s32(uvrg_base, ga_uvrg_x, tmp_lo, 0); |
986 | |
987 | gs_bx = gs_bx ^ -psx_gpu->triangle_winding; |
988 | gvaddq_u32(uvrg_dx2, ga_uvrg_x, ga_uvrg_x); |
989 | |
990 | gs_by = gs_by ^ -psx_gpu->triangle_winding; |
991 | |
992 | u32 r11 = -shift; // r11 = negative shift for scalar lsr |
993 | u32 *store_a = psx_gpu->uvrg.e; |
994 | r11 = r11 - (32 - 13); |
995 | u32 *store_b = store_a + 16 / sizeof(u32); |
996 | |
997 | gvaddq_u32(uvrg_dx3, uvrg_dx2, ga_uvrg_x); |
998 | gvst1q_inc_u32(uvrg_base, store_a, 32); |
999 | |
1000 | gvst1q_inc_u32(ga_uvrg_x, store_b, 32); |
1001 | u32 g_bx = (u32)gw_bx_h >> r11; |
1002 | |
1003 | gvst1q_inc_u32(ga_uvrg_y, store_a, 32); |
1004 | u32 g_by = (u32)gw_by_h >> r11; |
1005 | |
1006 | gvdup_n_u32(zero, 0); |
1007 | |
1008 | gvst4_4_inc_u32(zero, gvlo(ga_uvrg_x), gvlo(uvrg_dx2), gvlo(uvrg_dx3), store_b, 32); |
1009 | g_bx = g_bx ^ gs_bx; |
1010 | |
1011 | gvst4_4_inc_u32(zero, gvhi(ga_uvrg_x), gvhi(uvrg_dx2), gvhi(uvrg_dx3), store_b, 32); |
1012 | g_bx = g_bx - gs_bx; |
1013 | |
1014 | g_bx = g_bx << 4; |
1015 | g_by = g_by ^ gs_by; |
1016 | |
1017 | b_base -= g_bx * a->x; |
1018 | g_by = g_by - gs_by; |
1019 | |
1020 | g_by = g_by << 4; |
1021 | |
1022 | u32 g_bx2 = g_bx + g_bx; |
1023 | u32 g_bx3 = g_bx + g_bx2; |
1024 | |
1025 | // 112 |
1026 | store_b[0] = 0; |
1027 | store_b[1] = g_bx; |
1028 | store_b[2] = g_bx2; |
1029 | store_b[3] = g_bx3; |
1030 | store_b[4] = b_base; |
1031 | store_b[5] = g_by; // 132 |
1032 | } |
1033 | |
1034 | #define setup_spans_debug_check(span_edge_data_element) \ |
1035 | |
1036 | #define setup_spans_prologue_alternate_yes() \ |
1037 | vec_2x64s alternate_x; \ |
1038 | vec_2x64s alternate_dx_dy; \ |
1039 | vec_4x32s alternate_x_32; \ |
1040 | vec_4x16u alternate_x_16; \ |
1041 | \ |
1042 | vec_4x16u alternate_select; \ |
1043 | vec_4x16s y_mid_point; \ |
1044 | \ |
1045 | s32 y_b = v_b->y; \ |
1046 | s64 edge_alt; \ |
1047 | s32 edge_dx_dy_alt; \ |
1048 | u32 edge_shift_alt \ |
1049 | |
1050 | #define setup_spans_prologue_alternate_no() \ |
1051 | |
1052 | #define setup_spans_prologue(alternate_active) \ |
1053 | edge_data_struct *span_edge_data; \ |
1054 | vec_4x32u *span_uvrg_offset; \ |
1055 | u32 *span_b_offset; \ |
1056 | \ |
1057 | s32 clip; \ |
1058 | vec_4x32u v_clip; \ |
1059 | \ |
1060 | vec_2x64s edges_xy; \ |
1061 | vec_2x32s edges_dx_dy; \ |
1062 | vec_2x32u edge_shifts; \ |
1063 | \ |
1064 | vec_2x64s left_x, right_x; \ |
1065 | vec_2x64s left_dx_dy, right_dx_dy; \ |
1066 | vec_4x32s left_x_32, right_x_32; \ |
1067 | vec_2x32s left_x_32_lo, right_x_32_lo; \ |
1068 | vec_2x32s left_x_32_hi, right_x_32_hi; \ |
1069 | vec_4x16s left_right_x_16_lo, left_right_x_16_hi; \ |
1070 | vec_4x16s y_x4; \ |
1071 | vec_8x16s left_edge; \ |
1072 | vec_8x16s right_edge; \ |
1073 | vec_4x16u span_shift; \ |
1074 | \ |
1075 | vec_2x32u c_0x01; \ |
1076 | vec_4x16u c_0x04; \ |
1077 | vec_4x16u c_0xFFFE; \ |
1078 | vec_4x16u c_0x07; \ |
1079 | \ |
1080 | vec_2x32s x_starts; \ |
1081 | vec_2x32s x_ends; \ |
1082 | \ |
1083 | s32 x_a = v_a->x; \ |
1084 | s32 x_b = v_b->x; \ |
1085 | s32 x_c = v_c->x; \ |
1086 | s32 y_a = v_a->y; \ |
1087 | s32 y_c = v_c->y; \ |
1088 | \ |
1089 | vec_4x32u uvrg; \ |
1090 | vec_4x32u uvrg_dy; \ |
1091 | u32 b = psx_gpu->b; \ |
1092 | u32 b_dy = psx_gpu->b_dy; \ |
1093 | const u32 *reciprocal_table = psx_gpu->reciprocal_table_ptr; \ |
1094 | \ |
1095 | gvld1q_u32(uvrg, psx_gpu->uvrg.e); \ |
1096 | gvld1q_u32(uvrg_dy, psx_gpu->uvrg_dy.e); \ |
1097 | gvdup_n_u32(c_0x01, 0x01); \ |
1098 | setup_spans_prologue_alternate_##alternate_active() \ |
1099 | |
1100 | #define setup_spans_prologue_b() \ |
1101 | span_edge_data = psx_gpu->span_edge_data; \ |
1102 | span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \ |
1103 | span_b_offset = psx_gpu->span_b_offset; \ |
1104 | \ |
1105 | vec_8x16u c_0x0001; \ |
1106 | vec_4x16u c_max_blocks_per_row; \ |
1107 | \ |
1108 | gvdupq_n_u16(c_0x0001, 0x0001); \ |
1109 | gvdupq_n_u16(left_edge, psx_gpu->viewport_start_x); \ |
1110 | gvdupq_n_u16(right_edge, psx_gpu->viewport_end_x); \ |
1111 | gvaddq_u16(right_edge, right_edge, c_0x0001); \ |
1112 | gvdup_n_u16(c_0x04, 0x04); \ |
1113 | gvdup_n_u16(c_0x07, 0x07); \ |
1114 | gvdup_n_u16(c_0xFFFE, 0xFFFE); \ |
1115 | gvdup_n_u16(c_max_blocks_per_row, MAX_BLOCKS_PER_ROW); \ |
1116 | |
1117 | #if defined(__ARM_NEON) || defined(__ARM_NEON__) |
1118 | // better encoding, remaining bits are unused anyway |
1119 | #define mask_edge_shifts(edge_shifts) \ |
1120 | gvbic_n_u16(edge_shifts, 0xE0) |
1121 | #else |
1122 | #define mask_edge_shifts(edge_shifts) \ |
1123 | gvand_n_u32(edge_shifts, 0x1F) |
1124 | #endif |
1125 | |
1126 | #define compute_edge_delta_x2() \ |
1127 | { \ |
1128 | vec_2x32s heights; \ |
1129 | vec_2x32s height_reciprocals; \ |
1130 | vec_2x32s heights_b; \ |
1131 | vec_2x32u widths; \ |
1132 | \ |
1133 | u32 edge_shift = reciprocal_table[height]; \ |
1134 | \ |
1135 | gvdup_n_u32(heights, height); \ |
1136 | gvsub_u32(widths, x_ends, x_starts); \ |
1137 | \ |
1138 | gvdup_n_u32(edge_shifts, edge_shift); \ |
1139 | gvsub_u32(heights_b, heights, c_0x01); \ |
1140 | gvshr_n_u32(height_reciprocals, edge_shifts, 10); \ |
1141 | \ |
1142 | gvmla_s32(heights_b, x_starts, heights); \ |
1143 | mask_edge_shifts(edge_shifts); \ |
1144 | gvmul_s32(edges_dx_dy, widths, height_reciprocals); \ |
1145 | gvmull_s32(edges_xy, heights_b, height_reciprocals); \ |
1146 | } \ |
1147 | |
1148 | #define compute_edge_delta_x3(start_c, height_a, height_b) \ |
1149 | { \ |
1150 | vec_2x32s heights; \ |
1151 | vec_2x32s height_reciprocals; \ |
1152 | vec_2x32s heights_b; \ |
1153 | vec_2x32u widths; \ |
1154 | \ |
1155 | u32 width_alt; \ |
1156 | s32 height_b_alt; \ |
1157 | u32 height_reciprocal_alt; \ |
1158 | \ |
1159 | gvcreate_u32(heights, height_a, height_b); \ |
1160 | gvcreate_u32(edge_shifts, reciprocal_table[height_a], reciprocal_table[height_b]); \ |
1161 | \ |
1162 | edge_shift_alt = reciprocal_table[height_minor_b]; \ |
1163 | \ |
1164 | gvsub_u32(widths, x_ends, x_starts); \ |
1165 | width_alt = x_c - start_c; \ |
1166 | \ |
1167 | gvshr_n_u32(height_reciprocals, edge_shifts, 10); \ |
1168 | height_reciprocal_alt = edge_shift_alt >> 10; \ |
1169 | \ |
1170 | mask_edge_shifts(edge_shifts); \ |
1171 | edge_shift_alt &= 0x1F; \ |
1172 | \ |
1173 | gvsub_u32(heights_b, heights, c_0x01); \ |
1174 | height_b_alt = height_minor_b - 1; \ |
1175 | \ |
1176 | gvmla_s32(heights_b, x_starts, heights); \ |
1177 | height_b_alt += height_minor_b * start_c; \ |
1178 | \ |
1179 | gvmull_s32(edges_xy, heights_b, height_reciprocals); \ |
1180 | edge_alt = (s64)height_b_alt * height_reciprocal_alt; \ |
1181 | \ |
1182 | gvmul_s32(edges_dx_dy, widths, height_reciprocals); \ |
1183 | edge_dx_dy_alt = width_alt * height_reciprocal_alt; \ |
1184 | } \ |
1185 | |
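// Note on the two compute_edge_delta_* macros above: they avoid per-edge
// division by using reciprocal_table, which supplies a fixed-point 1/height
// plus a shift amount; each edge's per-scanline x step is then
// width * reciprocal, and the starting x is scaled the same way, handling two
// edges at once (plus one scalar "alt" edge in the x3 variant).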
1186 | |
1187 | #define setup_spans_adjust_y_up() \ |
1188 | gvsub_u32(y_x4, y_x4, c_0x04) \ |
1189 | |
1190 | #define setup_spans_adjust_y_down() \ |
1191 | gvadd_u32(y_x4, y_x4, c_0x04) \ |
1192 | |
1193 | #define setup_spans_adjust_interpolants_up() \ |
1194 | gvsubq_u32(uvrg, uvrg, uvrg_dy); \ |
1195 | b -= b_dy \ |
1196 | |
1197 | #define setup_spans_adjust_interpolants_down() \ |
1198 | gvaddq_u32(uvrg, uvrg, uvrg_dy); \ |
1199 | b += b_dy \ |
1200 | |
1201 | |
1202 | #define setup_spans_clip_interpolants_increment() \ |
1203 | gvmlaq_s32(uvrg, uvrg_dy, v_clip); \ |
1204 | b += b_dy * clip \ |
1205 | |
1206 | #define setup_spans_clip_interpolants_decrement() \ |
1207 | gvmlsq_s32(uvrg, uvrg_dy, v_clip); \ |
1208 | b -= b_dy * clip \ |
1209 | |
1210 | #define setup_spans_clip_alternate_yes() \ |
1211 | edge_alt += edge_dx_dy_alt * (s64)(clip) \ |
1212 | |
1213 | #define setup_spans_clip_alternate_no() \ |
1214 | |
1215 | #define setup_spans_clip(direction, alternate_active) \ |
1216 | { \ |
1217 | gvdupq_n_u32(v_clip, clip); \ |
1218 | gvmlal_s32(edges_xy, edges_dx_dy, gvlo(v_clip)); \ |
1219 | setup_spans_clip_alternate_##alternate_active(); \ |
1220 | setup_spans_clip_interpolants_##direction(); \ |
1221 | } \ |
1222 | |
1223 | |
1224 | #define setup_spans_adjust_edges_alternate_no(left_half, right_half) \ |
1225 | { \ |
1226 | vec_2x64s edge_shifts_64; \ |
1227 | vec_2x64s edges_dx_dy_64; \ |
1228 | vec_1x64s left_x_hi, right_x_hi; \ |
1229 | \ |
1230 | gvmovl_s32(edge_shifts_64, edge_shifts); \ |
1231 | gvshlq_u64(edges_xy, edges_xy, edge_shifts_64); \ |
1232 | \ |
1233 | gvmovl_s32(edges_dx_dy_64, edges_dx_dy); \ |
1234 | gvshlq_u64(edges_dx_dy_64, edges_dx_dy_64, edge_shifts_64); \ |
1235 | \ |
1236 | gvdupq_l_s64(left_x, gv##left_half(edges_xy), 0); \ |
1237 | gvdupq_l_s64(right_x, gv##right_half(edges_xy), 0); \ |
1238 | \ |
1239 | gvdupq_l_s64(left_dx_dy, gv##left_half(edges_dx_dy_64), 0); \ |
1240 | gvdupq_l_s64(right_dx_dy, gv##right_half(edges_dx_dy_64), 0); \ |
1241 | \ |
1242 | gvadd_s64(left_x_hi, gvlo(left_x), gvlo(left_dx_dy)); \ |
1243 | gvadd_s64(right_x_hi, gvlo(right_x), gvlo(right_dx_dy)); \ |
1244 | \ |
1245 | gvset_hi(left_x, left_x_hi); \ |
1246 | gvset_hi(right_x, right_x_hi); \ |
1247 | \ |
1248 | gvaddq_s64(left_dx_dy, left_dx_dy, left_dx_dy); \ |
1249 | gvaddq_s64(right_dx_dy, right_dx_dy, right_dx_dy); \ |
1250 | } \ |
1251 | |
1252 | #define setup_spans_adjust_edges_alternate_yes(left_half, right_half) \ |
1253 | { \ |
1254 | setup_spans_adjust_edges_alternate_no(left_half, right_half); \ |
1255 | s64 edge_dx_dy_alt_64; \ |
1256 | vec_1x64s alternate_x_hi; \ |
1257 | \ |
1258 | gvdup_n_u16(y_mid_point, y_b); \ |
1259 | \ |
1260 | edge_alt <<= edge_shift_alt; \ |
1261 | edge_dx_dy_alt_64 = (s64)edge_dx_dy_alt << edge_shift_alt; \ |
1262 | \ |
1263 | gvdupq_n_s64(alternate_x, edge_alt); \ |
1264 | gvdupq_n_s64(alternate_dx_dy, edge_dx_dy_alt_64); \ |
1265 | \ |
1266 | gvadd_s64(alternate_x_hi, gvlo(alternate_x), gvlo(alternate_dx_dy)); \ |
1267 | gvaddq_s64(alternate_dx_dy, alternate_dx_dy, alternate_dx_dy); \ |
1268 | gvset_hi(alternate_x, alternate_x_hi); \ |
1269 | } \ |
1270 | |
1271 | |
1272 | #define setup_spans_y_select_up() \ |
1273 | gvclt_s16(alternate_select, y_x4, y_mid_point) \ |
1274 | |
1275 | #define setup_spans_y_select_down() \ |
1276 | gvcgt_s16(alternate_select, y_x4, y_mid_point) \ |
1277 | |
1278 | #define setup_spans_y_select_alternate_yes(direction) \ |
1279 | setup_spans_y_select_##direction() \ |
1280 | |
1281 | #define setup_spans_y_select_alternate_no(direction) \ |
1282 | |
1283 | #define setup_spans_alternate_select_left() \ |
1284 | gvbit(left_right_x_16_lo, alternate_x_16, alternate_select); \ |
1285 | |
1286 | #define setup_spans_alternate_select_right() \ |
1287 | gvbit(left_right_x_16_hi, alternate_x_16, alternate_select); \ |
1288 | |
1289 | #define setup_spans_alternate_select_none() \ |
1290 | |
1291 | #define setup_spans_increment_alternate_yes() \ |
1292 | { \ |
1293 | vec_2x32s alternate_x_32_lo, alternate_x_32_hi; \ |
1294 | gvmovn_top_u64(alternate_x_32_lo, alternate_x); \ |
1295 | gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy); \ |
1296 | gvmovn_top_u64(alternate_x_32_hi, alternate_x); \ |
1297 | gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy); \ |
1298 | gvcombine_u32(alternate_x_32, alternate_x_32_lo, alternate_x_32_hi); \ |
1299 | gvmovn_u32(alternate_x_16, alternate_x_32); \ |
1300 | } \ |
1301 | |
1302 | #define setup_spans_increment_alternate_no() \ |
1303 | |
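// Building the per-span pixel mask: the NEON/generic path shifts 0xFFFE left
// by a per-lane count (gvshl_u16), but plain SSE2 has no per-lane variable
// 16-bit shift (that needs AVX-512 BW/VL), so the SSE2 path below instead
// looks the low byte up in a small table via gvtbl2_u8 (pshufb) and ORs in
// 0xff00 for the high byte.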
1304 | #if defined(__SSE2__) && !(defined(__AVX512BW__) && defined(__AVX512VL__)) |
1305 | #define setup_spans_make_span_shift(span_shift) { \ |
1306 | gvreg tab1_ = { .u8 = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00 } }; \ |
1307 | gvtbl2_u8(span_shift, tab1_, span_shift); \ |
1308 | gvorr_n_u16(span_shift, 0xff00); \ |
1309 | (void)c_0xFFFE; \ |
1310 | } |
1311 | #else |
1312 | #define setup_spans_make_span_shift(span_shift) \ |
1313 | gvshl_u16(span_shift, c_0xFFFE, span_shift) |
1314 | #endif |
1315 | |
1316 | #define setup_spans_set_x4(alternate, direction, alternate_active) \ |
1317 | { \ |
1318 | gvst1q_pi_u32(uvrg, span_uvrg_offset); \ |
1319 | *span_b_offset++ = b; \ |
1320 | setup_spans_adjust_interpolants_##direction(); \ |
1321 | \ |
1322 | gvst1q_pi_u32(uvrg, span_uvrg_offset); \ |
1323 | *span_b_offset++ = b; \ |
1324 | setup_spans_adjust_interpolants_##direction(); \ |
1325 | \ |
1326 | gvst1q_pi_u32(uvrg, span_uvrg_offset); \ |
1327 | *span_b_offset++ = b; \ |
1328 | setup_spans_adjust_interpolants_##direction(); \ |
1329 | \ |
1330 | gvst1q_pi_u32(uvrg, span_uvrg_offset); \ |
1331 | *span_b_offset++ = b; \ |
1332 | setup_spans_adjust_interpolants_##direction(); \ |
1333 | \ |
1334 | gvmovn_top_u64(left_x_32_lo, left_x); \ |
1335 | gvmovn_top_u64(right_x_32_lo, right_x); \ |
1336 | \ |
1337 | gvaddq_s64(left_x, left_x, left_dx_dy); \ |
1338 | gvaddq_s64(right_x, right_x, right_dx_dy); \ |
1339 | \ |
1340 | gvmovn_top_u64(left_x_32_hi, left_x); \ |
1341 | gvmovn_top_u64(right_x_32_hi, right_x); \ |
1342 | \ |
1343 | gvaddq_s64(left_x, left_x, left_dx_dy); \ |
1344 | gvaddq_s64(right_x, right_x, right_dx_dy); \ |
1345 | \ |
1346 | gvcombine_s64(left_x_32, left_x_32_lo, left_x_32_hi); \ |
1347 | gvcombine_s64(right_x_32, right_x_32_lo, right_x_32_hi); \ |
1348 | \ |
1349 | gvmovn_u32(left_right_x_16_lo, left_x_32); \ |
1350 | gvmovn_u32(left_right_x_16_hi, right_x_32); \ |
1351 | \ |
1352 | setup_spans_increment_alternate_##alternate_active(); \ |
1353 | setup_spans_y_select_alternate_##alternate_active(direction); \ |
1354 | setup_spans_alternate_select_##alternate(); \ |
1355 | \ |
1356 | gvmax_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(left_edge)); \ |
1357 | gvmax_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(left_edge)); \ |
1358 | gvmin_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(right_edge)); \ |
1359 | gvmin_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(right_edge)); \ |
1360 | \ |
1361 | gvsub_u16(left_right_x_16_hi, left_right_x_16_hi, left_right_x_16_lo); \ |
1362 | gvadd_u16(left_right_x_16_hi, left_right_x_16_hi, c_0x07); \ |
1363 | gvand(span_shift, left_right_x_16_hi, c_0x07); \ |
1364 | setup_spans_make_span_shift(span_shift); \ |
1365 | gvshr_n_u16(left_right_x_16_hi, left_right_x_16_hi, 3); \ |
1366 | gvmin_u16(left_right_x_16_hi, left_right_x_16_hi, c_max_blocks_per_row); \ |
1367 | \ |
1368 | gvst4_pi_u16(left_right_x_16_lo, left_right_x_16_hi, span_shift, y_x4, \ |
1369 | span_edge_data); \ |
1370 | \ |
1371 | setup_spans_adjust_y_##direction(); \ |
1372 | } \ |
1373 | |
1374 | |
1375 | #define setup_spans_alternate_adjust_yes() \ |
1376 | edge_alt -= edge_dx_dy_alt * (s64)height_minor_a \ |
1377 | |
1378 | #define setup_spans_alternate_adjust_no() \ |
1379 | |
1380 | |
1381 | #define setup_spans_down(left_half, right_half, alternate, alternate_active) \ |
1382 | setup_spans_alternate_adjust_##alternate_active(); \ |
1383 | if(y_c > psx_gpu->viewport_end_y) \ |
1384 | height -= y_c - psx_gpu->viewport_end_y - 1; \ |
1385 | \ |
1386 | clip = psx_gpu->viewport_start_y - y_a; \ |
1387 | if(clip > 0) \ |
1388 | { \ |
1389 | height -= clip; \ |
1390 | y_a += clip; \ |
1391 | setup_spans_clip(increment, alternate_active); \ |
1392 | } \ |
1393 | \ |
1394 | setup_spans_prologue_b(); \ |
1395 | \ |
2d658c89 |
1396 | if (height > 512) \ |
1397 | height = 512; \ |
1398 | if (height > 0) \ |
a2cb152a |
1399 | { \ |
1400 | u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32) \ |
1401 | | (u32)((y_a + 1) << 16) | (u16)y_a; \ |
1402 | gvcreate_u64(y_x4, y_x4_); \ |
9088aca1 |
1403 | setup_spans_adjust_edges_alternate_##alternate_active(left_half, right_half); \ |
a2cb152a |
1404 | \ |
1405 | psx_gpu->num_spans = height; \ |
1406 | do \ |
1407 | { \ |
1408 | setup_spans_set_x4(alternate, down, alternate_active); \ |
1409 | height -= 4; \ |
1410 | } while(height > 0); \ |
ee060c58 |
1411 | if (psx_gpu->hacks_active & (AHACK_TEXTURE_ADJ_U | AHACK_TEXTURE_ADJ_V)) \ |
1412 | { \ |
1413 | vec_2x32u tmp; \ |
1414 | gvld1_u64(tmp, &span_uvrg_offset[height - 2]); \ |
1415 | gvst1_u64(tmp, &span_uvrg_offset[height - 1]); \ |
1416 | } \ |
a2cb152a |
1417 | } \ |
1418 | |
1419 | |
1420 | #define setup_spans_alternate_pre_increment_yes() \ |
1421 | edge_alt += edge_dx_dy_alt \ |
1422 | |
1423 | #define setup_spans_alternate_pre_increment_no() \ |
1424 | |
1425 | #define setup_spans_up_decrement_height_yes() \ |
1426 | height-- \ |
1427 | |
1428 | #define setup_spans_up_decrement_height_no() \ |
1429 | {} \ |
1430 | |
9088aca1 |
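// setup_spans_up: same as setup_spans_down but walking upward from y_a,
// clipping against the viewport, capping at 512 spans and stepping the edges
// and interpolants in the opposite direction; its texture-adjust hack copies
// the second span's u/v into the first.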
1431 | #define setup_spans_up(left_half, right_half, alternate, alternate_active) \ |
a2cb152a |
1432 | setup_spans_alternate_adjust_##alternate_active(); \ |
1433 | y_a--; \ |
1434 | \ |
1435 | if(y_c < psx_gpu->viewport_start_y) \ |
1436 | height -= psx_gpu->viewport_start_y - y_c; \ |
1437 | else \ |
1438 | setup_spans_up_decrement_height_##alternate_active(); \ |
1439 | \ |
1440 | clip = y_a - psx_gpu->viewport_end_y; \ |
1441 | if(clip > 0) \ |
1442 | { \ |
1443 | height -= clip; \ |
1444 | y_a -= clip; \ |
1445 | setup_spans_clip(decrement, alternate_active); \ |
1446 | } \ |
1447 | \ |
1448 | setup_spans_prologue_b(); \ |
1449 | \ |
2d658c89 |
1450 | if (height > 512) \ |
1451 | height = 512; \ |
1452 | if (height > 0) \ |
a2cb152a |
1453 | { \ |
1454 | u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32) \ |
1455 | | (u32)((y_a - 1) << 16) | (u16)y_a; \ |
1456 | gvcreate_u64(y_x4, y_x4_); \ |
9088aca1 |
1457 | gvaddw_s32(edges_xy, edges_xy, edges_dx_dy); \ |
a2cb152a |
1458 | setup_spans_alternate_pre_increment_##alternate_active(); \ |
9088aca1 |
1459 | setup_spans_adjust_edges_alternate_##alternate_active(left_half, right_half); \ |
a2cb152a |
1460 | setup_spans_adjust_interpolants_up(); \ |
1461 | \ |
1462 | psx_gpu->num_spans = height; \ |
1463 | while(height > 0) \ |
1464 | { \ |
1465 | setup_spans_set_x4(alternate, up, alternate_active); \ |
1466 | height -= 4; \ |
1467 | } \ |
ee060c58 |
1468 | if (psx_gpu->hacks_active & AHACK_TEXTURE_ADJ_V) \ |
1469 | { \ |
1470 | vec_2x32u tmp; \ |
1471 | gvld1_u64(tmp, &psx_gpu->span_uvrg_offset[1]); \ |
1472 | gvst1_u64(tmp, &psx_gpu->span_uvrg_offset[0]); \ |
1473 | } \ |
a2cb152a |
1474 | } \ |
1475 | |
9088aca1 |
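// Aliases selecting which half (lo/hi) of the packed edge vectors acts as the
// left or right screen edge in setup_spans_adjust_edges_alternate_*.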
1476 | #define half_left lo |
1477 | #define half_right hi |
a2cb152a |
1478 | |
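// setup_spans_up_up: triangle rendered entirely upward from vertex a (the
// bottom-most vertex); the major a-c edge forms one side while the two minor
// edges a-b and b-c form the other, switching at y_b (the "alternate" edge).
// The left/right variants pick which side the bent edge is on.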
1479 | #define setup_spans_up_up(minor, major) \ |
1480 | setup_spans_prologue(yes); \ |
1481 | s32 height_minor_a = y_a - y_b; \ |
1482 | s32 height_minor_b = y_b - y_c; \ |
1483 | s32 height = y_a - y_c; \ |
1484 | \ |
1485 | gvdup_n_u32(x_starts, x_a); \ |
1486 | gvcreate_u32(x_ends, x_c, x_b); \ |
1487 | \ |
1488 | compute_edge_delta_x3(x_b, height, height_minor_a); \ |
9088aca1 |
1489 | setup_spans_up(half_##major, half_##minor, minor, yes) \ |
a2cb152a |
1490 | |
1491 | void setup_spans_up_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1492 | vertex_struct *v_b, vertex_struct *v_c) |
1493 | { |
1494 | #if 0 |
1495 | setup_spans_up_left_(psx_gpu, v_a, v_b, v_c); |
1496 | return; |
1497 | #endif |
1498 | setup_spans_up_up(left, right) |
1499 | } |
1500 | |
1501 | void setup_spans_up_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1502 | vertex_struct *v_b, vertex_struct *v_c) |
1503 | { |
1504 | #if 0 |
1505 | setup_spans_up_right_(psx_gpu, v_a, v_b, v_c); |
1506 | return; |
1507 | #endif |
1508 | setup_spans_up_up(right, left) |
1509 | } |
1510 | |
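// setup_spans_down_down: mirror image of setup_spans_up_up, walking downward
// from vertex a (the top-most vertex).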
1511 | #define setup_spans_down_down(minor, major) \ |
1512 | setup_spans_prologue(yes); \ |
1513 | s32 height_minor_a = y_b - y_a; \ |
1514 | s32 height_minor_b = y_c - y_b; \ |
1515 | s32 height = y_c - y_a; \ |
1516 | \ |
1517 | gvdup_n_u32(x_starts, x_a); \ |
1518 | gvcreate_u32(x_ends, x_c, x_b); \ |
1519 | \ |
1520 | compute_edge_delta_x3(x_b, height, height_minor_a); \ |
9088aca1 |
1521 | setup_spans_down(half_##major, half_##minor, minor, yes) \ |
a2cb152a |
1522 | |
1523 | void setup_spans_down_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1524 | vertex_struct *v_b, vertex_struct *v_c) |
1525 | { |
1526 | #if 0 |
1527 | setup_spans_down_left_(psx_gpu, v_a, v_b, v_c); |
1528 | return; |
1529 | #endif |
1530 | setup_spans_down_down(left, right) |
1531 | } |
1532 | |
1533 | void setup_spans_down_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1534 | vertex_struct *v_b, vertex_struct *v_c) |
1535 | { |
1536 | #if 0 |
1537 | setup_spans_down_right_(psx_gpu, v_a, v_b, v_c); |
1538 | return; |
1539 | #endif |
1540 | setup_spans_down_down(right, left) |
1541 | } |
1542 | |
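// Flat cases: one edge is horizontal (two vertices share the same y), so only
// two edge deltas are needed (compute_edge_delta_x2) and no alternate edge
// switching occurs.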
1543 | #define setup_spans_up_flat() \ |
1544 | s32 height = y_a - y_c; \ |
1545 | \ |
1546 | compute_edge_delta_x2(); \ |
9088aca1 |
1547 | setup_spans_up(half_left, half_right, none, no) \ |
a2cb152a |
1548 | |
1549 | void setup_spans_up_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1550 | vertex_struct *v_b, vertex_struct *v_c) |
1551 | { |
1552 | #if 0 |
1553 | setup_spans_up_a_(psx_gpu, v_a, v_b, v_c); |
1554 | return; |
1555 | #endif |
1556 | setup_spans_prologue(no); |
1557 | |
1558 | gvcreate_u32(x_starts, x_a, x_b); |
1559 | gvdup_n_u32(x_ends, x_c); |
1560 | |
1561 | setup_spans_up_flat() |
1562 | } |
1563 | |
1564 | void setup_spans_up_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1565 | vertex_struct *v_b, vertex_struct *v_c) |
1566 | { |
1567 | #if 0 |
1568 | setup_spans_up_b_(psx_gpu, v_a, v_b, v_c); |
1569 | return; |
1570 | #endif |
1571 | setup_spans_prologue(no); |
1572 | |
1573 | gvdup_n_u32(x_starts, x_a); |
1574 | gvcreate_u32(x_ends, x_b, x_c); |
1575 | |
1576 | setup_spans_up_flat() |
1577 | } |
1578 | |
1579 | #define setup_spans_down_flat() \ |
1580 | s32 height = y_c - y_a; \ |
1581 | \ |
1582 | compute_edge_delta_x2(); \ |
9088aca1 |
1583 | setup_spans_down(half_left, half_right, none, no) \ |
a2cb152a |
1584 | |
1585 | void setup_spans_down_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1586 | vertex_struct *v_b, vertex_struct *v_c) |
1587 | { |
1588 | #if 0 |
1589 | setup_spans_down_a_(psx_gpu, v_a, v_b, v_c); |
1590 | return; |
1591 | #endif |
1592 | setup_spans_prologue(no); |
1593 | |
1594 | gvcreate_u32(x_starts, x_a, x_b); |
1595 | gvdup_n_u32(x_ends, x_c); |
1596 | |
1597 | setup_spans_down_flat() |
1598 | } |
1599 | |
1600 | void setup_spans_down_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1601 | vertex_struct *v_b, vertex_struct *v_c) |
1602 | { |
1603 | #if 0 |
1604 | setup_spans_down_b_(psx_gpu, v_a, v_b, v_c); |
1605 | return; |
1606 | #endif |
1607 | setup_spans_prologue(no) |
1608 | |
1609 | gvdup_n_u32(x_starts, x_a); |
1610 | gvcreate_u32(x_ends, x_b, x_c); |
1611 | |
1612 | setup_spans_down_flat() |
1613 | } |
1614 | |
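// Mixed case: vertex a lies vertically between b (above) and c (below).
// Spans are first emitted upward from a towards b, then the second edge set
// (edges_xy_b) is loaded and the remaining spans are emitted downward from a
// towards c.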
1615 | void setup_spans_up_down(psx_gpu_struct *psx_gpu, vertex_struct *v_a, |
1616 | vertex_struct *v_b, vertex_struct *v_c) |
1617 | { |
1618 | #if 0 |
1619 | setup_spans_up_down_(psx_gpu, v_a, v_b, v_c); |
1620 | return; |
1621 | #endif |
1622 | setup_spans_prologue(no); |
1623 | |
1624 | s32 y_b = v_b->y; |
1625 | s64 edge_alt; |
1626 | s32 edge_dx_dy_alt; |
1627 | u32 edge_shift_alt; |
1628 | |
1629 | s32 middle_y = y_a; |
1630 | s32 height_minor_a = y_a - y_b; |
1631 | s32 height_minor_b = y_c - y_a; |
1632 | s32 height_major = y_c - y_b; |
1633 | |
1634 | vec_2x64s edges_xy_b; |
1635 | vec_1x64s edges_xy_b_left; |
1636 | vec_2x32s edges_dx_dy_b; |
1637 | vec_2x32u edge_shifts_b; |
1638 | |
1639 | vec_2x32s height_increment; |
1640 | |
1641 | gvcreate_u32(x_starts, x_a, x_c); |
1642 | gvdup_n_u32(x_ends, x_b); |
1643 | |
1644 | compute_edge_delta_x3(x_a, height_minor_a, height_major); |
1645 | |
1646 | gvcreate_s32(height_increment, 0, height_minor_b); |
1647 | |
9088aca1 |
1648 | gvmlal_s32(edges_xy, edges_dx_dy, height_increment); |
a2cb152a |
1649 | |
1650 | gvcreate_s64(edges_xy_b_left, edge_alt); |
9088aca1 |
1651 | gvcombine_s64(edges_xy_b, edges_xy_b_left, gvhi(edges_xy)); |
a2cb152a |
1652 | |
1653 | edge_shifts_b = edge_shifts; |
1654 | gvmov_l_u32(edge_shifts_b, edge_shift_alt, 0); |
1655 | |
1656 | gvneg_s32(edges_dx_dy_b, edges_dx_dy); |
1657 | gvmov_l_s32(edges_dx_dy_b, edge_dx_dy_alt, 0); |
1658 | |
1659 | y_a--; |
1660 | |
1661 | if(y_b < psx_gpu->viewport_start_y) |
1662 | height_minor_a -= psx_gpu->viewport_start_y - y_b; |
1663 | |
1664 | clip = y_a - psx_gpu->viewport_end_y; |
1665 | if(clip > 0) |
1666 | { |
1667 | height_minor_a -= clip; |
1668 | y_a -= clip; |
1669 | setup_spans_clip(decrement, no); |
1670 | } |
1671 | |
1672 | setup_spans_prologue_b(); |
1673 | |
2d658c89 |
1674 | if (height_minor_a > 512) |
1675 | height_minor_a = 512; |
1676 | if (height_minor_a > 0) |
a2cb152a |
1677 | { |
1678 | u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32) |
1679 | | (u32)((y_a - 1) << 16) | (u16)y_a; |
1680 | gvcreate_u64(y_x4, y_x4_); |
9088aca1 |
1681 | gvaddw_s32(edges_xy, edges_xy, edges_dx_dy); |
1682 | setup_spans_adjust_edges_alternate_no(lo, hi); |
a2cb152a |
1683 | setup_spans_adjust_interpolants_up(); |
1684 | |
1685 | psx_gpu->num_spans = height_minor_a; |
1686 | while(height_minor_a > 0) |
1687 | { |
1688 | setup_spans_set_x4(none, up, no); |
1689 | height_minor_a -= 4; |
1690 | } |
1691 | |
1692 | span_edge_data += height_minor_a; |
1693 | span_uvrg_offset += height_minor_a; |
1694 | span_b_offset += height_minor_a; |
1695 | } |
1696 | |
9088aca1 |
1697 | edges_xy = edges_xy_b; |
a2cb152a |
1698 | edges_dx_dy = edges_dx_dy_b; |
1699 | edge_shifts = edge_shifts_b; |
1700 | |
1701 | gvld1q_u32(uvrg, psx_gpu->uvrg.e); |
1702 | b = psx_gpu->b; |
1703 | |
1704 | y_a = middle_y; |
1705 | |
1706 | if(y_c > psx_gpu->viewport_end_y) |
1707 | height_minor_b -= y_c - psx_gpu->viewport_end_y - 1; |
1708 | |
1709 | clip = psx_gpu->viewport_start_y - y_a; |
1710 | if(clip > 0) |
1711 | { |
1712 | height_minor_b -= clip; |
1713 | y_a += clip; |
1714 | setup_spans_clip(increment, no); |
1715 | } |
1716 | |
2d658c89 |
1717 | if (height_minor_b > 512) |
1718 | height_minor_b = 512; |
1719 | if (height_minor_b > 0) |
a2cb152a |
1720 | { |
1721 | u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32) |
1722 | | (u32)((y_a + 1) << 16) | (u16)y_a; |
1723 | gvcreate_u64(y_x4, y_x4_); |
9088aca1 |
1724 | setup_spans_adjust_edges_alternate_no(lo, hi); |
a2cb152a |
1725 | |
1726 | // FIXME: overflow corner case |
1727 | if(psx_gpu->num_spans + height_minor_b == MAX_SPANS) |
1728 | height_minor_b &= ~3; |
1729 | |
1730 | psx_gpu->num_spans += height_minor_b; |
1731 | while(height_minor_b > 0) |
1732 | { |
1733 | setup_spans_set_x4(none, down, no); |
1734 | height_minor_b -= 4; |
1735 | } |
ee060c58 |
1736 | if (psx_gpu->hacks_active & (AHACK_TEXTURE_ADJ_U | AHACK_TEXTURE_ADJ_V)) |
1737 | { |
1738 | vec_2x32u tmp; |
1739 | gvld1_u64(tmp, &span_uvrg_offset[height_minor_b - 2]); |
1740 | gvst1_u64(tmp, &span_uvrg_offset[height_minor_b - 1]); |
1741 | } |
a2cb152a |
1742 | } |
1743 | } |
1744 | |
1745 | |
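// setup_blocks_*: second stage of triangle rasterization. Each span produced
// by setup_spans is cut into 8-pixel blocks; the macros below compute
// per-block interpolants (u/v/r/g/b), dither offsets and the framebuffer
// pointer, storing them into block_structs for the later texture/shade/blend
// passes (indirect) or, for the untextured direct variants, writing pixels
// straight to the framebuffer.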
1746 | #define dither_table_entry_normal(value) \ |
1747 | (value) \ |
1748 | |
1749 | #define setup_blocks_load_msb_mask_indirect() \ |
1750 | |
1751 | #define setup_blocks_load_msb_mask_direct() \ |
1752 | vec_8x16u msb_mask; \ |
1753 | gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \ |
1754 | |
1755 | #define setup_blocks_variables_shaded_textured(target) \ |
1756 | vec_4x32u u_block; \ |
1757 | vec_4x32u v_block; \ |
1758 | vec_4x32u r_block; \ |
1759 | vec_4x32u g_block; \ |
1760 | vec_4x32u b_block; \ |
1761 | vec_4x32u uvrg_dx; \ |
1762 | vec_4x32u uvrg_dx4; \ |
1763 | vec_4x32u uvrg_dx8; \ |
1764 | vec_4x32u uvrg; \ |
1765 | vec_16x8u texture_mask; \ |
1766 | vec_8x8u texture_mask_lo, texture_mask_hi; \ |
1767 | u32 b_dx = psx_gpu->b_block_span.e[1]; \ |
1768 | u32 b_dx4 = b_dx << 2; \ |
1769 | u32 b_dx8 = b_dx << 3; \ |
1770 | u32 b; \ |
1771 | \ |
1772 | gvld1q_u32(uvrg_dx, psx_gpu->uvrg_dx.e); \ |
1773 | gvshlq_n_u32(uvrg_dx4, uvrg_dx, 2); \ |
1774 | gvshlq_n_u32(uvrg_dx8, uvrg_dx, 3); \ |
9088aca1 |
1775 | gvld2_u8_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width); \ |
a2cb152a |
1776 | gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi) \ |
1777 | |
1778 | #define setup_blocks_variables_shaded_untextured(target) \ |
1779 | vec_4x32u r_block; \ |
1780 | vec_4x32u g_block; \ |
1781 | vec_4x32u b_block; \ |
1782 | vec_4x32u rgb_dx; \ |
1783 | vec_2x32u rgb_dx_lo, rgb_dx_hi; \ |
1784 | vec_4x32u rgb_dx4; \ |
1785 | vec_4x32u rgb_dx8; \ |
1786 | vec_4x32u rgb; \ |
1787 | vec_2x32u rgb_lo, rgb_hi; \ |
1788 | \ |
1789 | vec_8x8u d64_0x07; \ |
1790 | vec_8x8u d64_1; \ |
1791 | vec_8x8u d64_4; \ |
1792 | vec_8x8u d64_128; \ |
1793 | \ |
1794 | gvdup_n_u8(d64_0x07, 0x07); \ |
1795 | gvdup_n_u8(d64_1, 1); \ |
1796 | gvdup_n_u8(d64_4, 4); \ |
9088aca1 |
1797 | gvdup_n_u8(d64_128, 128u); \ |
a2cb152a |
1798 | \ |
1799 | gvld1_u32(rgb_dx_lo, &psx_gpu->uvrg_dx.e[2]); \ |
1800 | gvcreate_u32(rgb_dx_hi, psx_gpu->b_block_span.e[1], 0); \ |
1801 | gvcombine_u32(rgb_dx, rgb_dx_lo, rgb_dx_hi); \ |
1802 | gvshlq_n_u32(rgb_dx4, rgb_dx, 2); \ |
1803 | gvshlq_n_u32(rgb_dx8, rgb_dx, 3) \ |
1804 | |
1805 | #define setup_blocks_variables_unshaded_textured(target) \ |
1806 | vec_4x32u u_block; \ |
1807 | vec_4x32u v_block; \ |
1808 | vec_2x32u uv_dx; \ |
1809 | vec_2x32u uv_dx4; \ |
1810 | vec_2x32u uv_dx8; \ |
1811 | vec_2x32u uv; \ |
1812 | vec_16x8u texture_mask; \ |
1813 | vec_8x8u texture_mask_lo, texture_mask_hi; \ |
1814 | \ |
1815 | gvld1_u32(uv_dx, psx_gpu->uvrg_dx.e); \ |
1816 | gvld1_u32(uv, psx_gpu->uvrg.e); \ |
1817 | gvshl_n_u32(uv_dx4, uv_dx, 2); \ |
1818 | gvshl_n_u32(uv_dx8, uv_dx, 3); \ |
9088aca1 |
1819 | gvld2_u8_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width); \ |
a2cb152a |
1820 | gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi) \ |
1821 | |
1822 | #define setup_blocks_variables_unshaded_untextured_direct() \ |
1823 | gvorrq(colors, colors, msb_mask) \ |
1824 | |
1825 | #define setup_blocks_variables_unshaded_untextured_indirect() \ |
1826 | |
1827 | #define setup_blocks_variables_unshaded_untextured(target) \ |
1828 | u32 color = psx_gpu->triangle_color; \ |
1829 | vec_8x16u colors; \ |
1830 | \ |
1831 | u32 color_r = color & 0xFF; \ |
1832 | u32 color_g = (color >> 8) & 0xFF; \ |
1833 | u32 color_b = (color >> 16) & 0xFF; \ |
1834 | \ |
1835 | color = (color_r >> 3) | ((color_g >> 3) << 5) | \ |
1836 | ((color_b >> 3) << 10); \ |
1837 | gvdupq_n_u16(colors, color); \ |
1838 | setup_blocks_variables_unshaded_untextured_##target() \ |
1839 | |
1840 | #define setup_blocks_span_initialize_dithered_textured() \ |
1841 | vec_8x16u dither_offsets; \ |
1842 | gvshll_n_s8(dither_offsets, dither_offsets_short, 4) \ |
1843 | |
1844 | #define setup_blocks_span_initialize_dithered_untextured() \ |
1845 | vec_8x8u dither_offsets; \ |
1846 | gvadd_u8(dither_offsets, dither_offsets_short, d64_4) \ |
1847 | |
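// Rotate the 32-bit dither row so the byte for (left_x & 3) comes first,
// broadcast it across the 8-pixel block, then scale it (<<4, matching the
// fixed-point modulation path) for textured blocks or bias it (+4) for
// untextured blocks.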
1848 | #define setup_blocks_span_initialize_dithered(texturing) \ |
1849 | u32 dither_row = psx_gpu->dither_table[y & 0x3]; \ |
1850 | u32 dither_shift = (span_edge_data->left_x & 0x3) * 8; \ |
1851 | vec_8x8s dither_offsets_short; \ |
1852 | \ |
1853 | dither_row = \ |
f707f14b |
1854 | (dither_row >> dither_shift) | ((u64)dither_row << (32 - dither_shift)); \ |
a2cb152a |
1855 | gvdup_n_u32(dither_offsets_short, dither_row); \ |
1856 | setup_blocks_span_initialize_dithered_##texturing() \ |
1857 | |
1858 | #define setup_blocks_span_initialize_undithered(texturing) \ |
1859 | |
1860 | #define setup_blocks_span_initialize_shaded_textured() \ |
1861 | { \ |
1862 | u32 left_x = span_edge_data->left_x; \ |
1863 | vec_4x32u block_span; \ |
1864 | vec_4x32u v_left_x; \ |
1865 | \ |
1866 | gvld1q_u32(uvrg, span_uvrg_offset); \ |
1867 | gvdupq_n_u32(v_left_x, left_x); \ |
1868 | gvmlaq_u32(uvrg, uvrg_dx, v_left_x); \ |
1869 | b = *span_b_offset; \ |
1870 | b += b_dx * left_x; \ |
1871 | \ |
1872 | gvdupq_l_u32(u_block, gvlo(uvrg), 0); \ |
1873 | gvdupq_l_u32(v_block, gvlo(uvrg), 1); \ |
1874 | gvdupq_l_u32(r_block, gvhi(uvrg), 0); \ |
1875 | gvdupq_l_u32(g_block, gvhi(uvrg), 1); \ |
1876 | gvdupq_n_u32(b_block, b); \ |
1877 | \ |
1878 | gvld1q_u32(block_span, psx_gpu->u_block_span.e); \ |
1879 | gvaddq_u32(u_block, u_block, block_span); \ |
1880 | gvld1q_u32(block_span, psx_gpu->v_block_span.e); \ |
1881 | gvaddq_u32(v_block, v_block, block_span); \ |
1882 | gvld1q_u32(block_span, psx_gpu->r_block_span.e); \ |
1883 | gvaddq_u32(r_block, r_block, block_span); \ |
1884 | gvld1q_u32(block_span, psx_gpu->g_block_span.e); \ |
1885 | gvaddq_u32(g_block, g_block, block_span); \ |
1886 | gvld1q_u32(block_span, psx_gpu->b_block_span.e); \ |
1887 | gvaddq_u32(b_block, b_block, block_span); \ |
1888 | } |
1889 | |
1890 | #define setup_blocks_span_initialize_shaded_untextured() \ |
1891 | { \ |
1892 | u32 left_x = span_edge_data->left_x; \ |
1893 | u32 *span_uvrg_offset_high = (u32 *)span_uvrg_offset + 2; \ |
1894 | vec_4x32u block_span; \ |
1895 | vec_4x32u v_left_x; \ |
1896 | \ |
1897 | gvld1_u32(rgb_lo, span_uvrg_offset_high); \ |
1898 | gvcreate_u32(rgb_hi, *span_b_offset, 0); \ |
1899 | gvcombine_u32(rgb, rgb_lo, rgb_hi); \ |
1900 | gvdupq_n_u32(v_left_x, left_x); \ |
1901 | gvmlaq_u32(rgb, rgb_dx, v_left_x); \ |
1902 | \ |
1903 | gvdupq_l_u32(r_block, gvlo(rgb), 0); \ |
1904 | gvdupq_l_u32(g_block, gvlo(rgb), 1); \ |
1905 | gvdupq_l_u32(b_block, gvhi(rgb), 0); \ |
1906 | \ |
1907 | gvld1q_u32(block_span, psx_gpu->r_block_span.e); \ |
1908 | gvaddq_u32(r_block, r_block, block_span); \ |
1909 | gvld1q_u32(block_span, psx_gpu->g_block_span.e); \ |
1910 | gvaddq_u32(g_block, g_block, block_span); \ |
1911 | gvld1q_u32(block_span, psx_gpu->b_block_span.e); \ |
1912 | gvaddq_u32(b_block, b_block, block_span); \ |
1913 | } \ |
1914 | |
1915 | #define setup_blocks_span_initialize_unshaded_textured() \ |
1916 | { \ |
1917 | u32 left_x = span_edge_data->left_x; \ |
1918 | vec_4x32u block_span; \ |
1919 | vec_2x32u v_left_x; \ |
1920 | \ |
1921 | gvld1_u32(uv, span_uvrg_offset); \ |
1922 | gvdup_n_u32(v_left_x, left_x); \ |
1923 | gvmla_u32(uv, uv_dx, v_left_x); \ |
1924 | \ |
1925 | gvdupq_l_u32(u_block, uv, 0); \ |
1926 | gvdupq_l_u32(v_block, uv, 1); \ |
1927 | \ |
1928 | gvld1q_u32(block_span, psx_gpu->u_block_span.e); \ |
1929 | gvaddq_u32(u_block, u_block, block_span); \ |
1930 | gvld1q_u32(block_span, psx_gpu->v_block_span.e); \ |
1931 | gvaddq_u32(v_block, v_block, block_span); \ |
91cb0908 |
1932 | (void)(span_b_offset); \ |
a2cb152a |
1933 | } \ |
1934 | |
1935 | #define setup_blocks_span_initialize_unshaded_untextured() \ |
91cb0908 |
1936 | (void)(span_uvrg_offset); \ |
1937 | (void)(span_b_offset) \ |
a2cb152a |
1938 | |
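// Swizzle u/v for the 4bpp texture cache layout: swap the high nibble of u
// with the low nibble of v (vsli/vsri pair) so the byte pair addresses the
// nibble-interleaved cache directly.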
1939 | #define setup_blocks_texture_swizzled() \ |
1940 | { \ |
1941 | vec_8x8u u_saved = u; \ |
1942 | gvsli_n_u8(u, v, 4); \ |
1943 | gvsri_n_u8(v, u_saved, 4); \ |
1944 | } \ |
1945 | |
1946 | #define setup_blocks_texture_unswizzled() \ |
1947 | |
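// setup_blocks_store_*: convert the 16.16 fixed-point block interpolants into
// 8 per-pixel integer values (low half from the current block, high half by
// adding the 4-pixel step), advance the blocks by the 8-pixel step, mask u/v
// by the texture mask and store everything into the current block_struct.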
1948 | #define setup_blocks_store_shaded_textured(swizzling, dithering, target, \ |
1949 | edge_type) \ |
1950 | { \ |
1951 | vec_8x16u u_whole; \ |
1952 | vec_8x16u v_whole; \ |
1953 | vec_8x16u r_whole; \ |
1954 | vec_8x16u g_whole; \ |
1955 | vec_8x16u b_whole; \ |
1956 | vec_4x16u u_whole_lo, u_whole_hi; \ |
1957 | vec_4x16u v_whole_lo, v_whole_hi; \ |
1958 | vec_4x16u r_whole_lo, r_whole_hi; \ |
1959 | vec_4x16u g_whole_lo, g_whole_hi; \ |
1960 | vec_4x16u b_whole_lo, b_whole_hi; \ |
1961 | \ |
1962 | vec_8x8u u; \ |
1963 | vec_8x8u v; \ |
1964 | vec_8x8u r; \ |
1965 | vec_8x8u g; \ |
1966 | vec_8x8u b; \ |
1967 | \ |
1968 | vec_4x32u dx4; \ |
1969 | vec_4x32u dx8; \ |
1970 | \ |
1971 | gvshrn_n_u32(u_whole_lo, u_block, 16); \ |
1972 | gvshrn_n_u32(v_whole_lo, v_block, 16); \ |
1973 | gvshrn_n_u32(r_whole_lo, r_block, 16); \ |
1974 | gvshrn_n_u32(g_whole_lo, g_block, 16); \ |
1975 | gvshrn_n_u32(b_whole_lo, b_block, 16); \ |
1976 | \ |
1977 | gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 0); \ |
1978 | gvaddhn_u32(u_whole_hi, u_block, dx4); \ |
1979 | gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 1); \ |
1980 | gvaddhn_u32(v_whole_hi, v_block, dx4); \ |
1981 | gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 0); \ |
1982 | gvaddhn_u32(r_whole_hi, r_block, dx4); \ |
1983 | gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 1); \ |
1984 | gvaddhn_u32(g_whole_hi, g_block, dx4); \ |
1985 | gvdupq_n_u32(dx4, b_dx4); \ |
1986 | gvaddhn_u32(b_whole_hi, b_block, dx4); \ |
1987 | \ |
1988 | gvcombine_u16(u_whole, u_whole_lo, u_whole_hi); \ |
1989 | gvcombine_u16(v_whole, v_whole_lo, v_whole_hi); \ |
1990 | gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \ |
1991 | gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \ |
1992 | gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \ |
1993 | gvmovn_u16(u, u_whole); \ |
1994 | gvmovn_u16(v, v_whole); \ |
1995 | gvmovn_u16(r, r_whole); \ |
1996 | gvmovn_u16(g, g_whole); \ |
1997 | gvmovn_u16(b, b_whole); \ |
1998 | \ |
1999 | gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 0); \ |
2000 | gvaddq_u32(u_block, u_block, dx8); \ |
2001 | gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 1); \ |
2002 | gvaddq_u32(v_block, v_block, dx8); \ |
2003 | gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 0); \ |
2004 | gvaddq_u32(r_block, r_block, dx8); \ |
2005 | gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 1); \ |
2006 | gvaddq_u32(g_block, g_block, dx8); \ |
2007 | gvdupq_n_u32(dx8, b_dx8); \ |
2008 | gvaddq_u32(b_block, b_block, dx8); \ |
2009 | \ |
2010 | gvand(u, u, gvlo(texture_mask)); \ |
2011 | gvand(v, v, gvhi(texture_mask)); \ |
2012 | setup_blocks_texture_##swizzling(); \ |
2013 | \ |
2014 | gvst2_u8(u, v, (u8 *)block->uv.e); \ |
2015 | gvst1_u8(r, block->r.e); \ |
2016 | gvst1_u8(g, block->g.e); \ |
2017 | gvst1_u8(b, block->b.e); \ |
2018 | gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e); \ |
2019 | block->fb_ptr = fb_ptr; \ |
2020 | } \ |
2021 | |
2022 | #define setup_blocks_store_unshaded_textured(swizzling, dithering, target, \ |
2023 | edge_type) \ |
2024 | { \ |
2025 | vec_8x16u u_whole; \ |
2026 | vec_8x16u v_whole; \ |
2027 | vec_4x16u u_whole_lo, u_whole_hi; \ |
2028 | vec_4x16u v_whole_lo, v_whole_hi; \ |
2029 | \ |
2030 | vec_8x8u u; \ |
2031 | vec_8x8u v; \ |
2032 | \ |
2033 | vec_4x32u dx4; \ |
2034 | vec_4x32u dx8; \ |
2035 | \ |
2036 | gvshrn_n_u32(u_whole_lo, u_block, 16); \ |
2037 | gvshrn_n_u32(v_whole_lo, v_block, 16); \ |
2038 | \ |
2039 | gvdupq_l_u32(dx4, uv_dx4, 0); \ |
2040 | gvaddhn_u32(u_whole_hi, u_block, dx4); \ |
2041 | gvdupq_l_u32(dx4, uv_dx4, 1); \ |
2042 | gvaddhn_u32(v_whole_hi, v_block, dx4); \ |
2043 | \ |
2044 | gvcombine_u16(u_whole, u_whole_lo, u_whole_hi); \ |
2045 | gvcombine_u16(v_whole, v_whole_lo, v_whole_hi); \ |
2046 | gvmovn_u16(u, u_whole); \ |
2047 | gvmovn_u16(v, v_whole); \ |
2048 | \ |
2049 | gvdupq_l_u32(dx8, uv_dx8, 0); \ |
2050 | gvaddq_u32(u_block, u_block, dx8); \ |
2051 | gvdupq_l_u32(dx8, uv_dx8, 1); \ |
2052 | gvaddq_u32(v_block, v_block, dx8); \ |
2053 | \ |
2054 | gvand(u, u, gvlo(texture_mask)); \ |
2055 | gvand(v, v, gvhi(texture_mask)); \ |
2056 | setup_blocks_texture_##swizzling(); \ |
2057 | \ |
2058 | gvst2_u8(u, v, (u8 *)block->uv.e); \ |
2059 | gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e); \ |
2060 | block->fb_ptr = fb_ptr; \ |
2061 | } \ |
2062 | |
2063 | #define setup_blocks_store_shaded_untextured_dithered() \ |
2064 | gvqadd_u8(r, r, dither_offsets); \ |
2065 | gvqadd_u8(g, g, dither_offsets); \ |
2066 | gvqadd_u8(b, b, dither_offsets); \ |
2067 | \ |
2068 | gvqsub_u8(r, r, d64_4); \ |
2069 | gvqsub_u8(g, g, d64_4); \ |
2070 | gvqsub_u8(b, b, d64_4) \ |
2071 | |
2072 | #define setup_blocks_store_shaded_untextured_undithered() \ |
2073 | |
2074 | #define setup_blocks_store_untextured_pixels_indirect_full(_pixels) \ |
2075 | gvst1q_u16(_pixels, block->pixels.e); \ |
2076 | block->fb_ptr = fb_ptr \ |
2077 | |
2078 | #define setup_blocks_store_untextured_pixels_indirect_edge(_pixels) \ |
2079 | gvst1q_u16(_pixels, block->pixels.e); \ |
2080 | block->fb_ptr = fb_ptr \ |
2081 | |
2082 | #define setup_blocks_store_shaded_untextured_seed_pixels_indirect() \ |
2083 | gvmull_u8(pixels, r, d64_1) \ |
2084 | |
2085 | #define setup_blocks_store_untextured_pixels_direct_full(_pixels) \ |
2086 | gvst1q_u16(_pixels, fb_ptr) \ |
2087 | |
2088 | #define setup_blocks_store_untextured_pixels_direct_edge(_pixels) \ |
2089 | { \ |
2090 | vec_8x16u fb_pixels; \ |
2091 | vec_8x16u draw_mask; \ |
2092 | vec_8x16u test_mask; \ |
2093 | \ |
2094 | gvld1q_u16(test_mask, psx_gpu->test_mask.e); \ |
2095 | gvld1q_u16(fb_pixels, fb_ptr); \ |
2096 | gvdupq_n_u16(draw_mask, span_edge_data->right_mask); \ |
2097 | gvtstq_u16(draw_mask, draw_mask, test_mask); \ |
2098 | gvbifq(fb_pixels, _pixels, draw_mask); \ |
2099 | gvst1q_u16(fb_pixels, fb_ptr); \ |
2100 | } \ |
2101 | |
2102 | #define setup_blocks_store_shaded_untextured_seed_pixels_direct() \ |
2103 | pixels = msb_mask; \ |
2104 | gvmlal_u8(pixels, r, d64_1) \ |
2105 | |
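// Shaded untextured stores: optionally apply dither offsets (with
// saturation), then pack r/g/b into 15-bit pixels using widening
// multiply-accumulates by 1, 4 and 128;
// per pixel this is roughly: pixel = (r >> 3) | ((g >> 3) << 5) | ((b >> 3) << 10).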
2106 | #define setup_blocks_store_shaded_untextured(swizzling, dithering, target, \ |
2107 | edge_type) \ |
2108 | { \ |
2109 | vec_8x16u r_whole; \ |
2110 | vec_8x16u g_whole; \ |
2111 | vec_8x16u b_whole; \ |
2112 | vec_4x16u r_whole_lo, r_whole_hi; \ |
2113 | vec_4x16u g_whole_lo, g_whole_hi; \ |
2114 | vec_4x16u b_whole_lo, b_whole_hi; \ |
2115 | \ |
2116 | vec_8x8u r; \ |
2117 | vec_8x8u g; \ |
2118 | vec_8x8u b; \ |
2119 | \ |
2120 | vec_4x32u dx4; \ |
2121 | vec_4x32u dx8; \ |
2122 | \ |
2123 | vec_8x16u pixels; \ |
2124 | \ |
2125 | gvshrn_n_u32(r_whole_lo, r_block, 16); \ |
2126 | gvshrn_n_u32(g_whole_lo, g_block, 16); \ |
2127 | gvshrn_n_u32(b_whole_lo, b_block, 16); \ |
2128 | \ |
2129 | gvdupq_l_u32(dx4, gvlo(rgb_dx4), 0); \ |
2130 | gvaddhn_u32(r_whole_hi, r_block, dx4); \ |
2131 | gvdupq_l_u32(dx4, gvlo(rgb_dx4), 1); \ |
2132 | gvaddhn_u32(g_whole_hi, g_block, dx4); \ |
2133 | gvdupq_l_u32(dx4, gvhi(rgb_dx4), 0); \ |
2134 | gvaddhn_u32(b_whole_hi, b_block, dx4); \ |
2135 | \ |
2136 | gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \ |
2137 | gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \ |
2138 | gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \ |
2139 | gvmovn_u16(r, r_whole); \ |
2140 | gvmovn_u16(g, g_whole); \ |
2141 | gvmovn_u16(b, b_whole); \ |
2142 | \ |
2143 | gvdupq_l_u32(dx8, gvlo(rgb_dx8), 0); \ |
2144 | gvaddq_u32(r_block, r_block, dx8); \ |
2145 | gvdupq_l_u32(dx8, gvlo(rgb_dx8), 1); \ |
2146 | gvaddq_u32(g_block, g_block, dx8); \ |
2147 | gvdupq_l_u32(dx8, gvhi(rgb_dx8), 0); \ |
2148 | gvaddq_u32(b_block, b_block, dx8); \ |
2149 | \ |
2150 | setup_blocks_store_shaded_untextured_##dithering(); \ |
2151 | \ |
2152 | gvshr_n_u8(r, r, 3); \ |
2153 | gvbic(g, g, d64_0x07); \ |
2154 | gvbic(b, b, d64_0x07); \ |
2155 | \ |
2156 | setup_blocks_store_shaded_untextured_seed_pixels_##target(); \ |
2157 | gvmlal_u8(pixels, g, d64_4); \ |
2158 | gvmlal_u8(pixels, b, d64_128); \ |
2159 | \ |
2160 | setup_blocks_store_untextured_pixels_##target##_##edge_type(pixels); \ |
2161 | } \ |
2162 | |
2163 | #define setup_blocks_store_unshaded_untextured(swizzling, dithering, target, \ |
2164 | edge_type) \ |
2165 | setup_blocks_store_untextured_pixels_##target##_##edge_type(colors) \ |
2166 | |
2167 | #define setup_blocks_store_draw_mask_textured_indirect(_block, bits) \ |
2168 | (_block)->draw_mask_bits = bits \ |
2169 | |
2170 | #define setup_blocks_store_draw_mask_untextured_indirect(_block, bits) \ |
2171 | { \ |
2172 | vec_8x16u bits_mask; \ |
2173 | vec_8x16u test_mask; \ |
2174 | \ |
2175 | gvld1q_u16(test_mask, psx_gpu->test_mask.e); \ |
2176 | gvdupq_n_u16(bits_mask, bits); \ |
2177 | gvtstq_u16(bits_mask, bits_mask, test_mask); \ |
2178 | gvst1q_u16(bits_mask, (_block)->draw_mask.e); \ |
2179 | } \ |
2180 | |
2181 | #define setup_blocks_store_draw_mask_untextured_direct(_block, bits) \ |
91cb0908 |
2182 | (void)(_block) \ |
a2cb152a |
2183 | |
ee060c58 |
2184 | #define setup_blocks_uv_adj_hack_untextured(_block, edge_data, uvrg_offset) \ |
2185 | |
2186 | #define setup_blocks_uv_adj_hack_textured(_block, edge_data, uvrg_offset) \ |
2187 | { \ |
2188 | u32 m_ = AHACK_TEXTURE_ADJ_U | AHACK_TEXTURE_ADJ_V; \ |
2189 | if (unlikely(psx_gpu->hacks_active & m_)) \ |
2190 | setup_blocks_uv_adj_hack(psx_gpu, _block, edge_data, (void *)uvrg_offset); \ |
2191 | } \ |
2192 | |
a2cb152a |
2193 | #define setup_blocks_add_blocks_indirect() \ |
2194 | num_blocks += span_num_blocks; \ |
2195 | \ |
2196 | if(num_blocks > MAX_BLOCKS) \ |
2197 | { \ |
2198 | psx_gpu->num_blocks = num_blocks - span_num_blocks; \ |
2199 | flush_render_block_buffer(psx_gpu); \ |
2200 | num_blocks = span_num_blocks; \ |
2201 | block = psx_gpu->blocks; \ |
2202 | } \ |
2203 | |
2204 | #define setup_blocks_add_blocks_direct() \ |
2205 | |
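// setup_blocks_do: main span-to-block loop. For every span with at least one
// block it seeds the per-span interpolants at left_x, emits all full 8-pixel
// blocks, then one final edge block tagged with the span's right_mask; the
// indirect variants flush the block buffer when it would overflow MAX_BLOCKS.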
2206 | #define setup_blocks_do(shading, texturing, dithering, sw, target) \ |
2207 | setup_blocks_load_msb_mask_##target(); \ |
2208 | setup_blocks_variables_##shading##_##texturing(target); \ |
2209 | \ |
2210 | edge_data_struct *span_edge_data = psx_gpu->span_edge_data; \ |
2211 | vec_4x32u *span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \ |
2212 | u32 *span_b_offset = psx_gpu->span_b_offset; \ |
2213 | \ |
2214 | block_struct *block = psx_gpu->blocks + psx_gpu->num_blocks; \ |
2215 | \ |
2216 | u32 num_spans = psx_gpu->num_spans; \ |
2217 | \ |
9088aca1 |
2218 | u16 * __restrict__ fb_ptr; \ |
a2cb152a |
2219 | u32 y; \ |
2220 | \ |
2221 | u32 num_blocks = psx_gpu->num_blocks; \ |
2222 | u32 span_num_blocks; \ |
2223 | \ |
2224 | while(num_spans) \ |
2225 | { \ |
2226 | span_num_blocks = span_edge_data->num_blocks; \ |
2227 | if(span_num_blocks) \ |
2228 | { \ |
2229 | y = span_edge_data->y; \ |
2230 | fb_ptr = psx_gpu->vram_out_ptr + span_edge_data->left_x + (y * 1024); \ |
2231 | \ |
2232 | setup_blocks_span_initialize_##shading##_##texturing(); \ |
2233 | setup_blocks_span_initialize_##dithering(texturing); \ |
2234 | \ |
2235 | setup_blocks_add_blocks_##target(); \ |
2236 | \ |
a2cb152a |
2237 | span_num_blocks--; \ |
2238 | while(span_num_blocks) \ |
2239 | { \ |
2240 | setup_blocks_store_##shading##_##texturing(sw, dithering, target, \ |
2241 | full); \ |
2242 | setup_blocks_store_draw_mask_##texturing##_##target(block, 0x00); \ |
2243 | \ |
2244 | fb_ptr += 8; \ |
2245 | block++; \ |
2246 | span_num_blocks--; \ |
2247 | } \ |
2248 | \ |
2249 | setup_blocks_store_##shading##_##texturing(sw, dithering, target, edge); \ |
2250 | setup_blocks_store_draw_mask_##texturing##_##target(block, \ |
2251 | span_edge_data->right_mask); \ |
ee060c58 |
2252 | setup_blocks_uv_adj_hack_##texturing(block, span_edge_data, \ |
2253 | span_uvrg_offset); \ |
a2cb152a |
2254 | \ |
2255 | block++; \ |
2256 | } \ |
2257 | \ |
2258 | num_spans--; \ |
2259 | span_edge_data++; \ |
2260 | span_uvrg_offset++; \ |
2261 | span_b_offset++; \ |
2262 | } \ |
2263 | \ |
2264 | psx_gpu->num_blocks = num_blocks \ |
2265 | |
2266 | void setup_blocks_shaded_textured_dithered_swizzled_indirect(psx_gpu_struct |
2267 | *psx_gpu) |
2268 | { |
2269 | #if 0 |
2270 | setup_blocks_shaded_textured_dithered_swizzled_indirect_(psx_gpu); |
2271 | return; |
2272 | #endif |
2273 | setup_blocks_do(shaded, textured, dithered, swizzled, indirect); |
2274 | } |
2275 | |
2276 | void setup_blocks_shaded_textured_dithered_unswizzled_indirect(psx_gpu_struct |
2277 | *psx_gpu) |
2278 | { |
2279 | #if 0 |
2280 | setup_blocks_shaded_textured_dithered_unswizzled_indirect_(psx_gpu); |
2281 | return; |
2282 | #endif |
2283 | setup_blocks_do(shaded, textured, dithered, unswizzled, indirect); |
2284 | } |
2285 | |
2286 | void setup_blocks_unshaded_textured_dithered_swizzled_indirect(psx_gpu_struct |
2287 | *psx_gpu) |
2288 | { |
2289 | #if 0 |
2290 | setup_blocks_unshaded_textured_dithered_swizzled_indirect_(psx_gpu); |
2291 | return; |
2292 | #endif |
2293 | setup_blocks_do(unshaded, textured, dithered, swizzled, indirect); |
2294 | } |
2295 | |
2296 | void setup_blocks_unshaded_textured_dithered_unswizzled_indirect(psx_gpu_struct |
2297 | *psx_gpu) |
2298 | { |
2299 | #if 0 |
2300 | setup_blocks_unshaded_textured_dithered_unswizzled_indirect_(psx_gpu); |
2301 | return; |
2302 | #endif |
2303 | setup_blocks_do(unshaded, textured, dithered, unswizzled, indirect); |
2304 | } |
2305 | |
2306 | void setup_blocks_unshaded_untextured_undithered_unswizzled_indirect( |
2307 | psx_gpu_struct *psx_gpu) |
2308 | { |
2309 | #if 0 |
2310 | setup_blocks_unshaded_untextured_undithered_unswizzled_indirect_(psx_gpu); |
2311 | return; |
2312 | #endif |
2313 | setup_blocks_do(unshaded, untextured, undithered, unswizzled, indirect); |
2314 | } |
2315 | |
2316 | void setup_blocks_unshaded_untextured_undithered_unswizzled_direct( |
2317 | psx_gpu_struct *psx_gpu) |
2318 | { |
2319 | #if 0 |
2320 | setup_blocks_unshaded_untextured_undithered_unswizzled_direct_(psx_gpu); |
2321 | return; |
2322 | #endif |
2323 | setup_blocks_do(unshaded, untextured, undithered, unswizzled, direct); |
2324 | } |
2325 | |
2326 | void setup_blocks_shaded_untextured_undithered_unswizzled_indirect(psx_gpu_struct |
2327 | *psx_gpu) |
2328 | { |
2329 | #if 0 |
2330 | setup_blocks_shaded_untextured_undithered_unswizzled_indirect_(psx_gpu); |
2331 | return; |
2332 | #endif |
2333 | setup_blocks_do(shaded, untextured, undithered, unswizzled, indirect); |
2334 | } |
2335 | |
2336 | void setup_blocks_shaded_untextured_dithered_unswizzled_indirect(psx_gpu_struct |
2337 | *psx_gpu) |
2338 | { |
2339 | #if 0 |
2340 | setup_blocks_shaded_untextured_dithered_unswizzled_indirect_(psx_gpu); |
2341 | return; |
2342 | #endif |
2343 | setup_blocks_do(shaded, untextured, dithered, unswizzled, indirect); |
2344 | } |
2345 | |
2346 | void setup_blocks_shaded_untextured_undithered_unswizzled_direct( |
2347 | psx_gpu_struct *psx_gpu) |
2348 | { |
2349 | #if 0 |
2350 | setup_blocks_shaded_untextured_undithered_unswizzled_direct_(psx_gpu); |
2351 | return; |
2352 | #endif |
2353 | setup_blocks_do(shaded, untextured, undithered, unswizzled, direct); |
2354 | } |
2355 | |
2356 | void setup_blocks_shaded_untextured_dithered_unswizzled_direct(psx_gpu_struct |
2357 | *psx_gpu) |
2358 | { |
2359 | #if 0 |
2360 | setup_blocks_shaded_untextured_dithered_unswizzled_direct_(psx_gpu); |
2361 | return; |
2362 | #endif |
2363 | setup_blocks_do(shaded, untextured, dithered, unswizzled, direct); |
2364 | } |
2365 | |
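// Rebuilds the 4bpp texture cache page: walks the page as 16x16 tiles and
// splits each VRAM byte (two 4-bit texels) into two bytes of the cache so
// texture_blocks_4bpp can index texels directly.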
2366 | static void update_texture_4bpp_cache(psx_gpu_struct *psx_gpu) |
2367 | { |
2368 | u32 current_texture_page = psx_gpu->current_texture_page; |
2369 | u8 *texture_page_ptr = psx_gpu->texture_page_base; |
2370 | const u16 *vram_ptr = psx_gpu->vram_ptr; |
2371 | u32 tile_x, tile_y; |
2372 | u32 sub_y; |
2373 | vec_8x16u c_0x00f0; |
2374 | |
2375 | vram_ptr += (current_texture_page >> 4) * 256 * 1024; |
2376 | vram_ptr += (current_texture_page & 0xF) * 64; |
2377 | |
2378 | gvdupq_n_u16(c_0x00f0, 0x00f0); |
2379 | |
2380 | psx_gpu->dirty_textures_4bpp_mask &= ~(psx_gpu->current_texture_mask); |
2381 | |
2382 | for (tile_y = 16; tile_y; tile_y--) |
2383 | { |
2384 | for (tile_x = 16; tile_x; tile_x--) |
2385 | { |
2386 | for (sub_y = 8; sub_y; sub_y--) |
2387 | { |
2388 | vec_8x8u texel_block_a, texel_block_b; |
2389 | vec_8x16u texel_block_expanded_a, texel_block_expanded_b; |
2390 | vec_8x16u texel_block_expanded_c, texel_block_expanded_d; |
2391 | vec_8x16u texel_block_expanded_ab, texel_block_expanded_cd; |
2392 | |
2393 | gvld1_u8(texel_block_a, (u8 *)vram_ptr); vram_ptr += 1024; |
2394 | gvld1_u8(texel_block_b, (u8 *)vram_ptr); vram_ptr += 1024; |
2395 | |
2396 | gvmovl_u8(texel_block_expanded_a, texel_block_a); |
2397 | gvshll_n_u8(texel_block_expanded_b, texel_block_a, 4); |
2398 | gvmovl_u8(texel_block_expanded_c, texel_block_b); |
2399 | gvshll_n_u8(texel_block_expanded_d, texel_block_b, 4); |
2400 | |
2401 | gvbicq(texel_block_expanded_a, texel_block_expanded_a, c_0x00f0); |
2402 | gvbicq(texel_block_expanded_b, texel_block_expanded_b, c_0x00f0); |
2403 | gvbicq(texel_block_expanded_c, texel_block_expanded_c, c_0x00f0); |
2404 | gvbicq(texel_block_expanded_d, texel_block_expanded_d, c_0x00f0); |
2405 | |
2406 | gvorrq(texel_block_expanded_ab, texel_block_expanded_a, texel_block_expanded_b); |
2407 | gvorrq(texel_block_expanded_cd, texel_block_expanded_c, texel_block_expanded_d); |
2408 | |
2409 | gvst1q_2_pi_u32(texel_block_expanded_ab, texel_block_expanded_cd, texture_page_ptr); |
2410 | } |
2411 | |
2412 | vram_ptr -= (1024 * 16) - 4; |
2413 | } |
2414 | |
2415 | vram_ptr += (16 * 1024) - (4 * 16); |
2416 | } |
2417 | } |
2418 | |
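// Copies one half of an 8bpp texture page (a 64-halfword-wide, 256-line slice
// of VRAM) into the texture cache, one 16x16-texel tile at a time.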
2419 | void update_texture_8bpp_cache_slice(psx_gpu_struct *psx_gpu, |
2420 | u32 texture_page) |
2421 | { |
2422 | #if 0 |
2423 | update_texture_8bpp_cache_slice_(psx_gpu, texture_page); |
2424 | return; |
2425 | #endif |
2426 | u16 *texture_page_ptr = psx_gpu->texture_page_base; |
2427 | u16 *vram_ptr = psx_gpu->vram_ptr; |
2428 | |
2429 | u32 tile_x, tile_y; |
2430 | u32 sub_y; |
2431 | |
2432 | vram_ptr += (texture_page >> 4) * 256 * 1024; |
2433 | vram_ptr += (texture_page & 0xF) * 64; |
2434 | |
2435 | if((texture_page ^ psx_gpu->current_texture_page) & 0x1) |
2436 | texture_page_ptr += (8 * 16) * 8; |
2437 | |
2438 | for (tile_y = 16; tile_y; tile_y--) |
2439 | { |
2440 | for (tile_x = 8; tile_x; tile_x--) |
2441 | { |
2442 | for (sub_y = 4; sub_y; sub_y--) |
2443 | { |
2444 | vec_4x32u texels_a, texels_b, texels_c, texels_d = {}; |
2445 | gvld1q_u32(texels_a, vram_ptr); vram_ptr += 1024; |
2446 | gvld1q_u32(texels_b, vram_ptr); vram_ptr += 1024; |
2447 | gvld1q_u32(texels_c, vram_ptr); vram_ptr += 1024; |
2448 | gvld1q_u32(texels_d, vram_ptr); vram_ptr += 1024; |
2449 | |
2450 | gvst1q_2_pi_u32(texels_a, texels_b, texture_page_ptr); |
2451 | gvst1q_2_pi_u32(texels_c, texels_d, texture_page_ptr); |
2452 | } |
2453 | |
2454 | vram_ptr -= (1024 * 16) - 8; |
2455 | } |
2456 | |
2457 | vram_ptr -= (8 * 8); |
2458 | vram_ptr += (16 * 1024); |
2459 | |
2460 | texture_page_ptr += (8 * 16) * 8; |
2461 | } |
2462 | } |
2463 | |
2464 | void texture_blocks_untextured(psx_gpu_struct *psx_gpu) |
2465 | { |
2466 | } |
2467 | |
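// 4bpp texturing: the cached texture page holds one byte per texel, used as
// an index into the 16-entry CLUT held in two table-lookup registers (low and
// high byte planes), yielding 8 16-bit texels per block.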
2468 | void texture_blocks_4bpp(psx_gpu_struct *psx_gpu) |
2469 | { |
2470 | #if 0 |
2471 | texture_blocks_4bpp_(psx_gpu); |
2472 | return; |
2473 | #endif |
2474 | block_struct *block = psx_gpu->blocks; |
2475 | u32 num_blocks = psx_gpu->num_blocks; |
2476 | |
2477 | vec_8x8u texels_low; |
2478 | vec_8x8u texels_high; |
2479 | |
2480 | vec_16x8u clut_low; |
2481 | vec_16x8u clut_high; |
2482 | |
2483 | const u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr; |
2484 | |
2485 | gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr); |
2486 | |
2487 | if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask) |
2488 | update_texture_4bpp_cache(psx_gpu); |
2489 | |
2490 | while(num_blocks) |
2491 | { |
2492 | vec_8x8u texels = |
2493 | { |
2494 | .u8 = |
2495 | { |
2496 | texture_ptr_8bpp[block->uv.e[0]], |
2497 | texture_ptr_8bpp[block->uv.e[1]], |
2498 | texture_ptr_8bpp[block->uv.e[2]], |
2499 | texture_ptr_8bpp[block->uv.e[3]], |
2500 | texture_ptr_8bpp[block->uv.e[4]], |
2501 | texture_ptr_8bpp[block->uv.e[5]], |
2502 | texture_ptr_8bpp[block->uv.e[6]], |
2503 | texture_ptr_8bpp[block->uv.e[7]] |
2504 | } |
2505 | }; |
2506 | |
2507 | gvtbl2_u8(texels_low, clut_low, texels); |
2508 | gvtbl2_u8(texels_high, clut_high, texels); |
2509 | |
2510 | gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); |
2511 | |
2512 | num_blocks--; |
2513 | block++; |
2514 | } |
2515 | } |
2516 | |
2517 | void texture_blocks_8bpp(psx_gpu_struct *psx_gpu) |
2518 | { |
2519 | #if 0 |
2520 | texture_blocks_8bpp_(psx_gpu); |
2521 | return; |
2522 | #endif |
2523 | u32 num_blocks = psx_gpu->num_blocks; |
2524 | |
2525 | if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) |
2526 | update_texture_8bpp_cache(psx_gpu); |
2527 | |
2528 | const u8 * __restrict__ texture_ptr_8bpp = psx_gpu->texture_page_ptr; |
2529 | const u16 * __restrict__ clut_ptr = psx_gpu->clut_ptr; |
2530 | block_struct * __restrict__ block = psx_gpu->blocks; |
2531 | |
2532 | while(num_blocks) |
2533 | { |
2534 | u16 offset; |
2535 | #define load_one(i_) \ |
2536 | offset = block->uv.e[i_]; u16 texel##i_ = texture_ptr_8bpp[offset] |
2537 | #define store_one(i_) \ |
2538 | block->texels.e[i_] = clut_ptr[texel##i_] |
2539 | load_one(0); load_one(1); load_one(2); load_one(3); |
2540 | load_one(4); load_one(5); load_one(6); load_one(7); |
2541 | store_one(0); store_one(1); store_one(2); store_one(3); |
2542 | store_one(4); store_one(5); store_one(6); store_one(7); |
2543 | #undef load_one |
2544 | #undef store_one |
2545 | |
2546 | num_blocks--; |
2547 | block++; |
2548 | } |
2549 | } |
2550 | |
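// 15bpp texturing: uv packs u in the low byte and v in the high byte, so
// offset = u + v*256; the "+ (offset & 0xFF00) * 3" below turns that into
// u + v*1024, the VRAM line stride.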
2551 | void texture_blocks_16bpp(psx_gpu_struct *psx_gpu) |
2552 | { |
2553 | #if 0 |
2554 | texture_blocks_16bpp_(psx_gpu); |
2555 | return; |
2556 | #endif |
2557 | u32 num_blocks = psx_gpu->num_blocks; |
2558 | const u16 * __restrict__ texture_ptr_16bpp = psx_gpu->texture_page_ptr; |
2559 | block_struct * __restrict__ block = psx_gpu->blocks; |
2560 | |
2561 | while(num_blocks) |
2562 | { |
2563 | u32 offset; |
2564 | #define load_one(i_) \ |
2565 | offset = block->uv.e[i_]; \ |
2566 | offset += ((offset & 0xFF00) * 3); \ |
2567 | u16 texel##i_ = texture_ptr_16bpp[offset] |
2568 | #define store_one(i_) \ |
2569 | block->texels.e[i_] = texel##i_ |
2570 | load_one(0); load_one(1); load_one(2); load_one(3); |
2571 | load_one(4); load_one(5); load_one(6); load_one(7); |
2572 | store_one(0); store_one(1); store_one(2); store_one(3); |
2573 | store_one(4); store_one(5); store_one(6); store_one(7); |
2574 | #undef load_one |
2575 | #undef store_one |
2576 | |
2577 | num_blocks--; |
2578 | block++; |
2579 | } |
2580 | } |
2581 | |
2582 | #define shade_blocks_load_msb_mask_indirect() \ |
2583 | |
2584 | #define shade_blocks_load_msb_mask_direct() \ |
2585 | vec_8x16u msb_mask; \ |
2586 | gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \ |
2587 | |
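// shade_blocks_store_*: indirect keeps the shaded pixels and draw mask in the
// block for the blend pass; direct merges them with the framebuffer here,
// forcing the mask bit (msb_mask) and leaving masked-off pixels untouched.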
2588 | #define shade_blocks_store_indirect(_draw_mask, _pixels) \ |
2589 | gvst1q_u16(_draw_mask, block->draw_mask.e); \ |
2590 | gvst1q_u16(_pixels, block->pixels.e); \ |
2591 | |
2592 | #define shade_blocks_store_direct(_draw_mask, _pixels) \ |
2593 | { \ |
9088aca1 |
2594 | u16 * __restrict__ fb_ptr = block->fb_ptr; \ |
a2cb152a |
2595 | vec_8x16u fb_pixels; \ |
9088aca1 |
2596 | gvld1q_u16(fb_pixels, fb_ptr); \ |
a2cb152a |
2597 | gvorrq(_pixels, _pixels, msb_mask); \ |
a2cb152a |
2598 | gvbifq(fb_pixels, _pixels, _draw_mask); \ |
9088aca1 |
2599 | gvst1q_u16(fb_pixels, fb_ptr); \ |
a2cb152a |
2600 | } \ |
2601 | |
2602 | #define shade_blocks_textured_false_modulated_check_dithered(target) \ |
2603 | |
2604 | #define shade_blocks_textured_false_modulated_check_undithered(target) \ |
2605 | if(psx_gpu->triangle_color == 0x808080) \ |
2606 | { \ |
2607 | shade_blocks_textured_unmodulated_##target(psx_gpu); \ |
2608 | return; \ |
2609 | } \ |
2610 | |
2611 | #define shade_blocks_textured_modulated_shaded_primitive_load(dithering, \ |
2612 | target) \ |
2613 | |
2614 | #define shade_blocks_textured_modulated_unshaded_primitive_load(dithering, \ |
2615 | target) \ |
2616 | { \ |
2617 | u32 color = psx_gpu->triangle_color; \ |
2618 | gvdup_n_u8(colors_r, color); \ |
2619 | gvdup_n_u8(colors_g, color >> 8); \ |
2620 | gvdup_n_u8(colors_b, color >> 16); \ |
2621 | shade_blocks_textured_false_modulated_check_##dithering(target); \ |
2622 | } \ |
2623 | |
2624 | #define shade_blocks_textured_modulated_shaded_block_load() \ |
2625 | gvld1_u8(colors_r, block->r.e); \ |
2626 | gvld1_u8(colors_g, block->g.e); \ |
2627 | gvld1_u8(colors_b, block->b.e) \ |
2628 | |
2629 | #define shade_blocks_textured_modulated_unshaded_block_load() \ |
2630 | |
2631 | #define shade_blocks_textured_modulate_dithered(component) \ |
2632 | gvld1q_u16(pixels_##component, block->dither_offsets.e); \ |
2633 | gvmlal_u8(pixels_##component, texels_##component, colors_##component) \ |
2634 | |
2635 | #define shade_blocks_textured_modulate_undithered(component) \ |
2636 | gvmull_u8(pixels_##component, texels_##component, colors_##component) \ |
2637 | |
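// Modulated texturing: split each texel into 5-bit r/g/b, multiply by the
// per-pixel (shaded) or flat (unshaded) color with optional dither offsets,
// saturate back to 5 bits per component, re-pack to 15-bit and keep the
// texel's STP bit; texels equal to 0000 stay fully transparent (zero_mask).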
2638 | #define shade_blocks_textured_modulated_do(shading, dithering, target) \ |
4ebb76b3 |
2639 | block_struct * __restrict__ block = psx_gpu->blocks; \ |
a2cb152a |
2640 | u32 num_blocks = psx_gpu->num_blocks; \ |
2641 | vec_8x16u texels; \ |
2642 | \ |
2643 | vec_8x8u texels_r; \ |
2644 | vec_8x8u texels_g; \ |
2645 | vec_8x8u texels_b; \ |
2646 | \ |
2647 | vec_8x8u colors_r; \ |
2648 | vec_8x8u colors_g; \ |
2649 | vec_8x8u colors_b; \ |
2650 | \ |
2651 | vec_8x8u pixels_r_low; \ |
2652 | vec_8x8u pixels_g_low; \ |
2653 | vec_8x8u pixels_b_low; \ |
2654 | vec_8x16u pixels; \ |
2655 | \ |
2656 | vec_8x16u pixels_r; \ |
2657 | vec_8x16u pixels_g; \ |
2658 | vec_8x16u pixels_b; \ |
2659 | \ |
2660 | vec_8x16u draw_mask; \ |
2661 | vec_8x16u zero_mask; \ |
2662 | \ |
2663 | vec_8x8u d64_0x07; \ |
2664 | vec_8x8u d64_0x1F; \ |
2665 | vec_8x8u d64_1; \ |
2666 | vec_8x8u d64_4; \ |
2667 | vec_8x8u d64_128; \ |
2668 | \ |
2669 | vec_8x16u d128_0x8000; \ |
2670 | \ |
2671 | vec_8x16u test_mask; \ |
2672 | u32 draw_mask_bits; \ |
2673 | \ |
2674 | gvld1q_u16(test_mask, psx_gpu->test_mask.e); \ |
2675 | shade_blocks_load_msb_mask_##target(); \ |
2676 | \ |
2677 | gvdup_n_u8(d64_0x07, 0x07); \ |
2678 | gvdup_n_u8(d64_0x1F, 0x1F); \ |
2679 | gvdup_n_u8(d64_1, 1); \ |
2680 | gvdup_n_u8(d64_4, 4); \ |
9088aca1 |
2681 | gvdup_n_u8(d64_128, 128u); \ |
a2cb152a |
2682 | \ |
2683 | gvdupq_n_u16(d128_0x8000, 0x8000); \ |
2684 | \ |
2685 | shade_blocks_textured_modulated_##shading##_primitive_load(dithering, \ |
2686 | target); \ |
2687 | \ |
2688 | while(num_blocks) \ |
2689 | { \ |
2690 | draw_mask_bits = block->draw_mask_bits; \ |
2691 | gvdupq_n_u16(draw_mask, draw_mask_bits); \ |
2692 | gvtstq_u16(draw_mask, draw_mask, test_mask); \ |
2693 | \ |
2694 | shade_blocks_textured_modulated_##shading##_block_load(); \ |
2695 | \ |
2696 | gvld1q_u16(texels, block->texels.e); \ |
2697 | \ |
2698 | gvmovn_u16(texels_r, texels); \ |
2699 | gvshrn_n_u16(texels_g, texels, 5); \ |
2700 | gvshrn_n_u16(texels_b, texels, 7); \ |
2701 | \ |
2702 | gvand(texels_r, texels_r, d64_0x1F); \ |
2703 | gvand(texels_g, texels_g, d64_0x1F); \ |
2704 | gvshr_n_u8(texels_b, texels_b, 3); \ |
2705 | \ |
2706 | shade_blocks_textured_modulate_##dithering(r); \ |
2707 | shade_blocks_textured_modulate_##dithering(g); \ |
2708 | shade_blocks_textured_modulate_##dithering(b); \ |
2709 | \ |
2710 | gvceqzq_u16(zero_mask, texels); \ |
2711 | gvand(pixels, texels, d128_0x8000); \ |
2712 | \ |
2713 | gvqshrun_n_s16(pixels_r_low, pixels_r, 4); \ |
2714 | gvqshrun_n_s16(pixels_g_low, pixels_g, 4); \ |
2715 | gvqshrun_n_s16(pixels_b_low, pixels_b, 4); \ |
2716 | \ |
2717 | gvorrq(zero_mask, draw_mask, zero_mask); \ |
2718 | \ |
2719 | gvshr_n_u8(pixels_r_low, pixels_r_low, 3); \ |
2720 | gvbic(pixels_g_low, pixels_g_low, d64_0x07); \ |
2721 | gvbic(pixels_b_low, pixels_b_low, d64_0x07); \ |
2722 | \ |
2723 | gvmlal_u8(pixels, pixels_r_low, d64_1); \ |
2724 | gvmlal_u8(pixels, pixels_g_low, d64_4); \ |
2725 | gvmlal_u8(pixels, pixels_b_low, d64_128); \ |
2726 | \ |
2727 | shade_blocks_store_##target(zero_mask, pixels); \ |
2728 | \ |
2729 | num_blocks--; \ |
2730 | block++; \ |
2731 | } \ |
2732 | |
2733 | void shade_blocks_shaded_textured_modulated_dithered_direct(psx_gpu_struct |
2734 | *psx_gpu) |
2735 | { |
2736 | #if 0 |
2737 | shade_blocks_shaded_textured_modulated_dithered_direct_(psx_gpu); |
2738 | return; |
2739 | #endif |
2740 | shade_blocks_textured_modulated_do(shaded, dithered, direct); |
2741 | } |
2742 | |
2743 | void shade_blocks_shaded_textured_modulated_undithered_direct(psx_gpu_struct |
2744 | *psx_gpu) |
2745 | { |
2746 | #if 0 |
2747 | shade_blocks_shaded_textured_modulated_undithered_direct_(psx_gpu); |
2748 | return; |
2749 | #endif |
2750 | shade_blocks_textured_modulated_do(shaded, undithered, direct); |
2751 | } |
2752 | |
2753 | void shade_blocks_unshaded_textured_modulated_dithered_direct(psx_gpu_struct |
2754 | *psx_gpu) |
2755 | { |
2756 | #if 0 |
2757 | shade_blocks_unshaded_textured_modulated_dithered_direct_(psx_gpu); |
2758 | return; |
2759 | #endif |
2760 | shade_blocks_textured_modulated_do(unshaded, dithered, direct); |
2761 | } |
2762 | |
2763 | void shade_blocks_unshaded_textured_modulated_undithered_direct(psx_gpu_struct |
2764 | *psx_gpu) |
2765 | { |
2766 | #if 0 |
2767 | shade_blocks_unshaded_textured_modulated_undithered_direct_(psx_gpu); |
2768 | return; |
2769 | #endif |
2770 | shade_blocks_textured_modulated_do(unshaded, undithered, direct); |
2771 | } |
2772 | |
2773 | void shade_blocks_shaded_textured_modulated_dithered_indirect(psx_gpu_struct |
2774 | *psx_gpu) |
2775 | { |
2776 | #if 0 |
2777 | shade_blocks_shaded_textured_modulated_dithered_indirect_(psx_gpu); |
2778 | return; |
2779 | #endif |
2780 | shade_blocks_textured_modulated_do(shaded, dithered, indirect); |
2781 | } |
2782 | |
2783 | void shade_blocks_shaded_textured_modulated_undithered_indirect(psx_gpu_struct |
2784 | *psx_gpu) |
2785 | { |
2786 | #if 0 |
2787 | shade_blocks_shaded_textured_modulated_undithered_indirect_(psx_gpu); |
2788 | return; |
2789 | #endif |
2790 | shade_blocks_textured_modulated_do(shaded, undithered, indirect); |
2791 | } |
2792 | |
2793 | void shade_blocks_unshaded_textured_modulated_dithered_indirect(psx_gpu_struct |
2794 | *psx_gpu) |
2795 | { |
2796 | #if 0 |
2797 | shade_blocks_unshaded_textured_modulated_dithered_indirect_(psx_gpu); |
2798 | return; |
2799 | #endif |
2800 | shade_blocks_textured_modulated_do(unshaded, dithered, indirect); |
2801 | } |
2802 | |
2803 | void shade_blocks_unshaded_textured_modulated_undithered_indirect(psx_gpu_struct |
2804 | *psx_gpu) |
2805 | { |
2806 | #if 0 |
2807 | shade_blocks_unshaded_textured_modulated_undithered_indirect_(psx_gpu); |
2808 | return; |
2809 | #endif |
2810 | shade_blocks_textured_modulated_do(unshaded, undithered, indirect); |
2811 | } |
2812 | |
2813 | #define shade_blocks_textured_unmodulated_do(target) \ |
2814 | block_struct *block = psx_gpu->blocks; \ |
2815 | u32 num_blocks = psx_gpu->num_blocks; \ |
2816 | vec_8x16u draw_mask; \ |
2817 | vec_8x16u test_mask; \ |
2818 | u32 draw_mask_bits; \ |
2819 | \ |
2820 | vec_8x16u pixels; \ |
2821 | \ |
2822 | gvld1q_u16(test_mask, psx_gpu->test_mask.e); \ |
2823 | shade_blocks_load_msb_mask_##target(); \ |
2824 | \ |
2825 | while(num_blocks) \ |
2826 | { \ |
2827 | vec_8x16u zero_mask; \ |
2828 | \ |
2829 | draw_mask_bits = block->draw_mask_bits; \ |
2830 | gvdupq_n_u16(draw_mask, draw_mask_bits); \ |
2831 | gvtstq_u16(draw_mask, draw_mask, test_mask); \ |
2832 | \ |
2833 | gvld1q_u16(pixels, block->texels.e); \ |
2834 | \ |
2835 | gvceqzq_u16(zero_mask, pixels); \ |
2836 | gvorrq(zero_mask, draw_mask, zero_mask); \ |
2837 | \ |
2838 | shade_blocks_store_##target(zero_mask, pixels); \ |
2839 | \ |
2840 | num_blocks--; \ |
2841 | block++; \ |
2842 | } \ |
2843 | |
2844 | void shade_blocks_textured_unmodulated_indirect(psx_gpu_struct *psx_gpu) |
2845 | { |
2846 | #if 0 |
2847 | shade_blocks_textured_unmodulated_indirect_(psx_gpu); |
2848 | return; |
2849 | #endif |
2850 | shade_blocks_textured_unmodulated_do(indirect) |
2851 | } |
2852 | |
2853 | void shade_blocks_textured_unmodulated_direct(psx_gpu_struct *psx_gpu) |
2854 | { |
2855 | #if 0 |
2856 | shade_blocks_textured_unmodulated_direct_(psx_gpu); |
2857 | return; |
2858 | #endif |
2859 | shade_blocks_textured_unmodulated_do(direct) |
2860 | } |
2861 | |
2862 | void shade_blocks_unshaded_untextured_indirect(psx_gpu_struct *psx_gpu) |
2863 | { |
2864 | } |
2865 | |
2866 | void shade_blocks_unshaded_untextured_direct(psx_gpu_struct *psx_gpu) |
2867 | { |
2868 | #if 0 |
2869 | shade_blocks_unshaded_untextured_direct_(psx_gpu); |
2870 | return; |
2871 | #endif |
2872 | block_struct *block = psx_gpu->blocks; |
2873 | u32 num_blocks = psx_gpu->num_blocks; |
2874 | |
2875 | vec_8x16u pixels; |
b0d96051 |
2876 | gvld1q_u16(pixels, block->pixels.e); |
a2cb152a |
2877 | shade_blocks_load_msb_mask_direct(); |
2878 | |
2879 | while(num_blocks) |
2880 | { |
2881 | vec_8x16u draw_mask; |
2882 | gvld1q_u16(draw_mask, block->draw_mask.e); |
2883 | shade_blocks_store_direct(draw_mask, pixels); |
2884 | |
2885 | num_blocks--; |
2886 | block++; |
2887 | } |
2888 | } |
2889 | |
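// blend_blocks_*: PS1 semi-transparency. The four blend modes are
// B/2 + F/2 (average), B + F (add), B - F (subtract) and B + F/4 (add_fourth),
// applied per 5-bit component on 15-bit pixels. mask_evaluate_on also skips
// framebuffer pixels whose mask bit is already set, and for textured
// primitives only texels with the STP bit set are actually blended.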
2890 | #define blend_blocks_mask_evaluate_on() \ |
2891 | vec_8x16u mask_pixels; \ |
2892 | gvcltzq_s16(mask_pixels, framebuffer_pixels); \ |
2893 | gvorrq(draw_mask, draw_mask, mask_pixels) \ |
2894 | |
2895 | #define blend_blocks_mask_evaluate_off() \ |
2896 | |
2897 | #define blend_blocks_average() \ |
2898 | { \ |
2899 | vec_8x16u pixels_no_msb; \ |
2900 | vec_8x16u fb_pixels_no_msb; \ |
2901 | \ |
2902 | vec_8x16u d128_0x0421; \ |
2903 | \ |
2904 | gvdupq_n_u16(d128_0x0421, 0x0421); \ |
2905 | \ |
2906 | gveorq(blend_pixels, pixels, framebuffer_pixels); \ |
2907 | gvbicq(pixels_no_msb, pixels, d128_0x8000); \ |
2908 | gvand(blend_pixels, blend_pixels, d128_0x0421); \ |
2909 | gvsubq_u16(blend_pixels, pixels_no_msb, blend_pixels); \ |
2910 | gvbicq(fb_pixels_no_msb, framebuffer_pixels, d128_0x8000); \ |
2911 | gvhaddq_u16(blend_pixels, fb_pixels_no_msb, blend_pixels); \ |
2912 | } \ |
2913 | |
2914 | #define blend_blocks_add() \ |
2915 | { \ |
2916 | vec_8x16u pixels_rb, pixels_g; \ |
2917 | vec_8x16u fb_rb, fb_g; \ |
2918 | \ |
2919 | vec_8x16u d128_0x7C1F; \ |
2920 | vec_8x16u d128_0x03E0; \ |
2921 | \ |
2922 | gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \ |
2923 | gvdupq_n_u16(d128_0x03E0, 0x03E0); \ |
2924 | \ |
2925 | gvand(pixels_rb, pixels, d128_0x7C1F); \ |
2926 | gvand(pixels_g, pixels, d128_0x03E0); \ |
2927 | \ |
2928 | gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \ |
2929 | gvand(fb_g, framebuffer_pixels, d128_0x03E0); \ |
2930 | \ |
2931 | gvaddq_u16(fb_rb, fb_rb, pixels_rb); \ |
2932 | gvaddq_u16(fb_g, fb_g, pixels_g); \ |
2933 | \ |
2934 | gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \ |
2935 | gvminq_u16(fb_g, fb_g, d128_0x03E0); \ |
2936 | \ |
2937 | gvorrq(blend_pixels, fb_rb, fb_g); \ |
2938 | } \ |
2939 | |
2940 | #define blend_blocks_subtract() \ |
2941 | { \ |
2942 | vec_8x16u pixels_rb, pixels_g; \ |
2943 | vec_8x16u fb_rb, fb_g; \ |
2944 | \ |
2945 | vec_8x16u d128_0x7C1F; \ |
2946 | vec_8x16u d128_0x03E0; \ |
2947 | \ |
2948 | gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \ |
2949 | gvdupq_n_u16(d128_0x03E0, 0x03E0); \ |
2950 | \ |
2951 | gvand(pixels_rb, pixels, d128_0x7C1F); \ |
2952 | gvand(pixels_g, pixels, d128_0x03E0); \ |
2953 | \ |
2954 | gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \ |
2955 | gvand(fb_g, framebuffer_pixels, d128_0x03E0); \ |
2956 | \ |
2957 | gvqsubq_u8(fb_rb, fb_rb, pixels_rb); \ |
2958 | gvqsubq_u16(fb_g, fb_g, pixels_g); \ |
2959 | \ |
2960 | gvorrq(blend_pixels, fb_rb, fb_g); \ |
2961 | } \ |
2962 | |
2963 | #define blend_blocks_add_fourth() \ |
2964 | { \ |
2965 | vec_8x16u pixels_rb, pixels_g; \ |
2966 | vec_8x16u pixels_fourth; \ |
2967 | vec_8x16u fb_rb, fb_g; \ |
2968 | \ |
2969 | vec_8x16u d128_0x7C1F; \ |
2970 | vec_8x16u d128_0x1C07; \ |
2971 | vec_8x16u d128_0x03E0; \ |
2972 | vec_8x16u d128_0x00E0; \ |
2973 | \ |
2974 | gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \ |
2975 | gvdupq_n_u16(d128_0x1C07, 0x1C07); \ |
2976 | gvdupq_n_u16(d128_0x03E0, 0x03E0); \ |
2977 | gvdupq_n_u16(d128_0x00E0, 0x00E0); \ |
2978 | \ |
2979 | gvshrq_n_u16(pixels_fourth, pixels, 2); \ |
2980 | \ |
2981 | gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \ |
2982 | gvand(fb_g, framebuffer_pixels, d128_0x03E0); \ |
2983 | \ |
2984 | gvand(pixels_rb, pixels_fourth, d128_0x1C07); \ |
2985 | gvand(pixels_g, pixels_fourth, d128_0x00E0); \ |
2986 | \ |
2987 | gvaddq_u16(fb_rb, fb_rb, pixels_rb); \ |
2988 | gvaddq_u16(fb_g, fb_g, pixels_g); \ |
2989 | \ |
2990 | gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \ |
2991 | gvminq_u16(fb_g, fb_g, d128_0x03E0); \ |
2992 | \ |
2993 | gvorrq(blend_pixels, fb_rb, fb_g); \ |
2994 | } \ |
2995 | |
2996 | #define blend_blocks_blended_combine_textured() \ |
2997 | { \ |
2998 | vec_8x16u blend_mask; \ |
2999 | gvcltzq_s16(blend_mask, pixels); \ |
3000 | \ |
3001 | gvorrq(blend_pixels, blend_pixels, d128_0x8000); \ |
3002 | gvbifq(blend_pixels, pixels, blend_mask); \ |
3003 | } \ |
3004 | |
3005 | #define blend_blocks_blended_combine_untextured() \ |
3006 | |
3007 | #define blend_blocks_body_blend(blend_mode, texturing) \ |
3008 | { \ |
3009 | blend_blocks_##blend_mode(); \ |
3010 | blend_blocks_blended_combine_##texturing(); \ |
3011 | } \ |
3012 | |
3013 | #define blend_blocks_body_average(texturing) \ |
3014 | blend_blocks_body_blend(average, texturing) \ |
3015 | |
3016 | #define blend_blocks_body_add(texturing) \ |
3017 | blend_blocks_body_blend(add, texturing) \ |
3018 | |
3019 | #define blend_blocks_body_subtract(texturing) \ |
3020 | blend_blocks_body_blend(subtract, texturing) \ |
3021 | |
3022 | #define blend_blocks_body_add_fourth(texturing) \ |
3023 | blend_blocks_body_blend(add_fourth, texturing) \ |
3024 | |
3025 | #define blend_blocks_body_unblended(texturing) \ |
3026 | blend_pixels = pixels \ |
3027 | |
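/*
 * Shared body for the blend_blocks_* entry points below: walk the queued
 * block_struct list, blend each 8-pixel block against the framebuffer,
 * OR in msb_mask (the force-set mask bit, when enabled) and write back only
 * where draw_mask is clear.  With mask evaluation "on", framebuffer pixels
 * that already have their MSB set are added to draw_mask first.
 */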
3028 | #define blend_blocks_do(texturing, blend_mode, mask_evaluate) \ |
3029 | block_struct *block = psx_gpu->blocks; \ |
3030 | u32 num_blocks = psx_gpu->num_blocks; \ |
3031 | vec_8x16u draw_mask; \ |
3032 | vec_8x16u pixels; \ |
3033 | vec_8x16u blend_pixels; \ |
3034 | vec_8x16u framebuffer_pixels; \ |
3035 | vec_8x16u msb_mask; \ |
3036 | vec_8x16u d128_0x8000; \ |
3037 | \ |
3038 | u16 *fb_ptr; \ |
3039 | \ |
3040 | gvdupq_n_u16(d128_0x8000, 0x8000); \ |
3041 | gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \ |
3042 | (void)d128_0x8000; /* sometimes unused */ \ |
3043 | \ |
3044 | while(num_blocks) \ |
3045 | { \ |
3046 | gvld1q_u16(pixels, block->pixels.e); \ |
3047 | gvld1q_u16(draw_mask, block->draw_mask.e); \ |
3048 | fb_ptr = block->fb_ptr; \ |
3049 | \ |
3050 | gvld1q_u16(framebuffer_pixels, fb_ptr); \ |
3051 | \ |
3052 | blend_blocks_mask_evaluate_##mask_evaluate(); \ |
3053 | blend_blocks_body_##blend_mode(texturing); \ |
3054 | \ |
3055 | gvorrq(blend_pixels, blend_pixels, msb_mask); \ |
3056 | gvbifq(framebuffer_pixels, blend_pixels, draw_mask); \ |
3057 | gvst1q_u16(framebuffer_pixels, fb_ptr); \ |
3058 | \ |
3059 | num_blocks--; \ |
3060 | block++; \ |
3061 | } \ |
3062 | |
3063 | |
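/*
 * One thin wrapper per texturing / blend mode / mask-test combination.  The
 * disabled #if 0 blocks call the underscore-suffixed alternatives
 * (presumably the hand-written assembly versions) and are kept around for
 * comparison and debugging.
 */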
3064 | void blend_blocks_textured_average_off(psx_gpu_struct *psx_gpu) |
3065 | { |
3066 | #if 0 |
3067 | blend_blocks_textured_average_off_(psx_gpu); |
3068 | return; |
3069 | #endif |
3070 | blend_blocks_do(textured, average, off); |
3071 | } |
3072 | |
3073 | void blend_blocks_untextured_average_off(psx_gpu_struct *psx_gpu) |
3074 | { |
3075 | #if 0 |
3076 | blend_blocks_untextured_average_off_(psx_gpu); |
3077 | return; |
3078 | #endif |
3079 | blend_blocks_do(untextured, average, off); |
3080 | } |
3081 | |
3082 | void blend_blocks_textured_average_on(psx_gpu_struct *psx_gpu) |
3083 | { |
3084 | #if 0 |
3085 | blend_blocks_textured_average_on_(psx_gpu); |
3086 | return; |
3087 | #endif |
3088 | blend_blocks_do(textured, average, on); |
3089 | } |
3090 | |
3091 | void blend_blocks_untextured_average_on(psx_gpu_struct *psx_gpu) |
3092 | { |
3093 | #if 0 |
3094 | blend_blocks_untextured_average_on_(psx_gpu); |
3095 | return; |
3096 | #endif |
3097 | blend_blocks_do(untextured, average, on); |
3098 | } |
3099 | |
3100 | void blend_blocks_textured_add_off(psx_gpu_struct *psx_gpu) |
3101 | { |
3102 | #if 0 |
3103 | blend_blocks_textured_add_off_(psx_gpu); |
3104 | return; |
3105 | #endif |
3106 | blend_blocks_do(textured, add, off); |
3107 | } |
3108 | |
3109 | void blend_blocks_textured_add_on(psx_gpu_struct *psx_gpu) |
3110 | { |
3111 | #if 0 |
3112 | blend_blocks_textured_add_on_(psx_gpu); |
3113 | return; |
3114 | #endif |
3115 | blend_blocks_do(textured, add, on); |
3116 | } |
3117 | |
3118 | void blend_blocks_untextured_add_off(psx_gpu_struct *psx_gpu) |
3119 | { |
3120 | #if 0 |
3121 | blend_blocks_untextured_add_off_(psx_gpu); |
3122 | return; |
3123 | #endif |
3124 | blend_blocks_do(untextured, add, off); |
3125 | } |
3126 | |
3127 | void blend_blocks_untextured_add_on(psx_gpu_struct *psx_gpu) |
3128 | { |
3129 | #if 0 |
3130 | blend_blocks_untextured_add_on_(psx_gpu); |
3131 | return; |
3132 | #endif |
3133 | blend_blocks_do(untextured, add, on); |
3134 | } |
3135 | |
3136 | void blend_blocks_textured_subtract_off(psx_gpu_struct *psx_gpu) |
3137 | { |
3138 | #if 0 |
3139 | blend_blocks_textured_subtract_off_(psx_gpu); |
3140 | return; |
3141 | #endif |
3142 | blend_blocks_do(textured, subtract, off); |
3143 | } |
3144 | |
3145 | void blend_blocks_textured_subtract_on(psx_gpu_struct *psx_gpu) |
3146 | { |
3147 | #if 0 |
3148 | blend_blocks_textured_subtract_on_(psx_gpu); |
3149 | return; |
3150 | #endif |
3151 | blend_blocks_do(textured, subtract, on); |
3152 | } |
3153 | |
3154 | void blend_blocks_untextured_subtract_off(psx_gpu_struct *psx_gpu) |
3155 | { |
3156 | #if 0 |
3157 | blend_blocks_untextured_subtract_off_(psx_gpu); |
3158 | return; |
3159 | #endif |
3160 | blend_blocks_do(untextured, subtract, off); |
3161 | } |
3162 | |
3163 | void blend_blocks_untextured_subtract_on(psx_gpu_struct *psx_gpu) |
3164 | { |
3165 | #if 0 |
3166 | blend_blocks_untextured_subtract_on_(psx_gpu); |
3167 | return; |
3168 | #endif |
3169 | blend_blocks_do(untextured, subtract, on); |
3170 | } |
3171 | |
3172 | void blend_blocks_textured_add_fourth_off(psx_gpu_struct *psx_gpu) |
3173 | { |
3174 | #if 0 |
3175 | blend_blocks_textured_add_fourth_off_(psx_gpu); |
3176 | return; |
3177 | #endif |
3178 | blend_blocks_do(textured, add_fourth, off); |
3179 | } |
3180 | |
3181 | void blend_blocks_textured_add_fourth_on(psx_gpu_struct *psx_gpu) |
3182 | { |
3183 | #if 0 |
3184 | blend_blocks_textured_add_fourth_on_(psx_gpu); |
3185 | return; |
3186 | #endif |
3187 | blend_blocks_do(textured, add_fourth, on); |
3188 | } |
3189 | |
3190 | void blend_blocks_untextured_add_fourth_off(psx_gpu_struct *psx_gpu) |
3191 | { |
3192 | #if 0 |
3193 | blend_blocks_untextured_add_fourth_off_(psx_gpu); |
3194 | return; |
3195 | #endif |
3196 | blend_blocks_do(untextured, add_fourth, off); |
3197 | } |
3198 | |
3199 | void blend_blocks_untextured_add_fourth_on(psx_gpu_struct *psx_gpu) |
3200 | { |
3201 | #if 0 |
3202 | blend_blocks_untextured_add_fourth_on_(psx_gpu); |
3203 | return; |
3204 | #endif |
3205 | blend_blocks_do(untextured, add_fourth, on); |
3206 | } |
3207 | |
3208 | void blend_blocks_textured_unblended_on(psx_gpu_struct *psx_gpu) |
3209 | { |
3210 | #if 0 |
3211 | blend_blocks_textured_unblended_on_(psx_gpu); |
3212 | return; |
3213 | #endif |
3214 | blend_blocks_do(textured, unblended, on); |
3215 | } |
3216 | |
3217 | void blend_blocks_textured_unblended_off(psx_gpu_struct *psx_gpu) |
3218 | { |
3219 | } |
3220 | |
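/*
 * Builds render blocks for a flat-coloured sprite: the 24-bit colour is
 * packed down to 5:5:5, full 8-pixel blocks get an all-zero draw mask (draw
 * everything), and the last block of each row uses right_mask so pixels past
 * the sprite's right edge are skipped.  fb_ptr walks VRAM with its
 * 1024-pixel pitch.
 */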
2d658c89 |
3221 | void setup_sprite_untextured_512(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, |
a2cb152a |
3222 | s32 v, s32 width, s32 height, u32 color) |
3223 | { |
a2cb152a |
3224 | #if 0 |
2d658c89 |
3225 | setup_sprite_untextured_512_(psx_gpu, x, y, u, v, width, height, color); |
a2cb152a |
3226 | return; |
3227 | #endif |
3228 | u32 right_width = ((width - 1) & 0x7) + 1; |
3229 | u32 right_mask_bits = (0xFF << right_width); |
3230 | u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + x; |
3231 | u32 block_width = (width + 7) / 8; |
3232 | u32 fb_ptr_pitch = 1024 - ((block_width - 1) * 8); |
3233 | u32 blocks_remaining; |
3234 | u32 num_blocks = psx_gpu->num_blocks; |
3235 | block_struct *block = psx_gpu->blocks + num_blocks; |
3236 | |
3237 | u32 color_r = color & 0xFF; |
3238 | u32 color_g = (color >> 8) & 0xFF; |
3239 | u32 color_b = (color >> 16) & 0xFF; |
3240 | vec_8x16u colors; |
3241 | vec_8x16u right_mask; |
3242 | vec_8x16u test_mask; |
3243 | vec_8x16u zero_mask; |
3244 | |
3245 | gvld1q_u16(test_mask, psx_gpu->test_mask.e); |
3246 | color = (color_r >> 3) | ((color_g >> 3) << 5) | ((color_b >> 3) << 10); |
3247 | |
3248 | gvdupq_n_u16(colors, color); |
3249 | gvdupq_n_u16(zero_mask, 0x00); |
3250 | gvdupq_n_u16(right_mask, right_mask_bits); |
3251 | gvtstq_u16(right_mask, right_mask, test_mask); |
3252 | |
3253 | while(height) |
3254 | { |
3255 | blocks_remaining = block_width - 1; |
3256 | num_blocks += block_width; |
3257 | |
3258 | if(num_blocks > MAX_BLOCKS) |
3259 | { |
3260 | flush_render_block_buffer(psx_gpu); |
3261 | num_blocks = block_width; |
3262 | block = psx_gpu->blocks; |
3263 | } |
3264 | |
3265 | while(blocks_remaining) |
3266 | { |
3267 | gvst1q_u16(colors, block->pixels.e); |
3268 | gvst1q_u16(zero_mask, block->draw_mask.e); |
3269 | block->fb_ptr = fb_ptr; |
3270 | |
3271 | fb_ptr += 8; |
3272 | block++; |
3273 | blocks_remaining--; |
3274 | } |
3275 | |
3276 | gvst1q_u16(colors, block->pixels.e); |
3277 | gvst1q_u16(right_mask, block->draw_mask.e); |
3278 | block->fb_ptr = fb_ptr; |
3279 | |
3280 | block++; |
3281 | fb_ptr += fb_ptr_pitch; |
3282 | |
3283 | height--; |
3284 | psx_gpu->num_blocks = num_blocks; |
3285 | } |
3286 | } |
3287 | |
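/*
 * Tiled (textured) sprite setup.  For 4bpp sprites the 16-entry CLUT is
 * de-interleaved into low-byte and high-byte halves so texels can be
 * converted with two byte table lookups; 8bpp sprites copy texture cache
 * bytes directly.  The non-4x initialisers also refresh the texture cache
 * when it is dirty.
 */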
3288 | #define setup_sprite_tiled_initialize_4bpp_clut() \ |
3289 | vec_16x8u clut_low, clut_high; \ |
3290 | \ |
3291 | gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr) \ |
3292 | |
3293 | #define setup_sprite_tiled_initialize_4bpp() \ |
3294 | setup_sprite_tiled_initialize_4bpp_clut(); \ |
3295 | \ |
3296 | if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask) \ |
3297 | update_texture_4bpp_cache(psx_gpu) \ |
3298 | |
3299 | #define setup_sprite_tiled_initialize_8bpp() \ |
3300 | if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) \ |
3301 | update_texture_8bpp_cache(psx_gpu) \ |
3302 | |
3303 | #define setup_sprite_tile_fetch_texel_block_8bpp(offset) \ |
3304 | texture_block_ptr = psx_gpu->texture_page_ptr + \ |
3305 | ((texture_offset + offset) & texture_mask); \ |
3306 | \ |
3307 | gvld1_u8(texels, (u8 *)texture_block_ptr) \ |
3308 | |
3309 | #define setup_sprite_tile_add_blocks(tile_num_blocks) \ |
3310 | num_blocks += tile_num_blocks; \ |
3311 | \ |
3312 | if(num_blocks > MAX_BLOCKS) \ |
3313 | { \ |
3314 | flush_render_block_buffer(psx_gpu); \ |
3315 | num_blocks = tile_num_blocks; \ |
3316 | block = psx_gpu->blocks; \ |
3317 | } \ |
3318 | |
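/*
 * A "full" tile row emits two 8-pixel blocks (the left and right halves of a
 * 16-texel row), a "half" tile row emits one, masked with the relevant edge
 * mask.  The texture cache appears to use a tile-swizzled layout: +0x10 per
 * texel row inside a 16x16 tile, +0x100 per tile column, +0x1000 per tile
 * row, hence the +0xF00 fix-up once a tile's rows are done.
 */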
3319 | #define setup_sprite_tile_full_4bpp(edge) \ |
3320 | { \ |
3321 | vec_8x8u texels_low, texels_high; \ |
3322 | setup_sprite_tile_add_blocks(sub_tile_height * 2); \ |
3323 | \ |
3324 | while(sub_tile_height) \ |
3325 | { \ |
3326 | setup_sprite_tile_fetch_texel_block_8bpp(0); \ |
3327 | gvtbl2_u8(texels_low, clut_low, texels); \ |
3328 | gvtbl2_u8(texels_high, clut_high, texels); \ |
3329 | \ |
3330 | gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \ |
3331 | block->draw_mask_bits = left_mask_bits; \ |
3332 | block->fb_ptr = fb_ptr; \ |
3333 | block++; \ |
3334 | \ |
3335 | setup_sprite_tile_fetch_texel_block_8bpp(8); \ |
3336 | gvtbl2_u8(texels_low, clut_low, texels); \ |
3337 | gvtbl2_u8(texels_high, clut_high, texels); \ |
3338 | \ |
3339 | gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \ |
3340 | block->draw_mask_bits = right_mask_bits; \ |
3341 | block->fb_ptr = fb_ptr + 8; \ |
3342 | block++; \ |
3343 | \ |
3344 | fb_ptr += 1024; \ |
3345 | texture_offset += 0x10; \ |
3346 | sub_tile_height--; \ |
3347 | } \ |
3348 | texture_offset += 0xF00; \ |
3349 | psx_gpu->num_blocks = num_blocks; \ |
3350 | } \ |
3351 | |
3352 | #define setup_sprite_tile_half_4bpp(edge) \ |
3353 | { \ |
3354 | vec_8x8u texels_low, texels_high; \ |
3355 | setup_sprite_tile_add_blocks(sub_tile_height); \ |
3356 | \ |
3357 | while(sub_tile_height) \ |
3358 | { \ |
3359 | setup_sprite_tile_fetch_texel_block_8bpp(0); \ |
3360 | gvtbl2_u8(texels_low, clut_low, texels); \ |
3361 | gvtbl2_u8(texels_high, clut_high, texels); \ |
3362 | \ |
3363 | gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \ |
3364 | block->draw_mask_bits = edge##_mask_bits; \ |
3365 | block->fb_ptr = fb_ptr; \ |
3366 | block++; \ |
3367 | \ |
3368 | fb_ptr += 1024; \ |
3369 | texture_offset += 0x10; \ |
3370 | sub_tile_height--; \ |
3371 | } \ |
3372 | texture_offset += 0xF00; \ |
3373 | psx_gpu->num_blocks = num_blocks; \ |
3374 | } \ |
3375 | |
3376 | #define setup_sprite_tile_full_8bpp(edge) \ |
3377 | { \ |
3378 | setup_sprite_tile_add_blocks(sub_tile_height * 2); \ |
3379 | \ |
3380 | while(sub_tile_height) \ |
3381 | { \ |
3382 | setup_sprite_tile_fetch_texel_block_8bpp(0); \ |
3383 | gvst1_u8(texels, block->r.e); \ |
3384 | block->draw_mask_bits = left_mask_bits; \ |
3385 | block->fb_ptr = fb_ptr; \ |
3386 | block++; \ |
3387 | \ |
3388 | setup_sprite_tile_fetch_texel_block_8bpp(8); \ |
3389 | gvst1_u8(texels, block->r.e); \ |
3390 | block->draw_mask_bits = right_mask_bits; \ |
3391 | block->fb_ptr = fb_ptr + 8; \ |
3392 | block++; \ |
3393 | \ |
3394 | fb_ptr += 1024; \ |
3395 | texture_offset += 0x10; \ |
3396 | sub_tile_height--; \ |
3397 | } \ |
3398 | texture_offset += 0xF00; \ |
3399 | psx_gpu->num_blocks = num_blocks; \ |
3400 | } \ |
3401 | |
3402 | #define setup_sprite_tile_half_8bpp(edge) \ |
3403 | { \ |
df740cdc |
3404 | setup_sprite_tile_add_blocks(sub_tile_height); \ |
a2cb152a |
3405 | \ |
3406 | while(sub_tile_height) \ |
3407 | { \ |
3408 | setup_sprite_tile_fetch_texel_block_8bpp(0); \ |
3409 | gvst1_u8(texels, block->r.e); \ |
3410 | block->draw_mask_bits = edge##_mask_bits; \ |
3411 | block->fb_ptr = fb_ptr; \ |
3412 | block++; \ |
3413 | \ |
3414 | fb_ptr += 1024; \ |
3415 | texture_offset += 0x10; \ |
3416 | sub_tile_height--; \ |
3417 | } \ |
3418 | texture_offset += 0xF00; \ |
3419 | psx_gpu->num_blocks = num_blocks; \ |
3420 | } \ |
3421 | |
3422 | #define setup_sprite_tile_column_edge_pre_adjust_half_right() \ |
3423 | texture_offset = texture_offset_base + 8; \ |
3424 | fb_ptr += 8 \ |
3425 | |
3426 | #define setup_sprite_tile_column_edge_pre_adjust_half_left() \ |
3427 | texture_offset = texture_offset_base \ |
3428 | |
3429 | #define setup_sprite_tile_column_edge_pre_adjust_half(edge) \ |
3430 | setup_sprite_tile_column_edge_pre_adjust_half_##edge() \ |
3431 | |
3432 | #define setup_sprite_tile_column_edge_pre_adjust_full(edge) \ |
3433 | texture_offset = texture_offset_base \ |
3434 | |
3435 | #define setup_sprite_tile_column_edge_post_adjust_half_right() \ |
3436 | fb_ptr -= 8 \ |
3437 | |
3438 | #define setup_sprite_tile_column_edge_post_adjust_half_left() \ |
3439 | |
3440 | #define setup_sprite_tile_column_edge_post_adjust_half(edge) \ |
3441 | setup_sprite_tile_column_edge_post_adjust_half_##edge() \ |
3442 | |
3443 | #define setup_sprite_tile_column_edge_post_adjust_full(edge) \ |
3444 | |
3445 | |
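/*
 * column_data packs the per-tile row counts for one column of tiles: bits
 * 0-7 hold the rows drawn in the topmost tile, bits 8-15 the rows in the
 * bottom tile, and bits 16+ the tile count minus one (see
 * setup_sprite_column_data_multi below).
 */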
3446 | #define setup_sprite_tile_column_height_single(edge_mode, edge, texture_mode, \ |
3447 | x4mode) \ |
3448 | do \ |
3449 | { \ |
3450 | sub_tile_height = column_data; \ |
3451 | setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \ |
3452 | setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \ |
3453 | setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \ |
3454 | } while(0) \ |
3455 | |
3456 | #define setup_sprite_tile_column_height_multi(edge_mode, edge, texture_mode, \ |
3457 | x4mode) \ |
3458 | do \ |
3459 | { \ |
3460 | u32 tiles_remaining = column_data >> 16; \ |
3461 | sub_tile_height = column_data & 0xFF; \ |
3462 | setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \ |
3463 | setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \ |
3464 | tiles_remaining -= 1; \ |
3465 | \ |
3466 | while(tiles_remaining) \ |
3467 | { \ |
3468 | sub_tile_height = 16; \ |
3469 | setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \ |
3470 | tiles_remaining--; \ |
3471 | } \ |
3472 | \ |
3473 | sub_tile_height = (column_data >> 8) & 0xFF; \ |
3474 | setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \ |
3475 | setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \ |
3476 | } while(0) \ |
3477 | |
3478 | |
3479 | #define setup_sprite_column_data_single() \ |
3480 | column_data = height \ |
3481 | |
3482 | #define setup_sprite_column_data_multi() \ |
3483 | column_data = 16 - offset_v; \ |
3484 | column_data |= ((height_rounded & 0xF) + 1) << 8; \ |
3485 | column_data |= (tile_height - 1) << 16 \ |
3486 | |
3487 | #define RIGHT_MASK_BIT_SHIFT 8 |
3488 | #define RIGHT_MASK_BIT_SHIFT_4x 16 |
3489 | |
3490 | #define setup_sprite_tile_column_width_single(texture_mode, multi_height, \ |
3491 | edge_mode, edge, x4mode) \ |
3492 | { \ |
3493 | setup_sprite_column_data_##multi_height(); \ |
3494 | left_mask_bits = left_block_mask | right_block_mask; \ |
3495 | right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \ |
3496 | \ |
3497 | setup_sprite_tile_column_height_##multi_height(edge_mode, edge, \ |
3498 | texture_mode, x4mode); \ |
3499 | } \ |
3500 | |
3501 | #define setup_sprite_tiled_advance_column() \ |
3502 | texture_offset_base += 0x100; \ |
3503 | if((texture_offset_base & 0xF00) == 0) \ |
3504 | texture_offset_base -= (0x100 + 0xF00) \ |
3505 | |
3506 | #define FB_PTR_MULTIPLIER 1 |
3507 | #define FB_PTR_MULTIPLIER_4x 2 |
3508 | |
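/*
 * Walks a sprite spanning several tile columns: the first column is drawn
 * with left_block_mask, the middle columns with no masking at all, and the
 * last column with right_block_mask.  fb_ptr_advance_column steps 16 pixels
 * (32 in 4x mode) to the right and back up to the first row.
 */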
3509 | #define setup_sprite_tile_column_width_multi(texture_mode, multi_height, \ |
3510 | left_mode, right_mode, x4mode) \ |
3511 | { \ |
3512 | setup_sprite_column_data_##multi_height(); \ |
3513 | s32 fb_ptr_advance_column = (16 - (1024 * height)) \ |
3514 | * FB_PTR_MULTIPLIER##x4mode; \ |
3515 | \ |
3516 | tile_width -= 2; \ |
3517 | left_mask_bits = left_block_mask; \ |
3518 | right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \ |
3519 | \ |
3520 | setup_sprite_tile_column_height_##multi_height(left_mode, right, \ |
3521 | texture_mode, x4mode); \ |
3522 | fb_ptr += fb_ptr_advance_column; \ |
3523 | \ |
3524 | left_mask_bits = 0x00; \ |
3525 | right_mask_bits = 0x00; \ |
3526 | \ |
3527 | while(tile_width) \ |
3528 | { \ |
3529 | setup_sprite_tiled_advance_column(); \ |
3530 | setup_sprite_tile_column_height_##multi_height(full, none, \ |
3531 | texture_mode, x4mode); \ |
3532 | fb_ptr += fb_ptr_advance_column; \ |
3533 | tile_width--; \ |
3534 | } \ |
3535 | \ |
3536 | left_mask_bits = right_block_mask; \ |
3537 | right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \ |
3538 | \ |
3539 | setup_sprite_tiled_advance_column(); \ |
3540 | setup_sprite_tile_column_height_##multi_height(right_mode, left, \ |
3541 | texture_mode, x4mode); \ |
3542 | } \ |
3543 | |
3544 | |
3545 | /* 4x (2x2 output per texel) sprite variants */
3546 | #define setup_sprite_tiled_initialize_4bpp_4x() \ |
3547 | setup_sprite_tiled_initialize_4bpp_clut() \ |
3548 | |
3549 | #define setup_sprite_tiled_initialize_8bpp_4x() \ |
3550 | |
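/*
 * In the 4x variants every source texel becomes a 2x2 block of output
 * pixels: texels are doubled horizontally (gvst2_u16 of the same half for
 * 4bpp, gvzip_u8 of the texel bytes with themselves for 8bpp), and every
 * block is emitted twice, at fb_ptr and at fb_ptr + 1024, for the vertical
 * doubling.
 */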
3551 | #define setup_sprite_tile_full_4bpp_4x(edge) \ |
3552 | { \ |
3553 | vec_8x8u texels_low, texels_high; \ |
3554 | vec_8x16u pixels; \ |
3555 | vec_4x16u pixels_half; \ |
3556 | setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4); \ |
3557 | u32 left_mask_bits_a = left_mask_bits & 0xFF; \ |
3558 | u32 left_mask_bits_b = left_mask_bits >> 8; \ |
3559 | u32 right_mask_bits_a = right_mask_bits & 0xFF; \ |
3560 | u32 right_mask_bits_b = right_mask_bits >> 8; \ |
3561 | \ |
3562 | while(sub_tile_height) \ |
3563 | { \ |
3564 | setup_sprite_tile_fetch_texel_block_8bpp(0); \ |
3565 | gvtbl2_u8(texels_low, clut_low, texels); \ |
3566 | gvtbl2_u8(texels_high, clut_high, texels); \ |
3567 | gvzip_u8(pixels, texels_low, texels_high); \ |
3568 | \ |
3569 | gvget_lo(pixels_half, pixels); \ |
3570 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3571 | block->draw_mask_bits = left_mask_bits_a; \ |
3572 | block->fb_ptr = fb_ptr; \ |
3573 | block++; \ |
3574 | \ |
3575 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3576 | block->draw_mask_bits = left_mask_bits_a; \ |
3577 | block->fb_ptr = fb_ptr + 1024; \ |
3578 | block++; \ |
3579 | \ |
3580 | gvget_hi(pixels_half, pixels); \ |
3581 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3582 | block->draw_mask_bits = left_mask_bits_b; \ |
3583 | block->fb_ptr = fb_ptr + 8; \ |
3584 | block++; \ |
3585 | \ |
3586 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3587 | block->draw_mask_bits = left_mask_bits_b; \ |
3588 | block->fb_ptr = fb_ptr + 1024 + 8; \ |
3589 | block++; \ |
3590 | \ |
3591 | setup_sprite_tile_fetch_texel_block_8bpp(8); \ |
3592 | gvtbl2_u8(texels_low, clut_low, texels); \ |
3593 | gvtbl2_u8(texels_high, clut_high, texels); \ |
3594 | gvzip_u8(pixels, texels_low, texels_high); \ |
3595 | \ |
3596 | gvget_lo(pixels_half, pixels); \ |
3597 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3598 | block->draw_mask_bits = right_mask_bits_a; \ |
3599 | block->fb_ptr = fb_ptr + 16; \ |
3600 | block++; \ |
3601 | \ |
3602 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3603 | block->draw_mask_bits = right_mask_bits_a; \ |
3604 | block->fb_ptr = fb_ptr + 1024 + 16; \ |
3605 | block++; \ |
3606 | \ |
3607 | gvget_hi(pixels_half, pixels); \ |
3608 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3609 | block->draw_mask_bits = right_mask_bits_b; \ |
3610 | block->fb_ptr = fb_ptr + 24; \ |
3611 | block++; \ |
3612 | \ |
3613 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3614 | block->draw_mask_bits = right_mask_bits_b; \ |
3615 | block->fb_ptr = fb_ptr + 1024 + 24; \ |
3616 | block++; \ |
3617 | \ |
3618 | fb_ptr += 2048; \ |
3619 | texture_offset += 0x10; \ |
3620 | sub_tile_height--; \ |
3621 | } \ |
3622 | texture_offset += 0xF00; \ |
3623 | psx_gpu->num_blocks = num_blocks; \ |
3624 | } \ |
3625 | |
3626 | #define setup_sprite_tile_half_4bpp_4x(edge) \ |
3627 | { \ |
3628 | vec_8x8u texels_low, texels_high; \ |
3629 | vec_8x16u pixels; \ |
3630 | vec_4x16u pixels_half; \ |
3631 | setup_sprite_tile_add_blocks(sub_tile_height * 4); \ |
3632 | u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF; \ |
3633 | u32 edge##_mask_bits_b = edge##_mask_bits >> 8; \ |
3634 | \ |
3635 | while(sub_tile_height) \ |
3636 | { \ |
3637 | setup_sprite_tile_fetch_texel_block_8bpp(0); \ |
3638 | gvtbl2_u8(texels_low, clut_low, texels); \ |
3639 | gvtbl2_u8(texels_high, clut_high, texels); \ |
3640 | gvzip_u8(pixels, texels_low, texels_high); \ |
3641 | \ |
3642 | gvget_lo(pixels_half, pixels); \ |
3643 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3644 | block->draw_mask_bits = edge##_mask_bits_a; \ |
3645 | block->fb_ptr = fb_ptr; \ |
3646 | block++; \ |
3647 | \ |
3648 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3649 | block->draw_mask_bits = edge##_mask_bits_a; \ |
3650 | block->fb_ptr = fb_ptr + 1024; \ |
3651 | block++; \ |
3652 | \ |
3653 | gvget_hi(pixels_half, pixels); \ |
3654 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3655 | block->draw_mask_bits = edge##_mask_bits_b; \ |
3656 | block->fb_ptr = fb_ptr + 8; \ |
3657 | block++; \ |
3658 | \ |
3659 | gvst2_u16(pixels_half, pixels_half, block->texels.e); \ |
3660 | block->draw_mask_bits = edge##_mask_bits_b; \ |
3661 | block->fb_ptr = fb_ptr + 1024 + 8; \ |
3662 | block++; \ |
3663 | \ |
3664 | fb_ptr += 2048; \ |
3665 | texture_offset += 0x10; \ |
3666 | sub_tile_height--; \ |
3667 | } \ |
3668 | texture_offset += 0xF00; \ |
3669 | psx_gpu->num_blocks = num_blocks; \ |
3670 | } \ |
3671 | |
3672 | #define setup_sprite_tile_full_8bpp_4x(edge) \ |
3673 | { \ |
3674 | setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4); \ |
3675 | vec_8x16u texels_wide; \ |
3676 | vec_4x16u texels_half; \ |
3677 | u32 left_mask_bits_a = left_mask_bits & 0xFF; \ |
3678 | u32 left_mask_bits_b = left_mask_bits >> 8; \ |
3679 | u32 right_mask_bits_a = right_mask_bits & 0xFF; \ |
3680 | u32 right_mask_bits_b = right_mask_bits >> 8; \ |
3681 | \ |
3682 | while(sub_tile_height) \ |
3683 | { \ |
3684 | setup_sprite_tile_fetch_texel_block_8bpp(0); \ |
3685 | gvzip_u8(texels_wide, texels, texels); \ |
3686 | gvget_lo(texels_half, texels_wide); \ |
3687 | gvst1_u8(texels_half, block->r.e); \ |
3688 | block->draw_mask_bits = left_mask_bits_a; \ |
3689 | block->fb_ptr = fb_ptr; \ |
3690 | block++; \ |
3691 | \ |
3692 | gvst1_u8(texels_half, block->r.e); \ |
3693 | block->draw_mask_bits = left_mask_bits_a; \ |
3694 | block->fb_ptr = fb_ptr + 1024; \ |
3695 | block++; \ |
3696 | \ |
3697 | gvget_hi(texels_half, texels_wide); \ |
3698 | gvst1_u8(texels_half, block->r.e); \ |
3699 | block->draw_mask_bits = left_mask_bits_b; \ |
3700 | block->fb_ptr = fb_ptr + 8; \ |
3701 | block++; \ |
3702 | \ |
3703 | gvst1_u8(texels_half, block->r.e); \ |
3704 | block->draw_mask_bits = left_mask_bits_b; \ |
3705 | block->fb_ptr = fb_ptr + 1024 + 8; \ |
3706 | block++; \ |
3707 | \ |
3708 | setup_sprite_tile_fetch_texel_block_8bpp(8); \ |
3709 | gvzip_u8(texels_wide, texels, texels); \ |
3710 | gvget_lo(texels_half, texels_wide); \ |
3711 | gvst1_u8(texels_half, block->r.e); \ |
3712 | block->draw_mask_bits = right_mask_bits_a; \ |
3713 | block->fb_ptr = fb_ptr + 16; \ |
3714 | block++; \ |
3715 | \ |
3716 | gvst1_u8(texels_half, block->r.e); \ |
3717 | block->draw_mask_bits = right_mask_bits_a; \ |
3718 | block->fb_ptr = fb_ptr + 1024 + 16; \ |
3719 | block++; \ |
3720 | \ |
3721 | gvget_hi(texels_half, texels_wide); \ |
3722 | gvst1_u8(texels_half, block->r.e); \ |
3723 | block->draw_mask_bits = right_mask_bits_b; \ |
3724 | block->fb_ptr = fb_ptr + 24; \ |
3725 | block++; \ |
3726 | \ |
3727 | gvst1_u8(texels_half, block->r.e); \ |
3728 | block->draw_mask_bits = right_mask_bits_b; \ |
3729 | block->fb_ptr = fb_ptr + 24 + 1024; \ |
3730 | block++; \ |
3731 | \ |
3732 | fb_ptr += 2048; \ |
3733 | texture_offset += 0x10; \ |
3734 | sub_tile_height--; \ |
3735 | } \ |
3736 | texture_offset += 0xF00; \ |
3737 | psx_gpu->num_blocks = num_blocks; \ |
3738 | } \ |
3739 | |
3740 | #define setup_sprite_tile_half_8bpp_4x(edge) \ |
3741 | { \ |
3742 | setup_sprite_tile_add_blocks(sub_tile_height * 4); \ |
3743 | vec_8x16u texels_wide; \ |
3744 | vec_4x16u texels_half; \ |
3745 | u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF; \ |
3746 | u32 edge##_mask_bits_b = edge##_mask_bits >> 8; \ |
3747 | \ |
3748 | while(sub_tile_height) \ |
3749 | { \ |
3750 | setup_sprite_tile_fetch_texel_block_8bpp(0); \ |
3751 | gvzip_u8(texels_wide, texels, texels); \ |
3752 | gvget_lo(texels_half, texels_wide); \ |
3753 | gvst1_u8(texels_half, block->r.e); \ |
3754 | block->draw_mask_bits = edge##_mask_bits_a; \ |
3755 | block->fb_ptr = fb_ptr; \ |
3756 | block++; \ |
3757 | \ |
3758 | gvst1_u8(texels_half, block->r.e); \ |
3759 | block->draw_mask_bits = edge##_mask_bits_a; \ |
3760 | block->fb_ptr = fb_ptr + 1024; \ |
3761 | block++; \ |
3762 | \ |
3763 | gvget_hi(texels_half, texels_wide); \ |
3764 | gvst1_u8(texels_half, block->r.e); \ |
3765 | block->draw_mask_bits = edge##_mask_bits_b; \ |
3766 | block->fb_ptr = fb_ptr + 8; \ |
3767 | block++; \ |
3768 | \ |
3769 | gvst1_u8(texels_half, block->r.e); \ |
3770 | block->draw_mask_bits = edge##_mask_bits_b; \ |
3771 | block->fb_ptr = fb_ptr + 8 + 1024; \ |
3772 | block++; \ |
3773 | \ |
3774 | fb_ptr += 2048; \ |
3775 | texture_offset += 0x10; \ |
3776 | sub_tile_height--; \ |
3777 | } \ |
3778 | texture_offset += 0xF00; \ |
3779 | psx_gpu->num_blocks = num_blocks; \ |
3780 | } \ |
3781 | |
3782 | #define setup_sprite_tile_column_edge_pre_adjust_half_right_4x() \ |
3783 | texture_offset = texture_offset_base + 8; \ |
3784 | fb_ptr += 16 \ |
3785 | |
3786 | #define setup_sprite_tile_column_edge_pre_adjust_half_left_4x() \ |
3787 | texture_offset = texture_offset_base \ |
3788 | |
3789 | #define setup_sprite_tile_column_edge_pre_adjust_half_4x(edge) \ |
3790 | setup_sprite_tile_column_edge_pre_adjust_half_##edge##_4x() \ |
3791 | |
3792 | #define setup_sprite_tile_column_edge_pre_adjust_full_4x(edge) \ |
3793 | texture_offset = texture_offset_base \ |
3794 | |
3795 | #define setup_sprite_tile_column_edge_post_adjust_half_right_4x() \ |
3796 | fb_ptr -= 16 \ |
3797 | |
3798 | #define setup_sprite_tile_column_edge_post_adjust_half_left_4x() \ |
3799 | |
3800 | #define setup_sprite_tile_column_edge_post_adjust_half_4x(edge) \ |
3801 | setup_sprite_tile_column_edge_post_adjust_half_##edge##_4x() \ |
3802 | |
3803 | #define setup_sprite_tile_column_edge_post_adjust_full_4x(edge) \ |
3804 | |
3805 | #define setup_sprite_offset_u_adjust() \ |
3806 | |
3807 | #define setup_sprite_compare_left_block_mask() \
3808 | ((left_block_mask & 0xFF) == 0xFF) \ |
3809 | |
3810 | #define setup_sprite_compare_right_block_mask() \
3811 | (((right_block_mask >> 8) & 0xFF) == 0xFF) \ |
3812 | |
3813 | #define setup_sprite_offset_u_adjust_4x() \ |
3814 | offset_u *= 2; \ |
3815 | offset_u_right = offset_u_right * 2 + 1 \ |
3816 | |
3817 | #define setup_sprite_compare_left_block_mask_4x() \
3818 | ((left_block_mask & 0xFFFF) == 0xFFFF) \ |
3819 | |
3820 | #define setup_sprite_compare_right_block_mask_4x() \
3821 | (((right_block_mask >> 16) & 0xFFFF) == 0xFFFF) \ |
3822 | |
3823 | |
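/*
 * Common body for the tiled sprite entry points below.  texture_mask
 * re-packs the texture window masks into the same swizzled layout as
 * texture_offset.  control_mask gathers four flags (single tile column,
 * single tile row, left edge block fully masked, right edge block fully
 * masked) and the switch selects the matching column-walk specialisation.
 */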
3824 | #define setup_sprite_tiled_do(texture_mode, x4mode) \ |
3825 | s32 offset_u = u & 0xF; \ |
3826 | s32 offset_v = v & 0xF; \ |
3827 | \ |
3828 | s32 width_rounded = offset_u + width + 15; \ |
3829 | s32 height_rounded = offset_v + height + 15; \ |
3830 | s32 tile_height = height_rounded / 16; \ |
3831 | s32 tile_width = width_rounded / 16; \ |
3832 | u32 offset_u_right = width_rounded & 0xF; \ |
3833 | \ |
3834 | setup_sprite_offset_u_adjust##x4mode(); \ |
3835 | \ |
3836 | u32 left_block_mask = ~(0xFFFFFFFF << offset_u); \ |
3837 | u32 right_block_mask = 0xFFFFFFFE << offset_u_right; \ |
3838 | \ |
3839 | u32 left_mask_bits; \ |
3840 | u32 right_mask_bits; \ |
3841 | \ |
3842 | u32 sub_tile_height; \ |
3843 | u32 column_data; \ |
3844 | \ |
3845 | u32 texture_mask = (psx_gpu->texture_mask_width & 0xF) | \ |
3846 | ((psx_gpu->texture_mask_height & 0xF) << 4) | \ |
3847 | ((psx_gpu->texture_mask_width >> 4) << 8) | \ |
3848 | ((psx_gpu->texture_mask_height >> 4) << 12); \ |
3849 | u32 texture_offset = ((v & 0xF) << 4) | ((u & 0xF0) << 4) | \ |
3850 | ((v & 0xF0) << 8); \ |
3851 | u32 texture_offset_base = texture_offset; \ |
3852 | u32 control_mask; \ |
3853 | \ |
3854 | u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + (x - offset_u); \ |
3855 | u32 num_blocks = psx_gpu->num_blocks; \ |
3856 | block_struct *block = psx_gpu->blocks + num_blocks; \ |
3857 | \ |
3858 | u16 *texture_block_ptr; \ |
3859 | vec_8x8u texels; \ |
3860 | \ |
3861 | setup_sprite_tiled_initialize_##texture_mode##x4mode(); \ |
3862 | \ |
3863 | control_mask = tile_width == 1; \ |
3864 | control_mask |= (tile_height == 1) << 1; \ |
3865 | control_mask |= setup_sprite_compare_left_block_mask##x4mode() << 2; \
3866 | control_mask |= setup_sprite_compare_right_block_mask##x4mode() << 3; \
3867 | \ |
3868 | switch(control_mask) \ |
3869 | { \ |
3870 | default: \ |
3871 | case 0x0: \ |
3872 | setup_sprite_tile_column_width_multi(texture_mode, multi, full, full, \ |
3873 | x4mode); \ |
3874 | break; \ |
3875 | \ |
3876 | case 0x1: \ |
3877 | setup_sprite_tile_column_width_single(texture_mode, multi, full, none, \ |
3878 | x4mode); \ |
3879 | break; \ |
3880 | \ |
3881 | case 0x2: \ |
3882 | setup_sprite_tile_column_width_multi(texture_mode, single, full, full, \ |
3883 | x4mode); \ |
3884 | break; \ |
3885 | \ |
3886 | case 0x3: \ |
3887 | setup_sprite_tile_column_width_single(texture_mode, single, full, none, \ |
3888 | x4mode); \ |
3889 | break; \ |
3890 | \ |
3891 | case 0x4: \ |
3892 | setup_sprite_tile_column_width_multi(texture_mode, multi, half, full, \ |
3893 | x4mode); \ |
3894 | break; \ |
3895 | \ |
3896 | case 0x5: \ |
3897 | setup_sprite_tile_column_width_single(texture_mode, multi, half, right, \ |
3898 | x4mode); \ |
3899 | break; \ |
3900 | \ |
3901 | case 0x6: \ |
3902 | setup_sprite_tile_column_width_multi(texture_mode, single, half, full, \ |
3903 | x4mode); \ |
3904 | break; \ |
3905 | \ |
3906 | case 0x7: \ |
3907 | setup_sprite_tile_column_width_single(texture_mode, single, half, right, \ |
3908 | x4mode); \ |
3909 | break; \ |
3910 | \ |
3911 | case 0x8: \ |
3912 | setup_sprite_tile_column_width_multi(texture_mode, multi, full, half, \ |
3913 | x4mode); \ |
3914 | break; \ |
3915 | \ |
3916 | case 0x9: \ |
3917 | setup_sprite_tile_column_width_single(texture_mode, multi, half, left, \ |
3918 | x4mode); \ |
3919 | break; \ |
3920 | \ |
3921 | case 0xA: \ |
3922 | setup_sprite_tile_column_width_multi(texture_mode, single, full, half, \ |
3923 | x4mode); \ |
3924 | break; \ |
3925 | \ |
3926 | case 0xB: \ |
3927 | setup_sprite_tile_column_width_single(texture_mode, single, half, left, \ |
3928 | x4mode); \ |
3929 | break; \ |
3930 | \ |
3931 | case 0xC: \ |
3932 | setup_sprite_tile_column_width_multi(texture_mode, multi, half, half, \ |
3933 | x4mode); \ |
3934 | break; \ |
3935 | \ |
3936 | case 0xE: \ |
3937 | setup_sprite_tile_column_width_multi(texture_mode, single, half, half, \ |
3938 | x4mode); \ |
3939 | break; \ |
3940 | } \ |
3941 | |
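/*
 * Entry points: the second argument of setup_sprite_tiled_do is empty for
 * native resolution and _4x for the variants that render each texel as a
 * 2x2 pixel block.
 */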
3942 | void setup_sprite_4bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v, |
3943 | s32 width, s32 height, u32 color) |
3944 | { |
3945 | #if 0 |
3946 | setup_sprite_4bpp_(psx_gpu, x, y, u, v, width, height, color); |
3947 | return; |
3948 | #endif |
3949 | setup_sprite_tiled_do(4bpp,) |
3950 | } |
3951 | |
3952 | void setup_sprite_8bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v, |
3953 | s32 width, s32 height, u32 color) |
3954 | { |
3955 | #if 0 |
3956 | setup_sprite_8bpp_(psx_gpu, x, y, u, v, width, height, color); |
3957 | return; |
3958 | #endif |
3959 | setup_sprite_tiled_do(8bpp,) |
3960 | } |
3961 | |
3962 | #undef draw_mask_fb_ptr_left |
3963 | #undef draw_mask_fb_ptr_right |
3964 | |
3965 | void setup_sprite_4bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v, |
3966 | s32 width, s32 height, u32 color) |
3967 | { |
3968 | #if 0 |
3969 | setup_sprite_4bpp_4x_(psx_gpu, x, y, u, v, width, height, color); |
3970 | return; |
3971 | #endif |
3972 | setup_sprite_tiled_do(4bpp, _4x) |
3973 | } |
3974 | |
3975 | void setup_sprite_8bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v, |
3976 | s32 width, s32 height, u32 color) |
3977 | { |
3978 | #if 0 |
3979 | setup_sprite_8bpp_4x_(psx_gpu, x, y, u, v, width, height, color); |
3980 | return; |
3981 | #endif |
3982 | setup_sprite_tiled_do(8bpp, _4x) |
3983 | } |
3984 | |
3985 | |
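/*
 * Nearest-neighbour 2x upscaler for rows of 8-pixel tiles: gvzipq_u16
 * duplicates each of the 8 source pixels and the doubled row is stored to
 * two consecutive destination lines.  Both buffers are assumed to use a
 * 1024-pixel pitch, the destination consuming two lines per source line.
 */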
3986 | void scale2x_tiles8(void * __restrict__ dst_, const void * __restrict__ src_, int w8, int h) |
3987 | { |
3988 | #if 0 |
df740cdc |
3989 | scale2x_tiles8_(dst_, src_, w8, h); |
a2cb152a |
3990 | return; |
3991 | #endif |
3992 | const u16 * __restrict__ src = src_; |
3993 | const u16 * __restrict__ src1; |
3994 | u16 * __restrict__ dst = dst_; |
3995 | u16 * __restrict__ dst1; |
3996 | gvreg a, b; |
3997 | int w; |
3998 | for (; h > 0; h--, src += 1024, dst += 1024*2) |
3999 | { |
4000 | src1 = src; |
4001 | dst1 = dst; |
4002 | for (w = w8; w > 0; w--, src1 += 8, dst1 += 8*2) |
4003 | { |
4004 | gvld1q_u16(a, src1); |
4005 | gvzipq_u16(a, b, a, a); |
4006 | gvst1q_u16(a, dst1); |
4007 | gvst1q_u16(b, dst1 + 8); |
4008 | gvst1q_u16(a, dst1 + 1024); |
4009 | gvst1q_u16(b, dst1 + 1024 + 8); |
4010 | } |
4011 | } |
4012 | } |
4013 | |
4014 | // vim:ts=2:sw=2:expandtab |