gpu_neon: adjust some comments and things
[pcsx_rearmed.git] / plugins / gpu_neon / psx_gpu / psx_gpu_simd.c
CommitLineData
a2cb152a 1/*
2 * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
 * Copyright (C) 2022 Gražvydas Ignotas "notaz" <notasas@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation; either version 2 of
8 * the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16#include <string.h>
17#include "psx_gpu.h"
18#include "psx_gpu_simd.h"
19//#define ASM_PROTOTYPES
20//#include "psx_gpu_simd.h"
21#ifndef SIMD_BUILD
22#error "please define SIMD_BUILD if you want this gpu_neon C simd implementation"
23#endif
24
// GCC generic-vector element types: gv* are 128-bit ("q") vectors,
// gvh* are 64-bit half-width ("d") vectors; the suffix is the lane type.
typedef u8 gvu8 __attribute__((vector_size(16)));
typedef u16 gvu16 __attribute__((vector_size(16)));
typedef u32 gvu32 __attribute__((vector_size(16)));
typedef u64 gvu64 __attribute__((vector_size(16)));
typedef s8 gvs8 __attribute__((vector_size(16)));
typedef s16 gvs16 __attribute__((vector_size(16)));
typedef s32 gvs32 __attribute__((vector_size(16)));
typedef s64 gvs64 __attribute__((vector_size(16)));

typedef u8 gvhu8 __attribute__((vector_size(8)));
typedef u16 gvhu16 __attribute__((vector_size(8)));
typedef u32 gvhu32 __attribute__((vector_size(8)));
typedef u64 gvhu64 __attribute__((vector_size(8)));
typedef s8 gvhs8 __attribute__((vector_size(8)));
typedef s16 gvhs16 __attribute__((vector_size(8)));
typedef s32 gvhs32 __attribute__((vector_size(8)));
typedef s64 gvhs64 __attribute__((vector_size(8)));
42
// 64-bit vector register, reinterpretable as any lane layout.
// The commented-out scalar/NEON u64/s64 alternatives are kept as a record:
// per the notes further below, they make gcc emit ops on gp registers.
typedef union
{
  gvhu8 u8;
  gvhu16 u16;
  gvhu32 u32;
  gvhu64 u64;
  //u64 u64;
  //uint64x1_t u64;
  gvhs8 s8;
  gvhs16 s16;
  gvhs32 s32;
  gvhs64 s64;
  //s64 s64;
  //int64x1_t s64;
} gvhreg;
58
// 128-bit vector register, reinterpretable as any lane layout.
typedef union
{
  gvu8 u8;
  gvu16 u16;
  gvu32 u32;
  gvu64 u64;
  gvs8 s8;
  gvs16 s16;
  gvs32 s32;
  gvs64 s64;
  // this may be tempting, but it causes gcc to do lots of stack spills
  //gvhreg h[2];
} gvreg;
72
73#if defined(__ARM_NEON) || defined(__ARM_NEON__)
74#include <arm_neon.h>
75
// Thin wrappers mapping the generic gv* operations used by this file onto
// ARM NEON intrinsics.  First argument 'd' is always the destination
// gvreg/gvhreg; the union member picked on each side documents the lane
// type of the underlying instruction.
#define gvaddhn_u32(d, a, b) d.u16 = vaddhn_u32(a.u32, b.u32)
#define gvaddw_s32(d, a, b) d.s64 = vaddw_s32(a.s64, b.s32)
#define gvabsq_s32(d, s) d.s32 = vabsq_s32(s.s32)
#define gvbic_n_u16(d, n) d.u16 = vbic_u16(d.u16, vmov_n_u16(n))
#define gvbifq(d, a, b) d.u8 = vbslq_u8(b.u8, d.u8, a.u8)
#define gvbit(d, a, b) d.u8 = vbsl_u8(b.u8, a.u8, d.u8)
#define gvceqq_u16(d, a, b) d.u16 = vceqq_u16(a.u16, b.u16)
#define gvcgt_s16(d, a, b) d.u16 = vcgt_s16(a.s16, b.s16)
#define gvclt_s16(d, a, b) d.u16 = vclt_s16(a.s16, b.s16)
#define gvcreate_s32(d, a, b) d.s32 = vcreate_s32((u32)(a) | ((u64)(b) << 32))
#define gvcreate_u32(d, a, b) d.u32 = vcreate_u32((u32)(a) | ((u64)(b) << 32))
#define gvcreate_s64(d, s) d.s64 = (gvhs64)vcreate_s64(s)
#define gvcreate_u64(d, s) d.u64 = (gvhu64)vcreate_u64(s)
#define gvcombine_u16(d, l, h) d.u16 = vcombine_u16(l.u16, h.u16)
#define gvcombine_u32(d, l, h) d.u32 = vcombine_u32(l.u32, h.u32)
#define gvcombine_s64(d, l, h) d.s64 = vcombine_s64((int64x1_t)l.s64, (int64x1_t)h.s64)
#define gvdup_l_u8(d, s, l) d.u8 = vdup_lane_u8(s.u8, l)
#define gvdup_l_u16(d, s, l) d.u16 = vdup_lane_u16(s.u16, l)
#define gvdup_l_u32(d, s, l) d.u32 = vdup_lane_u32(s.u32, l)
#define gvdupq_l_s64(d, s, l) d.s64 = vdupq_lane_s64((int64x1_t)s.s64, l)
#define gvdupq_l_u32(d, s, l) d.u32 = vdupq_lane_u32(s.u32, l)
#define gvdup_n_s64(d, n) d.s64 = vdup_n_s64(n)
#define gvdup_n_u8(d, n) d.u8 = vdup_n_u8(n)
#define gvdup_n_u16(d, n) d.u16 = vdup_n_u16(n)
#define gvdup_n_u32(d, n) d.u32 = vdup_n_u32(n)
#define gvdupq_n_u16(d, n) d.u16 = vdupq_n_u16(n)
#define gvdupq_n_u32(d, n) d.u32 = vdupq_n_u32(n)
#define gvdupq_n_s64(d, n) d.s64 = vdupq_n_s64(n)
#define gvhaddq_u16(d, a, b) d.u16 = vhaddq_u16(a.u16, b.u16)
#define gvmax_s16(d, a, b) d.s16 = vmax_s16(a.s16, b.s16)
#define gvmin_s16(d, a, b) d.s16 = vmin_s16(a.s16, b.s16)
#define gvminq_u8(d, a, b) d.u8 = vminq_u8(a.u8, b.u8)
#define gvminq_u16(d, a, b) d.u16 = vminq_u16(a.u16, b.u16)
#define gvmla_s32(d, a, b) d.s32 = vmla_s32(d.s32, a.s32, b.s32)
#define gvmla_u32(d, a, b) d.u32 = vmla_u32(d.u32, a.u32, b.u32)
#define gvmlaq_s32(d, a, b) d.s32 = vmlaq_s32(d.s32, a.s32, b.s32)
#define gvmlaq_u32(d, a, b) d.u32 = vmlaq_u32(d.u32, a.u32, b.u32)
#define gvmlal_s32(d, a, b) d.s64 = vmlal_s32(d.s64, a.s32, b.s32)
#define gvmlal_u8(d, a, b) d.u16 = vmlal_u8(d.u16, a.u8, b.u8)
#define gvmlsq_s32(d, a, b) d.s32 = vmlsq_s32(d.s32, a.s32, b.s32)
#define gvmlsq_l_s32(d, a, b, l) d.s32 = vmlsq_lane_s32(d.s32, a.s32, b.s32, l)
#define gvmov_l_s32(d, s, l) d.s32 = vset_lane_s32(s, d.s32, l)
#define gvmov_l_u32(d, s, l) d.u32 = vset_lane_u32(s, d.u32, l)
#define gvmovl_u8(d, s) d.u16 = vmovl_u8(s.u8)
#define gvmovl_s32(d, s) d.s64 = vmovl_s32(s.s32)
#define gvmovn_u16(d, s) d.u8 = vmovn_u16(s.u16)
#define gvmovn_u32(d, s) d.u16 = vmovn_u32(s.u32)
#define gvmovn_u64(d, s) d.u32 = vmovn_u64(s.u64)
#define gvmul_s32(d, a, b) d.s32 = vmul_s32(a.s32, b.s32)
#define gvmull_s16(d, a, b) d.s32 = vmull_s16(a.s16, b.s16)
#define gvmull_s32(d, a, b) d.s64 = vmull_s32(a.s32, b.s32)
#define gvmull_u8(d, a, b) d.u16 = vmull_u8(a.u8, b.u8)
#define gvmull_l_u32(d, a, b, l) d.u64 = vmull_lane_u32(a.u32, b.u32, l)
#define gvmlsl_s16(d, a, b) d.s32 = vmlsl_s16(d.s32, a.s16, b.s16)
#define gvneg_s32(d, s) d.s32 = vneg_s32(s.s32)
#define gvqadd_u8(d, a, b) d.u8 = vqadd_u8(a.u8, b.u8)
#define gvqsub_u8(d, a, b) d.u8 = vqsub_u8(a.u8, b.u8)
#define gvshl_u16(d, a, b) d.u16 = vshl_u16(a.u16, b.s16)
#define gvshlq_s64(d, a, b) d.s64 = vshlq_s64(a.s64, b.s64)
#define gvshlq_u32(d, a, b) d.u32 = vshlq_u32(a.u32, b.s32)
#define gvshlq_u64(d, a, b) d.u64 = vshlq_u64(a.u64, b.s64)
#define gvshrq_n_s16(d, s, n) d.s16 = vshrq_n_s16(s.s16, n)
#define gvshrq_n_u16(d, s, n) d.u16 = vshrq_n_u16(s.u16, n)
#define gvshl_n_u32(d, s, n) d.u32 = vshl_n_u32(s.u32, n)
#define gvshlq_n_u16(d, s, n) d.u16 = vshlq_n_u16(s.u16, n)
#define gvshlq_n_u32(d, s, n) d.u32 = vshlq_n_u32(s.u32, n)
#define gvshll_n_s8(d, s, n) d.s16 = vshll_n_s8(s.s8, n)
#define gvshll_n_u8(d, s, n) d.u16 = vshll_n_u8(s.u8, n)
#define gvshll_n_u16(d, s, n) d.u32 = vshll_n_u16(s.u16, n)
#define gvshr_n_u8(d, s, n) d.u8 = vshr_n_u8(s.u8, n)
#define gvshr_n_u16(d, s, n) d.u16 = vshr_n_u16(s.u16, n)
#define gvshr_n_u32(d, s, n) d.u32 = vshr_n_u32(s.u32, n)
#define gvshr_n_u64(d, s, n) d.u64 = (gvhu64)vshr_n_u64((uint64x1_t)s.u64, n)
#define gvshrn_n_s64(d, s, n) d.s32 = vshrn_n_s64(s.s64, n)
#define gvshrn_n_u16(d, s, n) d.u8 = vshrn_n_u16(s.u16, n)
#define gvshrn_n_u32(d, s, n) d.u16 = vshrn_n_u32(s.u32, n)
#define gvsli_n_u8(d, s, n) d.u8 = vsli_n_u8(d.u8, s.u8, n)
#define gvsri_n_u8(d, s, n) d.u8 = vsri_n_u8(d.u8, s.u8, n)
#define gvtstq_u16(d, a, b) d.u16 = vtstq_u16(a.u16, b.u16)
#define gvqshrun_n_s16(d, s, n) d.u8 = vqshrun_n_s16(s.s16, n)
#define gvqsubq_u8(d, a, b) d.u8 = vqsubq_u8(a.u8, b.u8)
#define gvqsubq_u16(d, a, b) d.u16 = vqsubq_u16(a.u16, b.u16)

// low/high 64-bit halves of a 128-bit register; gvlo/gvhi are expression
// forms (GCC statement expressions) yielding a gvhreg temporary
#define gvget_lo(d, s) d.u16 = vget_low_u16(s.u16)
#define gvget_hi(d, s) d.u16 = vget_high_u16(s.u16)
#define gvlo(s) ({gvhreg t_; gvget_lo(t_, s); t_;})
#define gvhi(s) ({gvhreg t_; gvget_hi(t_, s); t_;})
163
// replace only one 64-bit half of a 128-bit register
#define gvset_lo(d, s) d.u16 = vcombine_u16(s.u16, gvhi(d).u16)
#define gvset_hi(d, s) d.u16 = vcombine_u16(gvlo(d).u16, s.u16)

// table lookup / interleave helpers
#define gvtbl2_u8(d, a, b) { \
  uint8x8x2_t v_; \
  v_.val[0] = vget_low_u8(a.u8); v_.val[1] = vget_high_u8(a.u8); \
  d.u8 = vtbl2_u8(v_, b.u8); \
}

#define gvzip_u8(d, a, b) { \
  uint8x8x2_t v_ = vzip_u8(a.u8, b.u8); \
  d.u8 = vcombine_u8(v_.val[0], v_.val[1]); \
}
#define gvzipq_u16(d0, d1, s0, s1) { \
  uint16x8x2_t v_ = vzipq_u16(s0.u16, s1.u16); \
  d0.u16 = v_.val[0]; d1.u16 = v_.val[1]; \
}

// loads; the _dup and q2 variants fill two destination registers
#define gvld1_u8(d, s) d.u8 = vld1_u8(s)
#define gvld1_u32(d, s) d.u32 = vld1_u32((const u32 *)(s))
#define gvld1q_u8(d, s) d.u8 = vld1q_u8(s)
#define gvld1q_u16(d, s) d.u16 = vld1q_u16(s)
#define gvld1q_u32(d, s) d.u32 = vld1q_u32((const u32 *)(s))
#define gvld2_dup(v0, v1, p) { \
  uint8x8x2_t v_ = vld2_dup_u8(p); \
  v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \
}
#define gvld2q_u8(v0, v1, p) { \
  uint8x16x2_t v_ = vld2q_u8(p); \
  v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \
}

// stores; _inc/_pi ("post-increment") variants also advance the pointer
#define gvst1_u8(v, p) \
  vst1_u8(p, v.u8)
#define gvst1q_u16(v, p) \
  vst1q_u16(p, v.u16)
#define gvst1q_inc_u32(v, p, i) { \
  vst1q_u32((u32 *)(p), v.u32); \
  p += (i) / sizeof(*p); \
}
#define gvst2_u8(v0, v1, p) { \
  uint8x8x2_t v_; \
  v_.val[0] = v0.u8; v_.val[1] = v1.u8; \
  vst2_u8(p, v_); \
}
#define gvst2_u16(v0, v1, p) { \
  uint16x4x2_t v_; \
  v_.val[0] = v0.u16; v_.val[1] = v1.u16; \
  vst2_u16(p, v_); \
}
#define gvst2q_u8(v0, v1, p) { \
  uint8x16x2_t v_; \
  v_.val[0] = v0.u8; v_.val[1] = v1.u8; \
  vst2q_u8(p, v_); \
}
#define gvst4_4_inc_u32(v0, v1, v2, v3, p, i) { \
  uint32x2x4_t v_; \
  v_.val[0] = v0.u32; v_.val[1] = v1.u32; v_.val[2] = v2.u32; v_.val[3] = v3.u32; \
  vst4_u32(p, v_); p += (i) / sizeof(*p); \
}
#define gvst4_pi_u16(v0, v1, v2, v3, p) { \
  uint16x4x4_t v_; \
  v_.val[0] = v0.u16; v_.val[1] = v1.u16; v_.val[2] = v2.u16; v_.val[3] = v3.u16; \
  vst4_u16((u16 *)(p), v_); p += sizeof(v_) / sizeof(*p); \
}
#define gvst1q_pi_u32(v, p) \
  gvst1q_inc_u32(v, p, sizeof(v))
// could use vst1q_u32_x2 but that's not always available
#define gvst1q_2_pi_u32(v0, v1, p) { \
  gvst1q_inc_u32(v0, p, sizeof(v0)); \
  gvst1q_inc_u32(v1, p, sizeof(v1)); \
}
236
237/* notes:
238 - gcc > 9: (arm32) int64x1_t type produces ops on gp regs
239 (also u64 __attribute__((vector_size(8))) :( )
240 - gcc <11: (arm32) handles '<vec> == 0' poorly
241*/
242
243/*
244#elif defined(__SSE2__)
245#include <x86intrin.h>
246*/
247#else
248#error "arch not supported or SIMD support was not enabled by your compiler"
249#endif
250
// the below have intrinsics but they evaluate to basic operations on both gcc and clang
// (the q aliases work for both register widths because GCC vector-extension
// operators are element-wise regardless of vector size)
#define gvadd_s64(d, a, b) d.s64 = a.s64 + b.s64
#define gvadd_u8(d, a, b) d.u8 = a.u8 + b.u8
#define gvadd_u16(d, a, b) d.u16 = a.u16 + b.u16
#define gvadd_u32(d, a, b) d.u32 = a.u32 + b.u32
#define gvaddq_s64 gvadd_s64
#define gvaddq_u16 gvadd_u16
#define gvaddq_u32 gvadd_u32
#define gvand(d, a, b) d.u32 = a.u32 & b.u32
#define gvbic(d, a, b) d.u32 = a.u32 & ~b.u32
#define gvbicq gvbic
#define gveor(d, a, b) d.u32 = a.u32 ^ b.u32
#define gveorq gveor
#define gvceqz_u16(d, s) d.u16 = s.u16 == 0
#define gvceqzq_u16 gvceqz_u16
#define gvcltz_s16(d, s) d.s16 = s.s16 < 0
#define gvcltzq_s16 gvcltz_s16
#define gvsub_u16(d, a, b) d.u16 = a.u16 - b.u16
#define gvsub_u32(d, a, b) d.u32 = a.u32 - b.u32
#define gvsubq_u16 gvsub_u16
#define gvsubq_u32 gvsub_u32
#define gvorr(d, a, b) d.u32 = a.u32 | b.u32
#define gvorrq gvorr
274
#if defined(__arm__)

// arm32: use the DSP instructions directly.
// ssub16: per-halfword subtract of two packed 16-bit pairs.
// smusdx: dual 16x16 multiply with exchanged halves, then subtract:
//         d = lo(a)*hi(b) - hi(a)*lo(b)
#define gssub16(d, a, b) asm("ssub16 %0,%1,%2" : "=r"(d) : "r"(a), "r"(b))
#define gsmusdx(d, a, b) asm("smusdx %0,%1,%2" : "=r"(d) : "r"(a), "r"(b))

#if 0
// disabled experiment forcing the vadd.i64 encoding, see gcc/config/arm/arm.c
#undef gvadd_s64
#define gvadd_s64(d, a, b) asm("vadd.i64 %P0,%P1,%P2" : "=w"(d.s64) : "w"(a.s64), "w"(b.s64))
#endif

#else

// portable equivalents of the arm32 ssub16/smusdx instructions above
#define gssub16(d, a, b) d = (u16)((a) - (b)) | ((((a) >> 16) - ((b) >> 16)) << 16)
#define gsmusdx(d, a, b) d = ((s32)(s16)(a) * ((s32)(b) >> 16)) \
                           - (((s32)(a) >> 16) * (s16)(b))

#endif
293
// for compatibility with the original psx_gpu.c code
// (names encode lane-count x lane-width: 128-bit shapes map to gvreg,
// 64-bit shapes to gvhreg; the alias carries no type checking by itself)
#define vec_2x64s gvreg
#define vec_2x64u gvreg
#define vec_4x32s gvreg
#define vec_4x32u gvreg
#define vec_8x16s gvreg
#define vec_8x16u gvreg
#define vec_16x8s gvreg
#define vec_16x8u gvreg
#define vec_1x64s gvhreg
#define vec_1x64u gvhreg
#define vec_2x32s gvhreg
#define vec_2x32u gvhreg
#define vec_4x16s gvhreg
#define vec_4x16u gvhreg
#define vec_8x8s gvhreg
#define vec_8x8u gvhreg
311
312#if 0
313#include <stdio.h>
314#include <stdlib.h>
315#include <unistd.h>
aafce833 316static int ccount, dump_enabled;
a2cb152a 317void cmpp(const char *name, const void *a_, const void *b_, size_t len)
318{
319 const uint32_t *a = a_, *b = b_, masks[] = { 0, 0xff, 0xffff, 0xffffff };
320 size_t i, left;
321 uint32_t mask;
322 for (i = 0; i < (len + 3)/4; i++) {
323 left = len - i*4;
324 mask = left >= 4 ? ~0u : masks[left];
325 if ((a[i] ^ b[i]) & mask) {
326 printf("%s: %08x %08x [%03zx/%zu] #%d\n",
327 name, a[i] & mask, b[i] & mask, i*4, i, ccount);
328 exit(1);
329 }
330 }
331 ccount++;
332}
// compare a psx_gpu field (or an array of c elements) against its *_c shadow
#define ccmpf(n) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c))
#define ccmpa(n,c) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c[0]) * c)
335
336void dump_r_(const char *name, void *dump, int is_q)
337{
338 unsigned long long *u = dump;
aafce833 339 if (!dump_enabled) return;
a2cb152a 340 //if (ccount > 1) return;
aafce833 341 printf("%20s %016llx ", name, u[0]);
a2cb152a 342 if (is_q)
343 printf("%016llx", u[1]);
344 puts("");
345}
// noinline+noclone keep these as real out-of-line calls — presumably so the
// dump call sites disturb the surrounding code generation as little as
// possible (TODO confirm intent against the asm version)
void __attribute__((noinline,noclone)) dump_r_d(const char *name, void *dump)
{ dump_r_(name, dump, 0); }
void __attribute__((noinline,noclone)) dump_r_q(const char *name, void *dump)
{ dump_r_(name, dump, 1); }
// dump a 64-bit (d) or 128-bit (q) vector register via a stack buffer
#define dumprd(n) { u8 dump_[8]; gvst1_u8(n, dump_); dump_r_d(#n, dump_); }
#define dumprq(n) { u16 dump_[8]; gvst1q_u16(n, dump_); dump_r_q(#n, dump_); }
352#endif
353
// Compute the x/y gradients (per-pixel deltas) of the interpolated u, v, r,
// g and b attributes for triangle (a, b, c) and write them, together with
// per-block step tables, into psx_gpu->uvrg.e and the words that follow it.
// Division by the triangle area is done once via an IEEE-754 double built
// bit-by-bit; the b (blue) channel is handled in scalar registers while
// u/v/r/g go through the vector unit.  Scalar and vector statements are
// deliberately interleaved to mirror the scheduling of the hand-written
// NEON asm this was derived from (comments such as "movs"/"rsbmi" name the
// corresponding arm instructions).
void compute_all_gradients(psx_gpu_struct * __restrict__ psx_gpu,
  const vertex_struct * __restrict__ a, const vertex_struct * __restrict__ b,
  const vertex_struct * __restrict__ c)
{
  union { double d; struct { u32 l; u32 h; } i; } divident, divider;
  union { double d; gvhreg v; } d30;

#if 0
  // disabled: defer to the out-of-line implementation (debug aid)
  compute_all_gradients_(psx_gpu, a, b, c);
  return;
#endif
  // First compute the triangle area reciprocal and shift. The division will
  // happen concurrently with much of the work which follows.

  // load exponent of 62 into upper half of double
  u32 shift = __builtin_clz(psx_gpu->triangle_area);
  u32 triangle_area_normalized = psx_gpu->triangle_area << shift;

  // load area normalized into lower half of double
  divident.i.l = triangle_area_normalized >> 10;
  divident.i.h = (62 + 1023) << 20;

  divider.i.l = triangle_area_normalized << 20;
  divider.i.h = ((1022 + 31) << 20) + (triangle_area_normalized >> 11);

  d30.d = divident.d / divider.d;       // d30 = ((1 << 62) + ta_n) / ta_n

  // ((x1 - x0) * (y2 - y1)) - ((x2 - x1) * (y1 - y0)) =
  // (    d0    *    d1    ) - (    d2    *    d3    ) =
  // (         m0          ) - (         m1          ) = gradient

  // This is split to do 12 elements at a time over three sets: a, b, and c.
  // Technically we only need to do 10 elements (uvrgb_x and uvrgb_y), so
  // two of the slots are unused.

  // Inputs are all 16-bit signed. The m0/m1 results are 32-bit signed, as
  // is g.

  // First type is:  uvrg bxxx xxxx
  // Second type is: yyyy ybyy uvrg
  // Since x_a and y_c are the same the same variable is used for both.

  gvreg v0;
  gvreg v1;
  gvreg v2;
  gvreg uvrg_xxxx0;
  gvreg uvrg_xxxx1;
  gvreg uvrg_xxxx2;

  gvreg y0_ab;
  gvreg y1_ab;
  gvreg y2_ab;

  gvreg d0_ab;
  gvreg d1_ab;
  gvreg d2_ab;
  gvreg d3_ab;

  gvreg ga_uvrg_x;
  gvreg ga_uvrg_y;
  gvreg gw_rg_x;
  gvreg gw_rg_y;
  gvreg w_mask;
  gvreg r_shift;
  gvreg uvrg_dx2, uvrg_dx3;
  gvreg uvrgb_phase;
  gvhreg zero, tmp_lo, tmp_hi;

  gvld1q_u8(v0, (u8 *)a);               // v0 = { uvrg0, b0, x0, y0 }
  gvld1q_u8(v1, (u8 *)b);               // v1 = { uvrg1, b1, x1, y1 }
  gvld1q_u8(v2, (u8 *)c);               // v2 = { uvrg2, b2, x2, y2 }

  // widen the u8 attributes to u16 lanes
  gvmovl_u8(uvrg_xxxx0, gvlo(v0));      // uvrg_xxxx0 = { uv0, rg0, b0-, -- }
  gvmovl_u8(uvrg_xxxx1, gvlo(v1));      // uvrg_xxxx1 = { uv1, rg1, b1-, -- }
  gvmovl_u8(uvrg_xxxx2, gvlo(v2));      // uvrg_xxxx2 = { uv2, rg2, b2-, -- }

  gvdup_l_u16(tmp_lo, gvhi(v0), 1);     // yyyy0 = { yy0, yy0 }
  gvcombine_u16(y0_ab, tmp_lo, gvlo(uvrg_xxxx0));

  gvdup_l_u16(tmp_lo, gvhi(v0), 0);     // xxxx0 = { xx0, xx0 }
  gvset_hi(uvrg_xxxx0, tmp_lo);

  u32 x1_x2 = (u16)b->x | (c->x << 16); // x1_x2 = { x1, x2 }
  u32 x0_x1 = (u16)a->x | (b->x << 16); // x0_x1 = { x0, x1 }

  gvdup_l_u16(tmp_lo, gvhi(v1), 1);     // yyyy1 = { yy1, yy1 }
  gvcombine_u16(y1_ab, tmp_lo, gvlo(uvrg_xxxx1));

  gvdup_l_u16(tmp_lo, gvhi(v1), 0);     // xxxx1 = { xx1, xx1 }
  gvset_hi(uvrg_xxxx1, tmp_lo);

  gvdup_l_u16(tmp_lo, gvhi(v2), 1);     // yyyy2 = { yy2, yy2 }
  gvcombine_u16(y2_ab, tmp_lo, gvlo(uvrg_xxxx2));

  gvdup_l_u16(tmp_lo, gvhi(v2), 0);     // xxxx2 = { xx2, xx2 }
  gvset_hi(uvrg_xxxx2, tmp_lo);

  u32 y0_y1 = (u16)a->y | (b->y << 16); // y0_y1 = { y0, y1 }
  u32 y1_y2 = (u16)b->y | (c->y << 16); // y1_y2 = { y1, y2 }

  gvsubq_u16(d0_ab, uvrg_xxxx1, uvrg_xxxx0);

  u32 b1_b2 = b->b | (c->b << 16);      // b1_b2 = { b1, b2 }

  gvsubq_u16(d2_ab, uvrg_xxxx2, uvrg_xxxx1);

  gvsubq_u16(d1_ab, y2_ab, y1_ab);

  u32 b0_b1 = a->b | (b->b << 16);      // b0_b1 = { b0, b1 }

  u32 dx, dy, db;
  gssub16(dx, x1_x2, x0_x1);            // dx = { x1 - x0, x2 - x1 }
  gssub16(dy, y1_y2, y0_y1);            // dy = { y1 - y0, y2 - y1 }
  gssub16(db, b1_b2, b0_b1);            // db = { b1 - b0, b2 - b1 }

  u32 ga_by, ga_bx;
  gvsubq_u16(d3_ab, y1_ab, y0_ab);
  gsmusdx(ga_by, dx, db);               // ga_by = ((x1 - x0) * (b2 - b1)) -
                                        //         ((x2 - X1) * (b1 - b0))
  gvmull_s16(ga_uvrg_x, gvlo(d0_ab), gvlo(d1_ab));
  gsmusdx(ga_bx, db, dy);               // ga_bx = ((b1 - b0) * (y2 - y1)) -
                                        //         ((b2 - b1) * (y1 - y0))
  gvmlsl_s16(ga_uvrg_x, gvlo(d2_ab), gvlo(d3_ab));
  u32 gs_bx = (s32)ga_bx >> 31;         // movs

  gvmull_s16(ga_uvrg_y, gvhi(d0_ab), gvhi(d1_ab));
  if ((s32)gs_bx < 0) ga_bx = -ga_bx;   // rsbmi

  gvmlsl_s16(ga_uvrg_y, gvhi(d2_ab), gvhi(d3_ab));
  u32 gs_by = (s32)ga_by >> 31;         // movs

  gvhreg d0;
  gvshr_n_u64(d0, d30.v, 22);           // note: on "d30 >> 22" gcc generates junk code

  gvdupq_n_u32(uvrgb_phase, psx_gpu->uvrgb_phase);
  u32 b_base = psx_gpu->uvrgb_phase + (a->b << 16);

  if ((s32)gs_by < 0) ga_by = -ga_by;   // rsbmi
  gvreg gs_uvrg_x, gs_uvrg_y;
  gs_uvrg_x.s32 = ga_uvrg_x.s32 < 0;    // gs_uvrg_x = ga_uvrg_x < 0
  gs_uvrg_y.s32 = ga_uvrg_y.s32 < 0;    // gs_uvrg_y = ga_uvrg_y < 0

  gvdupq_n_u32(w_mask, -psx_gpu->triangle_winding); // w_mask = { -w, -w, -w, -w }
  shift -= 62 - 12;                     // shift -= (62 - FIXED_BITS)

  gvreg uvrg_base;
  gvshll_n_u16(uvrg_base, gvlo(uvrg_xxxx0), 16); // uvrg_base = uvrg0 << 16
  gvdupq_n_s64(r_shift, shift);         // r_shift = { shift, shift }

  gvaddq_u32(uvrg_base, uvrg_base, uvrgb_phase);
  gvabsq_s32(ga_uvrg_x, ga_uvrg_x);     // ga_uvrg_x = abs(ga_uvrg_x)

  u32 area_r_s = d0.u32[0];             // area_r_s = triangle_reciprocal
  gvabsq_s32(ga_uvrg_y, ga_uvrg_y);     // ga_uvrg_y = abs(ga_uvrg_y)

  // multiply the absolute gradients by the area reciprocal, then scale
  gvmull_l_u32(gw_rg_x, gvhi(ga_uvrg_x), d0, 0);
  gvmull_l_u32(ga_uvrg_x, gvlo(ga_uvrg_x), d0, 0);
  gvmull_l_u32(gw_rg_y, gvhi(ga_uvrg_y), d0, 0);
  gvmull_l_u32(ga_uvrg_y, gvlo(ga_uvrg_y), d0, 0);

  gvshlq_u64(gw_rg_x, gw_rg_x, r_shift);
  gvshlq_u64(ga_uvrg_x, ga_uvrg_x, r_shift);
  gvshlq_u64(gw_rg_y, gw_rg_y, r_shift);
  gvshlq_u64(ga_uvrg_y, ga_uvrg_y, r_shift);

  // re-apply the sign (xor/subtract pairs), folded with triangle winding
  gveorq(gs_uvrg_x, gs_uvrg_x, w_mask);
  gvmovn_u64(tmp_lo, ga_uvrg_x);

  gveorq(gs_uvrg_y, gs_uvrg_y, w_mask);
  gvmovn_u64(tmp_hi, gw_rg_x);

  gvcombine_u32(ga_uvrg_x, tmp_lo, tmp_hi);

  gveorq(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x);
  gvmovn_u64(tmp_lo, ga_uvrg_y);

  gvsubq_u32(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x);
  gvmovn_u64(tmp_hi, gw_rg_y);

  gvcombine_u32(ga_uvrg_y, tmp_lo, tmp_hi);

  gveorq(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y);
  ga_bx = ga_bx << 13;

  gvsubq_u32(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y);
  ga_by = ga_by << 13;

  u32 gw_bx_h, gw_by_h;
  gw_bx_h = (u64)ga_bx * area_r_s >> 32;

  gvshlq_n_u32(ga_uvrg_x, ga_uvrg_x, 4);
  gvshlq_n_u32(ga_uvrg_y, ga_uvrg_y, 4);

  gw_by_h = (u64)ga_by * area_r_s >> 32;
  gvdup_n_u32(tmp_lo, a->x);
  gvmlsq_l_s32(uvrg_base, ga_uvrg_x, tmp_lo, 0);

  gs_bx = gs_bx ^ -psx_gpu->triangle_winding;
  gvaddq_u32(uvrg_dx2, ga_uvrg_x, ga_uvrg_x);

  gs_by = gs_by ^ -psx_gpu->triangle_winding;

  u32 r11 = -shift;                     // r11 = negative shift for scalar lsr
  u32 *store_a = psx_gpu->uvrg.e;
  r11 = r11 - (32 - 13);
  u32 *store_b = store_a + 16 / sizeof(u32);

  // write out results: store_a/store_b walk psx_gpu->uvrg.e and the
  // gradient/step slots that follow it
  gvaddq_u32(uvrg_dx3, uvrg_dx2, ga_uvrg_x);
  gvst1q_inc_u32(uvrg_base, store_a, 32);

  gvst1q_inc_u32(ga_uvrg_x, store_b, 32);
  u32 g_bx = (u32)gw_bx_h >> r11;

  gvst1q_inc_u32(ga_uvrg_y, store_a, 32);
  u32 g_by = (u32)gw_by_h >> r11;

  gvdup_n_u32(zero, 0);

  // per-block x-step tables: { 0, dx, 2*dx, 3*dx } for each channel
  gvst4_4_inc_u32(zero, gvlo(ga_uvrg_x), gvlo(uvrg_dx2), gvlo(uvrg_dx3), store_b, 32);
  g_bx = g_bx ^ gs_bx;

  gvst4_4_inc_u32(zero, gvhi(ga_uvrg_x), gvhi(uvrg_dx2), gvhi(uvrg_dx3), store_b, 32);
  g_bx = g_bx - gs_bx;

  g_bx = g_bx << 4;
  g_by = g_by ^ gs_by;

  b_base -= g_bx * a->x;
  g_by = g_by - gs_by;

  g_by = g_by << 4;

  u32 g_bx2 = g_bx + g_bx;
  u32 g_bx3 = g_bx + g_bx2;

  // b channel step table and base; the numeric comments below appear to be
  // byte offsets carried over from the asm version — confirm against it
  // 112
  store_b[0] = 0;
  store_b[1] = g_bx;
  store_b[2] = g_bx2;
  store_b[3] = g_bx3;
  store_b[4] = b_base;
  store_b[5] = g_by;                    // 132
}
597
// setup_spans_*: build the per-scanline span tables (span_edge_data,
// span_uvrg_offset, span_b_offset) for a triangle, walking its edges four
// rows at a time.  The "alternate" variants additionally track a second
// edge (the height_minor_* one) in scalar 64-bit fixed point.
#define setup_spans_debug_check(span_edge_data_element) \

#define setup_spans_prologue_alternate_yes() \
  vec_2x64s alternate_x; \
  vec_2x64s alternate_dx_dy; \
  vec_4x32s alternate_x_32; \
  vec_4x16u alternate_x_16; \
  \
  vec_4x16u alternate_select; \
  vec_4x16s y_mid_point; \
  \
  s32 y_b = v_b->y; \
  s64 edge_alt; \
  s32 edge_dx_dy_alt; \
  u32 edge_shift_alt \

#define setup_spans_prologue_alternate_no() \

// common local state: edge x accumulators (64-bit fixed point, two rows per
// vector), per-edge steps, interpolant registers and small constants
#define setup_spans_prologue(alternate_active) \
  edge_data_struct *span_edge_data; \
  vec_4x32u *span_uvrg_offset; \
  u32 *span_b_offset; \
  \
  s32 clip; \
  vec_4x32u v_clip; \
  \
  union { vec_2x64s full; vec_1x64s h[2]; } edges_xy; \
  vec_2x32s edges_dx_dy; \
  vec_2x32u edge_shifts; \
  \
  vec_2x64s left_x, right_x; \
  vec_2x64s left_dx_dy, right_dx_dy; \
  vec_4x32s left_x_32, right_x_32; \
  vec_2x32s left_x_32_lo, right_x_32_lo; \
  vec_2x32s left_x_32_hi, right_x_32_hi; \
  vec_4x16s left_right_x_16_lo, left_right_x_16_hi; \
  vec_4x16s y_x4; \
  vec_8x16s left_edge; \
  vec_8x16s right_edge; \
  vec_4x16u span_shift; \
  \
  vec_2x32u c_0x01; \
  vec_4x16u c_0x04; \
  vec_4x16u c_0xFFFE; \
  vec_4x16u c_0x07; \
  \
  vec_2x32s x_starts; \
  vec_2x32s x_ends; \
  \
  s32 x_a = v_a->x; \
  s32 x_b = v_b->x; \
  s32 x_c = v_c->x; \
  s32 y_a = v_a->y; \
  s32 y_c = v_c->y; \
  \
  vec_4x32u uvrg; \
  vec_4x32u uvrg_dy; \
  u32 b = psx_gpu->b; \
  u32 b_dy = psx_gpu->b_dy; \
  const u32 *reciprocal_table = psx_gpu->reciprocal_table_ptr; \
  \
  gvld1q_u32(uvrg, psx_gpu->uvrg.e); \
  gvld1q_u32(uvrg_dy, psx_gpu->uvrg_dy.e); \
  gvdup_n_u32(c_0x01, 0x01); \
  setup_spans_prologue_alternate_##alternate_active() \

// second part of the prologue: span output pointers and viewport clamps
// (right_edge is viewport_end_x + 1, i.e. exclusive)
#define setup_spans_prologue_b() \
  span_edge_data = psx_gpu->span_edge_data; \
  span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \
  span_b_offset = psx_gpu->span_b_offset; \
  \
  vec_8x16u c_0x0001; \
  \
  gvdupq_n_u16(c_0x0001, 0x0001); \
  gvdupq_n_u16(left_edge, psx_gpu->viewport_start_x); \
  gvdupq_n_u16(right_edge, psx_gpu->viewport_end_x); \
  gvaddq_u16(right_edge, right_edge, c_0x0001); \
  gvdup_n_u16(c_0x04, 0x04); \
  gvdup_n_u16(c_0x07, 0x07); \
  gvdup_n_u16(c_0xFFFE, 0xFFFE); \

679
// Compute each edge's starting x and per-row step (dx/dy) in fixed point
// without dividing: reciprocal_table[height] packs a fixed-point 1/height
// in its upper bits (extracted with >> 10) and a normalization shift in the
// low 5 bits (extracted with & 0x1F / bic 0xE0).  The x2 variant handles
// two edges in vector lanes; x3 additionally computes the third ("alt")
// minor edge in scalar registers.
#define compute_edge_delta_x2() \
{ \
  vec_2x32s heights; \
  vec_2x32s height_reciprocals; \
  vec_2x32s heights_b; \
  vec_2x32u widths; \
  \
  u32 edge_shift = reciprocal_table[height]; \
  \
  gvdup_n_u32(heights, height); \
  gvsub_u32(widths, x_ends, x_starts); \
  \
  gvdup_n_u32(edge_shifts, edge_shift); \
  gvsub_u32(heights_b, heights, c_0x01); \
  gvshr_n_u32(height_reciprocals, edge_shifts, 10); \
  \
  gvmla_s32(heights_b, x_starts, heights); \
  gvbic_n_u16(edge_shifts, 0xE0); \
  gvmul_s32(edges_dx_dy, widths, height_reciprocals); \
  gvmull_s32(edges_xy.full, heights_b, height_reciprocals); \
} \

#define compute_edge_delta_x3(start_c, height_a, height_b) \
{ \
  vec_2x32s heights; \
  vec_2x32s height_reciprocals; \
  vec_2x32s heights_b; \
  vec_2x32u widths; \
  \
  u32 width_alt; \
  s32 height_b_alt; \
  u32 height_reciprocal_alt; \
  \
  gvcreate_u32(heights, height_a, height_b); \
  gvcreate_u32(edge_shifts, reciprocal_table[height_a], reciprocal_table[height_b]); \
  \
  edge_shift_alt = reciprocal_table[height_minor_b]; \
  \
  gvsub_u32(widths, x_ends, x_starts); \
  width_alt = x_c - start_c; \
  \
  gvshr_n_u32(height_reciprocals, edge_shifts, 10); \
  height_reciprocal_alt = edge_shift_alt >> 10; \
  \
  gvbic_n_u16(edge_shifts, 0xE0); \
  edge_shift_alt &= 0x1F; \
  \
  gvsub_u32(heights_b, heights, c_0x01); \
  height_b_alt = height_minor_b - 1; \
  \
  gvmla_s32(heights_b, x_starts, heights); \
  height_b_alt += height_minor_b * start_c; \
  \
  gvmull_s32(edges_xy.full, heights_b, height_reciprocals); \
  edge_alt = (s64)height_b_alt * height_reciprocal_alt; \
  \
  gvmul_s32(edges_dx_dy, widths, height_reciprocals); \
  edge_dx_dy_alt = width_alt * height_reciprocal_alt; \
} \

740
// Step the four packed y coordinates by 4 rows (one span quad).
// Must use 16-bit lane arithmetic: y_x4 is a vec_4x16s and c_0x04 is filled
// per-u16-lane with gvdup_n_u16, so 32-bit lane adds/subs would let a
// carry/borrow from the low 16-bit half corrupt the neighboring y value.
#define setup_spans_adjust_y_up() \
  gvsub_u16(y_x4, y_x4, c_0x04) \

#define setup_spans_adjust_y_down() \
  gvadd_u16(y_x4, y_x4, c_0x04) \
746
// step the u/v/r/g interpolants (vector) and b (scalar) by one row
#define setup_spans_adjust_interpolants_up() \
  gvsubq_u32(uvrg, uvrg, uvrg_dy); \
  b -= b_dy \

#define setup_spans_adjust_interpolants_down() \
  gvaddq_u32(uvrg, uvrg, uvrg_dy); \
  b += b_dy \


// advance the interpolants by 'clip' rows in one multiply-accumulate
#define setup_spans_clip_interpolants_increment() \
  gvmlaq_s32(uvrg, uvrg_dy, v_clip); \
  b += b_dy * clip \

#define setup_spans_clip_interpolants_decrement() \
  gvmlsq_s32(uvrg, uvrg_dy, v_clip); \
  b -= b_dy * clip \

#define setup_spans_clip_alternate_yes() \
  edge_alt += edge_dx_dy_alt * (s64)(clip) \

#define setup_spans_clip_alternate_no() \

// jump the edge x accumulators and interpolants over 'clip' clipped rows
#define setup_spans_clip(direction, alternate_active) \
{ \
  gvdupq_n_u32(v_clip, clip); \
  gvmlal_s32(edges_xy.full, edges_dx_dy, gvlo(v_clip)); \
  setup_spans_clip_alternate_##alternate_active(); \
  setup_spans_clip_interpolants_##direction(); \
} \

776
777
// Normalize the edge accumulators and load left_x/right_x with two
// consecutive rows' positions ({x, x + dx_dy}); the per-step increments are
// then doubled so each loop iteration advances two rows per 64-bit lane.
#define setup_spans_adjust_edges_alternate_no(left_index, right_index) \
{ \
  vec_2x64s edge_shifts_64; \
  union { vec_2x64s full; vec_1x64s h[2]; } edges_dx_dy_64; \
  vec_1x64s left_x_hi, right_x_hi; \
  \
  gvmovl_s32(edge_shifts_64, edge_shifts); \
  gvshlq_s64(edges_xy.full, edges_xy.full, edge_shifts_64); \
  \
  gvmovl_s32(edges_dx_dy_64.full, edges_dx_dy); \
  gvshlq_s64(edges_dx_dy_64.full, edges_dx_dy_64.full, edge_shifts_64); \
  \
  gvdupq_l_s64(left_x, edges_xy.h[left_index], 0); \
  gvdupq_l_s64(right_x, edges_xy.h[right_index], 0); \
  \
  gvdupq_l_s64(left_dx_dy, edges_dx_dy_64.h[left_index], 0); \
  gvdupq_l_s64(right_dx_dy, edges_dx_dy_64.h[right_index], 0); \
  \
  gvadd_s64(left_x_hi, gvlo(left_x), gvlo(left_dx_dy)); \
  gvadd_s64(right_x_hi, gvlo(right_x), gvlo(right_dx_dy)); \
  \
  gvset_hi(left_x, left_x_hi); \
  gvset_hi(right_x, right_x_hi); \
  \
  gvaddq_s64(left_dx_dy, left_dx_dy, left_dx_dy); \
  gvaddq_s64(right_dx_dy, right_dx_dy, right_dx_dy); \
} \

// same, plus the scalar "alternate" edge spread into a vector pair
#define setup_spans_adjust_edges_alternate_yes(left_index, right_index) \
{ \
  setup_spans_adjust_edges_alternate_no(left_index, right_index); \
  s64 edge_dx_dy_alt_64; \
  vec_1x64s alternate_x_hi; \
  \
  gvdup_n_u16(y_mid_point, y_b); \
  \
  edge_alt <<= edge_shift_alt; \
  edge_dx_dy_alt_64 = (s64)edge_dx_dy_alt << edge_shift_alt; \
  \
  gvdupq_n_s64(alternate_x, edge_alt); \
  gvdupq_n_s64(alternate_dx_dy, edge_dx_dy_alt_64); \
  \
  gvadd_s64(alternate_x_hi, gvlo(alternate_x), gvlo(alternate_dx_dy)); \
  gvaddq_s64(alternate_dx_dy, alternate_dx_dy, alternate_dx_dy); \
  gvset_hi(alternate_x, alternate_x_hi); \
} \

825
// mask of rows (relative to y_mid_point) that should take their x from the
// alternate edge instead of the major one
#define setup_spans_y_select_up() \
  gvclt_s16(alternate_select, y_x4, y_mid_point) \

#define setup_spans_y_select_down() \
  gvcgt_s16(alternate_select, y_x4, y_mid_point) \

#define setup_spans_y_select_alternate_yes(direction) \
  setup_spans_y_select_##direction() \

#define setup_spans_y_select_alternate_no(direction) \

// bit-insert the alternate x values where the select mask is set
#define setup_spans_alternate_select_left() \
  gvbit(left_right_x_16_lo, alternate_x_16, alternate_select); \

#define setup_spans_alternate_select_right() \
  gvbit(left_right_x_16_hi, alternate_x_16, alternate_select); \

#define setup_spans_alternate_select_none() \

// advance the alternate edge by four rows and narrow its x to u16 lanes
#define setup_spans_increment_alternate_yes() \
{ \
  vec_2x32s alternate_x_32_lo, alternate_x_32_hi; \
  gvshrn_n_s64(alternate_x_32_lo, alternate_x, 32); \
  gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy); \
  gvshrn_n_s64(alternate_x_32_hi, alternate_x, 32); \
  gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy); \
  gvcombine_u32(alternate_x_32, alternate_x_32_lo, alternate_x_32_hi); \
  gvmovn_u32(alternate_x_16, alternate_x_32); \
} \

#define setup_spans_increment_alternate_no() \

857
// Emit four spans: store the interpolants for four consecutive rows, derive
// the four left/right x pairs from the 64-bit edge accumulators, clamp them
// to the viewport, then write the packed edge data (left x, width in
// 8-pixel blocks, right-edge partial-block mask, y) with one vst4.
#define setup_spans_set_x4(alternate, direction, alternate_active) \
{ \
  gvst1q_pi_u32(uvrg, span_uvrg_offset); \
  *span_b_offset++ = b; \
  setup_spans_adjust_interpolants_##direction(); \
  \
  gvst1q_pi_u32(uvrg, span_uvrg_offset); \
  *span_b_offset++ = b; \
  setup_spans_adjust_interpolants_##direction(); \
  \
  gvst1q_pi_u32(uvrg, span_uvrg_offset); \
  *span_b_offset++ = b; \
  setup_spans_adjust_interpolants_##direction(); \
  \
  gvst1q_pi_u32(uvrg, span_uvrg_offset); \
  *span_b_offset++ = b; \
  setup_spans_adjust_interpolants_##direction(); \
  \
  gvshrn_n_s64(left_x_32_lo, left_x, 32); \
  gvshrn_n_s64(right_x_32_lo, right_x, 32); \
  \
  gvaddq_s64(left_x, left_x, left_dx_dy); \
  gvaddq_s64(right_x, right_x, right_dx_dy); \
  \
  gvshrn_n_s64(left_x_32_hi, left_x, 32); \
  gvshrn_n_s64(right_x_32_hi, right_x, 32); \
  \
  gvaddq_s64(left_x, left_x, left_dx_dy); \
  gvaddq_s64(right_x, right_x, right_dx_dy); \
  \
  gvcombine_s64(left_x_32, left_x_32_lo, left_x_32_hi); \
  gvcombine_s64(right_x_32, right_x_32_lo, right_x_32_hi); \
  \
  gvmovn_u32(left_right_x_16_lo, left_x_32); \
  gvmovn_u32(left_right_x_16_hi, right_x_32); \
  \
  setup_spans_increment_alternate_##alternate_active(); \
  setup_spans_y_select_alternate_##alternate_active(direction); \
  setup_spans_alternate_select_##alternate(); \
  \
  gvmax_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(left_edge)); \
  gvmax_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(left_edge)); \
  gvmin_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(right_edge)); \
  gvmin_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(right_edge)); \
  \
  gvsub_u16(left_right_x_16_hi, left_right_x_16_hi, left_right_x_16_lo); \
  gvadd_u16(left_right_x_16_hi, left_right_x_16_hi, c_0x07); \
  gvand(span_shift, left_right_x_16_hi, c_0x07); \
  gvshl_u16(span_shift, c_0xFFFE, span_shift); \
  gvshr_n_u16(left_right_x_16_hi, left_right_x_16_hi, 3); \
  \
  gvst4_pi_u16(left_right_x_16_lo, left_right_x_16_hi, span_shift, y_x4, \
   span_edge_data); \
  \
  setup_spans_adjust_y_##direction(); \
} \

914
915
/* Rewind the alternate edge accumulator to the start of the minor edge
 * (it was advanced by height_minor_a lines by the delta computation). */
#define setup_spans_alternate_adjust_yes() \
  edge_alt -= edge_dx_dy_alt * (s64)height_minor_a \

/* No alternate edge: nothing to rewind. */
#define setup_spans_alternate_adjust_no() \

921
/* Generate spans walking downward from y_a toward y_c:
 *  - clip the bottom against viewport_end_y and the top against
 *    viewport_start_y (top clip also fast-forwards the edge accumulators
 *    via setup_spans_clip(increment, ...)),
 *  - then emit spans 4 lines at a time; the do/while rounds height up to
 *    a multiple of 4 while num_spans keeps the exact count. */
#define setup_spans_down(left_index, right_index, alternate, alternate_active) \
  setup_spans_alternate_adjust_##alternate_active(); \
  if(y_c > psx_gpu->viewport_end_y) \
    height -= y_c - psx_gpu->viewport_end_y - 1; \
  \
  clip = psx_gpu->viewport_start_y - y_a; \
  if(clip > 0) \
  { \
    height -= clip; \
    y_a += clip; \
    setup_spans_clip(increment, alternate_active); \
  } \
  \
  setup_spans_prologue_b(); \
  \
  if(height > 0) \
  { \
    /* pack the 4 consecutive y coordinates into one 4x16 lane vector */ \
    u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32) \
              | (u32)((y_a + 1) << 16) | (u16)y_a; \
    gvcreate_u64(y_x4, y_x4_); \
    setup_spans_adjust_edges_alternate_##alternate_active(left_index, \
     right_index); \
    \
    psx_gpu->num_spans = height; \
    do \
    { \
      setup_spans_set_x4(alternate, down, alternate_active); \
      height -= 4; \
    } while(height > 0); \
  } \

953
/* Pre-step the alternate edge one line when walking upward. */
#define setup_spans_alternate_pre_increment_yes() \
  edge_alt += edge_dx_dy_alt \

#define setup_spans_alternate_pre_increment_no() \

/* When the bottom vertex is inside the viewport, drop one line so the
 * shared middle scanline is not produced twice (alternate-active case). */
#define setup_spans_up_decrement_height_yes() \
  height-- \

#define setup_spans_up_decrement_height_no() \
  {} \

/* Generate spans walking upward from y_a toward y_c (mirror of
 * setup_spans_down): clip the top against viewport_start_y and the
 * bottom against viewport_end_y (bottom clip fast-forwards the edge
 * accumulators via setup_spans_clip(decrement, ...)), pre-step the
 * edges one line (spans are emitted above y_a), then emit spans 4
 * lines at a time. */
#define setup_spans_up(left_index, right_index, alternate, alternate_active) \
  setup_spans_alternate_adjust_##alternate_active(); \
  y_a--; \
  \
  if(y_c < psx_gpu->viewport_start_y) \
    height -= psx_gpu->viewport_start_y - y_c; \
  else \
    setup_spans_up_decrement_height_##alternate_active(); \
  \
  clip = y_a - psx_gpu->viewport_end_y; \
  if(clip > 0) \
  { \
    height -= clip; \
    y_a -= clip; \
    setup_spans_clip(decrement, alternate_active); \
  } \
  \
  setup_spans_prologue_b(); \
  \
  if(height > 0) \
  { \
    /* pack 4 decreasing y coordinates into one 4x16 lane vector */ \
    u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32) \
              | (u32)((y_a - 1) << 16) | (u16)y_a; \
    gvcreate_u64(y_x4, y_x4_); \
    gvaddw_s32(edges_xy.full, edges_xy.full, edges_dx_dy); \
    setup_spans_alternate_pre_increment_##alternate_active(); \
    setup_spans_adjust_edges_alternate_##alternate_active(left_index, \
     right_index); \
    setup_spans_adjust_interpolants_up(); \
    \
    psx_gpu->num_spans = height; \
    while(height > 0) \
    { \
      setup_spans_set_x4(alternate, up, alternate_active); \
      height -= 4; \
    } \
  } \

/* Lane indices selecting which edge accumulator is the left/right side. */
#define index_left 0
#define index_right 1

/* Both non-flat edges go upward: the major edge spans y_a..y_c, the
 * minor (alternate) edge spans y_a..y_b. minor/major name which side
 * the short edge is on. */
#define setup_spans_up_up(minor, major) \
  setup_spans_prologue(yes); \
  s32 height_minor_a = y_a - y_b; \
  s32 height_minor_b = y_b - y_c; \
  s32 height = y_a - y_c; \
  \
  gvdup_n_u32(x_starts, x_a); \
  gvcreate_u32(x_ends, x_c, x_b); \
  \
  compute_edge_delta_x3(x_b, height, height_minor_a); \
  setup_spans_up(index_##major, index_##minor, minor, yes) \

/* Span setup: both edges ascend, short edge on the left. The #if 0 path
 * calls the alternative (reference) implementation for debugging. */
void setup_spans_up_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_left_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_up_up(left, right)
}
1027
/* Span setup: both edges ascend, short edge on the right. */
void setup_spans_up_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_right_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_up_up(right, left)
}
1037
/* Both non-flat edges go downward: major edge spans y_a..y_c, minor
 * (alternate) edge spans y_a..y_b (mirror of setup_spans_up_up). */
#define setup_spans_down_down(minor, major) \
  setup_spans_prologue(yes); \
  s32 height_minor_a = y_b - y_a; \
  s32 height_minor_b = y_c - y_b; \
  s32 height = y_c - y_a; \
  \
  gvdup_n_u32(x_starts, x_a); \
  gvcreate_u32(x_ends, x_c, x_b); \
  \
  compute_edge_delta_x3(x_b, height, height_minor_a); \
  setup_spans_down(index_##major, index_##minor, minor, yes) \

/* Span setup: both edges descend, short edge on the left. */
void setup_spans_down_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_down_left_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_down_down(left, right)
}
1059
/* Span setup: both edges descend, short edge on the right. */
void setup_spans_down_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_down_right_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_down_down(right, left)
}
1069
/* One horizontal (flat) edge, remaining two edges ascend: a single
 * left/right edge pair with no alternate edge. */
#define setup_spans_up_flat() \
  s32 height = y_a - y_c; \
  \
  compute_edge_delta_x2(); \
  setup_spans_up(index_left, index_right, none, no) \

/* Flat-edge triangle, ascending: v_a/v_b share the flat edge (span
 * starts), v_c is the remaining vertex (span ends). */
void setup_spans_up_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_a_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvcreate_u32(x_starts, x_a, x_b);
  gvdup_n_u32(x_ends, x_c);

  setup_spans_up_flat()
}
1090
/* Flat-edge triangle, ascending: v_b/v_c share the flat edge (span
 * ends), v_a is the remaining vertex (span start). */
void setup_spans_up_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_b_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvdup_n_u32(x_starts, x_a);
  gvcreate_u32(x_ends, x_b, x_c);

  setup_spans_up_flat()
}
1105
/* One horizontal (flat) edge, remaining two edges descend. */
#define setup_spans_down_flat() \
  s32 height = y_c - y_a; \
  \
  compute_edge_delta_x2(); \
  setup_spans_down(index_left, index_right, none, no) \

/* Flat-edge triangle, descending: v_a/v_b share the flat edge (span
 * starts), v_c is the remaining vertex (span ends). */
void setup_spans_down_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_down_a_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  gvcreate_u32(x_starts, x_a, x_b);
  gvdup_n_u32(x_ends, x_c);

  setup_spans_down_flat()
}
1126
1127void setup_spans_down_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1128 vertex_struct *v_b, vertex_struct *v_c)
1129{
1130#if 0
1131 setup_spans_down_b_(psx_gpu, v_a, v_b, v_c);
1132 return;
1133#endif
1134 setup_spans_prologue(no)
1135
1136 gvdup_n_u32(x_starts, x_a);
1137 gvcreate_u32(x_ends, x_b, x_c);
1138
1139 setup_spans_down_flat()
1140}
1141
/* Span setup for a triangle whose middle vertex (by y) makes the spans
 * first walk up from y_a to y_b, then down from y_a (middle_y) to y_c.
 * The two halves are rendered back to back with separate edge state;
 * edge_alt/edge_dx_dy_alt/edge_shift_alt describe the long edge for the
 * second (downward) half. */
void setup_spans_up_down(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
 vertex_struct *v_b, vertex_struct *v_c)
{
#if 0
  setup_spans_up_down_(psx_gpu, v_a, v_b, v_c);
  return;
#endif
  setup_spans_prologue(no);

  s32 y_b = v_b->y;
  s64 edge_alt;
  s32 edge_dx_dy_alt;
  u32 edge_shift_alt;

  s32 middle_y = y_a;
  s32 height_minor_a = y_a - y_b;  // upper (ascending) half
  s32 height_minor_b = y_c - y_a;  // lower (descending) half
  s32 height_major = y_c - y_b;

  vec_2x64s edges_xy_b;
  vec_1x64s edges_xy_b_left;
  vec_2x32s edges_dx_dy_b;
  vec_2x32u edge_shifts_b;

  vec_2x32s height_increment;

  gvcreate_u32(x_starts, x_a, x_c);
  gvdup_n_u32(x_ends, x_b);

  compute_edge_delta_x3(x_a, height_minor_a, height_major);

  /* advance the long edge by the height of the lower half so it lines up
     with middle_y for the second pass */
  gvcreate_s32(height_increment, 0, height_minor_b);

  gvmlal_s32(edges_xy.full, edges_dx_dy, height_increment);

  /* build the edge state for the second (downward) half: alternate edge
     on the left, long edge (negated step, walked downward) on the right */
  gvcreate_s64(edges_xy_b_left, edge_alt);
  gvcombine_s64(edges_xy_b, edges_xy_b_left, gvhi(edges_xy.full));

  edge_shifts_b = edge_shifts;
  gvmov_l_u32(edge_shifts_b, edge_shift_alt, 0);

  gvneg_s32(edges_dx_dy_b, edges_dx_dy);
  gvmov_l_s32(edges_dx_dy_b, edge_dx_dy_alt, 0);

  /* --- first half: walk upward (inlined setup_spans_up body) --- */
  y_a--;

  if(y_b < psx_gpu->viewport_start_y)
    height_minor_a -= psx_gpu->viewport_start_y - y_b;

  clip = y_a - psx_gpu->viewport_end_y;
  if(clip > 0)
  {
    height_minor_a -= clip;
    y_a -= clip;
    setup_spans_clip(decrement, no);
  }

  setup_spans_prologue_b();

  if(height_minor_a > 0)
  {
    u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32)
              | (u32)((y_a - 1) << 16) | (u16)y_a;
    gvcreate_u64(y_x4, y_x4_);
    gvaddw_s32(edges_xy.full, edges_xy.full, edges_dx_dy);
    setup_spans_adjust_edges_alternate_no(index_left, index_right);
    setup_spans_adjust_interpolants_up();

    psx_gpu->num_spans = height_minor_a;
    while(height_minor_a > 0)
    {
      setup_spans_set_x4(none, up, no);
      height_minor_a -= 4;
    }

    /* height_minor_a is now in [-3, 0]: rewind the output pointers by
       the amount the 4-line loop overshot */
    span_edge_data += height_minor_a;
    span_uvrg_offset += height_minor_a;
    span_b_offset += height_minor_a;
  }

  /* --- second half: switch to the downward edge state --- */
  edges_xy.full = edges_xy_b;
  edges_dx_dy = edges_dx_dy_b;
  edge_shifts = edge_shifts_b;

  /* restart the interpolants from the triangle's base values */
  gvld1q_u32(uvrg, psx_gpu->uvrg.e);
  b = psx_gpu->b;

  y_a = middle_y;

  if(y_c > psx_gpu->viewport_end_y)
    height_minor_b -= y_c - psx_gpu->viewport_end_y - 1;

  clip = psx_gpu->viewport_start_y - y_a;
  if(clip > 0)
  {
    height_minor_b -= clip;
    y_a += clip;
    setup_spans_clip(increment, no);
  }

  if(height_minor_b > 0)
  {
    u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32)
              | (u32)((y_a + 1) << 16) | (u16)y_a;
    gvcreate_u64(y_x4, y_x4_);
    setup_spans_adjust_edges_alternate_no(index_left, index_right);

    // FIXME: overflow corner case
    if(psx_gpu->num_spans + height_minor_b == MAX_SPANS)
      height_minor_b &= ~3;

    psx_gpu->num_spans += height_minor_b;
    while(height_minor_b > 0)
    {
      setup_spans_set_x4(none, down, no);
      height_minor_b -= 4;
    }
  }
}
1261
1262
/* Identity wrapper around a dither table entry (no scaling applied). */
#define dither_table_entry_normal(value) \
  (value) \

/* Indirect target: the mask-msb is applied later at blend time, so no
 * per-setup state is needed. */
#define setup_blocks_load_msb_mask_indirect() \

/* Direct-to-framebuffer target: preload the mask msb as a vector so it
 * can be OR-ed into pixels as they are produced. */
#define setup_blocks_load_msb_mask_direct() \
  vec_8x16u msb_mask; \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \

/* Declare and initialize the per-triangle interpolation state for the
 * shaded + textured path: u/v/r/g/b block accumulators, the per-pixel
 * uvrg gradient and its x4/x8 multiples, the scalar b gradient, and the
 * texture coordinate wrap mask (loaded per-byte-duplicated from
 * texture_mask_width/height). */
#define setup_blocks_variables_shaded_textured(target) \
  vec_4x32u u_block; \
  vec_4x32u v_block; \
  vec_4x32u r_block; \
  vec_4x32u g_block; \
  vec_4x32u b_block; \
  vec_4x32u uvrg_dx; \
  vec_4x32u uvrg_dx4; \
  vec_4x32u uvrg_dx8; \
  vec_4x32u uvrg; \
  vec_16x8u texture_mask; \
  vec_8x8u texture_mask_lo, texture_mask_hi; \
  u32 b_dx = psx_gpu->b_block_span.e[1]; \
  u32 b_dx4 = b_dx << 2; \
  u32 b_dx8 = b_dx << 3; \
  u32 b; \
  \
  gvld1q_u32(uvrg_dx, psx_gpu->uvrg_dx.e); \
  gvshlq_n_u32(uvrg_dx4, uvrg_dx, 2); \
  gvshlq_n_u32(uvrg_dx8, uvrg_dx, 3); \
  gvld2_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width); \
  gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi) \

/* Interpolation state for the shaded + untextured path: r/g/b block
 * accumulators, the rgb gradient (r/g taken from uvrg_dx[2..3], b from
 * b_block_span) and its x4/x8 multiples, plus small byte constants used
 * when packing 888 color down to 555 with dithering. */
#define setup_blocks_variables_shaded_untextured(target) \
  vec_4x32u r_block; \
  vec_4x32u g_block; \
  vec_4x32u b_block; \
  vec_4x32u rgb_dx; \
  vec_2x32u rgb_dx_lo, rgb_dx_hi; \
  vec_4x32u rgb_dx4; \
  vec_4x32u rgb_dx8; \
  vec_4x32u rgb; \
  vec_2x32u rgb_lo, rgb_hi; \
  \
  vec_8x8u d64_0x07; \
  vec_8x8u d64_1; \
  vec_8x8u d64_4; \
  vec_8x8u d64_128; \
  \
  gvdup_n_u8(d64_0x07, 0x07); \
  gvdup_n_u8(d64_1, 1); \
  gvdup_n_u8(d64_4, 4); \
  gvdup_n_u8(d64_128, 128); \
  \
  gvld1_u32(rgb_dx_lo, &psx_gpu->uvrg_dx.e[2]); \
  gvcreate_u32(rgb_dx_hi, psx_gpu->b_block_span.e[1], 0); \
  gvcombine_u32(rgb_dx, rgb_dx_lo, rgb_dx_hi); \
  gvshlq_n_u32(rgb_dx4, rgb_dx, 2); \
  gvshlq_n_u32(rgb_dx8, rgb_dx, 3) \

/* Interpolation state for the unshaded + textured path: only u/v are
 * interpolated (uv gradient plus x4/x8 multiples) and the texture wrap
 * mask is loaded; color is constant. */
#define setup_blocks_variables_unshaded_textured(target) \
  vec_4x32u u_block; \
  vec_4x32u v_block; \
  vec_2x32u uv_dx; \
  vec_2x32u uv_dx4; \
  vec_2x32u uv_dx8; \
  vec_2x32u uv; \
  vec_16x8u texture_mask; \
  vec_8x8u texture_mask_lo, texture_mask_hi; \
  \
  gvld1_u32(uv_dx, psx_gpu->uvrg_dx.e); \
  gvld1_u32(uv, psx_gpu->uvrg.e); \
  gvshl_n_u32(uv_dx4, uv_dx, 2); \
  gvshl_n_u32(uv_dx8, uv_dx, 3); \
  gvld2_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width); \
  gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi) \

/* Direct target: bake the mask msb into the constant color up front. */
#define setup_blocks_variables_unshaded_untextured_direct() \
  gvorrq(colors, colors, msb_mask) \

#define setup_blocks_variables_unshaded_untextured_indirect() \

/* Unshaded + untextured: convert the flat 888 triangle color to 555 and
 * broadcast it; every pixel of every block uses this one value. */
#define setup_blocks_variables_unshaded_untextured(target) \
  u32 color = psx_gpu->triangle_color; \
  vec_8x16u colors; \
  \
  u32 color_r = color & 0xFF; \
  u32 color_g = (color >> 8) & 0xFF; \
  u32 color_b = (color >> 16) & 0xFF; \
  \
  color = (color_r >> 3) | ((color_g >> 3) << 5) | \
   ((color_b >> 3) << 10); \
  gvdupq_n_u16(colors, color); \
  setup_blocks_variables_unshaded_untextured_##target() \

/* Textured: widen the signed 4-bit-ish dither entries to 16-bit with a
 * <<4 (the texture path works in a higher fixed-point scale). */
#define setup_blocks_span_initialize_dithered_textured() \
  vec_8x16u dither_offsets; \
  gvshll_n_s8(dither_offsets, dither_offsets_short, 4) \

/* Untextured: bias the byte offsets by +4 (paired with the -4 saturating
 * subtract in setup_blocks_store_shaded_untextured_dithered). */
#define setup_blocks_span_initialize_dithered_untextured() \
  vec_8x8u dither_offsets; \
  gvadd_u8(dither_offsets, dither_offsets_short, d64_4) \

/* Per-span dither setup: pick the dither table row for this y and rotate
 * it so the pattern stays aligned to absolute x regardless of where the
 * span starts. */
#define setup_blocks_span_initialize_dithered(texturing) \
  u32 dither_row = psx_gpu->dither_table[y & 0x3]; \
  u32 dither_shift = (span_edge_data->left_x & 0x3) * 8; \
  vec_8x8s dither_offsets_short; \
  \
  dither_row = \
   (dither_row >> dither_shift) | (dither_row << (32 - dither_shift)); \
  gvdup_n_u32(dither_offsets_short, dither_row); \
  setup_blocks_span_initialize_dithered_##texturing() \

#define setup_blocks_span_initialize_undithered(texturing) \

/* Per-span init, shaded + textured: seed u/v/r/g/b at the span's left_x
 * (span base + gradient * left_x) and broadcast each into a 4-lane block
 * accumulator offset by the precomputed per-pixel block spans. */
#define setup_blocks_span_initialize_shaded_textured() \
{ \
  u32 left_x = span_edge_data->left_x; \
  vec_4x32u block_span; \
  vec_4x32u v_left_x; \
  \
  gvld1q_u32(uvrg, span_uvrg_offset); \
  gvdupq_n_u32(v_left_x, left_x); \
  gvmlaq_u32(uvrg, uvrg_dx, v_left_x); \
  b = *span_b_offset; \
  b += b_dx * left_x; \
  \
  gvdupq_l_u32(u_block, gvlo(uvrg), 0); \
  gvdupq_l_u32(v_block, gvlo(uvrg), 1); \
  gvdupq_l_u32(r_block, gvhi(uvrg), 0); \
  gvdupq_l_u32(g_block, gvhi(uvrg), 1); \
  gvdupq_n_u32(b_block, b); \
  \
  gvld1q_u32(block_span, psx_gpu->u_block_span.e); \
  gvaddq_u32(u_block, u_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->v_block_span.e); \
  gvaddq_u32(v_block, v_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->r_block_span.e); \
  gvaddq_u32(r_block, r_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->g_block_span.e); \
  gvaddq_u32(g_block, g_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->b_block_span.e); \
  gvaddq_u32(b_block, b_block, block_span); \
}
1406
/* Per-span init, shaded + untextured: seed r/g/b at left_x (r/g come
 * from the upper half of the span's uvrg entry, b from span_b_offset)
 * and broadcast into block accumulators offset by the block spans. */
#define setup_blocks_span_initialize_shaded_untextured() \
{ \
  u32 left_x = span_edge_data->left_x; \
  u32 *span_uvrg_offset_high = (u32 *)span_uvrg_offset + 2; \
  vec_4x32u block_span; \
  vec_4x32u v_left_x; \
  \
  gvld1_u32(rgb_lo, span_uvrg_offset_high); \
  gvcreate_u32(rgb_hi, *span_b_offset, 0); \
  gvcombine_u32(rgb, rgb_lo, rgb_hi); \
  gvdupq_n_u32(v_left_x, left_x); \
  gvmlaq_u32(rgb, rgb_dx, v_left_x); \
  \
  gvdupq_l_u32(r_block, gvlo(rgb), 0); \
  gvdupq_l_u32(g_block, gvlo(rgb), 1); \
  gvdupq_l_u32(b_block, gvhi(rgb), 0); \
  \
  gvld1q_u32(block_span, psx_gpu->r_block_span.e); \
  gvaddq_u32(r_block, r_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->g_block_span.e); \
  gvaddq_u32(g_block, g_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->b_block_span.e); \
  gvaddq_u32(b_block, b_block, block_span); \
} \

/* Per-span init, unshaded + textured: seed only u/v at left_x and
 * broadcast into block accumulators offset by the block spans. */
#define setup_blocks_span_initialize_unshaded_textured() \
{ \
  u32 left_x = span_edge_data->left_x; \
  vec_4x32u block_span; \
  vec_2x32u v_left_x; \
  \
  gvld1_u32(uv, span_uvrg_offset); \
  gvdup_n_u32(v_left_x, left_x); \
  gvmla_u32(uv, uv_dx, v_left_x); \
  \
  gvdupq_l_u32(u_block, uv, 0); \
  gvdupq_l_u32(v_block, uv, 1); \
  \
  gvld1q_u32(block_span, psx_gpu->u_block_span.e); \
  gvaddq_u32(u_block, u_block, block_span); \
  gvld1q_u32(block_span, psx_gpu->v_block_span.e); \
  gvaddq_u32(v_block, v_block, block_span); \
} \

/* Flat color: nothing to initialize per span. */
#define setup_blocks_span_initialize_unshaded_untextured() \

/* 4bpp texture addressing: interleave the u/v nibbles (u gets v's low
 * nibble shifted in, v gets old u's high nibble) to match the swizzled
 * cache layout produced by update_texture_4bpp_cache. */
#define setup_blocks_texture_swizzled() \
{ \
  vec_8x8u u_saved = u; \
  gvsli_n_u8(u, v, 4); \
  gvsri_n_u8(v, u_saved, 4); \
} \

#define setup_blocks_texture_unswizzled() \

/* Emit one 8-pixel block, shaded + textured: for each of u/v/r/g/b take
 * the integer part (>>16) of the four block accumulators for pixels 0-3
 * and of accumulator+dx4 for pixels 4-7 (gvaddhn folds add and narrow),
 * advance the accumulators by dx8, mask u/v by the texture wrap mask,
 * optionally swizzle, and store u/v pairs, r/g/b bytes and the dither
 * offsets into the block for the later texel fetch/blend stage. */
#define setup_blocks_store_shaded_textured(swizzling, dithering, target, \
 edge_type) \
{ \
  vec_8x16u u_whole; \
  vec_8x16u v_whole; \
  vec_8x16u r_whole; \
  vec_8x16u g_whole; \
  vec_8x16u b_whole; \
  vec_4x16u u_whole_lo, u_whole_hi; \
  vec_4x16u v_whole_lo, v_whole_hi; \
  vec_4x16u r_whole_lo, r_whole_hi; \
  vec_4x16u g_whole_lo, g_whole_hi; \
  vec_4x16u b_whole_lo, b_whole_hi; \
  \
  vec_8x8u u; \
  vec_8x8u v; \
  vec_8x8u r; \
  vec_8x8u g; \
  vec_8x8u b; \
  \
  vec_4x32u dx4; \
  vec_4x32u dx8; \
  \
  gvshrn_n_u32(u_whole_lo, u_block, 16); \
  gvshrn_n_u32(v_whole_lo, v_block, 16); \
  gvshrn_n_u32(r_whole_lo, r_block, 16); \
  gvshrn_n_u32(g_whole_lo, g_block, 16); \
  gvshrn_n_u32(b_whole_lo, b_block, 16); \
  \
  gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 0); \
  gvaddhn_u32(u_whole_hi, u_block, dx4); \
  gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 1); \
  gvaddhn_u32(v_whole_hi, v_block, dx4); \
  gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 0); \
  gvaddhn_u32(r_whole_hi, r_block, dx4); \
  gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 1); \
  gvaddhn_u32(g_whole_hi, g_block, dx4); \
  gvdupq_n_u32(dx4, b_dx4); \
  gvaddhn_u32(b_whole_hi, b_block, dx4); \
  \
  gvcombine_u16(u_whole, u_whole_lo, u_whole_hi); \
  gvcombine_u16(v_whole, v_whole_lo, v_whole_hi); \
  gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \
  gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \
  gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \
  gvmovn_u16(u, u_whole); \
  gvmovn_u16(v, v_whole); \
  gvmovn_u16(r, r_whole); \
  gvmovn_u16(g, g_whole); \
  gvmovn_u16(b, b_whole); \
  \
  gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 0); \
  gvaddq_u32(u_block, u_block, dx8); \
  gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 1); \
  gvaddq_u32(v_block, v_block, dx8); \
  gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 0); \
  gvaddq_u32(r_block, r_block, dx8); \
  gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 1); \
  gvaddq_u32(g_block, g_block, dx8); \
  gvdupq_n_u32(dx8, b_dx8); \
  gvaddq_u32(b_block, b_block, dx8); \
  \
  gvand(u, u, gvlo(texture_mask)); \
  gvand(v, v, gvhi(texture_mask)); \
  setup_blocks_texture_##swizzling(); \
  \
  gvst2_u8(u, v, (u8 *)block->uv.e); \
  gvst1_u8(r, block->r.e); \
  gvst1_u8(g, block->g.e); \
  gvst1_u8(b, block->b.e); \
  gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e); \
  block->fb_ptr = fb_ptr; \
} \

/* Emit one 8-pixel block, unshaded + textured: like the shaded variant
 * but only u/v are interpolated; color is constant and not stored here. */
#define setup_blocks_store_unshaded_textured(swizzling, dithering, target, \
 edge_type) \
{ \
  vec_8x16u u_whole; \
  vec_8x16u v_whole; \
  vec_4x16u u_whole_lo, u_whole_hi; \
  vec_4x16u v_whole_lo, v_whole_hi; \
  \
  vec_8x8u u; \
  vec_8x8u v; \
  \
  vec_4x32u dx4; \
  vec_4x32u dx8; \
  \
  gvshrn_n_u32(u_whole_lo, u_block, 16); \
  gvshrn_n_u32(v_whole_lo, v_block, 16); \
  \
  gvdupq_l_u32(dx4, uv_dx4, 0); \
  gvaddhn_u32(u_whole_hi, u_block, dx4); \
  gvdupq_l_u32(dx4, uv_dx4, 1); \
  gvaddhn_u32(v_whole_hi, v_block, dx4); \
  \
  gvcombine_u16(u_whole, u_whole_lo, u_whole_hi); \
  gvcombine_u16(v_whole, v_whole_lo, v_whole_hi); \
  gvmovn_u16(u, u_whole); \
  gvmovn_u16(v, v_whole); \
  \
  gvdupq_l_u32(dx8, uv_dx8, 0); \
  gvaddq_u32(u_block, u_block, dx8); \
  gvdupq_l_u32(dx8, uv_dx8, 1); \
  gvaddq_u32(v_block, v_block, dx8); \
  \
  gvand(u, u, gvlo(texture_mask)); \
  gvand(v, v, gvhi(texture_mask)); \
  setup_blocks_texture_##swizzling(); \
  \
  gvst2_u8(u, v, (u8 *)block->uv.e); \
  gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e); \
  block->fb_ptr = fb_ptr; \
} \

/* Apply the (biased) dither offsets to r/g/b with saturating add, then
 * remove the +4 bias with a saturating subtract (net effect: signed
 * dither clamped to [0, 255]). */
#define setup_blocks_store_shaded_untextured_dithered() \
  gvqadd_u8(r, r, dither_offsets); \
  gvqadd_u8(g, g, dither_offsets); \
  gvqadd_u8(b, b, dither_offsets); \
  \
  gvqsub_u8(r, r, d64_4); \
  gvqsub_u8(g, g, d64_4); \
  gvqsub_u8(b, b, d64_4) \

#define setup_blocks_store_shaded_untextured_undithered() \

/* Indirect target: park the pixels in the block for the later blend
 * stage (full and edge blocks are handled identically here). */
#define setup_blocks_store_untextured_pixels_indirect_full(_pixels) \
  gvst1q_u16(_pixels, block->pixels.e); \
  block->fb_ptr = fb_ptr \

#define setup_blocks_store_untextured_pixels_indirect_edge(_pixels) \
  gvst1q_u16(_pixels, block->pixels.e); \
  block->fb_ptr = fb_ptr \

/* Seed the 16-bit pixel accumulator with r (widening multiply by 1). */
#define setup_blocks_store_shaded_untextured_seed_pixels_indirect() \
  gvmull_u8(pixels, r, d64_1) \

/* Direct target, full block: write all 8 pixels straight to vram. */
#define setup_blocks_store_untextured_pixels_direct_full(_pixels) \
  gvst1q_u16(_pixels, fb_ptr) \

/* Direct target, right-edge block: expand right_mask to a per-pixel
 * mask via test_mask and merge new pixels with the framebuffer so only
 * in-span pixels are written. */
#define setup_blocks_store_untextured_pixels_direct_edge(_pixels) \
{ \
  vec_8x16u fb_pixels; \
  vec_8x16u draw_mask; \
  vec_8x16u test_mask; \
  \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  gvld1q_u16(fb_pixels, fb_ptr); \
  gvdupq_n_u16(draw_mask, span_edge_data->right_mask); \
  gvtstq_u16(draw_mask, draw_mask, test_mask); \
  gvbifq(fb_pixels, _pixels, draw_mask); \
  gvst1q_u16(fb_pixels, fb_ptr); \
} \

/* Direct seed: start from the msb mask so the mask bit is pre-set. */
#define setup_blocks_store_shaded_untextured_seed_pixels_direct() \
  pixels = msb_mask; \
  gvmlal_u8(pixels, r, d64_1) \

/* Emit one 8-pixel block, shaded + untextured: take the integer parts of
 * the r/g/b accumulators (pixels 0-3 via >>16, 4-7 via add-high-narrow
 * with dx4), step by dx8, optionally dither, then pack 888 to 555
 * (r>>3 | g&~7 scaled by 4 | b&~7 scaled by 128 via widening MLAs) and
 * hand the pixels to the target/edge store helper. */
#define setup_blocks_store_shaded_untextured(swizzling, dithering, target, \
 edge_type) \
{ \
  vec_8x16u r_whole; \
  vec_8x16u g_whole; \
  vec_8x16u b_whole; \
  vec_4x16u r_whole_lo, r_whole_hi; \
  vec_4x16u g_whole_lo, g_whole_hi; \
  vec_4x16u b_whole_lo, b_whole_hi; \
  \
  vec_8x8u r; \
  vec_8x8u g; \
  vec_8x8u b; \
  \
  vec_4x32u dx4; \
  vec_4x32u dx8; \
  \
  vec_8x16u pixels; \
  \
  gvshrn_n_u32(r_whole_lo, r_block, 16); \
  gvshrn_n_u32(g_whole_lo, g_block, 16); \
  gvshrn_n_u32(b_whole_lo, b_block, 16); \
  \
  gvdupq_l_u32(dx4, gvlo(rgb_dx4), 0); \
  gvaddhn_u32(r_whole_hi, r_block, dx4); \
  gvdupq_l_u32(dx4, gvlo(rgb_dx4), 1); \
  gvaddhn_u32(g_whole_hi, g_block, dx4); \
  gvdupq_l_u32(dx4, gvhi(rgb_dx4), 0); \
  gvaddhn_u32(b_whole_hi, b_block, dx4); \
  \
  gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \
  gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \
  gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \
  gvmovn_u16(r, r_whole); \
  gvmovn_u16(g, g_whole); \
  gvmovn_u16(b, b_whole); \
  \
  gvdupq_l_u32(dx8, gvlo(rgb_dx8), 0); \
  gvaddq_u32(r_block, r_block, dx8); \
  gvdupq_l_u32(dx8, gvlo(rgb_dx8), 1); \
  gvaddq_u32(g_block, g_block, dx8); \
  gvdupq_l_u32(dx8, gvhi(rgb_dx8), 0); \
  gvaddq_u32(b_block, b_block, dx8); \
  \
  setup_blocks_store_shaded_untextured_##dithering(); \
  \
  gvshr_n_u8(r, r, 3); \
  gvbic(g, g, d64_0x07); \
  gvbic(b, b, d64_0x07); \
  \
  setup_blocks_store_shaded_untextured_seed_pixels_##target(); \
  gvmlal_u8(pixels, g, d64_4); \
  gvmlal_u8(pixels, b, d64_128); \
  \
  setup_blocks_store_untextured_pixels_##target##_##edge_type(pixels); \
} \

/* Flat color block: just forward the prebuilt constant pixel vector. */
#define setup_blocks_store_unshaded_untextured(swizzling, dithering, target, \
 edge_type) \
  setup_blocks_store_untextured_pixels_##target##_##edge_type(colors) \

/* Textured indirect blocks keep the raw mask bits (expanded later). */
#define setup_blocks_store_draw_mask_textured_indirect(_block, bits) \
  (_block)->draw_mask_bits = bits \

/* Untextured indirect blocks store the mask pre-expanded to a per-pixel
 * 16-bit mask via test_mask. */
#define setup_blocks_store_draw_mask_untextured_indirect(_block, bits) \
{ \
  vec_8x16u bits_mask; \
  vec_8x16u test_mask; \
  \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  gvdupq_n_u16(bits_mask, bits); \
  gvtstq_u16(bits_mask, bits_mask, test_mask); \
  gvst1q_u16(bits_mask, (_block)->draw_mask.e); \
} \

#define setup_blocks_store_draw_mask_untextured_direct(_block, bits) \

/* Indirect: account for the new blocks and flush the block buffer when
 * it would overflow (the current span's blocks are kept). */
#define setup_blocks_add_blocks_indirect() \
  num_blocks += span_num_blocks; \
  \
  if(num_blocks > MAX_BLOCKS) \
  { \
    psx_gpu->num_blocks = num_blocks - span_num_blocks; \
    flush_render_block_buffer(psx_gpu); \
    num_blocks = span_num_blocks; \
    block = psx_gpu->blocks; \
  } \

#define setup_blocks_add_blocks_direct() \

/* Main block-emission driver, instantiated once per renderer variant:
 * walk the spans produced by setup_spans_*, and for each span emit its
 * 8-pixel blocks — all but the last with a full (0x00) draw mask, the
 * last with the span's right_mask for the partial block. Indirect
 * targets queue blocks in psx_gpu->blocks (flushing when full); direct
 * targets write the framebuffer immediately. */
#define setup_blocks_do(shading, texturing, dithering, sw, target) \
  setup_blocks_load_msb_mask_##target(); \
  setup_blocks_variables_##shading##_##texturing(target); \
  \
  edge_data_struct *span_edge_data = psx_gpu->span_edge_data; \
  vec_4x32u *span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \
  u32 *span_b_offset = psx_gpu->span_b_offset; \
  \
  block_struct *block = psx_gpu->blocks + psx_gpu->num_blocks; \
  \
  u32 num_spans = psx_gpu->num_spans; \
  \
  u16 *fb_ptr; \
  u32 y; \
  \
  u32 num_blocks = psx_gpu->num_blocks; \
  u32 span_num_blocks; \
  \
  while(num_spans) \
  { \
    span_num_blocks = span_edge_data->num_blocks; \
    if(span_num_blocks) \
    { \
      y = span_edge_data->y; \
      fb_ptr = psx_gpu->vram_out_ptr + span_edge_data->left_x + (y * 1024); \
      \
      setup_blocks_span_initialize_##shading##_##texturing(); \
      setup_blocks_span_initialize_##dithering(texturing); \
      \
      setup_blocks_add_blocks_##target(); \
      \
      s32 pixel_span = span_num_blocks * 8; \
      pixel_span -= __builtin_popcount(span_edge_data->right_mask & 0xFF); \
      \
      span_num_blocks--; \
      while(span_num_blocks) \
      { \
        setup_blocks_store_##shading##_##texturing(sw, dithering, target, \
         full); \
        setup_blocks_store_draw_mask_##texturing##_##target(block, 0x00); \
        \
        fb_ptr += 8; \
        block++; \
        span_num_blocks--; \
      } \
      \
      setup_blocks_store_##shading##_##texturing(sw, dithering, target, edge); \
      setup_blocks_store_draw_mask_##texturing##_##target(block, \
       span_edge_data->right_mask); \
      \
      block++; \
    } \
    \
    num_spans--; \
    span_edge_data++; \
    span_uvrg_offset++; \
    span_b_offset++; \
  } \
  \
  psx_gpu->num_blocks = num_blocks \

/* Variant: shaded, textured, dithered, swizzled 4bpp layout, blocks
 * queued for the indirect blend stage. The #if 0 path is the reference
 * implementation kept for debugging. */
void setup_blocks_shaded_textured_dithered_swizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_textured_dithered_swizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, textured, dithered, swizzled, indirect);
}
1780
/* Variant: shaded, textured, dithered, unswizzled texture layout,
 * indirect block queue. */
void setup_blocks_shaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_textured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, textured, dithered, unswizzled, indirect);
}
1790
/* Variant: flat color, textured, dithered, swizzled, indirect. */
void setup_blocks_unshaded_textured_dithered_swizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_unshaded_textured_dithered_swizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, textured, dithered, swizzled, indirect);
}
1800
/* Variant: flat color, textured, dithered, unswizzled, indirect. */
void setup_blocks_unshaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_unshaded_textured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, textured, dithered, unswizzled, indirect);
}
1810
/* Variant: flat color, untextured, no dithering, indirect. */
void setup_blocks_unshaded_untextured_undithered_unswizzled_indirect(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_unshaded_untextured_undithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, untextured, undithered, unswizzled, indirect);
}
1820
/* Variant: flat color, untextured, no dithering, direct framebuffer
 * writes (no block queue). */
void setup_blocks_unshaded_untextured_undithered_unswizzled_direct(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_unshaded_untextured_undithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(unshaded, untextured, undithered, unswizzled, direct);
}
1830
/* Variant: shaded, untextured, no dithering, indirect. */
void setup_blocks_shaded_untextured_undithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_undithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, undithered, unswizzled, indirect);
}
1840
/* Variant: shaded, untextured, dithered, indirect. */
void setup_blocks_shaded_untextured_dithered_unswizzled_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_dithered_unswizzled_indirect_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, dithered, unswizzled, indirect);
}
1850
/* Variant: shaded, untextured, no dithering, direct framebuffer writes. */
void setup_blocks_shaded_untextured_undithered_unswizzled_direct(
 psx_gpu_struct *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_undithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, undithered, unswizzled, direct);
}
1860
/* Variant: shaded, untextured, dithered, direct framebuffer writes. */
void setup_blocks_shaded_untextured_dithered_unswizzled_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  setup_blocks_shaded_untextured_dithered_unswizzled_direct_(psx_gpu);
  return;
#endif
  setup_blocks_do(shaded, untextured, dithered, unswizzled, direct);
}
1870
/* Rebuild the 4bpp texture cache for the current texture page: walk the
 * page as 16x16 tiles of 8-pixel-wide rows in vram and expand each
 * packed 4bpp byte into two bytes, one texel per byte (for each source
 * byte b: u16 = (b & 0x0F) | ((b >> 4) << 8), built from the
 * zero-extend and <<4 copies with the 0x00F0 bits cleared). The result
 * is stored sequentially into the cache, producing the swizzled layout
 * the setup_blocks_texture_swizzled addressing expects. */
static void update_texture_4bpp_cache(psx_gpu_struct *psx_gpu)
{
  u32 current_texture_page = psx_gpu->current_texture_page;
  u8 *texture_page_ptr = psx_gpu->texture_page_base;
  const u16 *vram_ptr = psx_gpu->vram_ptr;
  u32 tile_x, tile_y;
  u32 sub_y;
  vec_8x16u c_0x00f0;

  /* texture pages are laid out 16 per 256-line vram row, 64 halfwords wide */
  vram_ptr += (current_texture_page >> 4) * 256 * 1024;
  vram_ptr += (current_texture_page & 0xF) * 64;

  gvdupq_n_u16(c_0x00f0, 0x00f0);

  psx_gpu->dirty_textures_4bpp_mask &= ~(psx_gpu->current_texture_mask);

  for (tile_y = 16; tile_y; tile_y--)
  {
    for (tile_x = 16; tile_x; tile_x--)
    {
      for (sub_y = 8; sub_y; sub_y--)
      {
        vec_8x8u texel_block_a, texel_block_b;
        vec_8x16u texel_block_expanded_a, texel_block_expanded_b;
        vec_8x16u texel_block_expanded_c, texel_block_expanded_d;
        vec_8x16u texel_block_expanded_ab, texel_block_expanded_cd;

        /* two vram rows (1024 halfwords apart) per iteration */
        gvld1_u8(texel_block_a, (u8 *)vram_ptr); vram_ptr += 1024;
        gvld1_u8(texel_block_b, (u8 *)vram_ptr); vram_ptr += 1024;

        gvmovl_u8(texel_block_expanded_a, texel_block_a);
        gvshll_n_u8(texel_block_expanded_b, texel_block_a, 4);
        gvmovl_u8(texel_block_expanded_c, texel_block_b);
        gvshll_n_u8(texel_block_expanded_d, texel_block_b, 4);

        gvbicq(texel_block_expanded_a, texel_block_expanded_a, c_0x00f0);
        gvbicq(texel_block_expanded_b, texel_block_expanded_b, c_0x00f0);
        gvbicq(texel_block_expanded_c, texel_block_expanded_c, c_0x00f0);
        gvbicq(texel_block_expanded_d, texel_block_expanded_d, c_0x00f0);

        gvorrq(texel_block_expanded_ab, texel_block_expanded_a, texel_block_expanded_b);
        gvorrq(texel_block_expanded_cd, texel_block_expanded_c, texel_block_expanded_d);

        gvst1q_2_pi_u32(texel_block_expanded_ab, texel_block_expanded_cd, texture_page_ptr);
      }

      /* step back to the top of the tile, advance 4 halfwords right */
      vram_ptr -= (1024 * 16) - 4;
    }

    /* next tile row: down 16 lines, back to the page's left edge */
    vram_ptr += (16 * 1024) - (4 * 16);
  }
}
1923
/* Copy one 8bpp texture page slice from vram into the texture cache,
 * reordering into 16x(8-block) tile order: 16 tile rows, 8 tiles per
 * row, each tile copied as 4 groups of 4 vram rows. Odd pages land in
 * the second half of each tile row (the +8*16*8 offset), interleaving
 * the two slices that make up an 8bpp page. */
void update_texture_8bpp_cache_slice(psx_gpu_struct *psx_gpu,
 u32 texture_page)
{
#if 0
  update_texture_8bpp_cache_slice_(psx_gpu, texture_page);
  return;
#endif
  u16 *texture_page_ptr = psx_gpu->texture_page_base;
  u16 *vram_ptr = psx_gpu->vram_ptr;

  u32 tile_x, tile_y;
  u32 sub_y;

  vram_ptr += (texture_page >> 4) * 256 * 1024;
  vram_ptr += (texture_page & 0xF) * 64;

  if((texture_page ^ psx_gpu->current_texture_page) & 0x1)
    texture_page_ptr += (8 * 16) * 8;

  for (tile_y = 16; tile_y; tile_y--)
  {
    for (tile_x = 8; tile_x; tile_x--)
    {
      for (sub_y = 4; sub_y; sub_y--)
      {
        vec_4x32u texels_a, texels_b, texels_c, texels_d = {};
        gvld1q_u32(texels_a, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_b, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_c, vram_ptr); vram_ptr += 1024;
        gvld1q_u32(texels_d, vram_ptr); vram_ptr += 1024;

        gvst1q_2_pi_u32(texels_a, texels_b, texture_page_ptr);
        gvst1q_2_pi_u32(texels_c, texels_d, texture_page_ptr);
      }

      /* back to the tile's top row, 8 halfwords to the right */
      vram_ptr -= (1024 * 16) - 8;
    }

    /* next tile row: rewind x, advance 16 vram lines */
    vram_ptr -= (8 * 8);
    vram_ptr += (16 * 1024);

    texture_page_ptr += (8 * 16) * 8;
  }
}
1968
/* Untextured blocks have no texels to fetch; intentionally a no-op so
   the render path can call a uniform texture_blocks_* hook. */
void texture_blocks_untextured(psx_gpu_struct *psx_gpu)
{
}
1972
/* Resolve the 8 uv indices of every queued block into 16bpp texels via
   the 16-entry 4bpp CLUT. The 32-byte CLUT is loaded deinterleaved
   (gvld2q_u8) into separate low-byte and high-byte vectors so each
   texel lookup is a pair of byte table lookups; the two halves are
   re-interleaved into u16 texels on store. */
void texture_blocks_4bpp(psx_gpu_struct *psx_gpu)
{
#if 0
  /* handwritten asm version, kept for A/B testing */
  texture_blocks_4bpp_(psx_gpu);
  return;
#endif
  block_struct *block = psx_gpu->blocks;
  u32 num_blocks = psx_gpu->num_blocks;

  vec_8x8u texels_low;
  vec_8x8u texels_high;

  vec_16x8u clut_low;
  vec_16x8u clut_high;

  const u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr;

  gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr);

  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask)
    update_texture_4bpp_cache(psx_gpu);

  while(num_blocks)
  {
    // gather the 8 CLUT indices for this block from the 4bpp cache
    vec_8x8u texels =
    {
      .u8 =
      {
        texture_ptr_8bpp[block->uv.e[0]],
        texture_ptr_8bpp[block->uv.e[1]],
        texture_ptr_8bpp[block->uv.e[2]],
        texture_ptr_8bpp[block->uv.e[3]],
        texture_ptr_8bpp[block->uv.e[4]],
        texture_ptr_8bpp[block->uv.e[5]],
        texture_ptr_8bpp[block->uv.e[6]],
        texture_ptr_8bpp[block->uv.e[7]]
      }
    };

    // palette translation: low and high bytes looked up separately,
    // interleaving store recombines them into u16 texels
    gvtbl2_u8(texels_low, clut_low, texels);
    gvtbl2_u8(texels_high, clut_high, texels);

    gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e);

    num_blocks--;
    block++;
  }
}
2021
2022void texture_blocks_8bpp(psx_gpu_struct *psx_gpu)
2023{
2024#if 0
2025 texture_blocks_8bpp_(psx_gpu);
2026 return;
2027#endif
2028 u32 num_blocks = psx_gpu->num_blocks;
2029
2030 if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask)
2031 update_texture_8bpp_cache(psx_gpu);
2032
2033 const u8 * __restrict__ texture_ptr_8bpp = psx_gpu->texture_page_ptr;
2034 const u16 * __restrict__ clut_ptr = psx_gpu->clut_ptr;
2035 block_struct * __restrict__ block = psx_gpu->blocks;
2036
2037 while(num_blocks)
2038 {
2039 u16 offset;
2040 #define load_one(i_) \
2041 offset = block->uv.e[i_]; u16 texel##i_ = texture_ptr_8bpp[offset]
2042 #define store_one(i_) \
2043 block->texels.e[i_] = clut_ptr[texel##i_]
2044 load_one(0); load_one(1); load_one(2); load_one(3);
2045 load_one(4); load_one(5); load_one(6); load_one(7);
2046 store_one(0); store_one(1); store_one(2); store_one(3);
2047 store_one(4); store_one(5); store_one(6); store_one(7);
2048 #undef load_one
2049 #undef store_one
2050
2051 num_blocks--;
2052 block++;
2053 }
2054}
2055
2056void texture_blocks_16bpp(psx_gpu_struct *psx_gpu)
2057{
2058#if 0
2059 texture_blocks_16bpp_(psx_gpu);
2060 return;
2061#endif
2062 u32 num_blocks = psx_gpu->num_blocks;
2063 const u16 * __restrict__ texture_ptr_16bpp = psx_gpu->texture_page_ptr;
2064 block_struct * __restrict__ block = psx_gpu->blocks;
2065
2066 while(num_blocks)
2067 {
2068 u32 offset;
2069 #define load_one(i_) \
2070 offset = block->uv.e[i_]; \
2071 offset += ((offset & 0xFF00) * 3); \
2072 u16 texel##i_ = texture_ptr_16bpp[offset]
2073 #define store_one(i_) \
2074 block->texels.e[i_] = texel##i_
2075 load_one(0); load_one(1); load_one(2); load_one(3);
2076 load_one(4); load_one(5); load_one(6); load_one(7);
2077 store_one(0); store_one(1); store_one(2); store_one(3);
2078 store_one(4); store_one(5); store_one(6); store_one(7);
2079 #undef load_one
2080 #undef store_one
2081
2082 num_blocks--;
2083 block++;
2084 }
2085}
2086
/* indirect target writes masks/pixels back to the block buffer, so it
   needs no msb mask here */
#define shade_blocks_load_msb_mask_indirect() \

/* direct target blends into the framebuffer and must set the mask msb
   on written pixels */
#define shade_blocks_load_msb_mask_direct() \
  vec_8x16u msb_mask; \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \

/* indirect store: stash draw mask and shaded pixels in the block for a
   later blend pass */
#define shade_blocks_store_indirect(_draw_mask, _pixels) \
  gvst1q_u16(_draw_mask, block->draw_mask.e); \
  gvst1q_u16(_pixels, block->pixels.e); \

/* direct store: read-modify-write the framebuffer, keeping masked-out
   lanes (gvbifq inserts where _draw_mask is clear) */
#define shade_blocks_store_direct(_draw_mask, _pixels) \
{ \
  vec_8x16u fb_pixels; \
  gvorrq(_pixels, _pixels, msb_mask); \
  gvld1q_u16(fb_pixels, block->fb_ptr); \
  gvbifq(fb_pixels, _pixels, _draw_mask); \
  gvst1q_u16(fb_pixels, block->fb_ptr); \
} \

/* dithered path can't take the unmodulated shortcut (dither offsets
   still apply), so the check is empty */
#define shade_blocks_textured_false_modulated_check_dithered(target) \

/* a flat 0x808080 color means modulation is an identity; divert to the
   cheaper unmodulated routine */
#define shade_blocks_textured_false_modulated_check_undithered(target) \
  if(psx_gpu->triangle_color == 0x808080) \
  { \
    shade_blocks_textured_unmodulated_##target(psx_gpu); \
    return; \
  } \

/* shaded primitives reload colors per block, so nothing to do here */
#define shade_blocks_textured_modulated_shaded_primitive_load(dithering, \
 target) \

/* unshaded primitives use one flat color for the whole primitive;
   splat its channels once up front */
#define shade_blocks_textured_modulated_unshaded_primitive_load(dithering, \
 target) \
{ \
  u32 color = psx_gpu->triangle_color; \
  gvdup_n_u8(colors_r, color); \
  gvdup_n_u8(colors_g, color >> 8); \
  gvdup_n_u8(colors_b, color >> 16); \
  shade_blocks_textured_false_modulated_check_##dithering(target); \
} \

/* shaded: per-block interpolated gouraud colors */
#define shade_blocks_textured_modulated_shaded_block_load() \
  gvld1_u8(colors_r, block->r.e); \
  gvld1_u8(colors_g, block->g.e); \
  gvld1_u8(colors_b, block->b.e) \

#define shade_blocks_textured_modulated_unshaded_block_load() \

/* modulate = texel * color (widening multiply); dithered variant seeds
   the accumulator with the per-block dither offsets */
#define shade_blocks_textured_modulate_dithered(component) \
  gvld1q_u16(pixels_##component, block->dither_offsets.e); \
  gvmlal_u8(pixels_##component, texels_##component, colors_##component) \

#define shade_blocks_textured_modulate_undithered(component) \
  gvmull_u8(pixels_##component, texels_##component, colors_##component) \

/* Core of all textured-modulated shaders: unpack 5:5:5 texels,
   multiply each channel by the (flat or gouraud) color, repack to
   15bpp with saturation, and store via the target policy. Texels equal
   to 0x0000 are transparent and are folded into the draw mask. */
#define shade_blocks_textured_modulated_do(shading, dithering, target) \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u texels; \
 \
  vec_8x8u texels_r; \
  vec_8x8u texels_g; \
  vec_8x8u texels_b; \
 \
  vec_8x8u colors_r; \
  vec_8x8u colors_g; \
  vec_8x8u colors_b; \
 \
  vec_8x8u pixels_r_low; \
  vec_8x8u pixels_g_low; \
  vec_8x8u pixels_b_low; \
  vec_8x16u pixels; \
 \
  vec_8x16u pixels_r; \
  vec_8x16u pixels_g; \
  vec_8x16u pixels_b; \
 \
  vec_8x16u draw_mask; \
  vec_8x16u zero_mask; \
 \
  vec_8x8u d64_0x07; \
  vec_8x8u d64_0x1F; \
  vec_8x8u d64_1; \
  vec_8x8u d64_4; \
  vec_8x8u d64_128; \
 \
  vec_8x16u d128_0x8000; \
 \
  vec_8x16u test_mask; \
  u32 draw_mask_bits; \
 \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  shade_blocks_load_msb_mask_##target(); \
 \
  gvdup_n_u8(d64_0x07, 0x07); \
  gvdup_n_u8(d64_0x1F, 0x1F); \
  gvdup_n_u8(d64_1, 1); \
  gvdup_n_u8(d64_4, 4); \
  gvdup_n_u8(d64_128, 128); \
 \
  gvdupq_n_u16(d128_0x8000, 0x8000); \
 \
  shade_blocks_textured_modulated_##shading##_primitive_load(dithering, \
   target); \
 \
  while(num_blocks) \
  { \
    /* expand per-block mask bits into per-lane masks */ \
    draw_mask_bits = block->draw_mask_bits; \
    gvdupq_n_u16(draw_mask, draw_mask_bits); \
    gvtstq_u16(draw_mask, draw_mask, test_mask); \
 \
    shade_blocks_textured_modulated_##shading##_block_load(); \
 \
    gvld1q_u16(texels, block->texels.e); \
 \
    /* split 5:5:5 texels into 5-bit channel bytes */ \
    gvmovn_u16(texels_r, texels); \
    gvshrn_n_u16(texels_g, texels, 5); \
    gvshrn_n_u16(texels_b, texels, 7); \
 \
    gvand(texels_r, texels_r, d64_0x1F); \
    gvand(texels_g, texels_g, d64_0x1F); \
    gvshr_n_u8(texels_b, texels_b, 3); \
 \
    shade_blocks_textured_modulate_##dithering(r); \
    shade_blocks_textured_modulate_##dithering(g); \
    shade_blocks_textured_modulate_##dithering(b); \
 \
    /* texel 0x0000 = transparent; keep the texel's mask msb */ \
    gvceqzq_u16(zero_mask, texels); \
    gvand(pixels, texels, d128_0x8000); \
 \
    /* narrow with saturation back to 8-bit channel values */ \
    gvqshrun_n_s16(pixels_r_low, pixels_r, 4); \
    gvqshrun_n_s16(pixels_g_low, pixels_g, 4); \
    gvqshrun_n_s16(pixels_b_low, pixels_b, 4); \
 \
    gvorrq(zero_mask, draw_mask, zero_mask); \
 \
    gvshr_n_u8(pixels_r_low, pixels_r_low, 3); \
    gvbic(pixels_g_low, pixels_g_low, d64_0x07); \
    gvbic(pixels_b_low, pixels_b_low, d64_0x07); \
 \
    /* repack to 15bpp: r + (g >> 3 << 5) + (b >> 3 << 10) via mlal */ \
    gvmlal_u8(pixels, pixels_r_low, d64_1); \
    gvmlal_u8(pixels, pixels_g_low, d64_4); \
    gvmlal_u8(pixels, pixels_b_low, d64_128); \
 \
    shade_blocks_store_##target(zero_mask, pixels); \
 \
    num_blocks--; \
    block++; \
  } \

/* Instantiations of shade_blocks_textured_modulated_do for every
   combination of shading (shaded = gouraud per-block colors, unshaded
   = flat primitive color), dithering, and target (direct = write
   framebuffer, indirect = fill block buffer for a blend pass). The
   #if 0 stubs switch in the handwritten asm versions (same name plus a
   trailing underscore) for A/B testing. */
void shade_blocks_shaded_textured_modulated_dithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_dithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, dithered, direct);
}

void shade_blocks_shaded_textured_modulated_undithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_undithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, undithered, direct);
}

void shade_blocks_unshaded_textured_modulated_dithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_dithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, dithered, direct);
}

void shade_blocks_unshaded_textured_modulated_undithered_direct(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_undithered_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, undithered, direct);
}

void shade_blocks_shaded_textured_modulated_dithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_dithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, dithered, indirect);
}

void shade_blocks_shaded_textured_modulated_undithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_shaded_textured_modulated_undithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(shaded, undithered, indirect);
}

void shade_blocks_unshaded_textured_modulated_dithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_dithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, dithered, indirect);
}

void shade_blocks_unshaded_textured_modulated_undithered_indirect(psx_gpu_struct
 *psx_gpu)
{
#if 0
  shade_blocks_unshaded_textured_modulated_undithered_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_modulated_do(unshaded, undithered, indirect);
}
2316
/* Unmodulated textured shading: texels pass through unchanged; only
   the draw mask is computed (mask bits expanded per lane, texel 0x0000
   treated as transparent) before the target store policy runs. */
#define shade_blocks_textured_unmodulated_do(target) \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u draw_mask; \
  vec_8x16u test_mask; \
  u32 draw_mask_bits; \
 \
  vec_8x16u pixels; \
 \
  gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
  shade_blocks_load_msb_mask_##target(); \
 \
  while(num_blocks) \
  { \
    vec_8x16u zero_mask; \
 \
    draw_mask_bits = block->draw_mask_bits; \
    gvdupq_n_u16(draw_mask, draw_mask_bits); \
    gvtstq_u16(draw_mask, draw_mask, test_mask); \
 \
    gvld1q_u16(pixels, block->texels.e); \
 \
    /* zero texels are transparent; merge into the draw mask */ \
    gvceqzq_u16(zero_mask, pixels); \
    gvorrq(zero_mask, draw_mask, zero_mask); \
 \
    shade_blocks_store_##target(zero_mask, pixels); \
 \
    num_blocks--; \
    block++; \
  } \

/* Unmodulated instantiations; #if 0 stubs select the asm versions for
   A/B testing. */
void shade_blocks_textured_unmodulated_indirect(psx_gpu_struct *psx_gpu)
{
#if 0
  shade_blocks_textured_unmodulated_indirect_(psx_gpu);
  return;
#endif
  shade_blocks_textured_unmodulated_do(indirect)
}

void shade_blocks_textured_unmodulated_direct(psx_gpu_struct *psx_gpu)
{
#if 0
  shade_blocks_textured_unmodulated_direct_(psx_gpu);
  return;
#endif
  shade_blocks_textured_unmodulated_do(direct)
}
2365
/* Intentionally empty: for indirect untextured sprites the setup code
   already fills pixels and draw masks into the blocks, so there is
   nothing left for the shade stage to do. */
void shade_blocks_unshaded_untextured_indirect(psx_gpu_struct *psx_gpu)
{
}
2369
/* Flat-color direct write: every block carries the same color, so the
   pixel vector is loaded once from the first block and only each
   block's draw mask varies inside the loop. */
void shade_blocks_unshaded_untextured_direct(psx_gpu_struct *psx_gpu)
{
#if 0
  /* handwritten asm version, kept for A/B testing */
  shade_blocks_unshaded_untextured_direct_(psx_gpu);
  return;
#endif
  block_struct *block = psx_gpu->blocks;
  u32 num_blocks = psx_gpu->num_blocks;

  vec_8x16u pixels;
  gvld1q_u16(pixels, block->texels.e);
  shade_blocks_load_msb_mask_direct();

  while(num_blocks)
  {
    vec_8x16u draw_mask;
    gvld1q_u16(draw_mask, block->draw_mask.e);
    shade_blocks_store_direct(draw_mask, pixels);

    num_blocks--;
    block++;
  }
}
2393
/* mask evaluation on: framebuffer pixels with the msb (mask bit) set
   are protected; fold that into the draw mask */
#define blend_blocks_mask_evaluate_on() \
  vec_8x16u mask_pixels; \
  gvcltzq_s16(mask_pixels, framebuffer_pixels); \
  gvorrq(draw_mask, draw_mask, mask_pixels) \

#define blend_blocks_mask_evaluate_off() \

/* B/2 + F/2: per-channel average without unpacking, using the
   carry-correction term (x ^ y) & 0x0421 (the low bit of each 5-bit
   field) and a halving add */
#define blend_blocks_average() \
{ \
  vec_8x16u pixels_no_msb; \
  vec_8x16u fb_pixels_no_msb; \
 \
  vec_8x16u d128_0x0421; \
 \
  gvdupq_n_u16(d128_0x0421, 0x0421); \
 \
  gveorq(blend_pixels, pixels, framebuffer_pixels); \
  gvbicq(pixels_no_msb, pixels, d128_0x8000); \
  gvand(blend_pixels, blend_pixels, d128_0x0421); \
  gvsubq_u16(blend_pixels, pixels_no_msb, blend_pixels); \
  gvbicq(fb_pixels_no_msb, framebuffer_pixels, d128_0x8000); \
  gvhaddq_u16(blend_pixels, fb_pixels_no_msb, blend_pixels); \
} \

/* B + F with per-channel saturation: red+blue handled together via the
   0x7C1F mask (their fields sit in separate bytes, so a u8 min
   saturates both), green via a u16 min */
#define blend_blocks_add() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u fb_rb, fb_g; \
 \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x03E0; \
 \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
 \
  gvand(pixels_rb, pixels, d128_0x7C1F); \
  gvand(pixels_g, pixels, d128_0x03E0); \
 \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
 \
  gvaddq_u16(fb_rb, fb_rb, pixels_rb); \
  gvaddq_u16(fb_g, fb_g, pixels_g); \
 \
  gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \
  gvminq_u16(fb_g, fb_g, d128_0x03E0); \
 \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

/* B - F clamped at zero: same rb/g split, using saturating subtracts */
#define blend_blocks_subtract() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u fb_rb, fb_g; \
 \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x03E0; \
 \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
 \
  gvand(pixels_rb, pixels, d128_0x7C1F); \
  gvand(pixels_g, pixels, d128_0x03E0); \
 \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
 \
  gvqsubq_u8(fb_rb, fb_rb, pixels_rb); \
  gvqsubq_u16(fb_g, fb_g, pixels_g); \
 \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

/* B + F/4 with saturation: source pixels pre-shifted right by 2, then
   masked with the shifted field masks (0x1C07 / 0x00E0) */
#define blend_blocks_add_fourth() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u pixels_fourth; \
  vec_8x16u fb_rb, fb_g; \
 \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x1C07; \
  vec_8x16u d128_0x03E0; \
  vec_8x16u d128_0x00E0; \
 \
  gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
  gvdupq_n_u16(d128_0x1C07, 0x1C07); \
  gvdupq_n_u16(d128_0x03E0, 0x03E0); \
  gvdupq_n_u16(d128_0x00E0, 0x00E0); \
 \
  gvshrq_n_u16(pixels_fourth, pixels, 2); \
 \
  gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
 \
  gvand(pixels_rb, pixels_fourth, d128_0x1C07); \
  gvand(pixels_g, pixels_fourth, d128_0x00E0); \
 \
  gvaddq_u16(fb_rb, fb_rb, pixels_rb); \
  gvaddq_u16(fb_g, fb_g, pixels_g); \
 \
  gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \
  gvminq_u16(fb_g, fb_g, d128_0x03E0); \
 \
  gvorrq(blend_pixels, fb_rb, fb_g); \
} \

/* textured: only texels with the msb set are semi-transparent; opaque
   texels replace the blended result, blended texels keep the msb */
#define blend_blocks_blended_combine_textured() \
{ \
  vec_8x16u blend_mask; \
  gvcltzq_s16(blend_mask, pixels); \
 \
  gvorrq(blend_pixels, blend_pixels, d128_0x8000); \
  gvbifq(blend_pixels, pixels, blend_mask); \
} \

#define blend_blocks_blended_combine_untextured() \

#define blend_blocks_body_blend(blend_mode, texturing) \
{ \
  blend_blocks_##blend_mode(); \
  blend_blocks_blended_combine_##texturing(); \
} \

#define blend_blocks_body_average(texturing) \
  blend_blocks_body_blend(average, texturing) \

#define blend_blocks_body_add(texturing) \
  blend_blocks_body_blend(add, texturing) \

#define blend_blocks_body_subtract(texturing) \
  blend_blocks_body_blend(subtract, texturing) \

#define blend_blocks_body_add_fourth(texturing) \
  blend_blocks_body_blend(add_fourth, texturing) \

#define blend_blocks_body_unblended(texturing) \
  blend_pixels = pixels \

/* Blend pass driver: for every queued block, combine its shaded pixels
   with the framebuffer under the chosen blend mode / mask policy, then
   write back through the draw mask (gvbifq keeps masked lanes). */
#define blend_blocks_do(texturing, blend_mode, mask_evaluate) \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u draw_mask; \
  vec_8x16u pixels; \
  vec_8x16u blend_pixels; \
  vec_8x16u framebuffer_pixels; \
  vec_8x16u msb_mask; \
  vec_8x16u d128_0x8000; \
 \
  u16 *fb_ptr; \
 \
  gvdupq_n_u16(d128_0x8000, 0x8000); \
  gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \
  (void)d128_0x8000; /* sometimes unused */ \
 \
  while(num_blocks) \
  { \
    gvld1q_u16(pixels, block->pixels.e); \
    gvld1q_u16(draw_mask, block->draw_mask.e); \
    fb_ptr = block->fb_ptr; \
 \
    gvld1q_u16(framebuffer_pixels, fb_ptr); \
 \
    blend_blocks_mask_evaluate_##mask_evaluate(); \
    blend_blocks_body_##blend_mode(texturing); \
 \
    gvorrq(blend_pixels, blend_pixels, msb_mask); \
    gvbifq(framebuffer_pixels, blend_pixels, draw_mask); \
    gvst1q_u16(framebuffer_pixels, fb_ptr); \
 \
    num_blocks--; \
    block++; \
  } \

2567
/* Instantiations of blend_blocks_do for each texturing mode, blend
   mode (average = B/2+F/2, add = B+F, subtract = B-F, add_fourth =
   B+F/4, unblended) and mask-evaluation setting. The #if 0 stubs
   switch in the asm versions (trailing underscore) for A/B testing. */
void blend_blocks_textured_average_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_average_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, average, off);
}

void blend_blocks_untextured_average_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_average_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, average, off);
}

void blend_blocks_textured_average_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_average_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, average, on);
}

void blend_blocks_untextured_average_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_average_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, average, on);
}

void blend_blocks_textured_add_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add, off);
}

void blend_blocks_textured_add_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add, on);
}

void blend_blocks_untextured_add_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add, off);
}

void blend_blocks_untextured_add_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add, on);
}

void blend_blocks_textured_subtract_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_subtract_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, subtract, off);
}

void blend_blocks_textured_subtract_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_subtract_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, subtract, on);
}

void blend_blocks_untextured_subtract_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_subtract_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, subtract, off);
}

void blend_blocks_untextured_subtract_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_subtract_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, subtract, on);
}

void blend_blocks_textured_add_fourth_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_fourth_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add_fourth, off);
}

void blend_blocks_textured_add_fourth_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_add_fourth_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, add_fourth, on);
}

void blend_blocks_untextured_add_fourth_off(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_fourth_off_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add_fourth, off);
}

void blend_blocks_untextured_add_fourth_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_untextured_add_fourth_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(untextured, add_fourth, on);
}

void blend_blocks_textured_unblended_on(psx_gpu_struct *psx_gpu)
{
#if 0
  blend_blocks_textured_unblended_on_(psx_gpu);
  return;
#endif
  blend_blocks_do(textured, unblended, on);
}

/* unblended + mask-evaluate off would be a pure pass-through of pixels
   already written by the shade stage, so this is a no-op */
void blend_blocks_textured_unblended_off(psx_gpu_struct *psx_gpu)
{
}
2724
/* Queue render blocks for a flat-color sprite. If no masking,
   modulation, blending or interlacing is active, a dedicated simple
   fill path is taken instead. Otherwise the sprite is cut into rows of
   8-pixel blocks; full blocks get an all-zero (draw everything) mask
   and the final block of each row masks off pixels past the width. */
void setup_sprite_untextured(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u,
 s32 v, s32 width, s32 height, u32 color)
{
  if((psx_gpu->render_state & (RENDER_STATE_MASK_EVALUATE |
   RENDER_FLAGS_MODULATE_TEXELS | RENDER_FLAGS_BLEND)) == 0 &&
   (psx_gpu->render_mode & RENDER_INTERLACE_ENABLED) == 0)
  {
    setup_sprite_untextured_simple(psx_gpu, x, y, u, v, width, height, color);
    return;
  }

#if 0
  /* handwritten asm version, kept for A/B testing */
  setup_sprite_untextured_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  // bits set in right_mask_bits correspond to pixels beyond the
  // sprite's right edge in the last block of a row
  u32 right_width = ((width - 1) & 0x7) + 1;
  u32 right_mask_bits = (0xFF << right_width);
  u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + x;
  u32 block_width = (width + 7) / 8;
  u32 fb_ptr_pitch = 1024 - ((block_width - 1) * 8);
  u32 blocks_remaining;
  u32 num_blocks = psx_gpu->num_blocks;
  block_struct *block = psx_gpu->blocks + num_blocks;

  u32 color_r = color & 0xFF;
  u32 color_g = (color >> 8) & 0xFF;
  u32 color_b = (color >> 16) & 0xFF;
  vec_8x16u colors;
  vec_8x16u right_mask;
  vec_8x16u test_mask;
  vec_8x16u zero_mask;

  gvld1q_u16(test_mask, psx_gpu->test_mask.e);
  // pack 8:8:8 color down to 5:5:5
  color = (color_r >> 3) | ((color_g >> 3) << 5) | ((color_b >> 3) << 10);

  gvdupq_n_u16(colors, color);
  gvdupq_n_u16(zero_mask, 0x00);
  gvdupq_n_u16(right_mask, right_mask_bits);
  // expand the per-pixel mask bits into full 16-bit lane masks
  gvtstq_u16(right_mask, right_mask, test_mask);

  while(height)
  {
    blocks_remaining = block_width - 1;
    num_blocks += block_width;

    // flush and restart the queue if this row would overflow it
    if(num_blocks > MAX_BLOCKS)
    {
      flush_render_block_buffer(psx_gpu);
      num_blocks = block_width;
      block = psx_gpu->blocks;
    }

    // full blocks: draw all 8 pixels (zero mask)
    while(blocks_remaining)
    {
      gvst1q_u16(colors, block->pixels.e);
      gvst1q_u16(zero_mask, block->draw_mask.e);
      block->fb_ptr = fb_ptr;

      fb_ptr += 8;
      block++;
      blocks_remaining--;
    }

    // last block of the row: mask off pixels past the sprite edge
    gvst1q_u16(colors, block->pixels.e);
    gvst1q_u16(right_mask, block->draw_mask.e);
    block->fb_ptr = fb_ptr;

    block++;
    fb_ptr += fb_ptr_pitch;

    height--;
    psx_gpu->num_blocks = num_blocks;
  }
}
2799
2800#define setup_sprite_tiled_initialize_4bpp_clut() \
2801 vec_16x8u clut_low, clut_high; \
2802 \
2803 gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr) \
2804
2805#define setup_sprite_tiled_initialize_4bpp() \
2806 setup_sprite_tiled_initialize_4bpp_clut(); \
2807 \
2808 if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask) \
2809 update_texture_4bpp_cache(psx_gpu) \
2810
2811#define setup_sprite_tiled_initialize_8bpp() \
2812 if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) \
2813 update_texture_8bpp_cache(psx_gpu) \
2814
2815#define setup_sprite_tile_fetch_texel_block_8bpp(offset) \
2816 texture_block_ptr = psx_gpu->texture_page_ptr + \
2817 ((texture_offset + offset) & texture_mask); \
2818 \
2819 gvld1_u8(texels, (u8 *)texture_block_ptr) \
2820
2821#define setup_sprite_tile_add_blocks(tile_num_blocks) \
2822 num_blocks += tile_num_blocks; \
2823 \
2824 if(num_blocks > MAX_BLOCKS) \
2825 { \
2826 flush_render_block_buffer(psx_gpu); \
2827 num_blocks = tile_num_blocks; \
2828 block = psx_gpu->blocks; \
2829 } \
2830
2831#define setup_sprite_tile_full_4bpp(edge) \
2832{ \
2833 vec_8x8u texels_low, texels_high; \
2834 setup_sprite_tile_add_blocks(sub_tile_height * 2); \
2835 \
2836 while(sub_tile_height) \
2837 { \
2838 setup_sprite_tile_fetch_texel_block_8bpp(0); \
2839 gvtbl2_u8(texels_low, clut_low, texels); \
2840 gvtbl2_u8(texels_high, clut_high, texels); \
2841 \
2842 gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
2843 block->draw_mask_bits = left_mask_bits; \
2844 block->fb_ptr = fb_ptr; \
2845 block++; \
2846 \
2847 setup_sprite_tile_fetch_texel_block_8bpp(8); \
2848 gvtbl2_u8(texels_low, clut_low, texels); \
2849 gvtbl2_u8(texels_high, clut_high, texels); \
2850 \
2851 gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
2852 block->draw_mask_bits = right_mask_bits; \
2853 block->fb_ptr = fb_ptr + 8; \
2854 block++; \
2855 \
2856 fb_ptr += 1024; \
2857 texture_offset += 0x10; \
2858 sub_tile_height--; \
2859 } \
2860 texture_offset += 0xF00; \
2861 psx_gpu->num_blocks = num_blocks; \
2862} \
2863
2864#define setup_sprite_tile_half_4bpp(edge) \
2865{ \
2866 vec_8x8u texels_low, texels_high; \
2867 setup_sprite_tile_add_blocks(sub_tile_height); \
2868 \
2869 while(sub_tile_height) \
2870 { \
2871 setup_sprite_tile_fetch_texel_block_8bpp(0); \
2872 gvtbl2_u8(texels_low, clut_low, texels); \
2873 gvtbl2_u8(texels_high, clut_high, texels); \
2874 \
2875 gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
2876 block->draw_mask_bits = edge##_mask_bits; \
2877 block->fb_ptr = fb_ptr; \
2878 block++; \
2879 \
2880 fb_ptr += 1024; \
2881 texture_offset += 0x10; \
2882 sub_tile_height--; \
2883 } \
2884 texture_offset += 0xF00; \
2885 psx_gpu->num_blocks = num_blocks; \
2886} \
2887
2888#define setup_sprite_tile_full_8bpp(edge) \
2889{ \
2890 setup_sprite_tile_add_blocks(sub_tile_height * 2); \
2891 \
2892 while(sub_tile_height) \
2893 { \
2894 setup_sprite_tile_fetch_texel_block_8bpp(0); \
2895 gvst1_u8(texels, block->r.e); \
2896 block->draw_mask_bits = left_mask_bits; \
2897 block->fb_ptr = fb_ptr; \
2898 block++; \
2899 \
2900 setup_sprite_tile_fetch_texel_block_8bpp(8); \
2901 gvst1_u8(texels, block->r.e); \
2902 block->draw_mask_bits = right_mask_bits; \
2903 block->fb_ptr = fb_ptr + 8; \
2904 block++; \
2905 \
2906 fb_ptr += 1024; \
2907 texture_offset += 0x10; \
2908 sub_tile_height--; \
2909 } \
2910 texture_offset += 0xF00; \
2911 psx_gpu->num_blocks = num_blocks; \
2912} \
2913
2914#define setup_sprite_tile_half_8bpp(edge) \
2915{ \
df740cdc 2916 setup_sprite_tile_add_blocks(sub_tile_height); \
a2cb152a 2917 \
2918 while(sub_tile_height) \
2919 { \
2920 setup_sprite_tile_fetch_texel_block_8bpp(0); \
2921 gvst1_u8(texels, block->r.e); \
2922 block->draw_mask_bits = edge##_mask_bits; \
2923 block->fb_ptr = fb_ptr; \
2924 block++; \
2925 \
2926 fb_ptr += 1024; \
2927 texture_offset += 0x10; \
2928 sub_tile_height--; \
2929 } \
2930 texture_offset += 0xF00; \
2931 psx_gpu->num_blocks = num_blocks; \
2932} \
2933
2934#define setup_sprite_tile_column_edge_pre_adjust_half_right() \
2935 texture_offset = texture_offset_base + 8; \
2936 fb_ptr += 8 \
2937
2938#define setup_sprite_tile_column_edge_pre_adjust_half_left() \
2939 texture_offset = texture_offset_base \
2940
2941#define setup_sprite_tile_column_edge_pre_adjust_half(edge) \
2942 setup_sprite_tile_column_edge_pre_adjust_half_##edge() \
2943
2944#define setup_sprite_tile_column_edge_pre_adjust_full(edge) \
2945 texture_offset = texture_offset_base \
2946
2947#define setup_sprite_tile_column_edge_post_adjust_half_right() \
2948 fb_ptr -= 8 \
2949
2950#define setup_sprite_tile_column_edge_post_adjust_half_left() \
2951
2952#define setup_sprite_tile_column_edge_post_adjust_half(edge) \
2953 setup_sprite_tile_column_edge_post_adjust_half_##edge() \
2954
2955#define setup_sprite_tile_column_edge_post_adjust_full(edge) \
2956
2957
2958#define setup_sprite_tile_column_height_single(edge_mode, edge, texture_mode, \
2959 x4mode) \
2960do \
2961{ \
2962 sub_tile_height = column_data; \
2963 setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \
2964 setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
2965 setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \
2966} while(0) \
2967
2968#define setup_sprite_tile_column_height_multi(edge_mode, edge, texture_mode, \
2969 x4mode) \
2970do \
2971{ \
2972 u32 tiles_remaining = column_data >> 16; \
2973 sub_tile_height = column_data & 0xFF; \
2974 setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \
2975 setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
2976 tiles_remaining -= 1; \
2977 \
2978 while(tiles_remaining) \
2979 { \
2980 sub_tile_height = 16; \
2981 setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
2982 tiles_remaining--; \
2983 } \
2984 \
2985 sub_tile_height = (column_data >> 8) & 0xFF; \
2986 setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
2987 setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \
2988} while(0) \
2989
2990
2991#define setup_sprite_column_data_single() \
2992 column_data = height \
2993
2994#define setup_sprite_column_data_multi() \
2995 column_data = 16 - offset_v; \
2996 column_data |= ((height_rounded & 0xF) + 1) << 8; \
2997 column_data |= (tile_height - 1) << 16 \
2998
2999#define RIGHT_MASK_BIT_SHIFT 8
3000#define RIGHT_MASK_BIT_SHIFT_4x 16
3001
3002#define setup_sprite_tile_column_width_single(texture_mode, multi_height, \
3003 edge_mode, edge, x4mode) \
3004{ \
3005 setup_sprite_column_data_##multi_height(); \
3006 left_mask_bits = left_block_mask | right_block_mask; \
3007 right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
3008 \
3009 setup_sprite_tile_column_height_##multi_height(edge_mode, edge, \
3010 texture_mode, x4mode); \
3011} \
3012
3013#define setup_sprite_tiled_advance_column() \
3014 texture_offset_base += 0x100; \
3015 if((texture_offset_base & 0xF00) == 0) \
3016 texture_offset_base -= (0x100 + 0xF00) \
3017
3018#define FB_PTR_MULTIPLIER 1
3019#define FB_PTR_MULTIPLIER_4x 2
3020
/* Draw a sprite spanning multiple tile columns: the leftmost column with
   left_mode edge handling, then fully-unmasked middle columns (mask bits
   cleared), then the rightmost column with right_mode edge handling.
   fb_ptr_advance_column rewinds fb_ptr from the bottom of one column to the
   top of the next (16 pixels right, height rows up). */
#define setup_sprite_tile_column_width_multi(texture_mode, multi_height,     \
 left_mode, right_mode, x4mode)                                              \
{                                                                            \
  setup_sprite_column_data_##multi_height();                                 \
  s32 fb_ptr_advance_column = (16 - (1024 * height))                         \
   * FB_PTR_MULTIPLIER##x4mode;                                              \
                                                                             \
  tile_width -= 2;                                                           \
  left_mask_bits = left_block_mask;                                          \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode;          \
                                                                             \
  setup_sprite_tile_column_height_##multi_height(left_mode, right,           \
   texture_mode, x4mode);                                                    \
  fb_ptr += fb_ptr_advance_column;                                           \
                                                                             \
  left_mask_bits = 0x00;                                                     \
  right_mask_bits = 0x00;                                                    \
                                                                             \
  while(tile_width)                                                          \
  {                                                                          \
    setup_sprite_tiled_advance_column();                                     \
    setup_sprite_tile_column_height_##multi_height(full, none,               \
     texture_mode, x4mode);                                                  \
    fb_ptr += fb_ptr_advance_column;                                         \
    tile_width--;                                                            \
  }                                                                          \
                                                                             \
  left_mask_bits = right_block_mask;                                         \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode;          \
                                                                             \
  setup_sprite_tiled_advance_column();                                       \
  setup_sprite_tile_column_height_##multi_height(right_mode, left,           \
   texture_mode, x4mode);                                                    \
}                                                                            \

3056
/* 4x (enhanced resolution) variants.
   4bpp 4x uses the same CLUT setup as the plain 4bpp path. */
#define setup_sprite_tiled_initialize_4bpp_4x()                              \
  setup_sprite_tiled_initialize_4bpp_clut()                                  \

/* 8bpp needs no CLUT initialization. */
#define setup_sprite_tiled_initialize_8bpp_4x()                              \

/* 4bpp 4x, full-width tile row: texels are looked up through the two CLUT
   halves (low/high bytes of the 16bpp color) and zipped into 16bpp pixels.
   Each 4-pixel half is stored with gvst2 using the same vector for both
   streams, duplicating every pixel horizontally, and every block is emitted
   for two adjacent output rows (fb_ptr and fb_ptr + 1024) - together the
   2x2 enhanced-resolution upscale.  Eight blocks per source texel row. */
#define setup_sprite_tile_full_4bpp_4x(edge)                                 \
{                                                                            \
  vec_8x8u texels_low, texels_high;                                          \
  vec_8x16u pixels;                                                          \
  vec_4x16u pixels_half;                                                     \
  setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4);                     \
  u32 left_mask_bits_a = left_mask_bits & 0xFF;                              \
  u32 left_mask_bits_b = left_mask_bits >> 8;                                \
  u32 right_mask_bits_a = right_mask_bits & 0xFF;                            \
  u32 right_mask_bits_b = right_mask_bits >> 8;                              \
                                                                             \
  while(sub_tile_height)                                                     \
  {                                                                          \
    setup_sprite_tile_fetch_texel_block_8bpp(0);                             \
    gvtbl2_u8(texels_low, clut_low, texels);                                 \
    gvtbl2_u8(texels_high, clut_high, texels);                               \
    gvzip_u8(pixels, texels_low, texels_high);                               \
                                                                             \
    gvget_lo(pixels_half, pixels);                                           \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = left_mask_bits_a;                                \
    block->fb_ptr = fb_ptr;                                                  \
    block++;                                                                 \
                                                                             \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = left_mask_bits_a;                                \
    block->fb_ptr = fb_ptr + 1024;                                           \
    block++;                                                                 \
                                                                             \
    gvget_hi(pixels_half, pixels);                                           \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = left_mask_bits_b;                                \
    block->fb_ptr = fb_ptr + 8;                                              \
    block++;                                                                 \
                                                                             \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = left_mask_bits_b;                                \
    block->fb_ptr = fb_ptr + 1024 + 8;                                       \
    block++;                                                                 \
                                                                             \
    setup_sprite_tile_fetch_texel_block_8bpp(8);                             \
    gvtbl2_u8(texels_low, clut_low, texels);                                 \
    gvtbl2_u8(texels_high, clut_high, texels);                               \
    gvzip_u8(pixels, texels_low, texels_high);                               \
                                                                             \
    gvget_lo(pixels_half, pixels);                                           \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = right_mask_bits_a;                               \
    block->fb_ptr = fb_ptr + 16;                                             \
    block++;                                                                 \
                                                                             \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = right_mask_bits_a;                               \
    block->fb_ptr = fb_ptr + 1024 + 16;                                      \
    block++;                                                                 \
                                                                             \
    gvget_hi(pixels_half, pixels);                                           \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = right_mask_bits_b;                               \
    block->fb_ptr = fb_ptr + 24;                                             \
    block++;                                                                 \
                                                                             \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = right_mask_bits_b;                               \
    block->fb_ptr = fb_ptr + 1024 + 24;                                      \
    block++;                                                                 \
                                                                             \
    fb_ptr += 2048;                                                          \
    texture_offset += 0x10;                                                  \
    sub_tile_height--;                                                       \
  }                                                                          \
  texture_offset += 0xF00;                                                   \
  psx_gpu->num_blocks = num_blocks;                                          \
}                                                                            \

/* 4bpp 4x, half tile row: same as the full variant but only the left 8
   texels of the tile are fetched, so only one mask pair (left or right,
   chosen via the edge token) and four blocks per row are needed. */
#define setup_sprite_tile_half_4bpp_4x(edge)                                 \
{                                                                            \
  vec_8x8u texels_low, texels_high;                                          \
  vec_8x16u pixels;                                                          \
  vec_4x16u pixels_half;                                                     \
  setup_sprite_tile_add_blocks(sub_tile_height * 4);                         \
  u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF;                          \
  u32 edge##_mask_bits_b = edge##_mask_bits >> 8;                            \
                                                                             \
  while(sub_tile_height)                                                     \
  {                                                                          \
    setup_sprite_tile_fetch_texel_block_8bpp(0);                             \
    gvtbl2_u8(texels_low, clut_low, texels);                                 \
    gvtbl2_u8(texels_high, clut_high, texels);                               \
    gvzip_u8(pixels, texels_low, texels_high);                               \
                                                                             \
    gvget_lo(pixels_half, pixels);                                           \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = edge##_mask_bits_a;                              \
    block->fb_ptr = fb_ptr;                                                  \
    block++;                                                                 \
                                                                             \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = edge##_mask_bits_a;                              \
    block->fb_ptr = fb_ptr + 1024;                                           \
    block++;                                                                 \
                                                                             \
    gvget_hi(pixels_half, pixels);                                           \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = edge##_mask_bits_b;                              \
    block->fb_ptr = fb_ptr + 8;                                              \
    block++;                                                                 \
                                                                             \
    gvst2_u16(pixels_half, pixels_half, block->texels.e);                    \
    block->draw_mask_bits = edge##_mask_bits_b;                              \
    block->fb_ptr = fb_ptr + 1024 + 8;                                       \
    block++;                                                                 \
                                                                             \
    fb_ptr += 2048;                                                          \
    texture_offset += 0x10;                                                  \
    sub_tile_height--;                                                       \
  }                                                                          \
  texture_offset += 0xF00;                                                   \
  psx_gpu->num_blocks = num_blocks;                                          \
}                                                                            \

/* 8bpp 4x, full-width tile row: no CLUT pass here - texels are zipped with
   themselves (gvzip_u8) to duplicate each byte horizontally, then each half
   is stored into block->r and emitted for two adjacent output rows
   (fb_ptr and fb_ptr + 1024).  Eight blocks per source texel row. */
#define setup_sprite_tile_full_8bpp_4x(edge)                                 \
{                                                                            \
  setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4);                     \
  vec_8x16u texels_wide;                                                     \
  vec_4x16u texels_half;                                                     \
  u32 left_mask_bits_a = left_mask_bits & 0xFF;                              \
  u32 left_mask_bits_b = left_mask_bits >> 8;                                \
  u32 right_mask_bits_a = right_mask_bits & 0xFF;                            \
  u32 right_mask_bits_b = right_mask_bits >> 8;                              \
                                                                             \
  while(sub_tile_height)                                                     \
  {                                                                          \
    setup_sprite_tile_fetch_texel_block_8bpp(0);                             \
    gvzip_u8(texels_wide, texels, texels);                                   \
    gvget_lo(texels_half, texels_wide);                                      \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = left_mask_bits_a;                                \
    block->fb_ptr = fb_ptr;                                                  \
    block++;                                                                 \
                                                                             \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = left_mask_bits_a;                                \
    block->fb_ptr = fb_ptr + 1024;                                           \
    block++;                                                                 \
                                                                             \
    gvget_hi(texels_half, texels_wide);                                      \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = left_mask_bits_b;                                \
    block->fb_ptr = fb_ptr + 8;                                              \
    block++;                                                                 \
                                                                             \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = left_mask_bits_b;                                \
    block->fb_ptr = fb_ptr + 1024 + 8;                                       \
    block++;                                                                 \
                                                                             \
    setup_sprite_tile_fetch_texel_block_8bpp(8);                             \
    gvzip_u8(texels_wide, texels, texels);                                   \
    gvget_lo(texels_half, texels_wide);                                      \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = right_mask_bits_a;                               \
    block->fb_ptr = fb_ptr + 16;                                             \
    block++;                                                                 \
                                                                             \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = right_mask_bits_a;                               \
    block->fb_ptr = fb_ptr + 1024 + 16;                                      \
    block++;                                                                 \
                                                                             \
    gvget_hi(texels_half, texels_wide);                                      \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = right_mask_bits_b;                               \
    block->fb_ptr = fb_ptr + 24;                                             \
    block++;                                                                 \
                                                                             \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = right_mask_bits_b;                               \
    block->fb_ptr = fb_ptr + 24 + 1024;                                      \
    block++;                                                                 \
                                                                             \
    fb_ptr += 2048;                                                          \
    texture_offset += 0x10;                                                  \
    sub_tile_height--;                                                       \
  }                                                                          \
  texture_offset += 0xF00;                                                   \
  psx_gpu->num_blocks = num_blocks;                                          \
}                                                                            \

/* 8bpp 4x, half tile row: only the left 8 texels of the tile are fetched,
   so one mask pair (chosen via the edge token) and four blocks per row. */
#define setup_sprite_tile_half_8bpp_4x(edge)                                 \
{                                                                            \
  setup_sprite_tile_add_blocks(sub_tile_height * 4);                         \
  vec_8x16u texels_wide;                                                     \
  vec_4x16u texels_half;                                                     \
  u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF;                          \
  u32 edge##_mask_bits_b = edge##_mask_bits >> 8;                            \
                                                                             \
  while(sub_tile_height)                                                     \
  {                                                                          \
    setup_sprite_tile_fetch_texel_block_8bpp(0);                             \
    gvzip_u8(texels_wide, texels, texels);                                   \
    gvget_lo(texels_half, texels_wide);                                      \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = edge##_mask_bits_a;                              \
    block->fb_ptr = fb_ptr;                                                  \
    block++;                                                                 \
                                                                             \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = edge##_mask_bits_a;                              \
    block->fb_ptr = fb_ptr + 1024;                                           \
    block++;                                                                 \
                                                                             \
    gvget_hi(texels_half, texels_wide);                                      \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = edge##_mask_bits_b;                              \
    block->fb_ptr = fb_ptr + 8;                                              \
    block++;                                                                 \
                                                                             \
    gvst1_u8(texels_half, block->r.e);                                       \
    block->draw_mask_bits = edge##_mask_bits_b;                              \
    block->fb_ptr = fb_ptr + 8 + 1024;                                       \
    block++;                                                                 \
                                                                             \
    fb_ptr += 2048;                                                          \
    texture_offset += 0x10;                                                  \
    sub_tile_height--;                                                       \
  }                                                                          \
  texture_offset += 0xF00;                                                   \
  psx_gpu->num_blocks = num_blocks;                                          \
}                                                                            \

/* Pre/post adjustments around half-tile edge columns in 4x mode.  A "half
   right" edge renders only the right 8 texels of the tile, so the texture
   offset starts 8 texels in and fb_ptr moves 16 output pixels right (and
   is moved back afterwards).  "half left" and "full" need no fb_ptr
   adjustment. */
#define setup_sprite_tile_column_edge_pre_adjust_half_right_4x()             \
  texture_offset = texture_offset_base + 8;                                  \
  fb_ptr += 16                                                               \

#define setup_sprite_tile_column_edge_pre_adjust_half_left_4x()              \
  texture_offset = texture_offset_base                                       \

#define setup_sprite_tile_column_edge_pre_adjust_half_4x(edge)               \
  setup_sprite_tile_column_edge_pre_adjust_half_##edge##_4x()                \

#define setup_sprite_tile_column_edge_pre_adjust_full_4x(edge)               \
  texture_offset = texture_offset_base                                       \

#define setup_sprite_tile_column_edge_post_adjust_half_right_4x()            \
  fb_ptr -= 16                                                               \

#define setup_sprite_tile_column_edge_post_adjust_half_left_4x()             \

#define setup_sprite_tile_column_edge_post_adjust_half_4x(edge)              \
  setup_sprite_tile_column_edge_post_adjust_half_##edge##_4x()               \

#define setup_sprite_tile_column_edge_post_adjust_full_4x(edge)              \

/* Edge-block mask comparisons (the "comapre" spelling is historical;
   renaming would have to touch every expansion site at once).  Each tests
   whether one whole edge block of the sprite is masked off, in which case
   the edge tile only needs half-tile rendering (see control_mask bits 2/3
   in setup_sprite_tiled_do).  Plain mode uses 8-bit block masks, 4x mode
   16-bit; offset_u doubles in 4x mode because output pixels are doubled. */
#define setup_sprite_offset_u_adjust()                                       \

#define setup_sprite_comapre_left_block_mask()                               \
  ((left_block_mask & 0xFF) == 0xFF)                                         \

#define setup_sprite_comapre_right_block_mask()                              \
  (((right_block_mask >> 8) & 0xFF) == 0xFF)                                 \

#define setup_sprite_offset_u_adjust_4x()                                    \
  offset_u *= 2;                                                             \
  offset_u_right = offset_u_right * 2 + 1                                    \

#define setup_sprite_comapre_left_block_mask_4x()                            \
  ((left_block_mask & 0xFFFF) == 0xFFFF)                                     \

#define setup_sprite_comapre_right_block_mask_4x()                           \
  (((right_block_mask >> 16) & 0xFFFF) == 0xFFFF)                            \

/* Main tiled sprite setup.  Computes tile counts, per-edge block masks and
   the packed texture offset (bits 4-7 = v & 0xF, bits 8-11 = u tile,
   bits 12+ = v tile), then dispatches on a 4-bit control mask:
     bit 0: sprite is one tile column wide
     bit 1: sprite is one tile row tall
     bit 2: left edge block completely masked
     bit 3: right edge block completely masked
   Combinations 0xD and 0xF fall through to the default (0x0) case. */
#define setup_sprite_tiled_do(texture_mode, x4mode)                          \
  s32 offset_u = u & 0xF;                                                    \
  s32 offset_v = v & 0xF;                                                    \
                                                                             \
  s32 width_rounded = offset_u + width + 15;                                 \
  s32 height_rounded = offset_v + height + 15;                               \
  s32 tile_height = height_rounded / 16;                                     \
  s32 tile_width = width_rounded / 16;                                       \
  u32 offset_u_right = width_rounded & 0xF;                                  \
                                                                             \
  setup_sprite_offset_u_adjust##x4mode();                                    \
                                                                             \
  u32 left_block_mask = ~(0xFFFFFFFF << offset_u);                           \
  u32 right_block_mask = 0xFFFFFFFE << offset_u_right;                       \
                                                                             \
  u32 left_mask_bits;                                                        \
  u32 right_mask_bits;                                                       \
                                                                             \
  u32 sub_tile_height;                                                       \
  u32 column_data;                                                           \
                                                                             \
  u32 texture_mask = (psx_gpu->texture_mask_width & 0xF) |                   \
   ((psx_gpu->texture_mask_height & 0xF) << 4) |                             \
   ((psx_gpu->texture_mask_width >> 4) << 8) |                               \
   ((psx_gpu->texture_mask_height >> 4) << 12);                              \
  u32 texture_offset = ((v & 0xF) << 4) | ((u & 0xF0) << 4) |                \
   ((v & 0xF0) << 8);                                                        \
  u32 texture_offset_base = texture_offset;                                  \
  u32 control_mask;                                                          \
                                                                             \
  u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + (x - offset_u);         \
  u32 num_blocks = psx_gpu->num_blocks;                                      \
  block_struct *block = psx_gpu->blocks + num_blocks;                        \
                                                                             \
  u16 *texture_block_ptr;                                                    \
  vec_8x8u texels;                                                           \
                                                                             \
  setup_sprite_tiled_initialize_##texture_mode##x4mode();                    \
                                                                             \
  control_mask = tile_width == 1;                                            \
  control_mask |= (tile_height == 1) << 1;                                   \
  control_mask |= setup_sprite_comapre_left_block_mask##x4mode() << 2;       \
  control_mask |= setup_sprite_comapre_right_block_mask##x4mode() << 3;      \
                                                                             \
  switch(control_mask)                                                       \
  {                                                                          \
    default:                                                                 \
    case 0x0:                                                                \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, full,  \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0x1:                                                                \
      setup_sprite_tile_column_width_single(texture_mode, multi, full, none, \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0x2:                                                                \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, full, \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0x3:                                                                \
      setup_sprite_tile_column_width_single(texture_mode, single, full, none,\
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0x4:                                                                \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, full,  \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0x5:                                                                \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, right,\
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0x6:                                                                \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, full, \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0x7:                                                                \
      setup_sprite_tile_column_width_single(texture_mode, single, half,      \
       right, x4mode);                                                       \
      break;                                                                 \
                                                                             \
    case 0x8:                                                                \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, half,  \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0x9:                                                                \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, left, \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0xA:                                                                \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, half, \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0xB:                                                                \
      setup_sprite_tile_column_width_single(texture_mode, single, half, left,\
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0xC:                                                                \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, half,  \
       x4mode);                                                              \
      break;                                                                 \
                                                                             \
    case 0xE:                                                                \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, half, \
       x4mode);                                                              \
      break;                                                                 \
  }                                                                          \

/* Sprite setup entry point, 4bpp (16-color CLUT) textures, native
   resolution. */
void setup_sprite_4bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0 /* alternative implementation, kept for debugging comparison */
  setup_sprite_4bpp_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(4bpp,)
}
3463
/* Sprite setup entry point, 8bpp (256-color) textures, native resolution. */
void setup_sprite_8bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0 /* alternative implementation, kept for debugging comparison */
  setup_sprite_8bpp_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(8bpp,)
}
3473
/* done with the native-resolution draw-mask helpers; the 4x entry points
   below must not pick them up */
#undef draw_mask_fb_ptr_left
#undef draw_mask_fb_ptr_right
3476
/* Sprite setup entry point, 4bpp textures, 2x enhanced resolution. */
void setup_sprite_4bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0 /* alternative implementation, kept for debugging comparison */
  setup_sprite_4bpp_4x_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(4bpp, _4x)
}
3486
/* Sprite setup entry point, 8bpp textures, 2x enhanced resolution. */
void setup_sprite_8bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
 s32 width, s32 height, u32 color)
{
#if 0 /* alternative implementation, kept for debugging comparison */
  setup_sprite_8bpp_4x_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(8bpp, _4x)
}
3496
3497
3498void scale2x_tiles8(void * __restrict__ dst_, const void * __restrict__ src_, int w8, int h)
3499{
3500#if 0
df740cdc 3501 scale2x_tiles8_(dst_, src_, w8, h);
a2cb152a 3502 return;
3503#endif
3504 const u16 * __restrict__ src = src_;
3505 const u16 * __restrict__ src1;
3506 u16 * __restrict__ dst = dst_;
3507 u16 * __restrict__ dst1;
3508 gvreg a, b;
3509 int w;
3510 for (; h > 0; h--, src += 1024, dst += 1024*2)
3511 {
3512 src1 = src;
3513 dst1 = dst;
3514 for (w = w8; w > 0; w--, src1 += 8, dst1 += 8*2)
3515 {
3516 gvld1q_u16(a, src1);
3517 gvzipq_u16(a, b, a, a);
3518 gvst1q_u16(a, dst1);
3519 gvst1q_u16(b, dst1 + 8);
3520 gvst1q_u16(a, dst1 + 1024);
3521 gvst1q_u16(b, dst1 + 1024 + 8);
3522 }
3523 }
3524}
3525
3526// vim:ts=2:sw=2:expandtab