gpu_neon: don't crash on large primitives in enhancement mode
[pcsx_rearmed.git] plugins/gpu_neon/psx_gpu/psx_gpu_simd.c
a2cb152a 1/*
2 * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
 3 * Copyright (C) 2022 Gražvydas Ignotas "notaz" <notasas@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation; either version 2 of
8 * the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16#include <string.h>
17#include "psx_gpu.h"
18#include "psx_gpu_simd.h"
19//#define ASM_PROTOTYPES
20//#include "psx_gpu_simd.h"
9088aca1 21#ifdef __SSE2__
22#include <x86intrin.h>
23#endif
a2cb152a 24#ifndef SIMD_BUILD
25#error "please define SIMD_BUILD if you want this gpu_neon C simd implementation"
26#endif
27
28typedef u8 gvu8 __attribute__((vector_size(16)));
29typedef u16 gvu16 __attribute__((vector_size(16)));
30typedef u32 gvu32 __attribute__((vector_size(16)));
31typedef u64 gvu64 __attribute__((vector_size(16)));
32typedef s8 gvs8 __attribute__((vector_size(16)));
33typedef s16 gvs16 __attribute__((vector_size(16)));
34typedef s32 gvs32 __attribute__((vector_size(16)));
35typedef s64 gvs64 __attribute__((vector_size(16)));
36
37typedef u8 gvhu8 __attribute__((vector_size(8)));
38typedef u16 gvhu16 __attribute__((vector_size(8)));
39typedef u32 gvhu32 __attribute__((vector_size(8)));
40typedef u64 gvhu64 __attribute__((vector_size(8)));
41typedef s8 gvhs8 __attribute__((vector_size(8)));
42typedef s16 gvhs16 __attribute__((vector_size(8)));
43typedef s32 gvhs32 __attribute__((vector_size(8)));
44typedef s64 gvhs64 __attribute__((vector_size(8)));
45
a2cb152a 46typedef union
47{
48 gvu8 u8;
49 gvu16 u16;
50 gvu32 u32;
51 gvu64 u64;
52 gvs8 s8;
53 gvs16 s16;
54 gvs32 s32;
55 gvs64 s64;
9088aca1 56#ifdef __SSE2__
57 __m128i m;
58#endif
a2cb152a 59 // this may be tempting, but it causes gcc to do lots of stack spills
60 //gvhreg h[2];
61} gvreg;
62
9088aca1 63typedef gvreg gvreg_ua __attribute__((aligned(1)));
64typedef uint64_t uint64_t_ua __attribute__((aligned(1)));
65typedef gvu8 gvu8_ua __attribute__((aligned(1)));
66typedef gvu16 gvu16_ua __attribute__((aligned(1)));
67
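// note: the gv* macros below mirror NEON intrinsics, with the destination
// as the first argument for loads and arithmetic, e.g.:
//   gvreg a, b, d;
//   gvadd_u16(d, a, b); // d.u16 = a.u16 + b.u16
// stores instead take the value(s) first and the pointer after.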
a2cb152a 68#if defined(__ARM_NEON) || defined(__ARM_NEON__)
69#include <arm_neon.h>
70
9088aca1 71typedef union
72{
73 gvhu8 u8;
74 gvhu16 u16;
75 gvhu32 u32;
76 gvhu64 u64;
77 //u64 u64;
78 //uint64x1_t u64;
79 gvhs8 s8;
80 gvhs16 s16;
81 gvhs32 s32;
82 gvhs64 s64;
83 //s64 s64;
84 //int64x1_t s64;
85} gvhreg;
86
a2cb152a 87#define gvaddhn_u32(d, a, b) d.u16 = vaddhn_u32(a.u32, b.u32)
88#define gvaddw_s32(d, a, b) d.s64 = vaddw_s32(a.s64, b.s32)
89#define gvabsq_s32(d, s) d.s32 = vabsq_s32(s.s32)
90#define gvbic_n_u16(d, n) d.u16 = vbic_u16(d.u16, vmov_n_u16(n))
91#define gvbifq(d, a, b) d.u8 = vbslq_u8(b.u8, d.u8, a.u8)
92#define gvbit(d, a, b) d.u8 = vbsl_u8(b.u8, a.u8, d.u8)
93#define gvceqq_u16(d, a, b) d.u16 = vceqq_u16(a.u16, b.u16)
94#define gvcgt_s16(d, a, b) d.u16 = vcgt_s16(a.s16, b.s16)
95#define gvclt_s16(d, a, b) d.u16 = vclt_s16(a.s16, b.s16)
96#define gvcreate_s32(d, a, b) d.s32 = vcreate_s32((u32)(a) | ((u64)(b) << 32))
97#define gvcreate_u32(d, a, b) d.u32 = vcreate_u32((u32)(a) | ((u64)(b) << 32))
98#define gvcreate_s64(d, s) d.s64 = (gvhs64)vcreate_s64(s)
99#define gvcreate_u64(d, s) d.u64 = (gvhu64)vcreate_u64(s)
100#define gvcombine_u16(d, l, h) d.u16 = vcombine_u16(l.u16, h.u16)
101#define gvcombine_u32(d, l, h) d.u32 = vcombine_u32(l.u32, h.u32)
102#define gvcombine_s64(d, l, h) d.s64 = vcombine_s64((int64x1_t)l.s64, (int64x1_t)h.s64)
103#define gvdup_l_u8(d, s, l) d.u8 = vdup_lane_u8(s.u8, l)
104#define gvdup_l_u16(d, s, l) d.u16 = vdup_lane_u16(s.u16, l)
105#define gvdup_l_u32(d, s, l) d.u32 = vdup_lane_u32(s.u32, l)
106#define gvdupq_l_s64(d, s, l) d.s64 = vdupq_lane_s64((int64x1_t)s.s64, l)
107#define gvdupq_l_u32(d, s, l) d.u32 = vdupq_lane_u32(s.u32, l)
108#define gvdup_n_s64(d, n) d.s64 = vdup_n_s64(n)
109#define gvdup_n_u8(d, n) d.u8 = vdup_n_u8(n)
110#define gvdup_n_u16(d, n) d.u16 = vdup_n_u16(n)
111#define gvdup_n_u32(d, n) d.u32 = vdup_n_u32(n)
112#define gvdupq_n_u16(d, n) d.u16 = vdupq_n_u16(n)
113#define gvdupq_n_u32(d, n) d.u32 = vdupq_n_u32(n)
114#define gvdupq_n_s64(d, n) d.s64 = vdupq_n_s64(n)
115#define gvhaddq_u16(d, a, b) d.u16 = vhaddq_u16(a.u16, b.u16)
116#define gvmax_s16(d, a, b) d.s16 = vmax_s16(a.s16, b.s16)
117#define gvmin_s16(d, a, b) d.s16 = vmin_s16(a.s16, b.s16)
2d658c89 118#define gvmin_u16(d, a, b) d.u16 = vmin_u16(a.u16, b.u16)
a2cb152a 119#define gvminq_u8(d, a, b) d.u8 = vminq_u8(a.u8, b.u8)
120#define gvminq_u16(d, a, b) d.u16 = vminq_u16(a.u16, b.u16)
121#define gvmla_s32(d, a, b) d.s32 = vmla_s32(d.s32, a.s32, b.s32)
122#define gvmla_u32(d, a, b) d.u32 = vmla_u32(d.u32, a.u32, b.u32)
123#define gvmlaq_s32(d, a, b) d.s32 = vmlaq_s32(d.s32, a.s32, b.s32)
124#define gvmlaq_u32(d, a, b) d.u32 = vmlaq_u32(d.u32, a.u32, b.u32)
125#define gvmlal_s32(d, a, b) d.s64 = vmlal_s32(d.s64, a.s32, b.s32)
126#define gvmlal_u8(d, a, b) d.u16 = vmlal_u8(d.u16, a.u8, b.u8)
127#define gvmlsq_s32(d, a, b) d.s32 = vmlsq_s32(d.s32, a.s32, b.s32)
128#define gvmlsq_l_s32(d, a, b, l) d.s32 = vmlsq_lane_s32(d.s32, a.s32, b.s32, l)
129#define gvmov_l_s32(d, s, l) d.s32 = vset_lane_s32(s, d.s32, l)
130#define gvmov_l_u32(d, s, l) d.u32 = vset_lane_u32(s, d.u32, l)
131#define gvmovl_u8(d, s) d.u16 = vmovl_u8(s.u8)
132#define gvmovl_s32(d, s) d.s64 = vmovl_s32(s.s32)
133#define gvmovn_u16(d, s) d.u8 = vmovn_u16(s.u16)
134#define gvmovn_u32(d, s) d.u16 = vmovn_u32(s.u32)
135#define gvmovn_u64(d, s) d.u32 = vmovn_u64(s.u64)
136#define gvmul_s32(d, a, b) d.s32 = vmul_s32(a.s32, b.s32)
137#define gvmull_s16(d, a, b) d.s32 = vmull_s16(a.s16, b.s16)
138#define gvmull_s32(d, a, b) d.s64 = vmull_s32(a.s32, b.s32)
139#define gvmull_u8(d, a, b) d.u16 = vmull_u8(a.u8, b.u8)
140#define gvmull_l_u32(d, a, b, l) d.u64 = vmull_lane_u32(a.u32, b.u32, l)
141#define gvmlsl_s16(d, a, b) d.s32 = vmlsl_s16(d.s32, a.s16, b.s16)
142#define gvneg_s32(d, s) d.s32 = vneg_s32(s.s32)
143#define gvqadd_u8(d, a, b) d.u8 = vqadd_u8(a.u8, b.u8)
144#define gvqsub_u8(d, a, b) d.u8 = vqsub_u8(a.u8, b.u8)
145#define gvshl_u16(d, a, b) d.u16 = vshl_u16(a.u16, b.s16)
a2cb152a 146#define gvshlq_u64(d, a, b) d.u64 = vshlq_u64(a.u64, b.s64)
147#define gvshrq_n_s16(d, s, n) d.s16 = vshrq_n_s16(s.s16, n)
148#define gvshrq_n_u16(d, s, n) d.u16 = vshrq_n_u16(s.u16, n)
149#define gvshl_n_u32(d, s, n) d.u32 = vshl_n_u32(s.u32, n)
150#define gvshlq_n_u16(d, s, n) d.u16 = vshlq_n_u16(s.u16, n)
151#define gvshlq_n_u32(d, s, n) d.u32 = vshlq_n_u32(s.u32, n)
152#define gvshll_n_s8(d, s, n) d.s16 = vshll_n_s8(s.s8, n)
153#define gvshll_n_u8(d, s, n) d.u16 = vshll_n_u8(s.u8, n)
154#define gvshll_n_u16(d, s, n) d.u32 = vshll_n_u16(s.u16, n)
155#define gvshr_n_u8(d, s, n) d.u8 = vshr_n_u8(s.u8, n)
156#define gvshr_n_u16(d, s, n) d.u16 = vshr_n_u16(s.u16, n)
157#define gvshr_n_u32(d, s, n) d.u32 = vshr_n_u32(s.u32, n)
158#define gvshr_n_u64(d, s, n) d.u64 = (gvhu64)vshr_n_u64((uint64x1_t)s.u64, n)
a2cb152a 159#define gvshrn_n_u16(d, s, n) d.u8 = vshrn_n_u16(s.u16, n)
160#define gvshrn_n_u32(d, s, n) d.u16 = vshrn_n_u32(s.u32, n)
161#define gvsli_n_u8(d, s, n) d.u8 = vsli_n_u8(d.u8, s.u8, n)
162#define gvsri_n_u8(d, s, n) d.u8 = vsri_n_u8(d.u8, s.u8, n)
163#define gvtstq_u16(d, a, b) d.u16 = vtstq_u16(a.u16, b.u16)
164#define gvqshrun_n_s16(d, s, n) d.u8 = vqshrun_n_s16(s.s16, n)
165#define gvqsubq_u8(d, a, b) d.u8 = vqsubq_u8(a.u8, b.u8)
166#define gvqsubq_u16(d, a, b) d.u16 = vqsubq_u16(a.u16, b.u16)
167
9088aca1 168#define gvmovn_top_u64(d, s) d.u32 = vshrn_n_u64(s.u64, 32)
169
a2cb152a 170#define gvget_lo(d, s) d.u16 = vget_low_u16(s.u16)
171#define gvget_hi(d, s) d.u16 = vget_high_u16(s.u16)
172#define gvlo(s) ({gvhreg t_; gvget_lo(t_, s); t_;})
173#define gvhi(s) ({gvhreg t_; gvget_hi(t_, s); t_;})
174
175#define gvset_lo(d, s) d.u16 = vcombine_u16(s.u16, gvhi(d).u16)
176#define gvset_hi(d, s) d.u16 = vcombine_u16(gvlo(d).u16, s.u16)
177
178#define gvtbl2_u8(d, a, b) { \
179 uint8x8x2_t v_; \
180 v_.val[0] = vget_low_u8(a.u8); v_.val[1] = vget_high_u8(a.u8); \
181 d.u8 = vtbl2_u8(v_, b.u8); \
182}
183
184#define gvzip_u8(d, a, b) { \
185 uint8x8x2_t v_ = vzip_u8(a.u8, b.u8); \
186 d.u8 = vcombine_u8(v_.val[0], v_.val[1]); \
187}
188#define gvzipq_u16(d0, d1, s0, s1) { \
189 uint16x8x2_t v_ = vzipq_u16(s0.u16, s1.u16); \
190 d0.u16 = v_.val[0]; d1.u16 = v_.val[1]; \
191}
192
193#define gvld1_u8(d, s) d.u8 = vld1_u8(s)
194#define gvld1_u32(d, s) d.u32 = vld1_u32((const u32 *)(s))
195#define gvld1q_u8(d, s) d.u8 = vld1q_u8(s)
196#define gvld1q_u16(d, s) d.u16 = vld1q_u16(s)
197#define gvld1q_u32(d, s) d.u32 = vld1q_u32((const u32 *)(s))
9088aca1 198#define gvld2_u8_dup(v0, v1, p) { \
a2cb152a 199 uint8x8x2_t v_ = vld2_dup_u8(p); \
200 v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \
201}
202#define gvld2q_u8(v0, v1, p) { \
203 uint8x16x2_t v_ = vld2q_u8(p); \
204 v0.u8 = v_.val[0]; v1.u8 = v_.val[1]; \
205}
206
207#define gvst1_u8(v, p) \
208 vst1_u8(p, v.u8)
209#define gvst1q_u16(v, p) \
210 vst1q_u16(p, v.u16)
211#define gvst1q_inc_u32(v, p, i) { \
212 vst1q_u32((u32 *)(p), v.u32); \
213 p += (i) / sizeof(*p); \
214}
215#define gvst2_u8(v0, v1, p) { \
216 uint8x8x2_t v_; \
217 v_.val[0] = v0.u8; v_.val[1] = v1.u8; \
218 vst2_u8(p, v_); \
219}
220#define gvst2_u16(v0, v1, p) { \
221 uint16x4x2_t v_; \
222 v_.val[0] = v0.u16; v_.val[1] = v1.u16; \
223 vst2_u16(p, v_); \
224}
225#define gvst2q_u8(v0, v1, p) { \
226 uint8x16x2_t v_; \
227 v_.val[0] = v0.u8; v_.val[1] = v1.u8; \
228 vst2q_u8(p, v_); \
229}
230#define gvst4_4_inc_u32(v0, v1, v2, v3, p, i) { \
231 uint32x2x4_t v_; \
232 v_.val[0] = v0.u32; v_.val[1] = v1.u32; v_.val[2] = v2.u32; v_.val[3] = v3.u32; \
233 vst4_u32(p, v_); p += (i) / sizeof(*p); \
234}
235#define gvst4_pi_u16(v0, v1, v2, v3, p) { \
236 uint16x4x4_t v_; \
237 v_.val[0] = v0.u16; v_.val[1] = v1.u16; v_.val[2] = v2.u16; v_.val[3] = v3.u16; \
238 vst4_u16((u16 *)(p), v_); p += sizeof(v_) / sizeof(*p); \
239}
240#define gvst1q_pi_u32(v, p) \
241 gvst1q_inc_u32(v, p, sizeof(v))
242// could use vst1q_u32_x2 but that's not always available
243#define gvst1q_2_pi_u32(v0, v1, p) { \
244 gvst1q_inc_u32(v0, p, sizeof(v0)); \
245 gvst1q_inc_u32(v1, p, sizeof(v1)); \
246}
247
248/* notes:
249 - gcc > 9: (arm32) int64x1_t type produces ops on gp regs
250 (also u64 __attribute__((vector_size(8))) :( )
251 - gcc <11: (arm32) handles '<vec> == 0' poorly
252*/
253
a2cb152a 254#elif defined(__SSE2__)
9088aca1 255
256// use a full reg and discard the upper half
257#define gvhreg gvreg
258
259#define gv0() _mm_setzero_si128()
260
261#ifdef __x86_64__
262#define gvcreate_s32(d, a, b) d.m = _mm_cvtsi64_si128((u32)(a) | ((u64)(b) << 32))
263#define gvcreate_s64(d, s) d.m = _mm_cvtsi64_si128(s)
264#else
265#define gvcreate_s32(d, a, b) d.m = _mm_set_epi32(0, 0, b, a)
266#define gvcreate_s64(d, s) d.m = _mm_loadu_si64(&(s))
267#endif
268
269#define gvbic_n_u16(d, n) d.m = _mm_andnot_si128(_mm_set1_epi16(n), d.m)
 270#define gvceqq_u16(d, a, b)     d.m = _mm_cmpeq_epi16(a.m, b.m)
271#define gvcgt_s16(d, a, b) d.m = _mm_cmpgt_epi16(a.m, b.m)
272#define gvclt_s16(d, a, b) d.m = _mm_cmpgt_epi16(b.m, a.m)
273#define gvcreate_u32 gvcreate_s32
274#define gvcreate_u64 gvcreate_s64
275#define gvcombine_u16(d, l, h) d.m = _mm_unpacklo_epi64(l.m, h.m)
276#define gvcombine_u32 gvcombine_u16
277#define gvcombine_s64 gvcombine_u16
 278#define gvdup_l_u8(d, s, l)     d.m = _mm_set1_epi8(s.u8[l])
279#define gvdup_l_u16(d, s, l) d.m = _mm_shufflelo_epi16(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6))
 280#define gvdup_l_u32(d, s, l)    d.m = _mm_shuffle_epi32(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6))
281#define gvdupq_l_s64(d, s, l) d.m = _mm_unpacklo_epi64(s.m, s.m)
282#define gvdupq_l_u32(d, s, l) d.m = _mm_shuffle_epi32(s.m, (l)|((l)<<2)|((l)<<4)|((l)<<6))
283#define gvdup_n_s64(d, n) d.m = _mm_set1_epi64x(n)
284#define gvdup_n_u8(d, n) d.m = _mm_set1_epi8(n)
285#define gvdup_n_u16(d, n) d.m = _mm_set1_epi16(n)
286#define gvdup_n_u32(d, n) d.m = _mm_set1_epi32(n)
287#define gvdupq_n_u16(d, n) d.m = _mm_set1_epi16(n)
288#define gvdupq_n_u32(d, n) d.m = _mm_set1_epi32(n)
289#define gvdupq_n_s64(d, n) d.m = _mm_set1_epi64x(n)
290#define gvmax_s16(d, a, b) d.m = _mm_max_epi16(a.m, b.m)
291#define gvmin_s16(d, a, b) d.m = _mm_min_epi16(a.m, b.m)
292#define gvminq_u8(d, a, b) d.m = _mm_min_epu8(a.m, b.m)
293#define gvmovn_u64(d, s) d.m = _mm_shuffle_epi32(s.m, 0 | (2 << 2))
294#define gvmovn_top_u64(d, s) d.m = _mm_shuffle_epi32(s.m, 1 | (3 << 2))
295#define gvmull_s16(d, a, b) { \
296 __m128i lo_ = _mm_mullo_epi16(a.m, b.m); \
297 __m128i hi_ = _mm_mulhi_epi16(a.m, b.m); \
298 d.m = _mm_unpacklo_epi16(lo_, hi_); \
299}
300#define gvmull_l_u32(d, a, b, l) { \
301 __m128i a_ = _mm_unpacklo_epi32(a.m, a.m); /* lanes 0,1 -> 0,2 */ \
302 __m128i b_ = _mm_shuffle_epi32(b.m, (l) | ((l) << 4)); \
303 d.m = _mm_mul_epu32(a_, b_); \
304}
305#define gvmlsl_s16(d, a, b) { \
306 gvreg tmp_; \
307 gvmull_s16(tmp_, a, b); \
308 d.m = _mm_sub_epi32(d.m, tmp_.m); \
309}
310#define gvqadd_u8(d, a, b) d.m = _mm_adds_epu8(a.m, b.m)
311#define gvqsub_u8(d, a, b) d.m = _mm_subs_epu8(a.m, b.m)
312#define gvshrq_n_s16(d, s, n) d.m = _mm_srai_epi16(s.m, n)
313#define gvshrq_n_u16(d, s, n) d.m = _mm_srli_epi16(s.m, n)
314#define gvshrq_n_u32(d, s, n) d.m = _mm_srli_epi32(s.m, n)
315#define gvshl_n_u32(d, s, n) d.m = _mm_slli_epi32(s.m, n)
316#define gvshlq_n_u16(d, s, n) d.m = _mm_slli_epi16(s.m, n)
317#define gvshlq_n_u32(d, s, n) d.m = _mm_slli_epi32(s.m, n)
318#define gvshll_n_u16(d, s, n) d.m = _mm_slli_epi32(_mm_unpacklo_epi16(s.m, gv0()), n)
319#define gvshr_n_u16(d, s, n) d.m = _mm_srli_epi16(s.m, n)
320#define gvshr_n_u32(d, s, n) d.m = _mm_srli_epi32(s.m, n)
321#define gvshr_n_u64(d, s, n) d.m = _mm_srli_epi64(s.m, n)
322#define gvshrn_n_s64(d, s, n) { \
323 gvreg tmp_; \
324 gvshrq_n_s64(tmp_, s, n); \
325 d.m = _mm_shuffle_epi32(tmp_.m, 0 | (2 << 2)); \
326}
327#define gvqshrun_n_s16(d, s, n) { \
328 __m128i t_ = _mm_srai_epi16(s.m, n); \
329 d.m = _mm_packus_epi16(t_, t_); \
330}
331#define gvqsubq_u8(d, a, b) d.m = _mm_subs_epu8(a.m, b.m)
332#define gvqsubq_u16(d, a, b) d.m = _mm_subs_epu16(a.m, b.m)
333
334#ifdef __SSSE3__
335#define gvabsq_s32(d, s) d.m = _mm_abs_epi32(s.m)
336#define gvtbl2_u8(d, a, b) d.m = _mm_shuffle_epi8(a.m, b.m)
337#else
338// must supply these here or else gcc will produce something terrible with __builtin_shuffle
339#define gvmovn_u16(d, s) { \
340 __m128i t2_ = _mm_and_si128(s.m, _mm_set1_epi16(0xff)); \
341 d.m = _mm_packus_epi16(t2_, t2_); \
342}
343#define gvmovn_u32(d, s) { \
344 __m128i t2_; \
345 t2_ = _mm_shufflelo_epi16(s.m, (0 << 0) | (2 << 2)); \
346 t2_ = _mm_shufflehi_epi16(t2_, (0 << 0) | (2 << 2)); \
347 d.m = _mm_shuffle_epi32(t2_, (0 << 0) | (2 << 2)); \
348}
349#define gvmovn_top_u32(d, s) { \
350 __m128i t2_; \
351 t2_ = _mm_shufflelo_epi16(s.m, (1 << 0) | (3 << 2)); \
352 t2_ = _mm_shufflehi_epi16(t2_, (1 << 0) | (3 << 2)); \
353 d.m = _mm_shuffle_epi32(t2_, (0 << 0) | (2 << 2)); \
354}
355#endif // !__SSSE3__
356#ifdef __SSE4_1__
2d658c89 357#define gvmin_u16(d, a, b) d.m = _mm_min_epu16(a.m, b.m)
358#define gvminq_u16 gvmin_u16
9088aca1 359#define gvmovl_u8(d, s) d.m = _mm_cvtepu8_epi16(s.m)
360#define gvmovl_s8(d, s) d.m = _mm_cvtepi8_epi16(s.m)
361#define gvmovl_s32(d, s) d.m = _mm_cvtepi32_epi64(s.m)
362#define gvmull_s32(d, a, b) { \
363 __m128i a_ = _mm_unpacklo_epi32(a.m, a.m); /* lanes 0,1 -> 0,2 */ \
364 __m128i b_ = _mm_unpacklo_epi32(b.m, b.m); \
365 d.m = _mm_mul_epi32(a_, b_); \
366}
367#else
368#define gvmovl_u8(d, s) d.m = _mm_unpacklo_epi8(s.m, gv0())
369#define gvmovl_s8(d, s) d.m = _mm_unpacklo_epi8(s.m, _mm_cmpgt_epi8(gv0(), s.m))
370#define gvmovl_s32(d, s) d.m = _mm_unpacklo_epi32(s.m, _mm_srai_epi32(s.m, 31))
371#endif // !__SSE4_1__
372#ifndef __AVX2__
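// (with AVX2 the generic "a.u64 << b.u64" fallback further below compiles
// to vpsllvq, so only pre-AVX2 needs this two-shift emulation)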
373#define gvshlq_u64(d, a, b) { \
374 gvreg t1_, t2_; \
375 t1_.m = _mm_sll_epi64(a.m, b.m); \
376 t2_.m = _mm_sll_epi64(a.m, _mm_shuffle_epi32(b.m, (2 << 0) | (3 << 2))); \
377 d.u64 = (gvu64){ t1_.u64[0], t2_.u64[1] }; \
378}
 379#endif // !__AVX2__
380
381#define gvlo(s) s
382#define gvhi(s) ((gvreg)_mm_shuffle_epi32(s.m, (2 << 0) | (3 << 2)))
383#define gvget_lo(d, s) d = gvlo(s)
384#define gvget_hi(d, s) d = gvhi(s)
385
386#define gvset_lo(d, s) d.m = _mm_unpacklo_epi64(s.m, gvhi(d).m)
387#define gvset_hi(d, s) d.m = _mm_unpacklo_epi64(d.m, s.m)
388
389#define gvld1_u8(d, s) d.m = _mm_loadu_si64(s)
390#define gvld1_u32 gvld1_u8
391#define gvld1q_u8(d, s) d.m = _mm_loadu_si128((__m128i *)(s))
392#define gvld1q_u16 gvld1q_u8
393#define gvld1q_u32 gvld1q_u8
394
395#define gvst4_4_inc_u32(v0, v1, v2, v3, p, i) { \
396 __m128i t0 = _mm_unpacklo_epi32(v0.m, v1.m); \
397 __m128i t1 = _mm_unpacklo_epi32(v2.m, v3.m); \
398 _mm_storeu_si128(((__m128i *)(p)) + 0, _mm_unpacklo_epi64(t0, t1)); \
399 _mm_storeu_si128(((__m128i *)(p)) + 1, _mm_unpackhi_epi64(t0, t1)); \
400 p += (i) / sizeof(*p); \
401}
402#define gvst4_pi_u16(v0, v1, v2, v3, p) { \
403 __m128i t0 = _mm_unpacklo_epi16(v0.m, v1.m); \
404 __m128i t1 = _mm_unpacklo_epi16(v2.m, v3.m); \
405 _mm_storeu_si128(((__m128i *)(p)) + 0, _mm_unpacklo_epi32(t0, t1)); \
406 _mm_storeu_si128(((__m128i *)(p)) + 1, _mm_unpackhi_epi32(t0, t1)); \
407 p += sizeof(t0) * 2 / sizeof(*p); \
408}
409
a2cb152a 410#else
411#error "arch not supported or SIMD support was not enabled by your compiler"
412#endif
413
414// the below have intrinsics but they evaluate to basic operations on both gcc and clang
415#define gvadd_s64(d, a, b) d.s64 = a.s64 + b.s64
416#define gvadd_u8(d, a, b) d.u8 = a.u8 + b.u8
417#define gvadd_u16(d, a, b) d.u16 = a.u16 + b.u16
418#define gvadd_u32(d, a, b) d.u32 = a.u32 + b.u32
419#define gvaddq_s64 gvadd_s64
420#define gvaddq_u16 gvadd_u16
421#define gvaddq_u32 gvadd_u32
422#define gvand(d, a, b) d.u32 = a.u32 & b.u32
9088aca1 423#define gvand_n_u32(d, n) d.u32 &= n
a2cb152a 424#define gvbic(d, a, b) d.u32 = a.u32 & ~b.u32
425#define gvbicq gvbic
426#define gveor(d, a, b) d.u32 = a.u32 ^ b.u32
427#define gveorq gveor
428#define gvceqz_u16(d, s) d.u16 = s.u16 == 0
429#define gvceqzq_u16 gvceqz_u16
430#define gvcltz_s16(d, s) d.s16 = s.s16 < 0
431#define gvcltzq_s16 gvcltz_s16
432#define gvsub_u16(d, a, b) d.u16 = a.u16 - b.u16
433#define gvsub_u32(d, a, b) d.u32 = a.u32 - b.u32
434#define gvsubq_u16 gvsub_u16
435#define gvsubq_u32 gvsub_u32
436#define gvorr(d, a, b) d.u32 = a.u32 | b.u32
437#define gvorrq gvorr
9088aca1 438#define gvorr_n_u16(d, n) d.u16 |= n
439
440// fallbacks
441#if 1
442
443#ifndef gvaddhn_u32
444#define gvaddhn_u32(d, a, b) { \
445 gvreg tmp1_ = { .u32 = a.u32 + b.u32 }; \
446 gvmovn_top_u32(d, tmp1_); \
447}
448#endif
449#ifndef gvabsq_s32
450#define gvabsq_s32(d, s) { \
451 gvreg tmp1_ = { .s32 = (gvs32){} - s.s32 }; \
452 gvreg mask_ = { .s32 = s.s32 >> 31 }; \
453 gvbslq_(d, mask_, tmp1_, s); \
454}
455#endif
456#ifndef gvbit
457#define gvbslq_(d, s, a, b) d.u32 = (a.u32 & s.u32) | (b.u32 & ~s.u32)
458#define gvbifq(d, a, b) gvbslq_(d, b, d, a)
459#define gvbit(d, a, b) gvbslq_(d, b, a, d)
460#endif
461#ifndef gvaddw_s32
462#define gvaddw_s32(d, a, b) {gvreg t_; gvmovl_s32(t_, b); d.s64 += t_.s64;}
463#endif
464#ifndef gvhaddq_u16
465// can do this because the caller needs the msb clear
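// (NEON vhaddq_u16 keeps the 17-bit intermediate sum; the plain C form
// below drops the carry, which is only safe under that precondition)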
466#define gvhaddq_u16(d, a, b) d.u16 = (a.u16 + b.u16) >> 1
467#endif
2d658c89 468#ifndef gvmin_u16
469#define gvmin_u16(d, a, b) { \
9088aca1 470 gvu16 t_ = a.u16 < b.u16; \
471 d.u16 = (a.u16 & t_) | (b.u16 & ~t_); \
472}
2d658c89 473#define gvminq_u16 gvmin_u16
9088aca1 474#endif
475#ifndef gvmlsq_s32
476#define gvmlsq_s32(d, a, b) d.s32 -= a.s32 * b.s32
477#endif
478#ifndef gvmlsq_l_s32
479#define gvmlsq_l_s32(d, a, b, l){gvreg t_; gvdupq_l_u32(t_, b, l); d.s32 -= a.s32 * t_.s32;}
480#endif
481#ifndef gvmla_s32
482#define gvmla_s32(d, a, b) d.s32 += a.s32 * b.s32
483#endif
484#ifndef gvmla_u32
485#define gvmla_u32 gvmla_s32
486#endif
487#ifndef gvmlaq_s32
488#define gvmlaq_s32(d, a, b) d.s32 += a.s32 * b.s32
489#endif
490#ifndef gvmlaq_u32
491#define gvmlaq_u32 gvmlaq_s32
492#endif
493#ifndef gvmlal_u8
494#define gvmlal_u8(d, a, b) {gvreg t_; gvmull_u8(t_, a, b); d.u16 += t_.u16;}
495#endif
496#ifndef gvmlal_s32
497#define gvmlal_s32(d, a, b) {gvreg t_; gvmull_s32(t_, a, b); d.s64 += t_.s64;}
498#endif
499#ifndef gvmov_l_s32
500#define gvmov_l_s32(d, s, l) d.s32[l] = s
501#endif
502#ifndef gvmov_l_u32
503#define gvmov_l_u32(d, s, l) d.u32[l] = s
504#endif
505#ifndef gvmul_s32
506#define gvmul_s32(d, a, b) d.s32 = a.s32 * b.s32
507#endif
508#ifndef gvmull_u8
509#define gvmull_u8(d, a, b) { \
510 gvreg t1_, t2_; \
511 gvmovl_u8(t1_, a); \
512 gvmovl_u8(t2_, b); \
513 d.u16 = t1_.u16 * t2_.u16; \
514}
515#endif
516#ifndef gvmull_s32
517// note: compilers tend to use int regs here
518#define gvmull_s32(d, a, b) { \
519 d.s64[0] = (s64)a.s32[0] * b.s32[0]; \
520 d.s64[1] = (s64)a.s32[1] * b.s32[1]; \
521}
522#endif
523#ifndef gvneg_s32
524#define gvneg_s32(d, s) d.s32 = -s.s32
525#endif
526// x86 note: needs _mm_sllv_epi16 (avx512), else this sucks terribly
527#ifndef gvshl_u16
528#define gvshl_u16(d, a, b) d.u16 = a.u16 << b.u16
529#endif
530// x86 note: needs _mm_sllv_* (avx2)
531#ifndef gvshlq_u64
532#define gvshlq_u64(d, a, b) d.u64 = a.u64 << b.u64
533#endif
534#ifndef gvshll_n_s8
535#define gvshll_n_s8(d, s, n) {gvreg t_; gvmovl_s8(t_, s); gvshlq_n_u16(d, t_, n);}
536#endif
537#ifndef gvshll_n_u8
538#define gvshll_n_u8(d, s, n) {gvreg t_; gvmovl_u8(t_, s); gvshlq_n_u16(d, t_, n);}
539#endif
540#ifndef gvshr_n_u8
541#define gvshr_n_u8(d, s, n) d.u8 = s.u8 >> (n)
542#endif
543#ifndef gvshrq_n_s64
544#define gvshrq_n_s64(d, s, n) d.s64 = s.s64 >> (n)
545#endif
546#ifndef gvshrn_n_u16
547#define gvshrn_n_u16(d, s, n) {gvreg t_; gvshrq_n_u16(t_, s, n); gvmovn_u16(d, t_);}
548#endif
549#ifndef gvshrn_n_u32
550#define gvshrn_n_u32(d, s, n) {gvreg t_; gvshrq_n_u32(t_, s, n); gvmovn_u32(d, t_);}
551#endif
552#ifndef gvsli_n_u8
553#define gvsli_n_u8(d, s, n) d.u8 = (s.u8 << (n)) | (d.u8 & ((1u << (n)) - 1u))
554#endif
555#ifndef gvsri_n_u8
556#define gvsri_n_u8(d, s, n) d.u8 = (s.u8 >> (n)) | (d.u8 & ((0xff00u >> (n)) & 0xffu))
557#endif
558#ifndef gvtstq_u16
559#define gvtstq_u16(d, a, b) d.u16 = (a.u16 & b.u16) != 0
560#endif
561
562#ifndef gvld2_u8_dup
563#define gvld2_u8_dup(v0, v1, p) { \
564 gvdup_n_u8(v0, ((const u8 *)(p))[0]); \
565 gvdup_n_u8(v1, ((const u8 *)(p))[1]); \
566}
567#endif
568#ifndef gvst1_u8
569#define gvst1_u8(v, p) *(uint64_t_ua *)(p) = v.u64[0]
570#endif
571#ifndef gvst1q_u16
572#define gvst1q_u16(v, p) *(gvreg_ua *)(p) = v
573#endif
574#ifndef gvst1q_inc_u32
575#define gvst1q_inc_u32(v, p, i) {*(gvreg_ua *)(p) = v; p += (i) / sizeof(*p);}
576#endif
577#ifndef gvst1q_pi_u32
578#define gvst1q_pi_u32(v, p) gvst1q_inc_u32(v, p, sizeof(v))
579#endif
580#ifndef gvst1q_2_pi_u32
581#define gvst1q_2_pi_u32(v0, v1, p) { \
582 gvst1q_inc_u32(v0, p, sizeof(v0)); \
583 gvst1q_inc_u32(v1, p, sizeof(v1)); \
584}
585#endif
586#ifndef gvst2_u8
587#define gvst2_u8(v0, v1, p) {gvreg t_; gvzip_u8(t_, v0, v1); *(gvu8_ua *)(p) = t_.u8;}
588#endif
589#ifndef gvst2_u16
590#define gvst2_u16(v0, v1, p) {gvreg t_; gvzip_u16(t_, v0, v1); *(gvu16_ua *)(p) = t_.u16;}
591#endif
592
593// note: these shuffles assume sizeof(gvhreg) == 16 && sizeof(gvreg) == 16
594#ifndef __has_builtin
595#define __has_builtin(x) 0
596#endif
597
 598// prefer __builtin_shuffle on gcc, as gcc's __builtin_shufflevector handles -1 poorly
599#if __has_builtin(__builtin_shufflevector) && !__has_builtin(__builtin_shuffle)
600
601#ifndef gvld2q_u8
602#define gvld2q_u8(v0, v1, p) { \
603 gvu8 v0_ = ((gvu8_ua *)(p))[0]; \
604 gvu8 v1_ = ((gvu8_ua *)(p))[1]; \
605 v0.u8 = __builtin_shufflevector(v0_, v1_, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30); \
606 v1.u8 = __builtin_shufflevector(v0_, v1_, 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31); \
607}
608#endif
609#ifndef gvmovn_u16
610#define gvmovn_u16(d, s) \
611 d.u8 = __builtin_shufflevector(s.u8, s.u8, 0,2,4,6,8,10,12,14,-1,-1,-1,-1,-1,-1,-1,-1)
612#endif
613#ifndef gvmovn_u32
614#define gvmovn_u32(d, s) \
615 d.u16 = __builtin_shufflevector(s.u16, s.u16, 0,2,4,6,-1,-1,-1,-1)
616#endif
617#ifndef gvmovn_top_u32
618#define gvmovn_top_u32(d, s) \
619 d.u16 = __builtin_shufflevector(s.u16, s.u16, 1,3,5,7,-1,-1,-1,-1)
620#endif
621#ifndef gvzip_u8
622#define gvzip_u8(d, a, b) \
623 d.u8 = __builtin_shufflevector(a.u8, b.u8, 0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23)
624#endif
625#ifndef gvzip_u16
626#define gvzip_u16(d, a, b) \
627 d.u16 = __builtin_shufflevector(a.u16, b.u16, 0,8,1,9,2,10,3,11)
628#endif
629#ifndef gvzipq_u16
630#define gvzipq_u16(d0, d1, s0, s1) { \
631 gvu16 t_ = __builtin_shufflevector(s0.u16, s1.u16, 0, 8, 1, 9, 2, 10, 3, 11); \
632 d1.u16 = __builtin_shufflevector(s0.u16, s1.u16, 4,12, 5,13, 6, 14, 7, 15); \
633 d0.u16 = t_; \
634}
635#endif
636
637#else // !__has_builtin(__builtin_shufflevector)
638
639#ifndef gvld2q_u8
640#define gvld2q_u8(v0, v1, p) { \
641 gvu8 v0_ = ((gvu8_ua *)(p))[0]; \
642 gvu8 v1_ = ((gvu8_ua *)(p))[1]; \
643 v0.u8 = __builtin_shuffle(v0_, v1_, (gvu8){0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30}); \
644 v1.u8 = __builtin_shuffle(v0_, v1_, (gvu8){1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31}); \
645}
646#endif
647#ifndef gvmovn_u16
648#define gvmovn_u16(d, s) \
649 d.u8 = __builtin_shuffle(s.u8, (gvu8){0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14})
650#endif
651#ifndef gvmovn_u32
652#define gvmovn_u32(d, s) \
653 d.u16 = __builtin_shuffle(s.u16, (gvu16){0,2,4,6,0,2,4,6})
654#endif
655#ifndef gvmovn_top_u32
656#define gvmovn_top_u32(d, s) \
657 d.u16 = __builtin_shuffle(s.u16, (gvu16){1,3,5,7,1,3,5,7})
658#endif
659#ifndef gvtbl2_u8
660#define gvtbl2_u8(d, a, b) d.u8 = __builtin_shuffle(a.u8, b.u8)
661#endif
662#ifndef gvzip_u8
663#define gvzip_u8(d, a, b) \
664 d.u8 = __builtin_shuffle(a.u8, b.u8, (gvu8){0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23})
665#endif
666#ifndef gvzip_u16
667#define gvzip_u16(d, a, b) \
668 d.u16 = __builtin_shuffle(a.u16, b.u16, (gvu16){0,8,1,9,2,10,3,11})
669#endif
670#ifndef gvzipq_u16
671#define gvzipq_u16(d0, d1, s0, s1) { \
672 gvu16 t_ = __builtin_shuffle(s0.u16, s1.u16, (gvu16){0, 8, 1, 9, 2, 10, 3, 11}); \
673 d1.u16 = __builtin_shuffle(s0.u16, s1.u16, (gvu16){4,12, 5,13, 6, 14, 7, 15}); \
674 d0.u16 = t_; \
675}
676#endif
677
678#endif // __builtin_shufflevector || __builtin_shuffle
679
680#ifndef gvtbl2_u8
681#define gvtbl2_u8(d, a, b) { \
682 int i_; \
683 for (i_ = 0; i_ < 16; i_++) \
684 d.u8[i_] = a.u8[b.u8[i_]]; \
685}
686#endif
687
688#endif // fallbacks
a2cb152a 689
690#if defined(__arm__)
691
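// ARMv6 SIMD-in-GPR ops: SSUB16 does two 16-bit subtracts in parallel,
// SMUSDX computes lo(a)*hi(b) - hi(a)*lo(b) (dual 16-bit multiply-subtract
// with the second operand pair exchanged); the #else branch below carries
// the equivalent scalar fallbacks.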
692#define gssub16(d, a, b) asm("ssub16 %0,%1,%2" : "=r"(d) : "r"(a), "r"(b))
693#define gsmusdx(d, a, b) asm("smusdx %0,%1,%2" : "=r"(d) : "r"(a), "r"(b))
694
695#if 0
696// gcc/config/arm/arm.c
697#undef gvadd_s64
698#define gvadd_s64(d, a, b) asm("vadd.i64 %P0,%P1,%P2" : "=w"(d.s64) : "w"(a.s64), "w"(b.s64))
699#endif
700
701#else
702
703#define gssub16(d, a, b) d = (u16)((a) - (b)) | ((((a) >> 16) - ((b) >> 16)) << 16)
704#define gsmusdx(d, a, b) d = ((s32)(s16)(a) * ((s32)(b) >> 16)) \
705 - (((s32)(a) >> 16) * (s16)(b))
706
707#endif
708
709// for compatibility with the original psx_gpu.c code
710#define vec_2x64s gvreg
711#define vec_2x64u gvreg
712#define vec_4x32s gvreg
713#define vec_4x32u gvreg
714#define vec_8x16s gvreg
715#define vec_8x16u gvreg
716#define vec_16x8s gvreg
717#define vec_16x8u gvreg
718#define vec_1x64s gvhreg
719#define vec_1x64u gvhreg
720#define vec_2x32s gvhreg
721#define vec_2x32u gvhreg
722#define vec_4x16s gvhreg
723#define vec_4x16u gvhreg
724#define vec_8x8s gvhreg
725#define vec_8x8u gvhreg
726
727#if 0
728#include <stdio.h>
729#include <stdlib.h>
730#include <unistd.h>
aafce833 731static int ccount, dump_enabled;
a2cb152a 732void cmpp(const char *name, const void *a_, const void *b_, size_t len)
733{
734 const uint32_t *a = a_, *b = b_, masks[] = { 0, 0xff, 0xffff, 0xffffff };
735 size_t i, left;
736 uint32_t mask;
737 for (i = 0; i < (len + 3)/4; i++) {
738 left = len - i*4;
739 mask = left >= 4 ? ~0u : masks[left];
740 if ((a[i] ^ b[i]) & mask) {
741 printf("%s: %08x %08x [%03zx/%zu] #%d\n",
742 name, a[i] & mask, b[i] & mask, i*4, i, ccount);
743 exit(1);
744 }
745 }
746 ccount++;
747}
748#define ccmpf(n) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c))
749#define ccmpa(n,c) cmpp(#n, &psx_gpu->n, &n##_c, sizeof(n##_c[0]) * c)
750
751void dump_r_(const char *name, void *dump, int is_q)
752{
753 unsigned long long *u = dump;
aafce833 754 if (!dump_enabled) return;
a2cb152a 755 //if (ccount > 1) return;
aafce833 756 printf("%20s %016llx ", name, u[0]);
a2cb152a 757 if (is_q)
758 printf("%016llx", u[1]);
759 puts("");
760}
761void __attribute__((noinline,noclone)) dump_r_d(const char *name, void *dump)
762{ dump_r_(name, dump, 0); }
763void __attribute__((noinline,noclone)) dump_r_q(const char *name, void *dump)
764{ dump_r_(name, dump, 1); }
765#define dumprd(n) { u8 dump_[8]; gvst1_u8(n, dump_); dump_r_d(#n, dump_); }
766#define dumprq(n) { u16 dump_[8]; gvst1q_u16(n, dump_); dump_r_q(#n, dump_); }
767#endif
768
769void compute_all_gradients(psx_gpu_struct * __restrict__ psx_gpu,
770 const vertex_struct * __restrict__ a, const vertex_struct * __restrict__ b,
771 const vertex_struct * __restrict__ c)
772{
773 union { double d; struct { u32 l; u32 h; } i; } divident, divider;
774 union { double d; gvhreg v; } d30;
775
776#if 0
777 compute_all_gradients_(psx_gpu, a, b, c);
778 return;
779#endif
780 // First compute the triangle area reciprocal and shift. The division will
781 // happen concurrently with much of the work which follows.
782
783 // load exponent of 62 into upper half of double
784 u32 shift = __builtin_clz(psx_gpu->triangle_area);
785 u32 triangle_area_normalized = psx_gpu->triangle_area << shift;
786
787 // load area normalized into lower half of double
788 divident.i.l = triangle_area_normalized >> 10;
789 divident.i.h = (62 + 1023) << 20;
790
791 divider.i.l = triangle_area_normalized << 20;
792 divider.i.h = ((1022 + 31) << 20) + (triangle_area_normalized >> 11);
793
794 d30.d = divident.d / divider.d; // d30 = ((1 << 62) + ta_n) / ta_n
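  // The two constants above hand-assemble IEEE-754 doubles so that the
  // quotient's bit pattern effectively carries a fixed-point reciprocal of
  // the normalized area in its mantissa; gvshr_n_u64(d0, d30.v, 22) below
  // then pulls it straight out of the representation, avoiding an integer
  // divide.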
795
796 // ((x1 - x0) * (y2 - y1)) - ((x2 - x1) * (y1 - y0)) =
797 // ( d0 * d1 ) - ( d2 * d3 ) =
798 // ( m0 ) - ( m1 ) = gradient
799
800 // This is split to do 12 elements at a time over three sets: a, b, and c.
801 // Technically we only need to do 10 elements (uvrgb_x and uvrgb_y), so
802 // two of the slots are unused.
803
804 // Inputs are all 16-bit signed. The m0/m1 results are 32-bit signed, as
805 // is g.
806
807 // First type is: uvrg bxxx xxxx
808 // Second type is: yyyy ybyy uvrg
 809 // Since x_a and y_c are the same, the same variable is used for both.
810
811 gvreg v0;
812 gvreg v1;
813 gvreg v2;
814 gvreg uvrg_xxxx0;
815 gvreg uvrg_xxxx1;
816 gvreg uvrg_xxxx2;
817
818 gvreg y0_ab;
819 gvreg y1_ab;
820 gvreg y2_ab;
821
822 gvreg d0_ab;
823 gvreg d1_ab;
824 gvreg d2_ab;
825 gvreg d3_ab;
826
827 gvreg ga_uvrg_x;
828 gvreg ga_uvrg_y;
829 gvreg gw_rg_x;
830 gvreg gw_rg_y;
831 gvreg w_mask;
832 gvreg r_shift;
833 gvreg uvrg_dx2, uvrg_dx3;
834 gvreg uvrgb_phase;
835 gvhreg zero, tmp_lo, tmp_hi;
836
837 gvld1q_u8(v0, (u8 *)a); // v0 = { uvrg0, b0, x0, y0 }
838 gvld1q_u8(v1, (u8 *)b); // v1 = { uvrg1, b1, x1, y1 }
839 gvld1q_u8(v2, (u8 *)c); // v2 = { uvrg2, b2, x2, y2 }
840
841 gvmovl_u8(uvrg_xxxx0, gvlo(v0)); // uvrg_xxxx0 = { uv0, rg0, b0-, -- }
842 gvmovl_u8(uvrg_xxxx1, gvlo(v1)); // uvrg_xxxx1 = { uv1, rg1, b1-, -- }
843 gvmovl_u8(uvrg_xxxx2, gvlo(v2)); // uvrg_xxxx2 = { uv2, rg2, b2-, -- }
844
845 gvdup_l_u16(tmp_lo, gvhi(v0), 1); // yyyy0 = { yy0, yy0 }
846 gvcombine_u16(y0_ab, tmp_lo, gvlo(uvrg_xxxx0));
847
848 gvdup_l_u16(tmp_lo, gvhi(v0), 0); // xxxx0 = { xx0, xx0 }
849 gvset_hi(uvrg_xxxx0, tmp_lo);
850
851 u32 x1_x2 = (u16)b->x | (c->x << 16); // x1_x2 = { x1, x2 }
852 u32 x0_x1 = (u16)a->x | (b->x << 16); // x0_x1 = { x0, x1 }
853
854 gvdup_l_u16(tmp_lo, gvhi(v1), 1); // yyyy1 = { yy1, yy1 }
855 gvcombine_u16(y1_ab, tmp_lo, gvlo(uvrg_xxxx1));
856
857 gvdup_l_u16(tmp_lo, gvhi(v1), 0); // xxxx1 = { xx1, xx1 }
858 gvset_hi(uvrg_xxxx1, tmp_lo);
859
860 gvdup_l_u16(tmp_lo, gvhi(v2), 1); // yyyy2 = { yy2, yy2 }
861 gvcombine_u16(y2_ab, tmp_lo, gvlo(uvrg_xxxx2));
862
863 gvdup_l_u16(tmp_lo, gvhi(v2), 0); // xxxx2 = { xx2, xx2 }
864 gvset_hi(uvrg_xxxx2, tmp_lo);
865
866 u32 y0_y1 = (u16)a->y | (b->y << 16); // y0_y1 = { y0, y1 }
867 u32 y1_y2 = (u16)b->y | (c->y << 16); // y1_y2 = { y1, y2 }
868
869 gvsubq_u16(d0_ab, uvrg_xxxx1, uvrg_xxxx0);
870
871 u32 b1_b2 = b->b | (c->b << 16); // b1_b2 = { b1, b2 }
872
873 gvsubq_u16(d2_ab, uvrg_xxxx2, uvrg_xxxx1);
874
875 gvsubq_u16(d1_ab, y2_ab, y1_ab);
876
877 u32 b0_b1 = a->b | (b->b << 16); // b0_b1 = { b0, b1 }
878
879 u32 dx, dy, db;
880 gssub16(dx, x1_x2, x0_x1); // dx = { x1 - x0, x2 - x1 }
881 gssub16(dy, y1_y2, y0_y1); // dy = { y1 - y0, y2 - y1 }
882 gssub16(db, b1_b2, b0_b1); // db = { b1 - b0, b2 - b1 }
883
884 u32 ga_by, ga_bx;
885 gvsubq_u16(d3_ab, y1_ab, y0_ab);
886 gsmusdx(ga_by, dx, db); // ga_by = ((x1 - x0) * (b2 - b1)) -
 887 //         ((x2 - x1) * (b1 - b0))
888 gvmull_s16(ga_uvrg_x, gvlo(d0_ab), gvlo(d1_ab));
889 gsmusdx(ga_bx, db, dy); // ga_bx = ((b1 - b0) * (y2 - y1)) -
890 // ((b2 - b1) * (y1 - y0))
891 gvmlsl_s16(ga_uvrg_x, gvlo(d2_ab), gvlo(d3_ab));
892 u32 gs_bx = (s32)ga_bx >> 31; // movs
893
894 gvmull_s16(ga_uvrg_y, gvhi(d0_ab), gvhi(d1_ab));
895 if ((s32)gs_bx < 0) ga_bx = -ga_bx; // rsbmi
896
897 gvmlsl_s16(ga_uvrg_y, gvhi(d2_ab), gvhi(d3_ab));
898 u32 gs_by = (s32)ga_by >> 31; // movs
899
900 gvhreg d0;
901 gvshr_n_u64(d0, d30.v, 22); // note: on "d30 >> 22" gcc generates junk code
902
903 gvdupq_n_u32(uvrgb_phase, psx_gpu->uvrgb_phase);
904 u32 b_base = psx_gpu->uvrgb_phase + (a->b << 16);
905
906 if ((s32)gs_by < 0) ga_by = -ga_by; // rsbmi
907 gvreg gs_uvrg_x, gs_uvrg_y;
908 gs_uvrg_x.s32 = ga_uvrg_x.s32 < 0; // gs_uvrg_x = ga_uvrg_x < 0
909 gs_uvrg_y.s32 = ga_uvrg_y.s32 < 0; // gs_uvrg_y = ga_uvrg_y < 0
910
911 gvdupq_n_u32(w_mask, -psx_gpu->triangle_winding); // w_mask = { -w, -w, -w, -w }
912 shift -= 62 - 12; // shift -= (62 - FIXED_BITS)
913
914 gvreg uvrg_base;
915 gvshll_n_u16(uvrg_base, gvlo(uvrg_xxxx0), 16); // uvrg_base = uvrg0 << 16
a2cb152a 916
917 gvaddq_u32(uvrg_base, uvrg_base, uvrgb_phase);
918 gvabsq_s32(ga_uvrg_x, ga_uvrg_x); // ga_uvrg_x = abs(ga_uvrg_x)
919
920 u32 area_r_s = d0.u32[0]; // area_r_s = triangle_reciprocal
921 gvabsq_s32(ga_uvrg_y, ga_uvrg_y); // ga_uvrg_y = abs(ga_uvrg_y)
922
923 gvmull_l_u32(gw_rg_x, gvhi(ga_uvrg_x), d0, 0);
924 gvmull_l_u32(ga_uvrg_x, gvlo(ga_uvrg_x), d0, 0);
925 gvmull_l_u32(gw_rg_y, gvhi(ga_uvrg_y), d0, 0);
926 gvmull_l_u32(ga_uvrg_y, gvlo(ga_uvrg_y), d0, 0);
927
9088aca1 928#if defined(__ARM_NEON) || defined(__ARM_NEON__)
929 gvdupq_n_s64(r_shift, shift); // r_shift = { shift, shift }
a2cb152a 930 gvshlq_u64(gw_rg_x, gw_rg_x, r_shift);
931 gvshlq_u64(ga_uvrg_x, ga_uvrg_x, r_shift);
932 gvshlq_u64(gw_rg_y, gw_rg_y, r_shift);
933 gvshlq_u64(ga_uvrg_y, ga_uvrg_y, r_shift);
9088aca1 934#elif defined(__SSE2__)
935 r_shift.m = _mm_cvtsi32_si128(-shift);
936 gw_rg_x.m = _mm_srl_epi64(gw_rg_x.m, r_shift.m);
937 ga_uvrg_x.m = _mm_srl_epi64(ga_uvrg_x.m, r_shift.m);
938 gw_rg_y.m = _mm_srl_epi64(gw_rg_y.m, r_shift.m);
939 ga_uvrg_y.m = _mm_srl_epi64(ga_uvrg_y.m, r_shift.m);
940#else
941 gvdupq_n_s64(r_shift, -shift); // r_shift = { shift, shift }
942 gvshrq_u64(gw_rg_x, gw_rg_x, r_shift);
943 gvshrq_u64(ga_uvrg_x, ga_uvrg_x, r_shift);
944 gvshrq_u64(gw_rg_y, gw_rg_y, r_shift);
945 gvshrq_u64(ga_uvrg_y, ga_uvrg_y, r_shift);
946#endif
a2cb152a 947
948 gveorq(gs_uvrg_x, gs_uvrg_x, w_mask);
949 gvmovn_u64(tmp_lo, ga_uvrg_x);
950
951 gveorq(gs_uvrg_y, gs_uvrg_y, w_mask);
952 gvmovn_u64(tmp_hi, gw_rg_x);
953
954 gvcombine_u32(ga_uvrg_x, tmp_lo, tmp_hi);
955
956 gveorq(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x);
957 gvmovn_u64(tmp_lo, ga_uvrg_y);
958
959 gvsubq_u32(ga_uvrg_x, ga_uvrg_x, gs_uvrg_x);
960 gvmovn_u64(tmp_hi, gw_rg_y);
961
962 gvcombine_u32(ga_uvrg_y, tmp_lo, tmp_hi);
963
964 gveorq(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y);
965 ga_bx = ga_bx << 13;
966
967 gvsubq_u32(ga_uvrg_y, ga_uvrg_y, gs_uvrg_y);
968 ga_by = ga_by << 13;
969
970 u32 gw_bx_h, gw_by_h;
971 gw_bx_h = (u64)ga_bx * area_r_s >> 32;
972
973 gvshlq_n_u32(ga_uvrg_x, ga_uvrg_x, 4);
974 gvshlq_n_u32(ga_uvrg_y, ga_uvrg_y, 4);
975
976 gw_by_h = (u64)ga_by * area_r_s >> 32;
977 gvdup_n_u32(tmp_lo, a->x);
978 gvmlsq_l_s32(uvrg_base, ga_uvrg_x, tmp_lo, 0);
979
980 gs_bx = gs_bx ^ -psx_gpu->triangle_winding;
981 gvaddq_u32(uvrg_dx2, ga_uvrg_x, ga_uvrg_x);
982
983 gs_by = gs_by ^ -psx_gpu->triangle_winding;
984
985 u32 r11 = -shift; // r11 = negative shift for scalar lsr
986 u32 *store_a = psx_gpu->uvrg.e;
987 r11 = r11 - (32 - 13);
988 u32 *store_b = store_a + 16 / sizeof(u32);
989
990 gvaddq_u32(uvrg_dx3, uvrg_dx2, ga_uvrg_x);
991 gvst1q_inc_u32(uvrg_base, store_a, 32);
992
993 gvst1q_inc_u32(ga_uvrg_x, store_b, 32);
994 u32 g_bx = (u32)gw_bx_h >> r11;
995
996 gvst1q_inc_u32(ga_uvrg_y, store_a, 32);
997 u32 g_by = (u32)gw_by_h >> r11;
998
999 gvdup_n_u32(zero, 0);
1000
1001 gvst4_4_inc_u32(zero, gvlo(ga_uvrg_x), gvlo(uvrg_dx2), gvlo(uvrg_dx3), store_b, 32);
1002 g_bx = g_bx ^ gs_bx;
1003
1004 gvst4_4_inc_u32(zero, gvhi(ga_uvrg_x), gvhi(uvrg_dx2), gvhi(uvrg_dx3), store_b, 32);
1005 g_bx = g_bx - gs_bx;
1006
1007 g_bx = g_bx << 4;
1008 g_by = g_by ^ gs_by;
1009
1010 b_base -= g_bx * a->x;
1011 g_by = g_by - gs_by;
1012
1013 g_by = g_by << 4;
1014
1015 u32 g_bx2 = g_bx + g_bx;
1016 u32 g_bx3 = g_bx + g_bx2;
1017
1018 // 112
1019 store_b[0] = 0;
1020 store_b[1] = g_bx;
1021 store_b[2] = g_bx2;
1022 store_b[3] = g_bx3;
1023 store_b[4] = b_base;
1024 store_b[5] = g_by; // 132
1025}
1026
1027#define setup_spans_debug_check(span_edge_data_element) \
1028
1029#define setup_spans_prologue_alternate_yes() \
1030 vec_2x64s alternate_x; \
1031 vec_2x64s alternate_dx_dy; \
1032 vec_4x32s alternate_x_32; \
aafce833 1033 vec_4x16u alternate_x_16; \
a2cb152a 1034 \
1035 vec_4x16u alternate_select; \
1036 vec_4x16s y_mid_point; \
1037 \
1038 s32 y_b = v_b->y; \
1039 s64 edge_alt; \
1040 s32 edge_dx_dy_alt; \
1041 u32 edge_shift_alt \
1042
1043#define setup_spans_prologue_alternate_no() \
1044
1045#define setup_spans_prologue(alternate_active) \
1046 edge_data_struct *span_edge_data; \
1047 vec_4x32u *span_uvrg_offset; \
1048 u32 *span_b_offset; \
1049 \
1050 s32 clip; \
1051 vec_4x32u v_clip; \
1052 \
9088aca1 1053 vec_2x64s edges_xy; \
a2cb152a 1054 vec_2x32s edges_dx_dy; \
1055 vec_2x32u edge_shifts; \
1056 \
1057 vec_2x64s left_x, right_x; \
1058 vec_2x64s left_dx_dy, right_dx_dy; \
1059 vec_4x32s left_x_32, right_x_32; \
1060 vec_2x32s left_x_32_lo, right_x_32_lo; \
1061 vec_2x32s left_x_32_hi, right_x_32_hi; \
1062 vec_4x16s left_right_x_16_lo, left_right_x_16_hi; \
1063 vec_4x16s y_x4; \
1064 vec_8x16s left_edge; \
1065 vec_8x16s right_edge; \
1066 vec_4x16u span_shift; \
1067 \
1068 vec_2x32u c_0x01; \
1069 vec_4x16u c_0x04; \
1070 vec_4x16u c_0xFFFE; \
1071 vec_4x16u c_0x07; \
1072 \
1073 vec_2x32s x_starts; \
1074 vec_2x32s x_ends; \
1075 \
1076 s32 x_a = v_a->x; \
1077 s32 x_b = v_b->x; \
1078 s32 x_c = v_c->x; \
1079 s32 y_a = v_a->y; \
1080 s32 y_c = v_c->y; \
1081 \
1082 vec_4x32u uvrg; \
1083 vec_4x32u uvrg_dy; \
1084 u32 b = psx_gpu->b; \
1085 u32 b_dy = psx_gpu->b_dy; \
1086 const u32 *reciprocal_table = psx_gpu->reciprocal_table_ptr; \
1087 \
1088 gvld1q_u32(uvrg, psx_gpu->uvrg.e); \
1089 gvld1q_u32(uvrg_dy, psx_gpu->uvrg_dy.e); \
1090 gvdup_n_u32(c_0x01, 0x01); \
1091 setup_spans_prologue_alternate_##alternate_active() \
1092
1093#define setup_spans_prologue_b() \
1094 span_edge_data = psx_gpu->span_edge_data; \
1095 span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \
1096 span_b_offset = psx_gpu->span_b_offset; \
1097 \
1098 vec_8x16u c_0x0001; \
2d658c89 1099 vec_4x16u c_max_blocks_per_row; \
a2cb152a 1100 \
1101 gvdupq_n_u16(c_0x0001, 0x0001); \
1102 gvdupq_n_u16(left_edge, psx_gpu->viewport_start_x); \
1103 gvdupq_n_u16(right_edge, psx_gpu->viewport_end_x); \
1104 gvaddq_u16(right_edge, right_edge, c_0x0001); \
1105 gvdup_n_u16(c_0x04, 0x04); \
1106 gvdup_n_u16(c_0x07, 0x07); \
1107 gvdup_n_u16(c_0xFFFE, 0xFFFE); \
2d658c89 1108 gvdup_n_u16(c_max_blocks_per_row, MAX_BLOCKS_PER_ROW); \
a2cb152a 1109
9088aca1 1110#if defined(__ARM_NEON) || defined(__ARM_NEON__)
1111// better encoding, remaining bits are unused anyway
1112#define mask_edge_shifts(edge_shifts) \
1113 gvbic_n_u16(edge_shifts, 0xE0)
1114#else
1115#define mask_edge_shifts(edge_shifts) \
1116 gvand_n_u32(edge_shifts, 0x1F)
1117#endif
a2cb152a 1118
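// reciprocal_table entries pack two fields (as used below): the low 5 bits
// hold a normalization shift for edges_xy, and the bits from 10 up hold a
// fixed-point reciprocal of the edge height.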
1119#define compute_edge_delta_x2() \
1120{ \
1121 vec_2x32s heights; \
1122 vec_2x32s height_reciprocals; \
1123 vec_2x32s heights_b; \
1124 vec_2x32u widths; \
1125 \
1126 u32 edge_shift = reciprocal_table[height]; \
1127 \
1128 gvdup_n_u32(heights, height); \
1129 gvsub_u32(widths, x_ends, x_starts); \
1130 \
1131 gvdup_n_u32(edge_shifts, edge_shift); \
1132 gvsub_u32(heights_b, heights, c_0x01); \
1133 gvshr_n_u32(height_reciprocals, edge_shifts, 10); \
1134 \
1135 gvmla_s32(heights_b, x_starts, heights); \
9088aca1 1136 mask_edge_shifts(edge_shifts); \
a2cb152a 1137 gvmul_s32(edges_dx_dy, widths, height_reciprocals); \
9088aca1 1138 gvmull_s32(edges_xy, heights_b, height_reciprocals); \
a2cb152a 1139} \
1140
1141#define compute_edge_delta_x3(start_c, height_a, height_b) \
1142{ \
1143 vec_2x32s heights; \
1144 vec_2x32s height_reciprocals; \
1145 vec_2x32s heights_b; \
1146 vec_2x32u widths; \
1147 \
1148 u32 width_alt; \
1149 s32 height_b_alt; \
1150 u32 height_reciprocal_alt; \
1151 \
1152 gvcreate_u32(heights, height_a, height_b); \
1153 gvcreate_u32(edge_shifts, reciprocal_table[height_a], reciprocal_table[height_b]); \
1154 \
1155 edge_shift_alt = reciprocal_table[height_minor_b]; \
1156 \
1157 gvsub_u32(widths, x_ends, x_starts); \
1158 width_alt = x_c - start_c; \
1159 \
1160 gvshr_n_u32(height_reciprocals, edge_shifts, 10); \
1161 height_reciprocal_alt = edge_shift_alt >> 10; \
1162 \
9088aca1 1163 mask_edge_shifts(edge_shifts); \
a2cb152a 1164 edge_shift_alt &= 0x1F; \
1165 \
1166 gvsub_u32(heights_b, heights, c_0x01); \
1167 height_b_alt = height_minor_b - 1; \
1168 \
1169 gvmla_s32(heights_b, x_starts, heights); \
1170 height_b_alt += height_minor_b * start_c; \
1171 \
9088aca1 1172 gvmull_s32(edges_xy, heights_b, height_reciprocals); \
a2cb152a 1173 edge_alt = (s64)height_b_alt * height_reciprocal_alt; \
1174 \
1175 gvmul_s32(edges_dx_dy, widths, height_reciprocals); \
1176 edge_dx_dy_alt = width_alt * height_reciprocal_alt; \
1177} \
1178
1179
1180#define setup_spans_adjust_y_up() \
1181 gvsub_u32(y_x4, y_x4, c_0x04) \
1182
1183#define setup_spans_adjust_y_down() \
1184 gvadd_u32(y_x4, y_x4, c_0x04) \
1185
1186#define setup_spans_adjust_interpolants_up() \
1187 gvsubq_u32(uvrg, uvrg, uvrg_dy); \
1188 b -= b_dy \
1189
1190#define setup_spans_adjust_interpolants_down() \
1191 gvaddq_u32(uvrg, uvrg, uvrg_dy); \
1192 b += b_dy \
1193
1194
1195#define setup_spans_clip_interpolants_increment() \
1196 gvmlaq_s32(uvrg, uvrg_dy, v_clip); \
1197 b += b_dy * clip \
1198
1199#define setup_spans_clip_interpolants_decrement() \
1200 gvmlsq_s32(uvrg, uvrg_dy, v_clip); \
1201 b -= b_dy * clip \
1202
1203#define setup_spans_clip_alternate_yes() \
1204 edge_alt += edge_dx_dy_alt * (s64)(clip) \
1205
1206#define setup_spans_clip_alternate_no() \
1207
1208#define setup_spans_clip(direction, alternate_active) \
1209{ \
1210 gvdupq_n_u32(v_clip, clip); \
9088aca1 1211 gvmlal_s32(edges_xy, edges_dx_dy, gvlo(v_clip)); \
a2cb152a 1212 setup_spans_clip_alternate_##alternate_active(); \
1213 setup_spans_clip_interpolants_##direction(); \
1214} \
1215
1216
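// Expands the edge x and dx/dy values to 64-bit fixed point (applying the
// per-edge normalization shift), placing two successive rows in lanes 0/1
// and doubling the steppers so one addition advances two rows;
// setup_spans_set_x4() then emits four rows per pass.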
9088aca1 1217#define setup_spans_adjust_edges_alternate_no(left_half, right_half) \
a2cb152a 1218{ \
1219 vec_2x64s edge_shifts_64; \
9088aca1 1220 vec_2x64s edges_dx_dy_64; \
a2cb152a 1221 vec_1x64s left_x_hi, right_x_hi; \
1222 \
1223 gvmovl_s32(edge_shifts_64, edge_shifts); \
9088aca1 1224 gvshlq_u64(edges_xy, edges_xy, edge_shifts_64); \
a2cb152a 1225 \
9088aca1 1226 gvmovl_s32(edges_dx_dy_64, edges_dx_dy); \
1227 gvshlq_u64(edges_dx_dy_64, edges_dx_dy_64, edge_shifts_64); \
a2cb152a 1228 \
9088aca1 1229 gvdupq_l_s64(left_x, gv##left_half(edges_xy), 0); \
1230 gvdupq_l_s64(right_x, gv##right_half(edges_xy), 0); \
a2cb152a 1231 \
9088aca1 1232 gvdupq_l_s64(left_dx_dy, gv##left_half(edges_dx_dy_64), 0); \
1233 gvdupq_l_s64(right_dx_dy, gv##right_half(edges_dx_dy_64), 0); \
a2cb152a 1234 \
1235 gvadd_s64(left_x_hi, gvlo(left_x), gvlo(left_dx_dy)); \
1236 gvadd_s64(right_x_hi, gvlo(right_x), gvlo(right_dx_dy)); \
1237 \
1238 gvset_hi(left_x, left_x_hi); \
1239 gvset_hi(right_x, right_x_hi); \
1240 \
1241 gvaddq_s64(left_dx_dy, left_dx_dy, left_dx_dy); \
1242 gvaddq_s64(right_dx_dy, right_dx_dy, right_dx_dy); \
1243} \
1244
9088aca1 1245#define setup_spans_adjust_edges_alternate_yes(left_half, right_half) \
a2cb152a 1246{ \
9088aca1 1247 setup_spans_adjust_edges_alternate_no(left_half, right_half); \
a2cb152a 1248 s64 edge_dx_dy_alt_64; \
1249 vec_1x64s alternate_x_hi; \
1250 \
1251 gvdup_n_u16(y_mid_point, y_b); \
1252 \
1253 edge_alt <<= edge_shift_alt; \
1254 edge_dx_dy_alt_64 = (s64)edge_dx_dy_alt << edge_shift_alt; \
1255 \
1256 gvdupq_n_s64(alternate_x, edge_alt); \
1257 gvdupq_n_s64(alternate_dx_dy, edge_dx_dy_alt_64); \
1258 \
1259 gvadd_s64(alternate_x_hi, gvlo(alternate_x), gvlo(alternate_dx_dy)); \
1260 gvaddq_s64(alternate_dx_dy, alternate_dx_dy, alternate_dx_dy); \
1261 gvset_hi(alternate_x, alternate_x_hi); \
1262} \
1263
1264
1265#define setup_spans_y_select_up() \
1266 gvclt_s16(alternate_select, y_x4, y_mid_point) \
1267
1268#define setup_spans_y_select_down() \
1269 gvcgt_s16(alternate_select, y_x4, y_mid_point) \
1270
1271#define setup_spans_y_select_alternate_yes(direction) \
1272 setup_spans_y_select_##direction() \
1273
1274#define setup_spans_y_select_alternate_no(direction) \
1275
1276#define setup_spans_alternate_select_left() \
1277 gvbit(left_right_x_16_lo, alternate_x_16, alternate_select); \
1278
1279#define setup_spans_alternate_select_right() \
1280 gvbit(left_right_x_16_hi, alternate_x_16, alternate_select); \
1281
1282#define setup_spans_alternate_select_none() \
1283
1284#define setup_spans_increment_alternate_yes() \
1285{ \
1286 vec_2x32s alternate_x_32_lo, alternate_x_32_hi; \
9088aca1 1287 gvmovn_top_u64(alternate_x_32_lo, alternate_x); \
a2cb152a 1288 gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy); \
9088aca1 1289 gvmovn_top_u64(alternate_x_32_hi, alternate_x); \
a2cb152a 1290 gvaddq_s64(alternate_x, alternate_x, alternate_dx_dy); \
1291 gvcombine_u32(alternate_x_32, alternate_x_32_lo, alternate_x_32_hi); \
1292 gvmovn_u32(alternate_x_16, alternate_x_32); \
1293} \
1294
1295#define setup_spans_increment_alternate_no() \
1296
9088aca1 1297#if defined(__SSE2__) && !(defined(__AVX512BW__) && defined(__AVX512VL__))
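// pre-AVX-512 SSE has no per-lane variable 16-bit shift, so "0xFFFE << n"
// for n = 0..7 is emulated with a byte lookup: the table supplies the low
// byte and the OR pins the high byte to 0xff (correct for all 8 cases).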
1298#define setup_spans_make_span_shift(span_shift) { \
1299 gvreg tab1_ = { .u8 = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00 } }; \
1300 gvtbl2_u8(span_shift, tab1_, span_shift); \
1301 gvorr_n_u16(span_shift, 0xff00); \
1302 (void)c_0xFFFE; \
1303}
1304#else
1305#define setup_spans_make_span_shift(span_shift) \
1306 gvshl_u16(span_shift, c_0xFFFE, span_shift)
1307#endif
1308
a2cb152a 1309#define setup_spans_set_x4(alternate, direction, alternate_active) \
1310{ \
1311 gvst1q_pi_u32(uvrg, span_uvrg_offset); \
1312 *span_b_offset++ = b; \
1313 setup_spans_adjust_interpolants_##direction(); \
1314 \
1315 gvst1q_pi_u32(uvrg, span_uvrg_offset); \
1316 *span_b_offset++ = b; \
1317 setup_spans_adjust_interpolants_##direction(); \
1318 \
1319 gvst1q_pi_u32(uvrg, span_uvrg_offset); \
1320 *span_b_offset++ = b; \
1321 setup_spans_adjust_interpolants_##direction(); \
1322 \
1323 gvst1q_pi_u32(uvrg, span_uvrg_offset); \
1324 *span_b_offset++ = b; \
1325 setup_spans_adjust_interpolants_##direction(); \
1326 \
9088aca1 1327 gvmovn_top_u64(left_x_32_lo, left_x); \
1328 gvmovn_top_u64(right_x_32_lo, right_x); \
a2cb152a 1329 \
1330 gvaddq_s64(left_x, left_x, left_dx_dy); \
1331 gvaddq_s64(right_x, right_x, right_dx_dy); \
1332 \
9088aca1 1333 gvmovn_top_u64(left_x_32_hi, left_x); \
1334 gvmovn_top_u64(right_x_32_hi, right_x); \
a2cb152a 1335 \
1336 gvaddq_s64(left_x, left_x, left_dx_dy); \
1337 gvaddq_s64(right_x, right_x, right_dx_dy); \
1338 \
1339 gvcombine_s64(left_x_32, left_x_32_lo, left_x_32_hi); \
1340 gvcombine_s64(right_x_32, right_x_32_lo, right_x_32_hi); \
1341 \
1342 gvmovn_u32(left_right_x_16_lo, left_x_32); \
1343 gvmovn_u32(left_right_x_16_hi, right_x_32); \
1344 \
1345 setup_spans_increment_alternate_##alternate_active(); \
1346 setup_spans_y_select_alternate_##alternate_active(direction); \
1347 setup_spans_alternate_select_##alternate(); \
1348 \
1349 gvmax_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(left_edge)); \
1350 gvmax_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(left_edge)); \
1351 gvmin_s16(left_right_x_16_lo, left_right_x_16_lo, gvlo(right_edge)); \
1352 gvmin_s16(left_right_x_16_hi, left_right_x_16_hi, gvhi(right_edge)); \
1353 \
1354 gvsub_u16(left_right_x_16_hi, left_right_x_16_hi, left_right_x_16_lo); \
1355 gvadd_u16(left_right_x_16_hi, left_right_x_16_hi, c_0x07); \
1356 gvand(span_shift, left_right_x_16_hi, c_0x07); \
9088aca1 1357 setup_spans_make_span_shift(span_shift); \
a2cb152a 1358 gvshr_n_u16(left_right_x_16_hi, left_right_x_16_hi, 3); \
2d658c89 1359 gvmin_u16(left_right_x_16_hi, left_right_x_16_hi, c_max_blocks_per_row); \
a2cb152a 1360 \
1361 gvst4_pi_u16(left_right_x_16_lo, left_right_x_16_hi, span_shift, y_x4, \
1362 span_edge_data); \
1363 \
1364 setup_spans_adjust_y_##direction(); \
1365} \
1366
1367
1368#define setup_spans_alternate_adjust_yes() \
1369 edge_alt -= edge_dx_dy_alt * (s64)height_minor_a \
1370
1371#define setup_spans_alternate_adjust_no() \
1372
1373
9088aca1 1374#define setup_spans_down(left_half, right_half, alternate, alternate_active) \
a2cb152a 1375 setup_spans_alternate_adjust_##alternate_active(); \
1376 if(y_c > psx_gpu->viewport_end_y) \
1377 height -= y_c - psx_gpu->viewport_end_y - 1; \
1378 \
1379 clip = psx_gpu->viewport_start_y - y_a; \
1380 if(clip > 0) \
1381 { \
1382 height -= clip; \
1383 y_a += clip; \
1384 setup_spans_clip(increment, alternate_active); \
1385 } \
1386 \
1387 setup_spans_prologue_b(); \
1388 \
2d658c89 1389 if (height > 512) \
1390 height = 512; \
1391 if (height > 0) \
a2cb152a 1392 { \
1393 u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32) \
1394 | (u32)((y_a + 1) << 16) | (u16)y_a; \
1395 gvcreate_u64(y_x4, y_x4_); \
9088aca1 1396 setup_spans_adjust_edges_alternate_##alternate_active(left_half, right_half); \
a2cb152a 1397 \
1398 psx_gpu->num_spans = height; \
1399 do \
1400 { \
1401 setup_spans_set_x4(alternate, down, alternate_active); \
1402 height -= 4; \
1403 } while(height > 0); \
1404 } \
1405
1406
1407#define setup_spans_alternate_pre_increment_yes() \
1408 edge_alt += edge_dx_dy_alt \
1409
1410#define setup_spans_alternate_pre_increment_no() \
1411
1412#define setup_spans_up_decrement_height_yes() \
1413 height-- \
1414
1415#define setup_spans_up_decrement_height_no() \
1416 {} \
1417
9088aca1 1418#define setup_spans_up(left_half, right_half, alternate, alternate_active) \
a2cb152a 1419 setup_spans_alternate_adjust_##alternate_active(); \
1420 y_a--; \
1421 \
1422 if(y_c < psx_gpu->viewport_start_y) \
1423 height -= psx_gpu->viewport_start_y - y_c; \
1424 else \
1425 setup_spans_up_decrement_height_##alternate_active(); \
1426 \
1427 clip = y_a - psx_gpu->viewport_end_y; \
1428 if(clip > 0) \
1429 { \
1430 height -= clip; \
1431 y_a -= clip; \
1432 setup_spans_clip(decrement, alternate_active); \
1433 } \
1434 \
1435 setup_spans_prologue_b(); \
1436 \
2d658c89 1437 if (height > 512) \
1438 height = 512; \
1439 if (height > 0) \
a2cb152a 1440 { \
1441 u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32) \
1442 | (u32)((y_a - 1) << 16) | (u16)y_a; \
1443 gvcreate_u64(y_x4, y_x4_); \
9088aca1 1444 gvaddw_s32(edges_xy, edges_xy, edges_dx_dy); \
a2cb152a 1445 setup_spans_alternate_pre_increment_##alternate_active(); \
9088aca1 1446 setup_spans_adjust_edges_alternate_##alternate_active(left_half, right_half); \
a2cb152a 1447 setup_spans_adjust_interpolants_up(); \
1448 \
1449 psx_gpu->num_spans = height; \
1450 while(height > 0) \
1451 { \
1452 setup_spans_set_x4(alternate, up, alternate_active); \
1453 height -= 4; \
1454 } \
1455 } \
1456
9088aca1 1457#define half_left lo
1458#define half_right hi
a2cb152a 1459
1460#define setup_spans_up_up(minor, major) \
1461 setup_spans_prologue(yes); \
1462 s32 height_minor_a = y_a - y_b; \
1463 s32 height_minor_b = y_b - y_c; \
1464 s32 height = y_a - y_c; \
1465 \
1466 gvdup_n_u32(x_starts, x_a); \
1467 gvcreate_u32(x_ends, x_c, x_b); \
1468 \
1469 compute_edge_delta_x3(x_b, height, height_minor_a); \
9088aca1 1470 setup_spans_up(half_##major, half_##minor, minor, yes) \
a2cb152a 1471
1472void setup_spans_up_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1473 vertex_struct *v_b, vertex_struct *v_c)
1474{
1475#if 0
1476 setup_spans_up_left_(psx_gpu, v_a, v_b, v_c);
1477 return;
1478#endif
1479 setup_spans_up_up(left, right)
1480}
1481
1482void setup_spans_up_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1483 vertex_struct *v_b, vertex_struct *v_c)
1484{
1485#if 0
1486 setup_spans_up_right_(psx_gpu, v_a, v_b, v_c);
1487 return;
1488#endif
1489 setup_spans_up_up(right, left)
1490}
1491
1492#define setup_spans_down_down(minor, major) \
1493 setup_spans_prologue(yes); \
1494 s32 height_minor_a = y_b - y_a; \
1495 s32 height_minor_b = y_c - y_b; \
1496 s32 height = y_c - y_a; \
1497 \
1498 gvdup_n_u32(x_starts, x_a); \
1499 gvcreate_u32(x_ends, x_c, x_b); \
1500 \
1501 compute_edge_delta_x3(x_b, height, height_minor_a); \
9088aca1 1502 setup_spans_down(half_##major, half_##minor, minor, yes) \
a2cb152a 1503
1504void setup_spans_down_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1505 vertex_struct *v_b, vertex_struct *v_c)
1506{
1507#if 0
1508 setup_spans_down_left_(psx_gpu, v_a, v_b, v_c);
1509 return;
1510#endif
1511 setup_spans_down_down(left, right)
1512}
1513
1514void setup_spans_down_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1515 vertex_struct *v_b, vertex_struct *v_c)
1516{
1517#if 0
1518 setup_spans_down_right_(psx_gpu, v_a, v_b, v_c);
1519 return;
1520#endif
1521 setup_spans_down_down(right, left)
1522}
1523
1524#define setup_spans_up_flat() \
1525 s32 height = y_a - y_c; \
1526 \
1527 compute_edge_delta_x2(); \
9088aca1 1528 setup_spans_up(half_left, half_right, none, no) \
a2cb152a 1529
1530void setup_spans_up_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1531 vertex_struct *v_b, vertex_struct *v_c)
1532{
1533#if 0
1534 setup_spans_up_a_(psx_gpu, v_a, v_b, v_c);
1535 return;
1536#endif
1537 setup_spans_prologue(no);
1538
1539 gvcreate_u32(x_starts, x_a, x_b);
1540 gvdup_n_u32(x_ends, x_c);
1541
1542 setup_spans_up_flat()
1543}
1544
1545void setup_spans_up_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1546 vertex_struct *v_b, vertex_struct *v_c)
1547{
1548#if 0
1549 setup_spans_up_b_(psx_gpu, v_a, v_b, v_c);
1550 return;
1551#endif
1552 setup_spans_prologue(no);
1553
1554 gvdup_n_u32(x_starts, x_a);
1555 gvcreate_u32(x_ends, x_b, x_c);
1556
1557 setup_spans_up_flat()
1558}
1559
1560#define setup_spans_down_flat() \
1561 s32 height = y_c - y_a; \
1562 \
1563 compute_edge_delta_x2(); \
9088aca1 1564 setup_spans_down(half_left, half_right, none, no) \
a2cb152a 1565
1566void setup_spans_down_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1567 vertex_struct *v_b, vertex_struct *v_c)
1568{
1569#if 0
1570 setup_spans_down_a_(psx_gpu, v_a, v_b, v_c);
1571 return;
1572#endif
1573 setup_spans_prologue(no);
1574
1575 gvcreate_u32(x_starts, x_a, x_b);
1576 gvdup_n_u32(x_ends, x_c);
1577
1578 setup_spans_down_flat()
1579}
1580
1581void setup_spans_down_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1582 vertex_struct *v_b, vertex_struct *v_c)
1583{
1584#if 0
1585 setup_spans_down_b_(psx_gpu, v_a, v_b, v_c);
1586 return;
1587#endif
 1588  setup_spans_prologue(no);
1589
1590 gvdup_n_u32(x_starts, x_a);
1591 gvcreate_u32(x_ends, x_b, x_c);
1592
1593 setup_spans_down_flat()
1594}
1595
1596void setup_spans_up_down(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
1597 vertex_struct *v_b, vertex_struct *v_c)
1598{
1599#if 0
1600 setup_spans_up_down_(psx_gpu, v_a, v_b, v_c);
1601 return;
1602#endif
1603 setup_spans_prologue(no);
1604
1605 s32 y_b = v_b->y;
1606 s64 edge_alt;
1607 s32 edge_dx_dy_alt;
1608 u32 edge_shift_alt;
1609
1610 s32 middle_y = y_a;
1611 s32 height_minor_a = y_a - y_b;
1612 s32 height_minor_b = y_c - y_a;
1613 s32 height_major = y_c - y_b;
1614
1615 vec_2x64s edges_xy_b;
1616 vec_1x64s edges_xy_b_left;
1617 vec_2x32s edges_dx_dy_b;
1618 vec_2x32u edge_shifts_b;
1619
1620 vec_2x32s height_increment;
1621
1622 gvcreate_u32(x_starts, x_a, x_c);
1623 gvdup_n_u32(x_ends, x_b);
1624
1625 compute_edge_delta_x3(x_a, height_minor_a, height_major);
1626
1627 gvcreate_s32(height_increment, 0, height_minor_b);
1628
9088aca1 1629 gvmlal_s32(edges_xy, edges_dx_dy, height_increment);
a2cb152a 1630
1631 gvcreate_s64(edges_xy_b_left, edge_alt);
9088aca1 1632 gvcombine_s64(edges_xy_b, edges_xy_b_left, gvhi(edges_xy));
a2cb152a 1633
1634 edge_shifts_b = edge_shifts;
1635 gvmov_l_u32(edge_shifts_b, edge_shift_alt, 0);
1636
1637 gvneg_s32(edges_dx_dy_b, edges_dx_dy);
1638 gvmov_l_s32(edges_dx_dy_b, edge_dx_dy_alt, 0);
1639
1640 y_a--;
1641
1642 if(y_b < psx_gpu->viewport_start_y)
1643 height_minor_a -= psx_gpu->viewport_start_y - y_b;
1644
1645 clip = y_a - psx_gpu->viewport_end_y;
1646 if(clip > 0)
1647 {
1648 height_minor_a -= clip;
1649 y_a -= clip;
1650 setup_spans_clip(decrement, no);
1651 }
1652
1653 setup_spans_prologue_b();
1654
2d658c89 1655 if (height_minor_a > 512)
1656 height_minor_a = 512;
1657 if (height_minor_a > 0)
a2cb152a 1658 {
1659 u64 y_x4_ = ((u64)(y_a - 3) << 48) | ((u64)(u16)(y_a - 2) << 32)
1660 | (u32)((y_a - 1) << 16) | (u16)y_a;
1661 gvcreate_u64(y_x4, y_x4_);
9088aca1 1662 gvaddw_s32(edges_xy, edges_xy, edges_dx_dy);
1663 setup_spans_adjust_edges_alternate_no(lo, hi);
a2cb152a 1664 setup_spans_adjust_interpolants_up();
1665
1666 psx_gpu->num_spans = height_minor_a;
1667 while(height_minor_a > 0)
1668 {
1669 setup_spans_set_x4(none, up, no);
1670 height_minor_a -= 4;
1671 }
1672
1673 span_edge_data += height_minor_a;
1674 span_uvrg_offset += height_minor_a;
1675 span_b_offset += height_minor_a;
1676 }
1677
9088aca1 1678 edges_xy = edges_xy_b;
a2cb152a 1679 edges_dx_dy = edges_dx_dy_b;
1680 edge_shifts = edge_shifts_b;
1681
1682 gvld1q_u32(uvrg, psx_gpu->uvrg.e);
1683 b = psx_gpu->b;
1684
1685 y_a = middle_y;
1686
1687 if(y_c > psx_gpu->viewport_end_y)
1688 height_minor_b -= y_c - psx_gpu->viewport_end_y - 1;
1689
1690 clip = psx_gpu->viewport_start_y - y_a;
1691 if(clip > 0)
1692 {
1693 height_minor_b -= clip;
1694 y_a += clip;
1695 setup_spans_clip(increment, no);
1696 }
1697
2d658c89 1698 if (height_minor_b > 512)
1699 height_minor_b = 512;
1700 if (height_minor_b > 0)
a2cb152a 1701 {
1702 u64 y_x4_ = ((u64)(y_a + 3) << 48) | ((u64)(u16)(y_a + 2) << 32)
1703 | (u32)((y_a + 1) << 16) | (u16)y_a;
1704 gvcreate_u64(y_x4, y_x4_);
9088aca1 1705 setup_spans_adjust_edges_alternate_no(lo, hi);
a2cb152a 1706
1707 // FIXME: overflow corner case
1708 if(psx_gpu->num_spans + height_minor_b == MAX_SPANS)
1709 height_minor_b &= ~3;
1710
1711 psx_gpu->num_spans += height_minor_b;
1712 while(height_minor_b > 0)
1713 {
1714 setup_spans_set_x4(none, down, no);
1715 height_minor_b -= 4;
1716 }
1717 }
1718}
1719
1720
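/* setup_blocks_*: convert the spans prepared above into 8-pixel render
   blocks. "direct" variants write finished pixels straight to VRAM, while
   "indirect" variants queue block_structs in psx_gpu->blocks for the later
   texture/shade/blend passes, flushing whenever MAX_BLOCKS would be
   exceeded. */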
1721#define dither_table_entry_normal(value) \
1722 (value) \
1723
1724#define setup_blocks_load_msb_mask_indirect() \
1725
1726#define setup_blocks_load_msb_mask_direct() \
1727 vec_8x16u msb_mask; \
1728 gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \
1729
1730#define setup_blocks_variables_shaded_textured(target) \
1731 vec_4x32u u_block; \
1732 vec_4x32u v_block; \
1733 vec_4x32u r_block; \
1734 vec_4x32u g_block; \
1735 vec_4x32u b_block; \
1736 vec_4x32u uvrg_dx; \
1737 vec_4x32u uvrg_dx4; \
1738 vec_4x32u uvrg_dx8; \
1739 vec_4x32u uvrg; \
1740 vec_16x8u texture_mask; \
1741 vec_8x8u texture_mask_lo, texture_mask_hi; \
1742 u32 b_dx = psx_gpu->b_block_span.e[1]; \
1743 u32 b_dx4 = b_dx << 2; \
1744 u32 b_dx8 = b_dx << 3; \
1745 u32 b; \
1746 \
1747 gvld1q_u32(uvrg_dx, psx_gpu->uvrg_dx.e); \
1748 gvshlq_n_u32(uvrg_dx4, uvrg_dx, 2); \
1749 gvshlq_n_u32(uvrg_dx8, uvrg_dx, 3); \
9088aca1 1750 gvld2_u8_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width); \
a2cb152a 1751 gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi) \
1752
1753#define setup_blocks_variables_shaded_untextured(target) \
1754 vec_4x32u r_block; \
1755 vec_4x32u g_block; \
1756 vec_4x32u b_block; \
1757 vec_4x32u rgb_dx; \
1758 vec_2x32u rgb_dx_lo, rgb_dx_hi; \
1759 vec_4x32u rgb_dx4; \
1760 vec_4x32u rgb_dx8; \
1761 vec_4x32u rgb; \
1762 vec_2x32u rgb_lo, rgb_hi; \
1763 \
1764 vec_8x8u d64_0x07; \
1765 vec_8x8u d64_1; \
1766 vec_8x8u d64_4; \
1767 vec_8x8u d64_128; \
1768 \
1769 gvdup_n_u8(d64_0x07, 0x07); \
1770 gvdup_n_u8(d64_1, 1); \
1771 gvdup_n_u8(d64_4, 4); \
9088aca1 1772 gvdup_n_u8(d64_128, 128u); \
a2cb152a 1773 \
1774 gvld1_u32(rgb_dx_lo, &psx_gpu->uvrg_dx.e[2]); \
1775 gvcreate_u32(rgb_dx_hi, psx_gpu->b_block_span.e[1], 0); \
1776 gvcombine_u32(rgb_dx, rgb_dx_lo, rgb_dx_hi); \
1777 gvshlq_n_u32(rgb_dx4, rgb_dx, 2); \
1778 gvshlq_n_u32(rgb_dx8, rgb_dx, 3) \
1779
1780#define setup_blocks_variables_unshaded_textured(target) \
1781 vec_4x32u u_block; \
1782 vec_4x32u v_block; \
1783 vec_2x32u uv_dx; \
1784 vec_2x32u uv_dx4; \
1785 vec_2x32u uv_dx8; \
1786 vec_2x32u uv; \
1787 vec_16x8u texture_mask; \
1788 vec_8x8u texture_mask_lo, texture_mask_hi; \
1789 \
1790 gvld1_u32(uv_dx, psx_gpu->uvrg_dx.e); \
1791 gvld1_u32(uv, psx_gpu->uvrg.e); \
1792 gvshl_n_u32(uv_dx4, uv_dx, 2); \
1793 gvshl_n_u32(uv_dx8, uv_dx, 3); \
9088aca1 1794 gvld2_u8_dup(texture_mask_lo, texture_mask_hi, &psx_gpu->texture_mask_width); \
a2cb152a 1795 gvcombine_u16(texture_mask, texture_mask_lo, texture_mask_hi) \
1796
1797#define setup_blocks_variables_unshaded_untextured_direct() \
1798 gvorrq(colors, colors, msb_mask) \
1799
1800#define setup_blocks_variables_unshaded_untextured_indirect() \
1801
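/* The flat color arrives as 8 bits per channel (r in the low byte); it is
   packed down to the native 15-bit frame buffer format as
   (r >> 3) | ((g >> 3) << 5) | ((b >> 3) << 10). */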
1802#define setup_blocks_variables_unshaded_untextured(target) \
1803 u32 color = psx_gpu->triangle_color; \
1804 vec_8x16u colors; \
1805 \
1806 u32 color_r = color & 0xFF; \
1807 u32 color_g = (color >> 8) & 0xFF; \
1808 u32 color_b = (color >> 16) & 0xFF; \
1809 \
1810 color = (color_r >> 3) | ((color_g >> 3) << 5) | \
1811 ((color_b >> 3) << 10); \
1812 gvdupq_n_u16(colors, color); \
1813 setup_blocks_variables_unshaded_untextured_##target() \
1814
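/* Dithering: dither_table[y & 3] packs the four per-column offsets of this
   scanline into one 32-bit word; rotating it by (left_x & 3) * 8 aligns the
   pattern with the block's starting x, keeping the 4x4 dither matrix fixed
   in screen space. */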
1815#define setup_blocks_span_initialize_dithered_textured() \
1816 vec_8x16u dither_offsets; \
1817 gvshll_n_s8(dither_offsets, dither_offsets_short, 4) \
1818
1819#define setup_blocks_span_initialize_dithered_untextured() \
1820 vec_8x8u dither_offsets; \
1821 gvadd_u8(dither_offsets, dither_offsets_short, d64_4) \
1822
1823#define setup_blocks_span_initialize_dithered(texturing) \
1824 u32 dither_row = psx_gpu->dither_table[y & 0x3]; \
1825 u32 dither_shift = (span_edge_data->left_x & 0x3) * 8; \
1826 vec_8x8s dither_offsets_short; \
1827 \
1828 dither_row = \
1829 (dither_row >> dither_shift) | (dither_row << (32 - dither_shift)); \
1830 gvdup_n_u32(dither_offsets_short, dither_row); \
1831 setup_blocks_span_initialize_dithered_##texturing() \
1832
1833#define setup_blocks_span_initialize_undithered(texturing) \
1834
1835#define setup_blocks_span_initialize_shaded_textured() \
1836{ \
1837 u32 left_x = span_edge_data->left_x; \
1838 vec_4x32u block_span; \
1839 vec_4x32u v_left_x; \
1840 \
1841 gvld1q_u32(uvrg, span_uvrg_offset); \
1842 gvdupq_n_u32(v_left_x, left_x); \
1843 gvmlaq_u32(uvrg, uvrg_dx, v_left_x); \
1844 b = *span_b_offset; \
1845 b += b_dx * left_x; \
1846 \
1847 gvdupq_l_u32(u_block, gvlo(uvrg), 0); \
1848 gvdupq_l_u32(v_block, gvlo(uvrg), 1); \
1849 gvdupq_l_u32(r_block, gvhi(uvrg), 0); \
1850 gvdupq_l_u32(g_block, gvhi(uvrg), 1); \
1851 gvdupq_n_u32(b_block, b); \
1852 \
1853 gvld1q_u32(block_span, psx_gpu->u_block_span.e); \
1854 gvaddq_u32(u_block, u_block, block_span); \
1855 gvld1q_u32(block_span, psx_gpu->v_block_span.e); \
1856 gvaddq_u32(v_block, v_block, block_span); \
1857 gvld1q_u32(block_span, psx_gpu->r_block_span.e); \
1858 gvaddq_u32(r_block, r_block, block_span); \
1859 gvld1q_u32(block_span, psx_gpu->g_block_span.e); \
1860 gvaddq_u32(g_block, g_block, block_span); \
1861 gvld1q_u32(block_span, psx_gpu->b_block_span.e); \
1862 gvaddq_u32(b_block, b_block, block_span); \
1863}
1864
1865#define setup_blocks_span_initialize_shaded_untextured() \
1866{ \
1867 u32 left_x = span_edge_data->left_x; \
1868 u32 *span_uvrg_offset_high = (u32 *)span_uvrg_offset + 2; \
1869 vec_4x32u block_span; \
1870 vec_4x32u v_left_x; \
1871 \
1872 gvld1_u32(rgb_lo, span_uvrg_offset_high); \
1873 gvcreate_u32(rgb_hi, *span_b_offset, 0); \
1874 gvcombine_u32(rgb, rgb_lo, rgb_hi); \
1875 gvdupq_n_u32(v_left_x, left_x); \
1876 gvmlaq_u32(rgb, rgb_dx, v_left_x); \
1877 \
1878 gvdupq_l_u32(r_block, gvlo(rgb), 0); \
1879 gvdupq_l_u32(g_block, gvlo(rgb), 1); \
1880 gvdupq_l_u32(b_block, gvhi(rgb), 0); \
1881 \
1882 gvld1q_u32(block_span, psx_gpu->r_block_span.e); \
1883 gvaddq_u32(r_block, r_block, block_span); \
1884 gvld1q_u32(block_span, psx_gpu->g_block_span.e); \
1885 gvaddq_u32(g_block, g_block, block_span); \
1886 gvld1q_u32(block_span, psx_gpu->b_block_span.e); \
1887 gvaddq_u32(b_block, b_block, block_span); \
1888} \
1889
1890#define setup_blocks_span_initialize_unshaded_textured() \
1891{ \
1892 u32 left_x = span_edge_data->left_x; \
1893 vec_4x32u block_span; \
1894 vec_2x32u v_left_x; \
1895 \
1896 gvld1_u32(uv, span_uvrg_offset); \
1897 gvdup_n_u32(v_left_x, left_x); \
1898 gvmla_u32(uv, uv_dx, v_left_x); \
1899 \
1900 gvdupq_l_u32(u_block, uv, 0); \
1901 gvdupq_l_u32(v_block, uv, 1); \
1902 \
1903 gvld1q_u32(block_span, psx_gpu->u_block_span.e); \
1904 gvaddq_u32(u_block, u_block, block_span); \
1905 gvld1q_u32(block_span, psx_gpu->v_block_span.e); \
1906 gvaddq_u32(v_block, v_block, block_span); \
1907} \
1908
1909#define setup_blocks_span_initialize_unshaded_untextured() \
1910
1911#define setup_blocks_texture_swizzled() \
1912{ \
1913 vec_8x8u u_saved = u; \
1914 gvsli_n_u8(u, v, 4); \
1915 gvsri_n_u8(v, u_saved, 4); \
1916} \
1917
1918#define setup_blocks_texture_unswizzled() \
1919
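/* Interpolant stepping: each *_block register holds 16.16 fixed-point
   values for pixels 0-3 of the current block. A narrowing shift by 16
   extracts the integer part for those pixels, an add-high-narrow against
   dx4 (four times the per-pixel delta) produces pixels 4-7, and adding dx8
   advances the registers to the next 8-pixel block. */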
1920#define setup_blocks_store_shaded_textured(swizzling, dithering, target, \
1921 edge_type) \
1922{ \
1923 vec_8x16u u_whole; \
1924 vec_8x16u v_whole; \
1925 vec_8x16u r_whole; \
1926 vec_8x16u g_whole; \
1927 vec_8x16u b_whole; \
1928 vec_4x16u u_whole_lo, u_whole_hi; \
1929 vec_4x16u v_whole_lo, v_whole_hi; \
1930 vec_4x16u r_whole_lo, r_whole_hi; \
1931 vec_4x16u g_whole_lo, g_whole_hi; \
1932 vec_4x16u b_whole_lo, b_whole_hi; \
1933 \
1934 vec_8x8u u; \
1935 vec_8x8u v; \
1936 vec_8x8u r; \
1937 vec_8x8u g; \
1938 vec_8x8u b; \
1939 \
1940 vec_4x32u dx4; \
1941 vec_4x32u dx8; \
1942 \
1943 gvshrn_n_u32(u_whole_lo, u_block, 16); \
1944 gvshrn_n_u32(v_whole_lo, v_block, 16); \
1945 gvshrn_n_u32(r_whole_lo, r_block, 16); \
1946 gvshrn_n_u32(g_whole_lo, g_block, 16); \
1947 gvshrn_n_u32(b_whole_lo, b_block, 16); \
1948 \
1949 gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 0); \
1950 gvaddhn_u32(u_whole_hi, u_block, dx4); \
1951 gvdupq_l_u32(dx4, gvlo(uvrg_dx4), 1); \
1952 gvaddhn_u32(v_whole_hi, v_block, dx4); \
1953 gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 0); \
1954 gvaddhn_u32(r_whole_hi, r_block, dx4); \
1955 gvdupq_l_u32(dx4, gvhi(uvrg_dx4), 1); \
1956 gvaddhn_u32(g_whole_hi, g_block, dx4); \
1957 gvdupq_n_u32(dx4, b_dx4); \
1958 gvaddhn_u32(b_whole_hi, b_block, dx4); \
1959 \
1960 gvcombine_u16(u_whole, u_whole_lo, u_whole_hi); \
1961 gvcombine_u16(v_whole, v_whole_lo, v_whole_hi); \
1962 gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \
1963 gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \
1964 gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \
1965 gvmovn_u16(u, u_whole); \
1966 gvmovn_u16(v, v_whole); \
1967 gvmovn_u16(r, r_whole); \
1968 gvmovn_u16(g, g_whole); \
1969 gvmovn_u16(b, b_whole); \
1970 \
1971 gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 0); \
1972 gvaddq_u32(u_block, u_block, dx8); \
1973 gvdupq_l_u32(dx8, gvlo(uvrg_dx8), 1); \
1974 gvaddq_u32(v_block, v_block, dx8); \
1975 gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 0); \
1976 gvaddq_u32(r_block, r_block, dx8); \
1977 gvdupq_l_u32(dx8, gvhi(uvrg_dx8), 1); \
1978 gvaddq_u32(g_block, g_block, dx8); \
1979 gvdupq_n_u32(dx8, b_dx8); \
1980 gvaddq_u32(b_block, b_block, dx8); \
1981 \
1982 gvand(u, u, gvlo(texture_mask)); \
1983 gvand(v, v, gvhi(texture_mask)); \
1984 setup_blocks_texture_##swizzling(); \
1985 \
1986 gvst2_u8(u, v, (u8 *)block->uv.e); \
1987 gvst1_u8(r, block->r.e); \
1988 gvst1_u8(g, block->g.e); \
1989 gvst1_u8(b, block->b.e); \
1990 gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e); \
1991 block->fb_ptr = fb_ptr; \
1992} \
1993
1994#define setup_blocks_store_unshaded_textured(swizzling, dithering, target, \
1995 edge_type) \
1996{ \
1997 vec_8x16u u_whole; \
1998 vec_8x16u v_whole; \
1999 vec_4x16u u_whole_lo, u_whole_hi; \
2000 vec_4x16u v_whole_lo, v_whole_hi; \
2001 \
2002 vec_8x8u u; \
2003 vec_8x8u v; \
2004 \
2005 vec_4x32u dx4; \
2006 vec_4x32u dx8; \
2007 \
2008 gvshrn_n_u32(u_whole_lo, u_block, 16); \
2009 gvshrn_n_u32(v_whole_lo, v_block, 16); \
2010 \
2011 gvdupq_l_u32(dx4, uv_dx4, 0); \
2012 gvaddhn_u32(u_whole_hi, u_block, dx4); \
2013 gvdupq_l_u32(dx4, uv_dx4, 1); \
2014 gvaddhn_u32(v_whole_hi, v_block, dx4); \
2015 \
2016 gvcombine_u16(u_whole, u_whole_lo, u_whole_hi); \
2017 gvcombine_u16(v_whole, v_whole_lo, v_whole_hi); \
2018 gvmovn_u16(u, u_whole); \
2019 gvmovn_u16(v, v_whole); \
2020 \
2021 gvdupq_l_u32(dx8, uv_dx8, 0); \
2022 gvaddq_u32(u_block, u_block, dx8); \
2023 gvdupq_l_u32(dx8, uv_dx8, 1); \
2024 gvaddq_u32(v_block, v_block, dx8); \
2025 \
2026 gvand(u, u, gvlo(texture_mask)); \
2027 gvand(v, v, gvhi(texture_mask)); \
2028 setup_blocks_texture_##swizzling(); \
2029 \
2030 gvst2_u8(u, v, (u8 *)block->uv.e); \
2031 gvst1q_u16(dither_offsets, (u16 *)block->dither_offsets.e); \
2032 block->fb_ptr = fb_ptr; \
2033} \
2034
2035#define setup_blocks_store_shaded_untextured_dithered() \
2036 gvqadd_u8(r, r, dither_offsets); \
2037 gvqadd_u8(g, g, dither_offsets); \
2038 gvqadd_u8(b, b, dither_offsets); \
2039 \
2040 gvqsub_u8(r, r, d64_4); \
2041 gvqsub_u8(g, g, d64_4); \
2042 gvqsub_u8(b, b, d64_4) \
2043
2044#define setup_blocks_store_shaded_untextured_undithered() \
2045
2046#define setup_blocks_store_untextured_pixels_indirect_full(_pixels) \
2047 gvst1q_u16(_pixels, block->pixels.e); \
2048 block->fb_ptr = fb_ptr \
2049
2050#define setup_blocks_store_untextured_pixels_indirect_edge(_pixels) \
2051 gvst1q_u16(_pixels, block->pixels.e); \
2052 block->fb_ptr = fb_ptr \
2053
2054#define setup_blocks_store_shaded_untextured_seed_pixels_indirect() \
2055 gvmull_u8(pixels, r, d64_1) \
2056
2057#define setup_blocks_store_untextured_pixels_direct_full(_pixels) \
2058 gvst1q_u16(_pixels, fb_ptr) \
2059
2060#define setup_blocks_store_untextured_pixels_direct_edge(_pixels) \
2061{ \
2062 vec_8x16u fb_pixels; \
2063 vec_8x16u draw_mask; \
2064 vec_8x16u test_mask; \
2065 \
2066 gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
2067 gvld1q_u16(fb_pixels, fb_ptr); \
2068 gvdupq_n_u16(draw_mask, span_edge_data->right_mask); \
2069 gvtstq_u16(draw_mask, draw_mask, test_mask); \
2070 gvbifq(fb_pixels, _pixels, draw_mask); \
2071 gvst1q_u16(fb_pixels, fb_ptr); \
2072} \
2073
2074#define setup_blocks_store_shaded_untextured_seed_pixels_direct() \
2075 pixels = msb_mask; \
2076 gvmlal_u8(pixels, r, d64_1) \
2077
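/* The 15-bit pixel is assembled with widening multiply-accumulates: after
   the low bits are cleared, r contributes via * 1 (bits 0-4), g via * 4
   (<< 2, bits 5-9) and b via * 128 (<< 7, bits 10-14). The "direct" seed
   starts from msb_mask so the mask bit is carried along for free. */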
2078#define setup_blocks_store_shaded_untextured(swizzling, dithering, target, \
2079 edge_type) \
2080{ \
2081 vec_8x16u r_whole; \
2082 vec_8x16u g_whole; \
2083 vec_8x16u b_whole; \
2084 vec_4x16u r_whole_lo, r_whole_hi; \
2085 vec_4x16u g_whole_lo, g_whole_hi; \
2086 vec_4x16u b_whole_lo, b_whole_hi; \
2087 \
2088 vec_8x8u r; \
2089 vec_8x8u g; \
2090 vec_8x8u b; \
2091 \
2092 vec_4x32u dx4; \
2093 vec_4x32u dx8; \
2094 \
2095 vec_8x16u pixels; \
2096 \
2097 gvshrn_n_u32(r_whole_lo, r_block, 16); \
2098 gvshrn_n_u32(g_whole_lo, g_block, 16); \
2099 gvshrn_n_u32(b_whole_lo, b_block, 16); \
2100 \
2101 gvdupq_l_u32(dx4, gvlo(rgb_dx4), 0); \
2102 gvaddhn_u32(r_whole_hi, r_block, dx4); \
2103 gvdupq_l_u32(dx4, gvlo(rgb_dx4), 1); \
2104 gvaddhn_u32(g_whole_hi, g_block, dx4); \
2105 gvdupq_l_u32(dx4, gvhi(rgb_dx4), 0); \
2106 gvaddhn_u32(b_whole_hi, b_block, dx4); \
2107 \
2108 gvcombine_u16(r_whole, r_whole_lo, r_whole_hi); \
2109 gvcombine_u16(g_whole, g_whole_lo, g_whole_hi); \
2110 gvcombine_u16(b_whole, b_whole_lo, b_whole_hi); \
2111 gvmovn_u16(r, r_whole); \
2112 gvmovn_u16(g, g_whole); \
2113 gvmovn_u16(b, b_whole); \
2114 \
2115 gvdupq_l_u32(dx8, gvlo(rgb_dx8), 0); \
2116 gvaddq_u32(r_block, r_block, dx8); \
2117 gvdupq_l_u32(dx8, gvlo(rgb_dx8), 1); \
2118 gvaddq_u32(g_block, g_block, dx8); \
2119 gvdupq_l_u32(dx8, gvhi(rgb_dx8), 0); \
2120 gvaddq_u32(b_block, b_block, dx8); \
2121 \
2122 setup_blocks_store_shaded_untextured_##dithering(); \
2123 \
2124 gvshr_n_u8(r, r, 3); \
2125 gvbic(g, g, d64_0x07); \
2126 gvbic(b, b, d64_0x07); \
2127 \
2128 setup_blocks_store_shaded_untextured_seed_pixels_##target(); \
2129 gvmlal_u8(pixels, g, d64_4); \
2130 gvmlal_u8(pixels, b, d64_128); \
2131 \
2132 setup_blocks_store_untextured_pixels_##target##_##edge_type(pixels); \
2133} \
2134
2135#define setup_blocks_store_unshaded_untextured(swizzling, dithering, target, \
2136 edge_type) \
2137 setup_blocks_store_untextured_pixels_##target##_##edge_type(colors) \
2138
2139#define setup_blocks_store_draw_mask_textured_indirect(_block, bits) \
2140 (_block)->draw_mask_bits = bits \
2141
2142#define setup_blocks_store_draw_mask_untextured_indirect(_block, bits) \
2143{ \
2144 vec_8x16u bits_mask; \
2145 vec_8x16u test_mask; \
2146 \
2147 gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
2148 gvdupq_n_u16(bits_mask, bits); \
2149 gvtstq_u16(bits_mask, bits_mask, test_mask); \
2150 gvst1q_u16(bits_mask, (_block)->draw_mask.e); \
2151} \
2152
2153#define setup_blocks_store_draw_mask_untextured_direct(_block, bits) \
2154
2155#define setup_blocks_add_blocks_indirect() \
2156 num_blocks += span_num_blocks; \
2157 \
2158 if(num_blocks > MAX_BLOCKS) \
2159 { \
2160 psx_gpu->num_blocks = num_blocks - span_num_blocks; \
2161 flush_render_block_buffer(psx_gpu); \
2162 num_blocks = span_num_blocks; \
2163 block = psx_gpu->blocks; \
2164 } \
2165
2166#define setup_blocks_add_blocks_direct() \
2167
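/* Main block-emission loop: walk the span list, point fb_ptr at the span's
   left edge (VRAM lines are 1024 halfwords), emit full 8-pixel blocks, then
   one final block whose draw mask is the span's right_mask so partial
   widths are clipped. The indirect target flushes the block buffer before
   it would overflow MAX_BLOCKS. */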
2168#define setup_blocks_do(shading, texturing, dithering, sw, target) \
2169 setup_blocks_load_msb_mask_##target(); \
2170 setup_blocks_variables_##shading##_##texturing(target); \
2171 \
2172 edge_data_struct *span_edge_data = psx_gpu->span_edge_data; \
2173 vec_4x32u *span_uvrg_offset = (vec_4x32u *)psx_gpu->span_uvrg_offset; \
2174 u32 *span_b_offset = psx_gpu->span_b_offset; \
2175 \
2176 block_struct *block = psx_gpu->blocks + psx_gpu->num_blocks; \
2177 \
2178 u32 num_spans = psx_gpu->num_spans; \
2179 \
9088aca1 2180 u16 * __restrict__ fb_ptr; \
a2cb152a 2181 u32 y; \
2182 \
2183 u32 num_blocks = psx_gpu->num_blocks; \
2184 u32 span_num_blocks; \
2185 \
2186 while(num_spans) \
2187 { \
2188 span_num_blocks = span_edge_data->num_blocks; \
2189 if(span_num_blocks) \
2190 { \
2191 y = span_edge_data->y; \
2192 fb_ptr = psx_gpu->vram_out_ptr + span_edge_data->left_x + (y * 1024); \
2193 \
2194 setup_blocks_span_initialize_##shading##_##texturing(); \
2195 setup_blocks_span_initialize_##dithering(texturing); \
2196 \
2197 setup_blocks_add_blocks_##target(); \
2198 \
2199 s32 pixel_span = span_num_blocks * 8; \
2200 pixel_span -= __builtin_popcount(span_edge_data->right_mask & 0xFF); \
2201 \
2202 span_num_blocks--; \
2203 while(span_num_blocks) \
2204 { \
2205 setup_blocks_store_##shading##_##texturing(sw, dithering, target, \
2206 full); \
2207 setup_blocks_store_draw_mask_##texturing##_##target(block, 0x00); \
2208 \
2209 fb_ptr += 8; \
2210 block++; \
2211 span_num_blocks--; \
2212 } \
2213 \
2214 setup_blocks_store_##shading##_##texturing(sw, dithering, target, edge); \
2215 setup_blocks_store_draw_mask_##texturing##_##target(block, \
2216 span_edge_data->right_mask); \
2217 \
2218 block++; \
2219 } \
2220 \
2221 num_spans--; \
2222 span_edge_data++; \
2223 span_uvrg_offset++; \
2224 span_b_offset++; \
2225 } \
2226 \
2227 psx_gpu->num_blocks = num_blocks \
2228
2229void setup_blocks_shaded_textured_dithered_swizzled_indirect(psx_gpu_struct
2230 *psx_gpu)
2231{
2232#if 0
2233 setup_blocks_shaded_textured_dithered_swizzled_indirect_(psx_gpu);
2234 return;
2235#endif
2236 setup_blocks_do(shaded, textured, dithered, swizzled, indirect);
2237}
2238
2239void setup_blocks_shaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
2240 *psx_gpu)
2241{
2242#if 0
2243 setup_blocks_shaded_textured_dithered_unswizzled_indirect_(psx_gpu);
2244 return;
2245#endif
2246 setup_blocks_do(shaded, textured, dithered, unswizzled, indirect);
2247}
2248
2249void setup_blocks_unshaded_textured_dithered_swizzled_indirect(psx_gpu_struct
2250 *psx_gpu)
2251{
2252#if 0
2253 setup_blocks_unshaded_textured_dithered_swizzled_indirect_(psx_gpu);
2254 return;
2255#endif
2256 setup_blocks_do(unshaded, textured, dithered, swizzled, indirect);
2257}
2258
2259void setup_blocks_unshaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
2260 *psx_gpu)
2261{
2262#if 0
2263 setup_blocks_unshaded_textured_dithered_unswizzled_indirect_(psx_gpu);
2264 return;
2265#endif
2266 setup_blocks_do(unshaded, textured, dithered, unswizzled, indirect);
2267}
2268
2269void setup_blocks_unshaded_untextured_undithered_unswizzled_indirect(
2270 psx_gpu_struct *psx_gpu)
2271{
2272#if 0
2273 setup_blocks_unshaded_untextured_undithered_unswizzled_indirect_(psx_gpu);
2274 return;
2275#endif
2276 setup_blocks_do(unshaded, untextured, undithered, unswizzled, indirect);
2277}
2278
2279void setup_blocks_unshaded_untextured_undithered_unswizzled_direct(
2280 psx_gpu_struct *psx_gpu)
2281{
2282#if 0
2283 setup_blocks_unshaded_untextured_undithered_unswizzled_direct_(psx_gpu);
2284 return;
2285#endif
2286 setup_blocks_do(unshaded, untextured, undithered, unswizzled, direct);
2287}
2288
2289void setup_blocks_shaded_untextured_undithered_unswizzled_indirect(psx_gpu_struct
2290 *psx_gpu)
2291{
2292#if 0
2293 setup_blocks_shaded_untextured_undithered_unswizzled_indirect_(psx_gpu);
2294 return;
2295#endif
2296 setup_blocks_do(shaded, untextured, undithered, unswizzled, indirect);
2297}
2298
2299void setup_blocks_shaded_untextured_dithered_unswizzled_indirect(psx_gpu_struct
2300 *psx_gpu)
2301{
2302#if 0
2303 setup_blocks_shaded_untextured_dithered_unswizzled_indirect_(psx_gpu);
2304 return;
2305#endif
2306 setup_blocks_do(shaded, untextured, dithered, unswizzled, indirect);
2307}
2308
2309void setup_blocks_shaded_untextured_undithered_unswizzled_direct(
2310 psx_gpu_struct *psx_gpu)
2311{
2312#if 0
2313 setup_blocks_shaded_untextured_undithered_unswizzled_direct_(psx_gpu);
2314 return;
2315#endif
2316 setup_blocks_do(shaded, untextured, undithered, unswizzled, direct);
2317}
2318
2319void setup_blocks_shaded_untextured_dithered_unswizzled_direct(psx_gpu_struct
2320 *psx_gpu)
2321{
2322#if 0
2323 setup_blocks_shaded_untextured_dithered_unswizzled_direct_(psx_gpu);
2324 return;
2325#endif
2326 setup_blocks_do(shaded, untextured, dithered, unswizzled, direct);
2327}
2328
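/* Rebuild the 4bpp texture cache: the page index selects a 256-line bank
   (page >> 4) and a 64-halfword column (page & 0xF) in VRAM. Each packed
   byte holds two 4-bit texels; widening, shifting by 4 and masking with
   0x00f0, then ORing the halves back together expands them to one byte
   each, so later CLUT lookups can index bytes directly. */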
2329static void update_texture_4bpp_cache(psx_gpu_struct *psx_gpu)
2330{
2331 u32 current_texture_page = psx_gpu->current_texture_page;
2332 u8 *texture_page_ptr = psx_gpu->texture_page_base;
2333 const u16 *vram_ptr = psx_gpu->vram_ptr;
2334 u32 tile_x, tile_y;
2335 u32 sub_y;
2336 vec_8x16u c_0x00f0;
2337
2338 vram_ptr += (current_texture_page >> 4) * 256 * 1024;
2339 vram_ptr += (current_texture_page & 0xF) * 64;
2340
2341 gvdupq_n_u16(c_0x00f0, 0x00f0);
2342
2343 psx_gpu->dirty_textures_4bpp_mask &= ~(psx_gpu->current_texture_mask);
2344
2345 for (tile_y = 16; tile_y; tile_y--)
2346 {
2347 for (tile_x = 16; tile_x; tile_x--)
2348 {
2349 for (sub_y = 8; sub_y; sub_y--)
2350 {
2351 vec_8x8u texel_block_a, texel_block_b;
2352 vec_8x16u texel_block_expanded_a, texel_block_expanded_b;
2353 vec_8x16u texel_block_expanded_c, texel_block_expanded_d;
2354 vec_8x16u texel_block_expanded_ab, texel_block_expanded_cd;
2355
2356 gvld1_u8(texel_block_a, (u8 *)vram_ptr); vram_ptr += 1024;
2357 gvld1_u8(texel_block_b, (u8 *)vram_ptr); vram_ptr += 1024;
2358
2359 gvmovl_u8(texel_block_expanded_a, texel_block_a);
2360 gvshll_n_u8(texel_block_expanded_b, texel_block_a, 4);
2361 gvmovl_u8(texel_block_expanded_c, texel_block_b);
2362 gvshll_n_u8(texel_block_expanded_d, texel_block_b, 4);
2363
2364 gvbicq(texel_block_expanded_a, texel_block_expanded_a, c_0x00f0);
2365 gvbicq(texel_block_expanded_b, texel_block_expanded_b, c_0x00f0);
2366 gvbicq(texel_block_expanded_c, texel_block_expanded_c, c_0x00f0);
2367 gvbicq(texel_block_expanded_d, texel_block_expanded_d, c_0x00f0);
2368
2369 gvorrq(texel_block_expanded_ab, texel_block_expanded_a, texel_block_expanded_b);
2370 gvorrq(texel_block_expanded_cd, texel_block_expanded_c, texel_block_expanded_d);
2371
2372 gvst1q_2_pi_u32(texel_block_expanded_ab, texel_block_expanded_cd, texture_page_ptr);
2373 }
2374
2375 vram_ptr -= (1024 * 16) - 4;
2376 }
2377
2378 vram_ptr += (16 * 1024) - (4 * 16);
2379 }
2380}
2381
2382void update_texture_8bpp_cache_slice(psx_gpu_struct *psx_gpu,
2383 u32 texture_page)
2384{
2385#if 0
2386 update_texture_8bpp_cache_slice_(psx_gpu, texture_page);
2387 return;
2388#endif
2389 u16 *texture_page_ptr = psx_gpu->texture_page_base;
2390 u16 *vram_ptr = psx_gpu->vram_ptr;
2391
2392 u32 tile_x, tile_y;
2393 u32 sub_y;
2394
2395 vram_ptr += (texture_page >> 4) * 256 * 1024;
2396 vram_ptr += (texture_page & 0xF) * 64;
2397
2398 if((texture_page ^ psx_gpu->current_texture_page) & 0x1)
2399 texture_page_ptr += (8 * 16) * 8;
2400
2401 for (tile_y = 16; tile_y; tile_y--)
2402 {
2403 for (tile_x = 8; tile_x; tile_x--)
2404 {
2405 for (sub_y = 4; sub_y; sub_y--)
2406 {
2407 vec_4x32u texels_a, texels_b, texels_c, texels_d = {};
2408 gvld1q_u32(texels_a, vram_ptr); vram_ptr += 1024;
2409 gvld1q_u32(texels_b, vram_ptr); vram_ptr += 1024;
2410 gvld1q_u32(texels_c, vram_ptr); vram_ptr += 1024;
2411 gvld1q_u32(texels_d, vram_ptr); vram_ptr += 1024;
2412
2413 gvst1q_2_pi_u32(texels_a, texels_b, texture_page_ptr);
2414 gvst1q_2_pi_u32(texels_c, texels_d, texture_page_ptr);
2415 }
2416
2417 vram_ptr -= (1024 * 16) - 8;
2418 }
2419
2420 vram_ptr -= (8 * 8);
2421 vram_ptr += (16 * 1024);
2422
2423 texture_page_ptr += (8 * 16) * 8;
2424 }
2425}
2426
2427void texture_blocks_untextured(psx_gpu_struct *psx_gpu)
2428{
2429}
2430
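/* 4bpp texturing: the 16-entry CLUT is loaded de-interleaved, so clut_low
   holds the low bytes and clut_high the high bytes of the 16 BGR555
   entries. Each cached texel (0-15) drives a vtbl2 table lookup per half,
   and the halves are re-interleaved on store. */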
2431void texture_blocks_4bpp(psx_gpu_struct *psx_gpu)
2432{
2433#if 0
2434 texture_blocks_4bpp_(psx_gpu);
2435 return;
2436#endif
2437 block_struct *block = psx_gpu->blocks;
2438 u32 num_blocks = psx_gpu->num_blocks;
2439
2440 vec_8x8u texels_low;
2441 vec_8x8u texels_high;
2442
2443 vec_16x8u clut_low;
2444 vec_16x8u clut_high;
2445
2446 const u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr;
2447
2448 gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr);
2449
2450 if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask)
2451 update_texture_4bpp_cache(psx_gpu);
2452
2453 while(num_blocks)
2454 {
2455 vec_8x8u texels =
2456 {
2457 .u8 =
2458 {
2459 texture_ptr_8bpp[block->uv.e[0]],
2460 texture_ptr_8bpp[block->uv.e[1]],
2461 texture_ptr_8bpp[block->uv.e[2]],
2462 texture_ptr_8bpp[block->uv.e[3]],
2463 texture_ptr_8bpp[block->uv.e[4]],
2464 texture_ptr_8bpp[block->uv.e[5]],
2465 texture_ptr_8bpp[block->uv.e[6]],
2466 texture_ptr_8bpp[block->uv.e[7]]
2467 }
2468 };
2469
2470 gvtbl2_u8(texels_low, clut_low, texels);
2471 gvtbl2_u8(texels_high, clut_high, texels);
2472
2473 gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e);
2474
2475 num_blocks--;
2476 block++;
2477 }
2478}
2479
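/* 8bpp texturing has no convenient 8-lane table lookup, so it runs as
   scalar loads; the unrolled load_one/store_one pairs keep the eight
   texture and CLUT fetches independent, presumably so the compiler can
   schedule them in parallel. */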
2480void texture_blocks_8bpp(psx_gpu_struct *psx_gpu)
2481{
2482#if 0
2483 texture_blocks_8bpp_(psx_gpu);
2484 return;
2485#endif
2486 u32 num_blocks = psx_gpu->num_blocks;
2487
2488 if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask)
2489 update_texture_8bpp_cache(psx_gpu);
2490
2491 const u8 * __restrict__ texture_ptr_8bpp = psx_gpu->texture_page_ptr;
2492 const u16 * __restrict__ clut_ptr = psx_gpu->clut_ptr;
2493 block_struct * __restrict__ block = psx_gpu->blocks;
2494
2495 while(num_blocks)
2496 {
2497 u16 offset;
2498 #define load_one(i_) \
2499 offset = block->uv.e[i_]; u16 texel##i_ = texture_ptr_8bpp[offset]
2500 #define store_one(i_) \
2501 block->texels.e[i_] = clut_ptr[texel##i_]
2502 load_one(0); load_one(1); load_one(2); load_one(3);
2503 load_one(4); load_one(5); load_one(6); load_one(7);
2504 store_one(0); store_one(1); store_one(2); store_one(3);
2505 store_one(4); store_one(5); store_one(6); store_one(7);
2506 #undef load_one
2507 #undef store_one
2508
2509 num_blocks--;
2510 block++;
2511 }
2512}
2513
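/* 16bpp texturing: block->uv holds v * 256 + u within the 256-texel-wide
   page, but VRAM lines are 1024 halfwords, so adding (offset & 0xFF00) * 3
   rescales the v term from * 256 to * 1024. */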
2514void texture_blocks_16bpp(psx_gpu_struct *psx_gpu)
2515{
2516#if 0
2517 texture_blocks_16bpp_(psx_gpu);
2518 return;
2519#endif
2520 u32 num_blocks = psx_gpu->num_blocks;
2521 const u16 * __restrict__ texture_ptr_16bpp = psx_gpu->texture_page_ptr;
2522 block_struct * __restrict__ block = psx_gpu->blocks;
2523
2524 while(num_blocks)
2525 {
2526 u32 offset;
2527 #define load_one(i_) \
2528 offset = block->uv.e[i_]; \
2529 offset += ((offset & 0xFF00) * 3); \
2530 u16 texel##i_ = texture_ptr_16bpp[offset]
2531 #define store_one(i_) \
2532 block->texels.e[i_] = texel##i_
2533 load_one(0); load_one(1); load_one(2); load_one(3);
2534 load_one(4); load_one(5); load_one(6); load_one(7);
2535 store_one(0); store_one(1); store_one(2); store_one(3);
2536 store_one(4); store_one(5); store_one(6); store_one(7);
2537 #undef load_one
2538 #undef store_one
2539
2540 num_blocks--;
2541 block++;
2542 }
2543}
2544
2545#define shade_blocks_load_msb_mask_indirect() \
2546
2547#define shade_blocks_load_msb_mask_direct() \
2548 vec_8x16u msb_mask; \
2549 gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \
2550
2551#define shade_blocks_store_indirect(_draw_mask, _pixels) \
2552 gvst1q_u16(_draw_mask, block->draw_mask.e); \
2553 gvst1q_u16(_pixels, block->pixels.e); \
2554
2555#define shade_blocks_store_direct(_draw_mask, _pixels) \
2556{ \
9088aca1 2557 u16 * __restrict__ fb_ptr = block->fb_ptr; \
a2cb152a 2558 vec_8x16u fb_pixels; \
9088aca1 2559 gvld1q_u16(fb_pixels, fb_ptr); \
a2cb152a 2560 gvorrq(_pixels, _pixels, msb_mask); \
a2cb152a 2561 gvbifq(fb_pixels, _pixels, _draw_mask); \
9088aca1 2562 gvst1q_u16(fb_pixels, fb_ptr); \
a2cb152a 2563} \
2564
2565#define shade_blocks_textured_false_modulated_check_dithered(target) \
2566
2567#define shade_blocks_textured_false_modulated_check_undithered(target) \
2568 if(psx_gpu->triangle_color == 0x808080) \
2569 { \
2570 shade_blocks_textured_unmodulated_##target(psx_gpu); \
2571 return; \
2572 } \
2573
2574#define shade_blocks_textured_modulated_shaded_primitive_load(dithering, \
2575 target) \
2576
2577#define shade_blocks_textured_modulated_unshaded_primitive_load(dithering, \
2578 target) \
2579{ \
2580 u32 color = psx_gpu->triangle_color; \
2581 gvdup_n_u8(colors_r, color); \
2582 gvdup_n_u8(colors_g, color >> 8); \
2583 gvdup_n_u8(colors_b, color >> 16); \
2584 shade_blocks_textured_false_modulated_check_##dithering(target); \
2585} \
2586
2587#define shade_blocks_textured_modulated_shaded_block_load() \
2588 gvld1_u8(colors_r, block->r.e); \
2589 gvld1_u8(colors_g, block->g.e); \
2590 gvld1_u8(colors_b, block->b.e) \
2591
2592#define shade_blocks_textured_modulated_unshaded_block_load() \
2593
2594#define shade_blocks_textured_modulate_dithered(component) \
2595 gvld1q_u16(pixels_##component, block->dither_offsets.e); \
2596 gvmlal_u8(pixels_##component, texels_##component, colors_##component) \
2597
2598#define shade_blocks_textured_modulate_undithered(component) \
2599 gvmull_u8(pixels_##component, texels_##component, colors_##component) \
2600
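/* Modulated texturing: each 5-bit texel channel is multiplied by the 8-bit
   primitive/vertex color, where 0x80 acts as 1.0 (hence the 0x808080 fast
   path to the unmodulated routines in the undithered case). The products
   are widened to 16 bits, with the dither offsets preloaded as the
   accumulator when dithering, then narrowed back with a saturating >> 4.
   Texel value 0x0000 is fully transparent, so a zero test is folded into
   the draw mask. */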
2601#define shade_blocks_textured_modulated_do(shading, dithering, target) \
4ebb76b3 2602 block_struct * __restrict__ block = psx_gpu->blocks; \
a2cb152a 2603 u32 num_blocks = psx_gpu->num_blocks; \
2604 vec_8x16u texels; \
2605 \
2606 vec_8x8u texels_r; \
2607 vec_8x8u texels_g; \
2608 vec_8x8u texels_b; \
2609 \
2610 vec_8x8u colors_r; \
2611 vec_8x8u colors_g; \
2612 vec_8x8u colors_b; \
2613 \
2614 vec_8x8u pixels_r_low; \
2615 vec_8x8u pixels_g_low; \
2616 vec_8x8u pixels_b_low; \
2617 vec_8x16u pixels; \
2618 \
2619 vec_8x16u pixels_r; \
2620 vec_8x16u pixels_g; \
2621 vec_8x16u pixels_b; \
2622 \
2623 vec_8x16u draw_mask; \
2624 vec_8x16u zero_mask; \
2625 \
2626 vec_8x8u d64_0x07; \
2627 vec_8x8u d64_0x1F; \
2628 vec_8x8u d64_1; \
2629 vec_8x8u d64_4; \
2630 vec_8x8u d64_128; \
2631 \
2632 vec_8x16u d128_0x8000; \
2633 \
2634 vec_8x16u test_mask; \
2635 u32 draw_mask_bits; \
2636 \
2637 gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
2638 shade_blocks_load_msb_mask_##target(); \
2639 \
2640 gvdup_n_u8(d64_0x07, 0x07); \
2641 gvdup_n_u8(d64_0x1F, 0x1F); \
2642 gvdup_n_u8(d64_1, 1); \
2643 gvdup_n_u8(d64_4, 4); \
9088aca1 2644 gvdup_n_u8(d64_128, 128u); \
a2cb152a 2645 \
2646 gvdupq_n_u16(d128_0x8000, 0x8000); \
2647 \
2648 shade_blocks_textured_modulated_##shading##_primitive_load(dithering, \
2649 target); \
2650 \
2651 while(num_blocks) \
2652 { \
2653 draw_mask_bits = block->draw_mask_bits; \
2654 gvdupq_n_u16(draw_mask, draw_mask_bits); \
2655 gvtstq_u16(draw_mask, draw_mask, test_mask); \
2656 \
2657 shade_blocks_textured_modulated_##shading##_block_load(); \
2658 \
2659 gvld1q_u16(texels, block->texels.e); \
2660 \
2661 gvmovn_u16(texels_r, texels); \
2662 gvshrn_n_u16(texels_g, texels, 5); \
2663 gvshrn_n_u16(texels_b, texels, 7); \
2664 \
2665 gvand(texels_r, texels_r, d64_0x1F); \
2666 gvand(texels_g, texels_g, d64_0x1F); \
2667 gvshr_n_u8(texels_b, texels_b, 3); \
2668 \
2669 shade_blocks_textured_modulate_##dithering(r); \
2670 shade_blocks_textured_modulate_##dithering(g); \
2671 shade_blocks_textured_modulate_##dithering(b); \
2672 \
2673 gvceqzq_u16(zero_mask, texels); \
2674 gvand(pixels, texels, d128_0x8000); \
2675 \
2676 gvqshrun_n_s16(pixels_r_low, pixels_r, 4); \
2677 gvqshrun_n_s16(pixels_g_low, pixels_g, 4); \
2678 gvqshrun_n_s16(pixels_b_low, pixels_b, 4); \
2679 \
2680 gvorrq(zero_mask, draw_mask, zero_mask); \
2681 \
2682 gvshr_n_u8(pixels_r_low, pixels_r_low, 3); \
2683 gvbic(pixels_g_low, pixels_g_low, d64_0x07); \
2684 gvbic(pixels_b_low, pixels_b_low, d64_0x07); \
2685 \
2686 gvmlal_u8(pixels, pixels_r_low, d64_1); \
2687 gvmlal_u8(pixels, pixels_g_low, d64_4); \
2688 gvmlal_u8(pixels, pixels_b_low, d64_128); \
2689 \
2690 shade_blocks_store_##target(zero_mask, pixels); \
2691 \
2692 num_blocks--; \
2693 block++; \
2694 } \
2695
2696void shade_blocks_shaded_textured_modulated_dithered_direct(psx_gpu_struct
2697 *psx_gpu)
2698{
2699#if 0
2700 shade_blocks_shaded_textured_modulated_dithered_direct_(psx_gpu);
2701 return;
2702#endif
2703 shade_blocks_textured_modulated_do(shaded, dithered, direct);
2704}
2705
2706void shade_blocks_shaded_textured_modulated_undithered_direct(psx_gpu_struct
2707 *psx_gpu)
2708{
2709#if 0
2710 shade_blocks_shaded_textured_modulated_undithered_direct_(psx_gpu);
2711 return;
2712#endif
2713 shade_blocks_textured_modulated_do(shaded, undithered, direct);
2714}
2715
2716void shade_blocks_unshaded_textured_modulated_dithered_direct(psx_gpu_struct
2717 *psx_gpu)
2718{
2719#if 0
2720 shade_blocks_unshaded_textured_modulated_dithered_direct_(psx_gpu);
2721 return;
2722#endif
2723 shade_blocks_textured_modulated_do(unshaded, dithered, direct);
2724}
2725
2726void shade_blocks_unshaded_textured_modulated_undithered_direct(psx_gpu_struct
2727 *psx_gpu)
2728{
2729#if 0
2730 shade_blocks_unshaded_textured_modulated_undithered_direct_(psx_gpu);
2731 return;
2732#endif
2733 shade_blocks_textured_modulated_do(unshaded, undithered, direct);
2734}
2735
2736void shade_blocks_shaded_textured_modulated_dithered_indirect(psx_gpu_struct
2737 *psx_gpu)
2738{
2739#if 0
2740 shade_blocks_shaded_textured_modulated_dithered_indirect_(psx_gpu);
2741 return;
2742#endif
2743 shade_blocks_textured_modulated_do(shaded, dithered, indirect);
2744}
2745
2746void shade_blocks_shaded_textured_modulated_undithered_indirect(psx_gpu_struct
2747 *psx_gpu)
2748{
2749#if 0
2750 shade_blocks_shaded_textured_modulated_undithered_indirect_(psx_gpu);
2751 return;
2752#endif
2753 shade_blocks_textured_modulated_do(shaded, undithered, indirect);
2754}
2755
2756void shade_blocks_unshaded_textured_modulated_dithered_indirect(psx_gpu_struct
2757 *psx_gpu)
2758{
2759#if 0
2760 shade_blocks_unshaded_textured_modulated_dithered_indirect_(psx_gpu);
2761 return;
2762#endif
2763 shade_blocks_textured_modulated_do(unshaded, dithered, indirect);
2764}
2765
2766void shade_blocks_unshaded_textured_modulated_undithered_indirect(psx_gpu_struct
2767 *psx_gpu)
2768{
2769#if 0
2770 shade_blocks_unshaded_textured_modulated_undithered_indirect_(psx_gpu);
2771 return;
2772#endif
2773 shade_blocks_textured_modulated_do(unshaded, undithered, indirect);
2774}
2775
2776#define shade_blocks_textured_unmodulated_do(target) \
2777 block_struct *block = psx_gpu->blocks; \
2778 u32 num_blocks = psx_gpu->num_blocks; \
2779 vec_8x16u draw_mask; \
2780 vec_8x16u test_mask; \
2781 u32 draw_mask_bits; \
2782 \
2783 vec_8x16u pixels; \
2784 \
2785 gvld1q_u16(test_mask, psx_gpu->test_mask.e); \
2786 shade_blocks_load_msb_mask_##target(); \
2787 \
2788 while(num_blocks) \
2789 { \
2790 vec_8x16u zero_mask; \
2791 \
2792 draw_mask_bits = block->draw_mask_bits; \
2793 gvdupq_n_u16(draw_mask, draw_mask_bits); \
2794 gvtstq_u16(draw_mask, draw_mask, test_mask); \
2795 \
2796 gvld1q_u16(pixels, block->texels.e); \
2797 \
2798 gvceqzq_u16(zero_mask, pixels); \
2799 gvorrq(zero_mask, draw_mask, zero_mask); \
2800 \
2801 shade_blocks_store_##target(zero_mask, pixels); \
2802 \
2803 num_blocks--; \
2804 block++; \
2805 } \
2806
2807void shade_blocks_textured_unmodulated_indirect(psx_gpu_struct *psx_gpu)
2808{
2809#if 0
2810 shade_blocks_textured_unmodulated_indirect_(psx_gpu);
2811 return;
2812#endif
2813 shade_blocks_textured_unmodulated_do(indirect)
2814}
2815
2816void shade_blocks_textured_unmodulated_direct(psx_gpu_struct *psx_gpu)
2817{
2818#if 0
2819 shade_blocks_textured_unmodulated_direct_(psx_gpu);
2820 return;
2821#endif
2822 shade_blocks_textured_unmodulated_do(direct)
2823}
2824
2825void shade_blocks_unshaded_untextured_indirect(psx_gpu_struct *psx_gpu)
2826{
2827}
2828
2829void shade_blocks_unshaded_untextured_direct(psx_gpu_struct *psx_gpu)
2830{
2831#if 0
2832 shade_blocks_unshaded_untextured_direct_(psx_gpu);
2833 return;
2834#endif
2835 block_struct *block = psx_gpu->blocks;
2836 u32 num_blocks = psx_gpu->num_blocks;
2837
2838 vec_8x16u pixels;
b0d96051 2839 gvld1q_u16(pixels, block->pixels.e);
a2cb152a 2840 shade_blocks_load_msb_mask_direct();
2841
2842 while(num_blocks)
2843 {
2844 vec_8x16u draw_mask;
2845 gvld1q_u16(draw_mask, block->draw_mask.e);
2846 shade_blocks_store_direct(draw_mask, pixels);
2847
2848 num_blocks--;
2849 block++;
2850 }
2851}
2852
2853#define blend_blocks_mask_evaluate_on() \
2854 vec_8x16u mask_pixels; \
2855 gvcltzq_s16(mask_pixels, framebuffer_pixels); \
2856 gvorrq(draw_mask, draw_mask, mask_pixels) \
2857
2858#define blend_blocks_mask_evaluate_off() \
2859
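/* Average blend (B/2 + F/2) on whole packed pixels: computing
   x + y - ((x ^ y) & 0x0421) makes the sum of every 5-bit field even
   (0x0421 selects each field's LSB), so the halving add can shift right by
   one without leaking bits between channels. Mask MSBs are stripped first
   and restored later in blend_blocks_do. */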
2860#define blend_blocks_average() \
2861{ \
2862 vec_8x16u pixels_no_msb; \
2863 vec_8x16u fb_pixels_no_msb; \
2864 \
2865 vec_8x16u d128_0x0421; \
2866 \
2867 gvdupq_n_u16(d128_0x0421, 0x0421); \
2868 \
2869 gveorq(blend_pixels, pixels, framebuffer_pixels); \
2870 gvbicq(pixels_no_msb, pixels, d128_0x8000); \
2871 gvand(blend_pixels, blend_pixels, d128_0x0421); \
2872 gvsubq_u16(blend_pixels, pixels_no_msb, blend_pixels); \
2873 gvbicq(fb_pixels_no_msb, framebuffer_pixels, d128_0x8000); \
2874 gvhaddq_u16(blend_pixels, fb_pixels_no_msb, blend_pixels); \
2875} \
2876
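/* Additive blend: red/blue and green are summed in separate registers so
   field carries land only in the cleared gaps, then clamped with vector
   min against the field maxima (0x7C1F for r+b, 0x03E0 for g), which acts
   as a per-channel saturate. */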
2877#define blend_blocks_add() \
2878{ \
2879 vec_8x16u pixels_rb, pixels_g; \
2880 vec_8x16u fb_rb, fb_g; \
2881 \
2882 vec_8x16u d128_0x7C1F; \
2883 vec_8x16u d128_0x03E0; \
2884 \
2885 gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
2886 gvdupq_n_u16(d128_0x03E0, 0x03E0); \
2887 \
2888 gvand(pixels_rb, pixels, d128_0x7C1F); \
2889 gvand(pixels_g, pixels, d128_0x03E0); \
2890 \
2891 gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
2892 gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
2893 \
2894 gvaddq_u16(fb_rb, fb_rb, pixels_rb); \
2895 gvaddq_u16(fb_g, fb_g, pixels_g); \
2896 \
2897 gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \
2898 gvminq_u16(fb_g, fb_g, d128_0x03E0); \
2899 \
2900 gvorrq(blend_pixels, fb_rb, fb_g); \
2901} \
2902
2903#define blend_blocks_subtract() \
2904{ \
2905 vec_8x16u pixels_rb, pixels_g; \
2906 vec_8x16u fb_rb, fb_g; \
2907 \
2908 vec_8x16u d128_0x7C1F; \
2909 vec_8x16u d128_0x03E0; \
2910 \
2911 gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
2912 gvdupq_n_u16(d128_0x03E0, 0x03E0); \
2913 \
2914 gvand(pixels_rb, pixels, d128_0x7C1F); \
2915 gvand(pixels_g, pixels, d128_0x03E0); \
2916 \
2917 gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
2918 gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
2919 \
2920 gvqsubq_u8(fb_rb, fb_rb, pixels_rb); \
2921 gvqsubq_u16(fb_g, fb_g, pixels_g); \
2922 \
2923 gvorrq(blend_pixels, fb_rb, fb_g); \
2924} \
2925
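/* B + F/4 blend: shifting the whole source pixel right by 2 moves each
   channel's value / 4 down within its field; masking with 0x1C07 (r and b)
   and 0x00E0 (g) keeps only the bits still inside the proper fields, and
   the rest proceeds like the additive case above. */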
2926#define blend_blocks_add_fourth() \
2927{ \
2928 vec_8x16u pixels_rb, pixels_g; \
2929 vec_8x16u pixels_fourth; \
2930 vec_8x16u fb_rb, fb_g; \
2931 \
2932 vec_8x16u d128_0x7C1F; \
2933 vec_8x16u d128_0x1C07; \
2934 vec_8x16u d128_0x03E0; \
2935 vec_8x16u d128_0x00E0; \
2936 \
2937 gvdupq_n_u16(d128_0x7C1F, 0x7C1F); \
2938 gvdupq_n_u16(d128_0x1C07, 0x1C07); \
2939 gvdupq_n_u16(d128_0x03E0, 0x03E0); \
2940 gvdupq_n_u16(d128_0x00E0, 0x00E0); \
2941 \
2942 gvshrq_n_u16(pixels_fourth, pixels, 2); \
2943 \
2944 gvand(fb_rb, framebuffer_pixels, d128_0x7C1F); \
2945 gvand(fb_g, framebuffer_pixels, d128_0x03E0); \
2946 \
2947 gvand(pixels_rb, pixels_fourth, d128_0x1C07); \
2948 gvand(pixels_g, pixels_fourth, d128_0x00E0); \
2949 \
2950 gvaddq_u16(fb_rb, fb_rb, pixels_rb); \
2951 gvaddq_u16(fb_g, fb_g, pixels_g); \
2952 \
2953 gvminq_u8(fb_rb, fb_rb, d128_0x7C1F); \
2954 gvminq_u16(fb_g, fb_g, d128_0x03E0); \
2955 \
2956 gvorrq(blend_pixels, fb_rb, fb_g); \
2957} \
2958
2959#define blend_blocks_blended_combine_textured() \
2960{ \
2961 vec_8x16u blend_mask; \
2962 gvcltzq_s16(blend_mask, pixels); \
2963 \
2964 gvorrq(blend_pixels, blend_pixels, d128_0x8000); \
2965 gvbifq(blend_pixels, pixels, blend_mask); \
2966} \
2967
2968#define blend_blocks_blended_combine_untextured() \
2969
2970#define blend_blocks_body_blend(blend_mode, texturing) \
2971{ \
2972 blend_blocks_##blend_mode(); \
2973 blend_blocks_blended_combine_##texturing(); \
2974} \
2975
2976#define blend_blocks_body_average(texturing) \
2977 blend_blocks_body_blend(average, texturing) \
2978
2979#define blend_blocks_body_add(texturing) \
2980 blend_blocks_body_blend(add, texturing) \
2981
2982#define blend_blocks_body_subtract(texturing) \
2983 blend_blocks_body_blend(subtract, texturing) \
2984
2985#define blend_blocks_body_add_fourth(texturing) \
2986 blend_blocks_body_blend(add_fourth, texturing) \
2987
2988#define blend_blocks_body_unblended(texturing) \
2989 blend_pixels = pixels \
2990
2991#define blend_blocks_do(texturing, blend_mode, mask_evaluate) \
2992 block_struct *block = psx_gpu->blocks; \
2993 u32 num_blocks = psx_gpu->num_blocks; \
2994 vec_8x16u draw_mask; \
2995 vec_8x16u pixels; \
2996 vec_8x16u blend_pixels; \
2997 vec_8x16u framebuffer_pixels; \
2998 vec_8x16u msb_mask; \
2999 vec_8x16u d128_0x8000; \
3000 \
3001 u16 *fb_ptr; \
3002 \
3003 gvdupq_n_u16(d128_0x8000, 0x8000); \
3004 gvdupq_n_u16(msb_mask, psx_gpu->mask_msb); \
3005 (void)d128_0x8000; /* sometimes unused */ \
3006 \
3007 while(num_blocks) \
3008 { \
3009 gvld1q_u16(pixels, block->pixels.e); \
3010 gvld1q_u16(draw_mask, block->draw_mask.e); \
3011 fb_ptr = block->fb_ptr; \
3012 \
3013 gvld1q_u16(framebuffer_pixels, fb_ptr); \
3014 \
3015 blend_blocks_mask_evaluate_##mask_evaluate(); \
3016 blend_blocks_body_##blend_mode(texturing); \
3017 \
3018 gvorrq(blend_pixels, blend_pixels, msb_mask); \
3019 gvbifq(framebuffer_pixels, blend_pixels, draw_mask); \
3020 gvst1q_u16(framebuffer_pixels, fb_ptr); \
3021 \
3022 num_blocks--; \
3023 block++; \
3024 } \
3025
3026
3027void blend_blocks_textured_average_off(psx_gpu_struct *psx_gpu)
3028{
3029#if 0
3030 blend_blocks_textured_average_off_(psx_gpu);
3031 return;
3032#endif
3033 blend_blocks_do(textured, average, off);
3034}
3035
3036void blend_blocks_untextured_average_off(psx_gpu_struct *psx_gpu)
3037{
3038#if 0
3039 blend_blocks_untextured_average_off_(psx_gpu);
3040 return;
3041#endif
3042 blend_blocks_do(untextured, average, off);
3043}
3044
3045void blend_blocks_textured_average_on(psx_gpu_struct *psx_gpu)
3046{
3047#if 0
3048 blend_blocks_textured_average_on_(psx_gpu);
3049 return;
3050#endif
3051 blend_blocks_do(textured, average, on);
3052}
3053
3054void blend_blocks_untextured_average_on(psx_gpu_struct *psx_gpu)
3055{
3056#if 0
3057 blend_blocks_untextured_average_on_(psx_gpu);
3058 return;
3059#endif
3060 blend_blocks_do(untextured, average, on);
3061}
3062
3063void blend_blocks_textured_add_off(psx_gpu_struct *psx_gpu)
3064{
3065#if 0
3066 blend_blocks_textured_add_off_(psx_gpu);
3067 return;
3068#endif
3069 blend_blocks_do(textured, add, off);
3070}
3071
3072void blend_blocks_textured_add_on(psx_gpu_struct *psx_gpu)
3073{
3074#if 0
3075 blend_blocks_textured_add_on_(psx_gpu);
3076 return;
3077#endif
3078 blend_blocks_do(textured, add, on);
3079}
3080
3081void blend_blocks_untextured_add_off(psx_gpu_struct *psx_gpu)
3082{
3083#if 0
3084 blend_blocks_untextured_add_off_(psx_gpu);
3085 return;
3086#endif
3087 blend_blocks_do(untextured, add, off);
3088}
3089
3090void blend_blocks_untextured_add_on(psx_gpu_struct *psx_gpu)
3091{
3092#if 0
3093 blend_blocks_untextured_add_on_(psx_gpu);
3094 return;
3095#endif
3096 blend_blocks_do(untextured, add, on);
3097}
3098
3099void blend_blocks_textured_subtract_off(psx_gpu_struct *psx_gpu)
3100{
3101#if 0
3102 blend_blocks_textured_subtract_off_(psx_gpu);
3103 return;
3104#endif
3105 blend_blocks_do(textured, subtract, off);
3106}
3107
3108void blend_blocks_textured_subtract_on(psx_gpu_struct *psx_gpu)
3109{
3110#if 0
3111 blend_blocks_textured_subtract_on_(psx_gpu);
3112 return;
3113#endif
3114 blend_blocks_do(textured, subtract, on);
3115}
3116
3117void blend_blocks_untextured_subtract_off(psx_gpu_struct *psx_gpu)
3118{
3119#if 0
3120 blend_blocks_untextured_subtract_off_(psx_gpu);
3121 return;
3122#endif
3123 blend_blocks_do(untextured, subtract, off);
3124}
3125
3126void blend_blocks_untextured_subtract_on(psx_gpu_struct *psx_gpu)
3127{
3128#if 0
3129 blend_blocks_untextured_subtract_on_(psx_gpu);
3130 return;
3131#endif
3132 blend_blocks_do(untextured, subtract, on);
3133}
3134
3135void blend_blocks_textured_add_fourth_off(psx_gpu_struct *psx_gpu)
3136{
3137#if 0
3138 blend_blocks_textured_add_fourth_off_(psx_gpu);
3139 return;
3140#endif
3141 blend_blocks_do(textured, add_fourth, off);
3142}
3143
3144void blend_blocks_textured_add_fourth_on(psx_gpu_struct *psx_gpu)
3145{
3146#if 0
3147 blend_blocks_textured_add_fourth_on_(psx_gpu);
3148 return;
3149#endif
3150 blend_blocks_do(textured, add_fourth, on);
3151}
3152
3153void blend_blocks_untextured_add_fourth_off(psx_gpu_struct *psx_gpu)
3154{
3155#if 0
3156 blend_blocks_untextured_add_fourth_off_(psx_gpu);
3157 return;
3158#endif
3159 blend_blocks_do(untextured, add_fourth, off);
3160}
3161
3162void blend_blocks_untextured_add_fourth_on(psx_gpu_struct *psx_gpu)
3163{
3164#if 0
3165 blend_blocks_untextured_add_fourth_on_(psx_gpu);
3166 return;
3167#endif
3168 blend_blocks_do(untextured, add_fourth, on);
3169}
3170
3171void blend_blocks_textured_unblended_on(psx_gpu_struct *psx_gpu)
3172{
3173#if 0
3174 blend_blocks_textured_unblended_on_(psx_gpu);
3175 return;
3176#endif
3177 blend_blocks_do(textured, unblended, on);
3178}
3179
3180void blend_blocks_textured_unblended_off(psx_gpu_struct *psx_gpu)
3181{
3182}
3183
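/* Flat-color sprite/fill: emits rows of constant-color blocks directly.
   right_mask_bits has one bit set per pixel past the sprite's right edge,
   so the final block of each row is clipped by its draw mask; the _512
   suffix presumably reflects a maximum width guaranteed by the caller. */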
2d658c89 3184void setup_sprite_untextured_512(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u,
a2cb152a 3185 s32 v, s32 width, s32 height, u32 color)
3186{
a2cb152a 3187#if 0
2d658c89 3188 setup_sprite_untextured_512_(psx_gpu, x, y, u, v, width, height, color);
a2cb152a 3189 return;
3190#endif
3191 u32 right_width = ((width - 1) & 0x7) + 1;
3192 u32 right_mask_bits = (0xFF << right_width);
3193 u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + x;
3194 u32 block_width = (width + 7) / 8;
3195 u32 fb_ptr_pitch = 1024 - ((block_width - 1) * 8);
3196 u32 blocks_remaining;
3197 u32 num_blocks = psx_gpu->num_blocks;
3198 block_struct *block = psx_gpu->blocks + num_blocks;
3199
3200 u32 color_r = color & 0xFF;
3201 u32 color_g = (color >> 8) & 0xFF;
3202 u32 color_b = (color >> 16) & 0xFF;
3203 vec_8x16u colors;
3204 vec_8x16u right_mask;
3205 vec_8x16u test_mask;
3206 vec_8x16u zero_mask;
3207
3208 gvld1q_u16(test_mask, psx_gpu->test_mask.e);
3209 color = (color_r >> 3) | ((color_g >> 3) << 5) | ((color_b >> 3) << 10);
3210
3211 gvdupq_n_u16(colors, color);
3212 gvdupq_n_u16(zero_mask, 0x00);
3213 gvdupq_n_u16(right_mask, right_mask_bits);
3214 gvtstq_u16(right_mask, right_mask, test_mask);
3215
3216 while(height)
3217 {
3218 blocks_remaining = block_width - 1;
3219 num_blocks += block_width;
3220
3221 if(num_blocks > MAX_BLOCKS)
3222 {
3223 flush_render_block_buffer(psx_gpu);
3224 num_blocks = block_width;
3225 block = psx_gpu->blocks;
3226 }
3227
3228 while(blocks_remaining)
3229 {
3230 gvst1q_u16(colors, block->pixels.e);
3231 gvst1q_u16(zero_mask, block->draw_mask.e);
3232 block->fb_ptr = fb_ptr;
3233
3234 fb_ptr += 8;
3235 block++;
3236 blocks_remaining--;
3237 }
3238
3239 gvst1q_u16(colors, block->pixels.e);
3240 gvst1q_u16(right_mask, block->draw_mask.e);
3241 block->fb_ptr = fb_ptr;
3242
3243 block++;
3244 fb_ptr += fb_ptr_pitch;
3245
3246 height--;
3247 psx_gpu->num_blocks = num_blocks;
3248 }
3249}
3250
3251#define setup_sprite_tiled_initialize_4bpp_clut() \
3252 vec_16x8u clut_low, clut_high; \
3253 \
3254 gvld2q_u8(clut_low, clut_high, (u8 *)psx_gpu->clut_ptr) \
3255
3256#define setup_sprite_tiled_initialize_4bpp() \
3257 setup_sprite_tiled_initialize_4bpp_clut(); \
3258 \
3259 if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask) \
3260 update_texture_4bpp_cache(psx_gpu) \
3261
3262#define setup_sprite_tiled_initialize_8bpp() \
3263 if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) \
3264 update_texture_8bpp_cache(psx_gpu) \
3265
3266#define setup_sprite_tile_fetch_texel_block_8bpp(offset) \
3267 texture_block_ptr = psx_gpu->texture_page_ptr + \
3268 ((texture_offset + offset) & texture_mask); \
3269 \
3270 gvld1_u8(texels, (u8 *)texture_block_ptr) \
3271
3272#define setup_sprite_tile_add_blocks(tile_num_blocks) \
3273 num_blocks += tile_num_blocks; \
3274 \
3275 if(num_blocks > MAX_BLOCKS) \
3276 { \
3277 flush_render_block_buffer(psx_gpu); \
3278 num_blocks = tile_num_blocks; \
3279 block = psx_gpu->blocks; \
3280 } \
3281
3282#define setup_sprite_tile_full_4bpp(edge) \
3283{ \
3284 vec_8x8u texels_low, texels_high; \
3285 setup_sprite_tile_add_blocks(sub_tile_height * 2); \
3286 \
3287 while(sub_tile_height) \
3288 { \
3289 setup_sprite_tile_fetch_texel_block_8bpp(0); \
3290 gvtbl2_u8(texels_low, clut_low, texels); \
3291 gvtbl2_u8(texels_high, clut_high, texels); \
3292 \
3293 gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
3294 block->draw_mask_bits = left_mask_bits; \
3295 block->fb_ptr = fb_ptr; \
3296 block++; \
3297 \
3298 setup_sprite_tile_fetch_texel_block_8bpp(8); \
3299 gvtbl2_u8(texels_low, clut_low, texels); \
3300 gvtbl2_u8(texels_high, clut_high, texels); \
3301 \
3302 gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
3303 block->draw_mask_bits = right_mask_bits; \
3304 block->fb_ptr = fb_ptr + 8; \
3305 block++; \
3306 \
3307 fb_ptr += 1024; \
3308 texture_offset += 0x10; \
3309 sub_tile_height--; \
3310 } \
3311 texture_offset += 0xF00; \
3312 psx_gpu->num_blocks = num_blocks; \
3313} \
3314
3315#define setup_sprite_tile_half_4bpp(edge) \
3316{ \
3317 vec_8x8u texels_low, texels_high; \
3318 setup_sprite_tile_add_blocks(sub_tile_height); \
3319 \
3320 while(sub_tile_height) \
3321 { \
3322 setup_sprite_tile_fetch_texel_block_8bpp(0); \
3323 gvtbl2_u8(texels_low, clut_low, texels); \
3324 gvtbl2_u8(texels_high, clut_high, texels); \
3325 \
3326 gvst2_u8(texels_low, texels_high, (u8 *)block->texels.e); \
3327 block->draw_mask_bits = edge##_mask_bits; \
3328 block->fb_ptr = fb_ptr; \
3329 block++; \
3330 \
3331 fb_ptr += 1024; \
3332 texture_offset += 0x10; \
3333 sub_tile_height--; \
3334 } \
3335 texture_offset += 0xF00; \
3336 psx_gpu->num_blocks = num_blocks; \
3337} \
3338
3339#define setup_sprite_tile_full_8bpp(edge) \
3340{ \
3341 setup_sprite_tile_add_blocks(sub_tile_height * 2); \
3342 \
3343 while(sub_tile_height) \
3344 { \
3345 setup_sprite_tile_fetch_texel_block_8bpp(0); \
3346 gvst1_u8(texels, block->r.e); \
3347 block->draw_mask_bits = left_mask_bits; \
3348 block->fb_ptr = fb_ptr; \
3349 block++; \
3350 \
3351 setup_sprite_tile_fetch_texel_block_8bpp(8); \
3352 gvst1_u8(texels, block->r.e); \
3353 block->draw_mask_bits = right_mask_bits; \
3354 block->fb_ptr = fb_ptr + 8; \
3355 block++; \
3356 \
3357 fb_ptr += 1024; \
3358 texture_offset += 0x10; \
3359 sub_tile_height--; \
3360 } \
3361 texture_offset += 0xF00; \
3362 psx_gpu->num_blocks = num_blocks; \
3363} \
3364
3365#define setup_sprite_tile_half_8bpp(edge) \
3366{ \
df740cdc 3367 setup_sprite_tile_add_blocks(sub_tile_height); \
a2cb152a 3368 \
3369 while(sub_tile_height) \
3370 { \
3371 setup_sprite_tile_fetch_texel_block_8bpp(0); \
3372 gvst1_u8(texels, block->r.e); \
3373 block->draw_mask_bits = edge##_mask_bits; \
3374 block->fb_ptr = fb_ptr; \
3375 block++; \
3376 \
3377 fb_ptr += 1024; \
3378 texture_offset += 0x10; \
3379 sub_tile_height--; \
3380 } \
3381 texture_offset += 0xF00; \
3382 psx_gpu->num_blocks = num_blocks; \
3383} \
3384
3385#define setup_sprite_tile_column_edge_pre_adjust_half_right() \
3386 texture_offset = texture_offset_base + 8; \
3387 fb_ptr += 8 \
3388
3389#define setup_sprite_tile_column_edge_pre_adjust_half_left() \
3390 texture_offset = texture_offset_base \
3391
3392#define setup_sprite_tile_column_edge_pre_adjust_half(edge) \
3393 setup_sprite_tile_column_edge_pre_adjust_half_##edge() \
3394
3395#define setup_sprite_tile_column_edge_pre_adjust_full(edge) \
3396 texture_offset = texture_offset_base \
3397
3398#define setup_sprite_tile_column_edge_post_adjust_half_right() \
3399 fb_ptr -= 8 \
3400
3401#define setup_sprite_tile_column_edge_post_adjust_half_left() \
3402
3403#define setup_sprite_tile_column_edge_post_adjust_half(edge) \
3404 setup_sprite_tile_column_edge_post_adjust_half_##edge() \
3405
3406#define setup_sprite_tile_column_edge_post_adjust_full(edge) \
3407
3408
3409#define setup_sprite_tile_column_height_single(edge_mode, edge, texture_mode, \
3410 x4mode) \
3411do \
3412{ \
3413 sub_tile_height = column_data; \
3414 setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \
3415 setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
3416 setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \
3417} while(0) \
3418
3419#define setup_sprite_tile_column_height_multi(edge_mode, edge, texture_mode, \
3420 x4mode) \
3421do \
3422{ \
3423 u32 tiles_remaining = column_data >> 16; \
3424 sub_tile_height = column_data & 0xFF; \
3425 setup_sprite_tile_column_edge_pre_adjust_##edge_mode##x4mode(edge); \
3426 setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
3427 tiles_remaining -= 1; \
3428 \
3429 while(tiles_remaining) \
3430 { \
3431 sub_tile_height = 16; \
3432 setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
3433 tiles_remaining--; \
3434 } \
3435 \
3436 sub_tile_height = (column_data >> 8) & 0xFF; \
3437 setup_sprite_tile_##edge_mode##_##texture_mode##x4mode(edge); \
3438 setup_sprite_tile_column_edge_post_adjust_##edge_mode##x4mode(edge); \
3439} while(0) \
3440
3441
3442#define setup_sprite_column_data_single() \
3443 column_data = height \
3444
3445#define setup_sprite_column_data_multi() \
3446 column_data = 16 - offset_v; \
3447 column_data |= ((height_rounded & 0xF) + 1) << 8; \
3448 column_data |= (tile_height - 1) << 16 \
3449
3450#define RIGHT_MASK_BIT_SHIFT 8
3451#define RIGHT_MASK_BIT_SHIFT_4x 16
3452
3453#define setup_sprite_tile_column_width_single(texture_mode, multi_height, \
3454 edge_mode, edge, x4mode) \
3455{ \
3456 setup_sprite_column_data_##multi_height(); \
3457 left_mask_bits = left_block_mask | right_block_mask; \
3458 right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
3459 \
3460 setup_sprite_tile_column_height_##multi_height(edge_mode, edge, \
3461 texture_mode, x4mode); \
3462} \
3463
#define setup_sprite_tiled_advance_column() \
  texture_offset_base += 0x100; \
  if((texture_offset_base & 0xF00) == 0) \
    texture_offset_base -= (0x100 + 0xF00) \

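/*
 * Bits 8-11 of texture_offset_base hold the tile column, so += 0x100 moves
 * one 16-texel column to the right; when those bits wrap to zero the walk
 * appears to have crossed the 256-texel page boundary, and the
 * -= (0x100 + 0xF00) (i.e. -= 0x1000) wraps back to column 0 of the same
 * tile row.
 */
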
#define FB_PTR_MULTIPLIER 1
#define FB_PTR_MULTIPLIER_4x 2

#define setup_sprite_tile_column_width_multi(texture_mode, multi_height, \
  left_mode, right_mode, x4mode) \
{ \
  setup_sprite_column_data_##multi_height(); \
  s32 fb_ptr_advance_column = (16 - (1024 * height)) \
    * FB_PTR_MULTIPLIER##x4mode; \
 \
  tile_width -= 2; \
  left_mask_bits = left_block_mask; \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
 \
  setup_sprite_tile_column_height_##multi_height(left_mode, right, \
    texture_mode, x4mode); \
  fb_ptr += fb_ptr_advance_column; \
 \
  left_mask_bits = 0x00; \
  right_mask_bits = 0x00; \
 \
  while(tile_width) \
  { \
    setup_sprite_tiled_advance_column(); \
    setup_sprite_tile_column_height_##multi_height(full, none, \
      texture_mode, x4mode); \
    fb_ptr += fb_ptr_advance_column; \
    tile_width--; \
  } \
 \
  left_mask_bits = right_block_mask; \
  right_mask_bits = left_mask_bits >> RIGHT_MASK_BIT_SHIFT##x4mode; \
 \
  setup_sprite_tiled_advance_column(); \
  setup_sprite_tile_column_height_##multi_height(right_mode, left, \
    texture_mode, x4mode); \
} \

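/*
 * fb_ptr_advance_column above rewinds as it advances: after a column of
 * height rows the framebuffer pointer has moved 1024 * height pixels down,
 * so 16 - (1024 * height) steps back to the top row and 16 pixels (one
 * tile) to the right; the 4x variant doubles both distances.
 */
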

/* 4x stuff */
#define setup_sprite_tiled_initialize_4bpp_4x() \
  setup_sprite_tiled_initialize_4bpp_clut() \

#define setup_sprite_tiled_initialize_8bpp_4x() \

#define setup_sprite_tile_full_4bpp_4x(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  vec_8x16u pixels; \
  vec_4x16u pixels_half; \
  setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4); \
  u32 left_mask_bits_a = left_mask_bits & 0xFF; \
  u32 left_mask_bits_b = left_mask_bits >> 8; \
  u32 right_mask_bits_a = right_mask_bits & 0xFF; \
  u32 right_mask_bits_b = right_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 16; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024 + 16; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 24; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 24; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

#define setup_sprite_tile_half_4bpp_4x(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  vec_8x16u pixels; \
  vec_4x16u pixels_half; \
  setup_sprite_tile_add_blocks(sub_tile_height * 4); \
  u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF; \
  u32 edge##_mask_bits_b = edge##_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvtbl2_u8(texels_low, clut_low, texels); \
    gvtbl2_u8(texels_high, clut_high, texels); \
    gvzip_u8(pixels, texels_low, texels_high); \
 \
    gvget_lo(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(pixels_half, pixels); \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst2_u16(pixels_half, pixels_half, block->texels.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

#define setup_sprite_tile_full_8bpp_4x(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 2 * 4); \
  vec_8x16u texels_wide; \
  vec_4x16u texels_half; \
  u32 left_mask_bits_a = left_mask_bits & 0xFF; \
  u32 left_mask_bits_b = left_mask_bits >> 8; \
  u32 right_mask_bits_a = right_mask_bits & 0xFF; \
  u32 right_mask_bits_b = right_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvzip_u8(texels_wide, texels, texels); \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = left_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    gvzip_u8(texels_wide, texels, texels); \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 16; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024 + 16; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 24; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = right_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 24; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

#define setup_sprite_tile_half_8bpp_4x(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 4); \
  vec_8x16u texels_wide; \
  vec_4x16u texels_half; \
  u32 edge##_mask_bits_a = edge##_mask_bits & 0xFF; \
  u32 edge##_mask_bits_b = edge##_mask_bits >> 8; \
 \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    gvzip_u8(texels_wide, texels, texels); \
    gvget_lo(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_a; \
    block->fb_ptr = fb_ptr + 1024; \
    block++; \
 \
    gvget_hi(texels_half, texels_wide); \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
 \
    gvst1_u8(texels_half, block->r.e); \
    block->draw_mask_bits = edge##_mask_bits_b; \
    block->fb_ptr = fb_ptr + 1024 + 8; \
    block++; \
 \
    fb_ptr += 2048; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

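/*
 * The _4x variants above scale each source texel to a 2x2 pixel block:
 * gvzip of texels with themselves doubles them horizontally, and every
 * block is emitted twice (fb_ptr and fb_ptr + 1024) for the vertical
 * double, which is why fb_ptr advances 2048 per source row. A scalar
 * sketch of the same expansion:
 */
#if 0
static void double_row_example(u16 *dst0, u16 *dst1, const u16 *src, int n)
{
  int i;
  for(i = 0; i < n; i++)
  {
    dst0[i * 2] = dst0[i * 2 + 1] = src[i]; /* horizontal doubling */
    dst1[i * 2] = dst1[i * 2 + 1] = src[i]; /* dst1 = dst0 + 1024  */
  }
}
#endif
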
#define setup_sprite_tile_column_edge_pre_adjust_half_right_4x() \
  texture_offset = texture_offset_base + 8; \
  fb_ptr += 16 \

#define setup_sprite_tile_column_edge_pre_adjust_half_left_4x() \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_pre_adjust_half_4x(edge) \
  setup_sprite_tile_column_edge_pre_adjust_half_##edge##_4x() \

#define setup_sprite_tile_column_edge_pre_adjust_full_4x(edge) \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_post_adjust_half_right_4x() \
  fb_ptr -= 16 \

#define setup_sprite_tile_column_edge_post_adjust_half_left_4x() \

#define setup_sprite_tile_column_edge_post_adjust_half_4x(edge) \
  setup_sprite_tile_column_edge_post_adjust_half_##edge##_4x() \

#define setup_sprite_tile_column_edge_post_adjust_full_4x(edge) \

#define setup_sprite_offset_u_adjust() \

#define setup_sprite_compare_left_block_mask() \
  ((left_block_mask & 0xFF) == 0xFF) \

#define setup_sprite_compare_right_block_mask() \
  (((right_block_mask >> 8) & 0xFF) == 0xFF) \

#define setup_sprite_offset_u_adjust_4x() \
  offset_u *= 2; \
  offset_u_right = offset_u_right * 2 + 1 \

#define setup_sprite_compare_left_block_mask_4x() \
  ((left_block_mask & 0xFFFF) == 0xFFFF) \

#define setup_sprite_compare_right_block_mask_4x() \
  (((right_block_mask >> 16) & 0xFFFF) == 0xFFFF) \

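/*
 * The compare macros above test whether an entire 8-pixel (16 in 4x mode)
 * edge block is masked off: if so, the corresponding edge tile column only
 * needs its other half, and the "half" edge handlers are selected through
 * control_mask below.
 */
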
#define setup_sprite_tiled_do(texture_mode, x4mode) \
  s32 offset_u = u & 0xF; \
  s32 offset_v = v & 0xF; \
 \
  s32 width_rounded = offset_u + width + 15; \
  s32 height_rounded = offset_v + height + 15; \
  s32 tile_height = height_rounded / 16; \
  s32 tile_width = width_rounded / 16; \
  u32 offset_u_right = width_rounded & 0xF; \
 \
  setup_sprite_offset_u_adjust##x4mode(); \
 \
  u32 left_block_mask = ~(0xFFFFFFFF << offset_u); \
  u32 right_block_mask = 0xFFFFFFFE << offset_u_right; \
 \
  u32 left_mask_bits; \
  u32 right_mask_bits; \
 \
  u32 sub_tile_height; \
  u32 column_data; \
 \
  u32 texture_mask = (psx_gpu->texture_mask_width & 0xF) | \
    ((psx_gpu->texture_mask_height & 0xF) << 4) | \
    ((psx_gpu->texture_mask_width >> 4) << 8) | \
    ((psx_gpu->texture_mask_height >> 4) << 12); \
  u32 texture_offset = ((v & 0xF) << 4) | ((u & 0xF0) << 4) | \
    ((v & 0xF0) << 8); \
  u32 texture_offset_base = texture_offset; \
  u32 control_mask; \
 \
  u16 *fb_ptr = psx_gpu->vram_out_ptr + (y * 1024) + (x - offset_u); \
  u32 num_blocks = psx_gpu->num_blocks; \
  block_struct *block = psx_gpu->blocks + num_blocks; \
 \
  u16 *texture_block_ptr; \
  vec_8x8u texels; \
 \
  setup_sprite_tiled_initialize_##texture_mode##x4mode(); \
 \
  control_mask = tile_width == 1; \
  control_mask |= (tile_height == 1) << 1; \
  control_mask |= setup_sprite_compare_left_block_mask##x4mode() << 2; \
  control_mask |= setup_sprite_compare_right_block_mask##x4mode() << 3; \
 \
  switch(control_mask) \
  { \
    default: \
    case 0x0: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, full, \
        x4mode); \
      break; \
 \
    case 0x1: \
      setup_sprite_tile_column_width_single(texture_mode, multi, full, none, \
        x4mode); \
      break; \
 \
    case 0x2: \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, full, \
        x4mode); \
      break; \
 \
    case 0x3: \
      setup_sprite_tile_column_width_single(texture_mode, single, full, none, \
        x4mode); \
      break; \
 \
    case 0x4: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, full, \
        x4mode); \
      break; \
 \
    case 0x5: \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, right, \
        x4mode); \
      break; \
 \
    case 0x6: \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, full, \
        x4mode); \
      break; \
 \
    case 0x7: \
      setup_sprite_tile_column_width_single(texture_mode, single, half, right, \
        x4mode); \
      break; \
 \
    case 0x8: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, half, \
        x4mode); \
      break; \
 \
    case 0x9: \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, left, \
        x4mode); \
      break; \
 \
    case 0xA: \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, half, \
        x4mode); \
      break; \
 \
    case 0xB: \
      setup_sprite_tile_column_width_single(texture_mode, single, half, left, \
        x4mode); \
      break; \
 \
    case 0xC: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, half, \
        x4mode); \
      break; \
 \
    case 0xE: \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, half, \
        x4mode); \
      break; \
  } \

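/*
 * control_mask in setup_sprite_tiled_do decodes as:
 *   bit 0: sprite spans a single tile column (tile_width == 1)
 *   bit 1: sprite spans a single tile row    (tile_height == 1)
 *   bit 2: leftmost edge block fully masked  -> left edge is half-width
 *   bit 3: rightmost edge block fully masked -> right edge is half-width
 * 0xD and 0xF (a single-width column with both edge blocks fully masked)
 * would describe an empty sprite and so have no case of their own;
 * anything unexpected falls through to the default/0x0 path.
 */
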
void setup_sprite_4bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
  s32 width, s32 height, u32 color)
{
#if 0
  setup_sprite_4bpp_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(4bpp,)
}

void setup_sprite_8bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
  s32 width, s32 height, u32 color)
{
#if 0
  setup_sprite_8bpp_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(8bpp,)
}

#undef draw_mask_fb_ptr_left
#undef draw_mask_fb_ptr_right

void setup_sprite_4bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
  s32 width, s32 height, u32 color)
{
#if 0
  setup_sprite_4bpp_4x_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(4bpp, _4x)
}

void setup_sprite_8bpp_4x(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
  s32 width, s32 height, u32 color)
{
#if 0
  setup_sprite_8bpp_4x_(psx_gpu, x, y, u, v, width, height, color);
  return;
#endif
  setup_sprite_tiled_do(8bpp, _4x)
}

void scale2x_tiles8(void * __restrict__ dst_, const void * __restrict__ src_, int w8, int h)
{
#if 0
  scale2x_tiles8_(dst_, src_, w8, h);
  return;
#endif
  const u16 * __restrict__ src = src_;
  const u16 * __restrict__ src1;
  u16 * __restrict__ dst = dst_;
  u16 * __restrict__ dst1;
  gvreg a, b;
  int w;
  for (; h > 0; h--, src += 1024, dst += 1024*2)
  {
    src1 = src;
    dst1 = dst;
    for (w = w8; w > 0; w--, src1 += 8, dst1 += 8*2)
    {
      // load 8 source pixels, duplicate each by zipping the vector with
      // itself, then store the 16-pixel result to two adjacent output lines
      gvld1q_u16(a, src1);
      gvzipq_u16(a, b, a, a);
      gvst1q_u16(a, dst1);
      gvst1q_u16(b, dst1 + 8);
      gvst1q_u16(a, dst1 + 1024);
      gvst1q_u16(b, dst1 + 1024 + 8);
    }
  }
}

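/*
 * Scalar reference for the NEON loop above, under the same assumptions:
 * both buffers are 1024 pixels per line and each source pixel expands to
 * a 2x2 block in dst (plain pixel doubling, not the edge-directed scale2x
 * filter the name might suggest).
 */
#if 0
static void scale2x_tiles8_scalar(u16 * __restrict__ dst,
  const u16 * __restrict__ src, int w8, int h)
{
  int x, y;
  for(y = 0; y < h; y++, src += 1024, dst += 1024 * 2)
  {
    for(x = 0; x < w8 * 8; x++)
    {
      u16 p = src[x];
      dst[x * 2] = dst[x * 2 + 1] = p;
      dst[x * 2 + 1024] = dst[x * 2 + 1024 + 1] = p;
    }
  }
}
#endif
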
// vim:ts=2:sw=2:expandtab