/***************************************************************************
*   Copyright (C) 2010 PCSX4ALL Team                                      *
*   Copyright (C) 2010 Unai                                               *
*   Copyright (C) 2016 Senquack (dansilsby <AT> gmail <DOT> com)          *
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
*   This program is distributed in the hope that it will be useful,       *
*   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
*   GNU General Public License for more details.                          *
*                                                                         *
*   You should have received a copy of the GNU General Public License     *
*   along with this program; if not, write to the                         *
*   Free Software Foundation, Inc.,                                       *
*   51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA.           *
***************************************************************************/

#ifndef __GPU_UNAI_GPU_INNER_H__
#define __GPU_UNAI_GPU_INNER_H__

///////////////////////////////////////////////////////////////////////////////
// Inner loop driver instantiation file

///////////////////////////////////////////////////////////////////////////////
//  Option Masks (CF template parameter)
#define  CF_LIGHT     ((CF>> 0)&1) // Lighting
#define  CF_BLEND     ((CF>> 1)&1) // Blending
#define  CF_MASKCHECK ((CF>> 2)&1) // Mask bit check
#define  CF_BLENDMODE ((CF>> 3)&3) // Blend mode   0..3
#define  CF_TEXTMODE  ((CF>> 5)&3) // Texture mode 1..3 (0: texturing disabled)
#define  CF_GOURAUD   ((CF>> 7)&1) // Gouraud shading
#define  CF_MASKSET   ((CF>> 8)&1) // Mask bit set
#define  CF_DITHER    ((CF>> 9)&1) // Dithering
#define  CF_BLITMASK  ((CF>>10)&1) // blit_mask check (skip rendering pixels
                                   //  that wouldn't end up displayed on
                                   //  low-res screen using simple downscaler)

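// For illustration: a CF value is just these fields OR'd together. For
//  example, a lit, gouraud-shaded, 4bpp-textured poly using blend mode 2
//  corresponds to
//    CF = (1<<0)  // CF_LIGHT
//       | (1<<1)  // CF_BLEND
//       | (2<<3)  // CF_BLENDMODE = 2
//       | (1<<5)  // CF_TEXTMODE  = 1 (4bpp CLUT)
//       | (1<<7)  // CF_GOURAUD
//       = 0xB3,
//  which is one of the gpuPolySpanFn<> instantiations listed in the driver
//  tables at the bottom of this file.
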
//#ifdef __arm__
//#ifndef ENABLE_GPU_ARMV7
/* ARMv5 */
//#include "gpu_inner_blend_arm5.h"
//#else
/* ARMv7 optimized */
//#include "gpu_inner_blend_arm7.h"
//#endif
//#else
//#include "gpu_inner_blend.h"
//#endif

#include "gpu_inner_blend.h"
#include "gpu_inner_quantization.h"
#include "gpu_inner_light.h"

#ifdef __arm__
#include "gpu_inner_blend_arm.h"
#include "gpu_inner_light_arm.h"
#define gpuBlending gpuBlendingARM
#define gpuLightingRGB gpuLightingRGBARM
#define gpuLightingTXT gpuLightingTXTARM
#define gpuLightingTXTGouraud gpuLightingTXTGouraudARM
// Non-dithering lighting and blending functions preserve uSrc
// MSB. This saves a few operations and avoids useless loads/stores.
#define MSB_PRESERVED (!CF_DITHER)
#else
#define gpuBlending gpuBlendingGeneric
#define gpuLightingRGB gpuLightingRGBGeneric
#define gpuLightingTXT gpuLightingTXTGeneric
#define gpuLightingTXTGouraud gpuLightingTXTGouraudGeneric
#define MSB_PRESERVED 0
#endif


// If defined, Gouraud colors are fixed-point 5.11, otherwise they are 8.16
// This is only for debugging/verification of low-precision colors in C.
// Low-precision Gouraud is intended for use by SIMD-optimized inner drivers
// which get/use Gouraud colors in SIMD registers.
//#define GPU_GOURAUD_LOW_PRECISION

// How many bits of fixed-point precision GouraudColor uses
#ifdef GPU_GOURAUD_LOW_PRECISION
#define GPU_GOURAUD_FIXED_BITS 11
#else
#define GPU_GOURAUD_FIXED_BITS 16
#endif

// Used to pass Gouraud colors to gpuPixelSpanFn() (lines)
struct GouraudColor {
#ifdef GPU_GOURAUD_LOW_PRECISION
	u16 r, g, b;
	s16 r_incr, g_incr, b_incr;
#else
	u32 r, g, b;
	s32 r_incr, g_incr, b_incr;
#endif
};

static inline u16 gpuGouraudColor15bpp(u32 r, u32 g, u32 b)
{
	r >>= GPU_GOURAUD_FIXED_BITS;
	g >>= GPU_GOURAUD_FIXED_BITS;
	b >>= GPU_GOURAUD_FIXED_BITS;

#ifndef GPU_GOURAUD_LOW_PRECISION
	// High-precision Gouraud colors are 8-bit + fractional
	r >>= 3;  g >>= 3;  b >>= 3;
#endif

	return r | (g << 5) | (b << 10);
}
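
// Worked example (illustrative only): with the default 8.16 fixed-point
//  format, a fully saturated red of 255 is stored as (255 << 16). The first
//  shift by GPU_GOURAUD_FIXED_BITS drops the fraction (-> 255), the extra
//  ">>= 3" reduces 8-bit to 5-bit (-> 31), and the return packs BGR555:
//    gpuGouraudColor15bpp(255 << 16, 0, 0) == 0x001f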

///////////////////////////////////////////////////////////////////////////////
//  GPU Pixel span operations generator gpuPixelSpanFn<>
//  Oct 2016: Created/adapted from old gpuPixelFn by senquack:
//  Original gpuPixelFn was used to draw lines one pixel at a time. I wrote
//  new line algorithms that draw lines using horizontal/vertical/diagonal
//  spans of pixels, necessitating a new pixel-drawing function that could
//  not only render spans of pixels, but gouraud-shade them as well.
//  This speeds up line rendering and would allow tile-rendering (untextured
//  rectangles) to use the same set of functions. Since tiles are always
//  monochrome, they simply wouldn't use the extra set of 32 gouraud-shaded
//  gpuPixelSpanFn functions (TODO?).
template<int CF>
static le16_t* gpuPixelSpanFn(le16_t* pDst, uintptr_t data, ptrdiff_t incr, size_t len)
{
	// Blend func can save an operation if it knows uSrc MSB is
	//  unset. For untextured prims, this is always true.
	const bool skip_uSrc_mask = true;

	u16 col;
	struct GouraudColor * gcPtr;
	u32 r, g, b;
	s32 r_incr, g_incr, b_incr;

	// Caller counts in bytes, we count in pixels
	incr /= 2;

	if (CF_GOURAUD) {
		gcPtr = (GouraudColor*)data;
		r = gcPtr->r;  r_incr = gcPtr->r_incr;
		g = gcPtr->g;  g_incr = gcPtr->g_incr;
		b = gcPtr->b;  b_incr = gcPtr->b_incr;
	} else {
		col = (u16)data;
	}

	do {
		if (!CF_GOURAUD)
		{   // NO GOURAUD
			if (!CF_MASKCHECK && !CF_BLEND) {
				if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
				else            { *pDst = u16_to_le16(col);          }
			} else if (CF_MASKCHECK && !CF_BLEND) {
				if (!(le16_raw(*pDst) & HTOLE16(0x8000))) {
					if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
					else            { *pDst = u16_to_le16(col);          }
				}
			} else {
				uint_fast16_t uDst = le16_to_u16(*pDst);
				if (CF_MASKCHECK) { if (uDst & 0x8000) goto endpixel; }

				uint_fast16_t uSrc = col;

				if (CF_BLEND)
					uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

				if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
				else            { *pDst = u16_to_le16(uSrc);          }
			}

		} else
		{   // GOURAUD

			if (!CF_MASKCHECK && !CF_BLEND) {
				col = gpuGouraudColor15bpp(r, g, b);
				if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
				else            { *pDst = u16_to_le16(col);          }
			} else if (CF_MASKCHECK && !CF_BLEND) {
				col = gpuGouraudColor15bpp(r, g, b);
				if (!(le16_raw(*pDst) & HTOLE16(0x8000))) {
					if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
					else            { *pDst = u16_to_le16(col);          }
				}
			} else {
				uint_fast16_t uDst = le16_to_u16(*pDst);
				if (CF_MASKCHECK) { if (uDst & 0x8000) goto endpixel; }
				col = gpuGouraudColor15bpp(r, g, b);

				uint_fast16_t uSrc = col;

				// Blend func can save an operation if it knows uSrc MSB is
				//  unset. For untextured prims, this is always true.
				const bool skip_uSrc_mask = true;

				if (CF_BLEND)
					uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

				if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
				else            { *pDst = u16_to_le16(uSrc);          }
			}
		}

endpixel:
		if (CF_GOURAUD) {
			r += r_incr;
			g += g_incr;
			b += b_incr;
		}
		pDst += incr;
	} while (len-- > 1);

	// Note from senquack: Normally, I'd prefer to write a 'do {} while (--len)'
	//  loop, or even a for() loop, however, on MIPS platforms anything but the
	//  'do {} while (len-- > 1)' tends to generate very suboptimal asm, with
	//  many unneeded MULs/ADDs/branches at the ends of these functions.
	//  If you change the loop structure above, be sure to compare the quality
	//  of the generated code!!

	if (CF_GOURAUD) {
		gcPtr->r = r;
		gcPtr->g = g;
		gcPtr->b = b;
	}
	return pDst;
}
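
// Illustrative use (hypothetical caller, shown only as a sketch): to draw an
//  8-pixel horizontal gouraud span, pass a GouraudColor holding the starting
//  fixed-point color and per-pixel increments:
//    GouraudColor gc = { r0, g0, b0, r_inc, g_inc, b_inc };
//    pDst = gpuPixelSpanFn<CF>(pDst, (uintptr_t)&gc, 2 /* bytes */, 8);
//  The function returns a pointer just past the last pixel written and stores
//  the final interpolated color back into gc for the caller's next span.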

static le16_t* PixelSpanNULL(le16_t* pDst, uintptr_t data, ptrdiff_t incr, size_t len)
{
	#ifdef ENABLE_GPU_LOG_SUPPORT
		fprintf(stdout,"PixelSpanNULL()\n");
	#endif
	return pDst;
}

///////////////////////////////////////////////////////////////////////////////
//  PixelSpan (lines) innerloops driver
typedef le16_t* (*PSD)(le16_t* dst, uintptr_t data, ptrdiff_t incr, size_t len);

const PSD gpuPixelSpanDrivers[64] =
{
	// Array index | 'CF' template field | Field value
	// ------------+---------------------+----------------
	// Bit 0       | CF_BLEND            | off (0), on (1)
	// Bit 1       | CF_MASKCHECK        | off (0), on (1)
	// Bits 3:2    | CF_BLENDMODE        | 0..3
	// Bit 4       | CF_MASKSET          | off (0), on (1)
	// Bit 5       | CF_GOURAUD          | off (0), on (1)
	//
	// NULL entries are ones for which blending is disabled and the blend-mode
	//  field is non-zero, which is obviously invalid.

	// Flat-shaded
	gpuPixelSpanFn<0x00<<1>,         gpuPixelSpanFn<0x01<<1>,         gpuPixelSpanFn<0x02<<1>,         gpuPixelSpanFn<0x03<<1>,
	PixelSpanNULL,                   gpuPixelSpanFn<0x05<<1>,         PixelSpanNULL,                   gpuPixelSpanFn<0x07<<1>,
	PixelSpanNULL,                   gpuPixelSpanFn<0x09<<1>,         PixelSpanNULL,                   gpuPixelSpanFn<0x0B<<1>,
	PixelSpanNULL,                   gpuPixelSpanFn<0x0D<<1>,         PixelSpanNULL,                   gpuPixelSpanFn<0x0F<<1>,

	// Flat-shaded + PixelMSB (CF_MASKSET)
	gpuPixelSpanFn<(0x00<<1)|0x100>, gpuPixelSpanFn<(0x01<<1)|0x100>, gpuPixelSpanFn<(0x02<<1)|0x100>, gpuPixelSpanFn<(0x03<<1)|0x100>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x05<<1)|0x100>, PixelSpanNULL,                   gpuPixelSpanFn<(0x07<<1)|0x100>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x09<<1)|0x100>, PixelSpanNULL,                   gpuPixelSpanFn<(0x0B<<1)|0x100>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x0D<<1)|0x100>, PixelSpanNULL,                   gpuPixelSpanFn<(0x0F<<1)|0x100>,

	// Gouraud-shaded (CF_GOURAUD)
	gpuPixelSpanFn<(0x00<<1)|0x80>,  gpuPixelSpanFn<(0x01<<1)|0x80>,  gpuPixelSpanFn<(0x02<<1)|0x80>,  gpuPixelSpanFn<(0x03<<1)|0x80>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x05<<1)|0x80>,  PixelSpanNULL,                   gpuPixelSpanFn<(0x07<<1)|0x80>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x09<<1)|0x80>,  PixelSpanNULL,                   gpuPixelSpanFn<(0x0B<<1)|0x80>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x0D<<1)|0x80>,  PixelSpanNULL,                   gpuPixelSpanFn<(0x0F<<1)|0x80>,

	// Gouraud-shaded (CF_GOURAUD) + PixelMSB (CF_MASKSET)
	gpuPixelSpanFn<(0x00<<1)|0x180>, gpuPixelSpanFn<(0x01<<1)|0x180>, gpuPixelSpanFn<(0x02<<1)|0x180>, gpuPixelSpanFn<(0x03<<1)|0x180>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x05<<1)|0x180>, PixelSpanNULL,                   gpuPixelSpanFn<(0x07<<1)|0x180>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x09<<1)|0x180>, PixelSpanNULL,                   gpuPixelSpanFn<(0x0B<<1)|0x180>,
	PixelSpanNULL,                   gpuPixelSpanFn<(0x0D<<1)|0x180>, PixelSpanNULL,                   gpuPixelSpanFn<(0x0F<<1)|0x180>
};
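
// Selecting a pixel-span driver (illustrative sketch of a hypothetical
//  caller; the actual line rasterizer builds its index from GPU state):
//    int idx = (blend      ? 0x01 : 0)
//            | (mask_check ? 0x02 : 0)
//            | (blend_mode << 2)            // 0..3, only when blending
//            | (mask_set   ? 0x10 : 0)
//            | (gouraud    ? 0x20 : 0);
//    PSD driver = gpuPixelSpanDrivers[idx];
//    pDst = driver(pDst, data, incr, len);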

///////////////////////////////////////////////////////////////////////////////
//  GPU Tiles innerloops generator

template<int CF>
static void gpuTileSpanFn(le16_t *pDst, u32 count, u16 data)
{
	le16_t ldata;

	if (!CF_MASKCHECK && !CF_BLEND) {
		if (CF_MASKSET)
			ldata = u16_to_le16(data | 0x8000);
		else
			ldata = u16_to_le16(data);
		do { *pDst++ = ldata; } while (--count);
	} else if (CF_MASKCHECK && !CF_BLEND) {
		if (CF_MASKSET)
			ldata = u16_to_le16(data | 0x8000);
		else
			ldata = u16_to_le16(data);
		do {
			if (!(le16_raw(*pDst) & HTOLE16(0x8000)))
				*pDst = ldata;
			pDst++;
		} while (--count);
	} else
	{
		// Blend func can save an operation if it knows uSrc MSB is
		//  unset. For untextured prims, this is always true.
		const bool skip_uSrc_mask = true;

		uint_fast16_t uSrc, uDst;
		do
		{
			if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
			if (CF_MASKCHECK) if (uDst&0x8000) { goto endtile; }

			uSrc = data;

			if (CF_BLEND)
				uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

			if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
			else            { *pDst = u16_to_le16(uSrc);          }

			//senquack - Did not apply the "Silent Hill" mask-bit fix here.
			// It is hard to tell from the scarce documentation available and
			//  the lack of comments in the code, but I believe the tile-span
			//  functions here should not bother to preserve any source MSB,
			//  as they are not drawing from a texture.
endtile:
			pDst++;
		}
		while (--count);
	}
}

static void TileNULL(le16_t *pDst, u32 count, u16 data)
{
	#ifdef ENABLE_GPU_LOG_SUPPORT
		fprintf(stdout,"TileNULL()\n");
	#endif
}

///////////////////////////////////////////////////////////////////////////////
//  Tiles innerloops driver
typedef void (*PT)(le16_t *pDst, u32 count, u16 data);

// Template instantiation helper macros
#define TI(cf) gpuTileSpanFn<(cf)>
#define TN     TileNULL
#define TIBLOCK(ub) \
	TI((ub)|0x00), TI((ub)|0x02), TI((ub)|0x04), TI((ub)|0x06), \
	TN,            TI((ub)|0x0a), TN,            TI((ub)|0x0e), \
	TN,            TI((ub)|0x12), TN,            TI((ub)|0x16), \
	TN,            TI((ub)|0x1a), TN,            TI((ub)|0x1e)

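// Array index layout for gpuTileSpanDrivers[] (derived from the TIBLOCK
//  expansion above, analogous to gpuPixelSpanDrivers[]): within each 16-entry
//  block the CF value is simply index<<1, i.e. index bit 0 selects CF_BLEND,
//  bit 1 CF_MASKCHECK, bits 3:2 CF_BLENDMODE; the second block adds
//  CF_MASKSET. TileNULL fills the slots where a blend mode is set but
//  blending is off.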
const PT gpuTileSpanDrivers[32] = {
	TIBLOCK(0<<8), TIBLOCK(1<<8)
};

#undef TI
#undef TN
#undef TIBLOCK


///////////////////////////////////////////////////////////////////////////////
//  GPU Sprites innerloops generator

template<int CF>
static void gpuSpriteSpanFn(le16_t *pDst, u32 count, u8* pTxt, u32 u0)
{
	// Blend func can save an operation if it knows uSrc MSB is unset.
	//  Untextured prims can always skip it (source color always comes with MSB=0).
	//  For textured prims, the generic lighting funcs always return it unset. (bonus!)
	const bool skip_uSrc_mask = MSB_PRESERVED ? (!CF_TEXTMODE) : (!CF_TEXTMODE) || CF_LIGHT;

	uint_fast16_t uSrc, uDst, srcMSB;
	bool should_blend;
	u32 u0_mask = gpu_unai.TextureWindow[2];

	u8 r5, g5, b5;
	if (CF_LIGHT) {
		r5 = gpu_unai.r5;
		g5 = gpu_unai.g5;
		b5 = gpu_unai.b5;
	}

	if (CF_TEXTMODE==3) {
		// Texture is accessed byte-wise, so adjust mask if 16bpp
		u0_mask <<= 1;
	}

	const le16_t *CBA_; if (CF_TEXTMODE!=3) CBA_ = gpu_unai.CBA;

	do
	{
		if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
		if (CF_MASKCHECK) if (uDst&0x8000) { goto endsprite; }

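		// Texel fetch, one of three modes: in 4bpp mode u0 counts texels
		//  (two per byte; the low bit selects the nibble that indexes the
		//  16-entry CLUT at CBA_), in 8bpp mode u0 is a byte offset whose
		//  texel indexes a 256-entry CLUT, and in 16bpp mode u0 is a byte
		//  offset to a raw 15bpp texel (hence the +=2 per pixel at 'endsprite').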
		if (CF_TEXTMODE==1) {  //  4bpp (CLUT)
			u8 rgb = pTxt[(u0 & u0_mask)>>1];
			uSrc = le16_to_u16(CBA_[(rgb>>((u0&1)<<2))&0xf]);
		}
		if (CF_TEXTMODE==2) {  //  8bpp (CLUT)
			uSrc = le16_to_u16(CBA_[pTxt[u0 & u0_mask]]);
		}
		if (CF_TEXTMODE==3) {  // 16bpp
			uSrc = le16_to_u16(*(le16_t*)(&pTxt[u0 & u0_mask]));
		}

		if (!uSrc) goto endsprite;

		//senquack - save source MSB, as blending or lighting macros will not
		//           (Silent Hill gray rectangles mask bit bug)
		if (CF_BLEND || CF_LIGHT) srcMSB = uSrc & 0x8000;

		if (CF_LIGHT)
			uSrc = gpuLightingTXT(uSrc, r5, g5, b5);

		should_blend = MSB_PRESERVED ? uSrc & 0x8000 : srcMSB;

		if (CF_BLEND && should_blend)
			uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

		if (CF_MASKSET)                                    { *pDst = u16_to_le16(uSrc | 0x8000); }
		else if (!MSB_PRESERVED && (CF_BLEND || CF_LIGHT)) { *pDst = u16_to_le16(uSrc | srcMSB); }
		else                                               { *pDst = u16_to_le16(uSrc);          }

endsprite:
		u0 += (CF_TEXTMODE==3) ? 2 : 1;
		pDst++;
	}
	while (--count);
}

static void SpriteNULL(le16_t *pDst, u32 count, u8* pTxt, u32 u0)
{
	#ifdef ENABLE_GPU_LOG_SUPPORT
		fprintf(stdout,"SpriteNULL()\n");
	#endif
}

///////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////
//  Sprite innerloops driver
typedef void (*PS)(le16_t *pDst, u32 count, u8* pTxt, u32 u0);

// Template instantiation helper macros
#define TI(cf) gpuSpriteSpanFn<(cf)>
#define TN     SpriteNULL
#define TIBLOCK(ub) \
	TN,            TN,            TN,            TN,            TN,            TN,            TN,            TN,            \
	TN,            TN,            TN,            TN,            TN,            TN,            TN,            TN,            \
	TN,            TN,            TN,            TN,            TN,            TN,            TN,            TN,            \
	TN,            TN,            TN,            TN,            TN,            TN,            TN,            TN,            \
	TI((ub)|0x20), TI((ub)|0x21), TI((ub)|0x22), TI((ub)|0x23), TI((ub)|0x24), TI((ub)|0x25), TI((ub)|0x26), TI((ub)|0x27), \
	TN,            TN,            TI((ub)|0x2a), TI((ub)|0x2b), TN,            TN,            TI((ub)|0x2e), TI((ub)|0x2f), \
	TN,            TN,            TI((ub)|0x32), TI((ub)|0x33), TN,            TN,            TI((ub)|0x36), TI((ub)|0x37), \
	TN,            TN,            TI((ub)|0x3a), TI((ub)|0x3b), TN,            TN,            TI((ub)|0x3e), TI((ub)|0x3f), \
	TI((ub)|0x40), TI((ub)|0x41), TI((ub)|0x42), TI((ub)|0x43), TI((ub)|0x44), TI((ub)|0x45), TI((ub)|0x46), TI((ub)|0x47), \
	TN,            TN,            TI((ub)|0x4a), TI((ub)|0x4b), TN,            TN,            TI((ub)|0x4e), TI((ub)|0x4f), \
	TN,            TN,            TI((ub)|0x52), TI((ub)|0x53), TN,            TN,            TI((ub)|0x56), TI((ub)|0x57), \
	TN,            TN,            TI((ub)|0x5a), TI((ub)|0x5b), TN,            TN,            TI((ub)|0x5e), TI((ub)|0x5f), \
	TI((ub)|0x60), TI((ub)|0x61), TI((ub)|0x62), TI((ub)|0x63), TI((ub)|0x64), TI((ub)|0x65), TI((ub)|0x66), TI((ub)|0x67), \
	TN,            TN,            TI((ub)|0x6a), TI((ub)|0x6b), TN,            TN,            TI((ub)|0x6e), TI((ub)|0x6f), \
	TN,            TN,            TI((ub)|0x72), TI((ub)|0x73), TN,            TN,            TI((ub)|0x76), TI((ub)|0x77), \
	TN,            TN,            TI((ub)|0x7a), TI((ub)|0x7b), TN,            TN,            TI((ub)|0x7e), TI((ub)|0x7f)

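// Array index layout for gpuSpriteSpanDrivers[] (derived from the TIBLOCK
//  expansion above): within each 128-entry block the index is the CF value
//  itself -- bit 0 CF_LIGHT, bit 1 CF_BLEND, bit 2 CF_MASKCHECK, bits 4:3
//  CF_BLENDMODE, bits 6:5 CF_TEXTMODE -- and the second block adds
//  CF_MASKSET. SpriteNULL fills the invalid slots: CF_TEXTMODE of 0
//  (sprites are always textured) and blend modes set with blending off.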
const PS gpuSpriteSpanDrivers[256] = {
	TIBLOCK(0<<8), TIBLOCK(1<<8)
};

#undef TI
#undef TN
#undef TIBLOCK

///////////////////////////////////////////////////////////////////////////////
//  GPU Polygon innerloops generator

//senquack - Newer version with the following changes:
//           * Adapted to work with new poly routines in gpu_raster_polygon.h
//             adapted from DrHell GPU. They are less glitchy and use 22.10
//             fixed-point instead of original UNAI's 16.16.
//           * Texture coordinates are no longer packed together into one
//             unsigned int. This seems to lose too much accuracy (they each
//             end up being only 8.7 fixed-point that way) and pixel dropouts
//             were noticeable both with original code and current DrHell
//             adaptations. An example would be the sky in NFS3. Now, they are
//             stored in separate ints, using separate masks.
//           * Function is no longer INLINE, as it was always called
//             through a function pointer.
//           * Function now ensures the mask bit of the source texture is
//             preserved across calls to blending functions (Silent Hill
//             rectangles fix)
//           * November 2016: Large refactoring of blending/lighting when
//             JohnnyF added dithering. See gpu_inner_quantization.h and
//             relevant blend/light headers.
// (see README_senquack.txt)
template<int CF>
static void gpuPolySpanFn(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count)
{
	// Blend func can save an operation if it knows uSrc MSB is unset.
	//  Untextured prims can always skip this (src color MSB is always 0).
	//  For textured prims, the generic lighting funcs always return it unset. (bonus!)
	const bool skip_uSrc_mask = MSB_PRESERVED ? (!CF_TEXTMODE) : (!CF_TEXTMODE) || CF_LIGHT;
	bool should_blend;

	u32 bMsk; if (CF_BLITMASK) bMsk = gpu_unai.blit_mask;

	if (!CF_TEXTMODE)
	{
		if (!CF_GOURAUD)
		{
			// UNTEXTURED, NO GOURAUD
			const u16 pix15 = gpu_unai.PixelData;
			do {
				uint_fast16_t uSrc, uDst;

				// NOTE: Don't enable CF_BLITMASK pixel skipping (speed hack)
				//  on untextured polys. It seems to do more harm than good: see
				//  the gravestone text at the end of the Medieval intro sequence. -senquack
				//if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) { goto endpolynotextnogou; } }

				if (CF_BLEND || CF_MASKCHECK) uDst = le16_to_u16(*pDst);
				if (CF_MASKCHECK) { if (uDst&0x8000) { goto endpolynotextnogou; } }

				uSrc = pix15;

				if (CF_BLEND)
					uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

				if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
				else            { *pDst = u16_to_le16(uSrc);          }

endpolynotextnogou:
				pDst++;
			} while(--count);
		}
		else
		{
			// UNTEXTURED, GOURAUD
			u32 l_gCol = gpu_unai.gCol;
			u32 l_gInc = gpu_unai.gInc;

			do {
				uint_fast16_t uDst, uSrc;

				// See note in the loop above regarding CF_BLITMASK
				//if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) goto endpolynotextgou; }

				if (CF_BLEND || CF_MASKCHECK) uDst = le16_to_u16(*pDst);
				if (CF_MASKCHECK) { if (uDst&0x8000) goto endpolynotextgou; }

				if (CF_DITHER) {
					// GOURAUD, DITHER

					u32 uSrc24 = gpuLightingRGB24(l_gCol);
					if (CF_BLEND)
						uSrc24 = gpuBlending24<CF_BLENDMODE>(uSrc24, uDst);
					uSrc = gpuColorQuantization24<CF_DITHER>(uSrc24, pDst);
				} else {
					// GOURAUD, NO DITHER

					uSrc = gpuLightingRGB(l_gCol);

					if (CF_BLEND)
						uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
				}

				if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
				else            { *pDst = u16_to_le16(uSrc);          }

endpolynotextgou:
				pDst++;
				l_gCol += l_gInc;
			}
			while (--count);
		}
	}
	else
	{
		// TEXTURED

		uint_fast16_t uDst, uSrc, srcMSB;

		//senquack - note: original UNAI code had gpu_unai.{u4/v4} packed into
		// one 32-bit unsigned int, but this proved to lose too much accuracy
		// (pixel dropouts noticeable in NFS3 sky), so now they are separate vars.
		u32 l_u_msk = gpu_unai.u_msk;     u32 l_v_msk = gpu_unai.v_msk;
		u32 l_u = gpu_unai.u & l_u_msk;   u32 l_v = gpu_unai.v & l_v_msk;
		s32 l_u_inc = gpu_unai.u_inc;     s32 l_v_inc = gpu_unai.v_inc;

		const le16_t* TBA_ = gpu_unai.TBA;
		const le16_t* CBA_; if (CF_TEXTMODE!=3) CBA_ = gpu_unai.CBA;

		u8 r5, g5, b5;
		u8 r8, g8, b8;

		u32 l_gInc, l_gCol;

		if (CF_LIGHT) {
			if (CF_GOURAUD) {
				l_gInc = gpu_unai.gInc;
				l_gCol = gpu_unai.gCol;
			} else {
				if (CF_DITHER) {
					r8 = gpu_unai.r8;
					g8 = gpu_unai.g8;
					b8 = gpu_unai.b8;
				} else {
					r5 = gpu_unai.r5;
					g5 = gpu_unai.g5;
					b5 = gpu_unai.b5;
				}
			}
		}

		do
		{
			if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) goto endpolytext; }
			if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
			if (CF_MASKCHECK) if (uDst&0x8000) { goto endpolytext; }

			//senquack - adapted to work with new 22.10 fixed point routines:
			//           (UNAI originally used 16.16)
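			// Texel addressing: l_u/l_v are 22.10 fixed-point, so >>10 yields
			//  the integer texel column. For the byte-wise 4/8bpp fetches,
			//  (l_v<<1)&(0xff<<11) is the row offset in bytes (2048 bytes per
			//  VRAM line); for 16bpp, (l_v)&(0xff<<10) is the same row offset
			//  in halfwords.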
			if (CF_TEXTMODE==1) {  //  4bpp (CLUT)
				u32 tu=(l_u>>10);
				u32 tv=(l_v<<1)&(0xff<<11);
				u8 rgb=((u8*)TBA_)[tv+(tu>>1)];
				uSrc=le16_to_u16(CBA_[(rgb>>((tu&1)<<2))&0xf]);
				if (!uSrc) goto endpolytext;
			}
			if (CF_TEXTMODE==2) {  //  8bpp (CLUT)
				uSrc = le16_to_u16(CBA_[(((u8*)TBA_)[(l_u>>10)+((l_v<<1)&(0xff<<11))])]);
				if (!uSrc) goto endpolytext;
			}
			if (CF_TEXTMODE==3) {  // 16bpp
				uSrc = le16_to_u16(TBA_[(l_u>>10)+((l_v)&(0xff<<10))]);
				if (!uSrc) goto endpolytext;
			}

			// Save source MSB, as blending or lighting will not (Silent Hill)
			if (CF_BLEND || CF_LIGHT) srcMSB = uSrc & 0x8000;

			// When textured, only dither when LIGHT (texture blend) is enabled
			// LIGHT &&  BLEND => dither
			// LIGHT && !BLEND => dither
			//!LIGHT &&  BLEND => no dither
			//!LIGHT && !BLEND => no dither

			if (CF_DITHER && CF_LIGHT) {
				u32 uSrc24;
				if ( CF_GOURAUD)
					uSrc24 = gpuLightingTXT24Gouraud(uSrc, l_gCol);
				if (!CF_GOURAUD)
					uSrc24 = gpuLightingTXT24(uSrc, r8, g8, b8);

				if (CF_BLEND && srcMSB)
					uSrc24 = gpuBlending24<CF_BLENDMODE>(uSrc24, uDst);

				uSrc = gpuColorQuantization24<CF_DITHER>(uSrc24, pDst);
			} else
			{
				if (CF_LIGHT) {
					if ( CF_GOURAUD)
						uSrc = gpuLightingTXTGouraud(uSrc, l_gCol);
					if (!CF_GOURAUD)
						uSrc = gpuLightingTXT(uSrc, r5, g5, b5);
				}

				should_blend = MSB_PRESERVED ? uSrc & 0x8000 : srcMSB;
				if (CF_BLEND && should_blend)
					uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
			}

			if (CF_MASKSET)                                    { *pDst = u16_to_le16(uSrc | 0x8000); }
			else if (!MSB_PRESERVED && (CF_BLEND || CF_LIGHT)) { *pDst = u16_to_le16(uSrc | srcMSB); }
			else                                               { *pDst = u16_to_le16(uSrc);          }
endpolytext:
			pDst++;
			l_u = (l_u + l_u_inc) & l_u_msk;
			l_v = (l_v + l_v_inc) & l_v_msk;
			if (CF_LIGHT && CF_GOURAUD) l_gCol += l_gInc;
		}
		while (--count);
	}
}

static void PolyNULL(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count)
{
	#ifdef ENABLE_GPU_LOG_SUPPORT
		fprintf(stdout,"PolyNULL()\n");
	#endif
}

///////////////////////////////////////////////////////////////////////////////
//  Polygon innerloops driver
typedef void (*PP)(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count);

// Template instantiation helper macros
#define TI(cf) gpuPolySpanFn<(cf)>
#define TN     PolyNULL
#define TIBLOCK(ub) \
	TI((ub)|0x00), TI((ub)|0x01), TI((ub)|0x02), TI((ub)|0x03), TI((ub)|0x04), TI((ub)|0x05), TI((ub)|0x06), TI((ub)|0x07), \
	TN,            TN,            TI((ub)|0x0a), TI((ub)|0x0b), TN,            TN,            TI((ub)|0x0e), TI((ub)|0x0f), \
	TN,            TN,            TI((ub)|0x12), TI((ub)|0x13), TN,            TN,            TI((ub)|0x16), TI((ub)|0x17), \
	TN,            TN,            TI((ub)|0x1a), TI((ub)|0x1b), TN,            TN,            TI((ub)|0x1e), TI((ub)|0x1f), \
	TI((ub)|0x20), TI((ub)|0x21), TI((ub)|0x22), TI((ub)|0x23), TI((ub)|0x24), TI((ub)|0x25), TI((ub)|0x26), TI((ub)|0x27), \
	TN,            TN,            TI((ub)|0x2a), TI((ub)|0x2b), TN,            TN,            TI((ub)|0x2e), TI((ub)|0x2f), \
	TN,            TN,            TI((ub)|0x32), TI((ub)|0x33), TN,            TN,            TI((ub)|0x36), TI((ub)|0x37), \
	TN,            TN,            TI((ub)|0x3a), TI((ub)|0x3b), TN,            TN,            TI((ub)|0x3e), TI((ub)|0x3f), \
	TI((ub)|0x40), TI((ub)|0x41), TI((ub)|0x42), TI((ub)|0x43), TI((ub)|0x44), TI((ub)|0x45), TI((ub)|0x46), TI((ub)|0x47), \
	TN,            TN,            TI((ub)|0x4a), TI((ub)|0x4b), TN,            TN,            TI((ub)|0x4e), TI((ub)|0x4f), \
	TN,            TN,            TI((ub)|0x52), TI((ub)|0x53), TN,            TN,            TI((ub)|0x56), TI((ub)|0x57), \
	TN,            TN,            TI((ub)|0x5a), TI((ub)|0x5b), TN,            TN,            TI((ub)|0x5e), TI((ub)|0x5f), \
	TI((ub)|0x60), TI((ub)|0x61), TI((ub)|0x62), TI((ub)|0x63), TI((ub)|0x64), TI((ub)|0x65), TI((ub)|0x66), TI((ub)|0x67), \
	TN,            TN,            TI((ub)|0x6a), TI((ub)|0x6b), TN,            TN,            TI((ub)|0x6e), TI((ub)|0x6f), \
	TN,            TN,            TI((ub)|0x72), TI((ub)|0x73), TN,            TN,            TI((ub)|0x76), TI((ub)|0x77), \
	TN,            TN,            TI((ub)|0x7a), TI((ub)|0x7b), TN,            TN,            TI((ub)|0x7e), TI((ub)|0x7f), \
	TN,            TI((ub)|0x81), TN,            TI((ub)|0x83), TN,            TI((ub)|0x85), TN,            TI((ub)|0x87), \
	TN,            TN,            TN,            TI((ub)|0x8b), TN,            TN,            TN,            TI((ub)|0x8f), \
	TN,            TN,            TN,            TI((ub)|0x93), TN,            TN,            TN,            TI((ub)|0x97), \
	TN,            TN,            TN,            TI((ub)|0x9b), TN,            TN,            TN,            TI((ub)|0x9f), \
	TN,            TI((ub)|0xa1), TN,            TI((ub)|0xa3), TN,            TI((ub)|0xa5), TN,            TI((ub)|0xa7), \
	TN,            TN,            TN,            TI((ub)|0xab), TN,            TN,            TN,            TI((ub)|0xaf), \
	TN,            TN,            TN,            TI((ub)|0xb3), TN,            TN,            TN,            TI((ub)|0xb7), \
	TN,            TN,            TN,            TI((ub)|0xbb), TN,            TN,            TN,            TI((ub)|0xbf), \
	TN,            TI((ub)|0xc1), TN,            TI((ub)|0xc3), TN,            TI((ub)|0xc5), TN,            TI((ub)|0xc7), \
	TN,            TN,            TN,            TI((ub)|0xcb), TN,            TN,            TN,            TI((ub)|0xcf), \
	TN,            TN,            TN,            TI((ub)|0xd3), TN,            TN,            TN,            TI((ub)|0xd7), \
	TN,            TN,            TN,            TI((ub)|0xdb), TN,            TN,            TN,            TI((ub)|0xdf), \
	TN,            TI((ub)|0xe1), TN,            TI((ub)|0xe3), TN,            TI((ub)|0xe5), TN,            TI((ub)|0xe7), \
	TN,            TN,            TN,            TI((ub)|0xeb), TN,            TN,            TN,            TI((ub)|0xef), \
	TN,            TN,            TN,            TI((ub)|0xf3), TN,            TN,            TN,            TI((ub)|0xf7), \
	TN,            TN,            TN,            TI((ub)|0xfb), TN,            TN,            TN,            TI((ub)|0xff)

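// Array index layout for gpuPolySpanDrivers[] (derived from the TIBLOCK
//  expansion above): the low 8 index bits are the CF value itself -- bit 0
//  CF_LIGHT, bit 1 CF_BLEND, bit 2 CF_MASKCHECK, bits 4:3 CF_BLENDMODE,
//  bits 6:5 CF_TEXTMODE, bit 7 CF_GOURAUD -- and the eight TIBLOCK copies
//  add bits 8..10: CF_MASKSET, CF_DITHER, CF_BLITMASK. PolyNULL fills the
//  invalid/unused combinations (e.g. a blend mode set with blending disabled).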
const PP gpuPolySpanDrivers[2048] = {
	TIBLOCK(0<<8), TIBLOCK(1<<8), TIBLOCK(2<<8), TIBLOCK(3<<8),
	TIBLOCK(4<<8), TIBLOCK(5<<8), TIBLOCK(6<<8), TIBLOCK(7<<8)
};

#undef TI
#undef TN
#undef TIBLOCK

#endif /* __GPU_UNAI_GPU_INNER_H__ */