1 /***************************************************************************
2 * Copyright (C) 2010 PCSX4ALL Team *
3 * Copyright (C) 2010 Unai *
4 * Copyright (C) 2016 Senquack (dansilsby <AT> gmail <DOT> com) *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA. *
20 ***************************************************************************/
22 #ifndef __GPU_UNAI_GPU_INNER_H__
23 #define __GPU_UNAI_GPU_INNER_H__
25 ///////////////////////////////////////////////////////////////////////////////
26 // Inner loop driver instantiation file
28 ///////////////////////////////////////////////////////////////////////////////
29 // Option Masks (CF template parameter)
// Each inner-loop renderer is a template instantiated once per option
// combination; the single integer template parameter 'CF' packs all the
// option flags, and these extractor macros pull the individual fields
// back out at compile time (CF_BLENDMODE and CF_TEXTMODE are 2-bit
// fields, the rest are single bits).
30 #define CF_LIGHT ((CF>> 0)&1) // Lighting
31 #define CF_BLEND ((CF>> 1)&1) // Blending
32 #define CF_MASKCHECK ((CF>> 2)&1) // Mask bit check
33 #define CF_BLENDMODE ((CF>> 3)&3) // Blend mode 0..3
34 #define CF_TEXTMODE ((CF>> 5)&3) // Texture mode 1..3 (0: texturing disabled)
35 #define CF_GOURAUD ((CF>> 7)&1) // Gouraud shading
36 #define CF_MASKSET ((CF>> 8)&1) // Mask bit set
37 #define CF_DITHER ((CF>> 9)&1) // Dithering
38 #define CF_BLITMASK ((CF>>10)&1) // blit_mask check (skip rendering pixels
39 // that wouldn't end up displayed on
40 // low-res screen using simple downscaler)
43 //#ifndef ENABLE_GPU_ARMV7
45 //#include "gpu_inner_blend_arm5.h"
48 //#include "gpu_inner_blend_arm7.h"
51 //#include "gpu_inner_blend.h"
54 #include "gpu_inner_blend.h"
55 #include "gpu_inner_quantization.h"
56 #include "gpu_inner_light.h"
59 #include "gpu_inner_blend_arm.h"
60 #include "gpu_inner_light_arm.h"
61 #define gpuBlending gpuBlendingARM
62 #define gpuLightingRGB gpuLightingRGBARM
63 #define gpuLightingTXT gpuLightingTXTARM
64 #define gpuLightingTXTGouraud gpuLightingTXTGouraudARM
65 // Non-dithering lighting and blending functions preserve uSrc
66 // MSB. This saves a few operations and useless load/stores.
67 #define MSB_PRESERVED (!CF_DITHER)
69 #define gpuBlending gpuBlendingGeneric
70 #define gpuLightingRGB gpuLightingRGBGeneric
71 #define gpuLightingTXT gpuLightingTXTGeneric
72 #define gpuLightingTXTGouraud gpuLightingTXTGouraudGeneric
73 #define MSB_PRESERVED 0
// If defined, Gouraud colors are fixed-point 5.11, otherwise they are 8.16
// This is only for debugging/verification of low-precision colors in C.
// Low-precision Gouraud is intended for use by SIMD-optimized inner drivers
// which get/use Gouraud colors in SIMD registers.
//#define GPU_GOURAUD_LOW_PRECISION

// How many bits of fixed-point precision GouraudColor uses.
// NOTE(review): this extract carried both #defines unconditionally (the
// second redefined the first under an unterminated #ifdef); restored the
// #else/#endif so exactly one value is selected.
#ifdef GPU_GOURAUD_LOW_PRECISION
#define GPU_GOURAUD_FIXED_BITS 11
#else
#define GPU_GOURAUD_FIXED_BITS 16
#endif
90 // Used to pass Gouraud colors to gpuPixelSpanFn() (lines)
// NOTE(review): the 'struct GouraudColor {' header, the r/g/b member
// declarations, the #else, and the closing '};' fall on lines missing
// from this extract; only the per-pixel increment members are visible.
92 #ifdef GPU_GOURAUD_LOW_PRECISION
// 5.11 fixed-point mode: 16-bit signed increments suffice.
94 s16 r_incr, g_incr, b_incr;
// 8.16 fixed-point mode: increments need 32 bits.
97 s32 r_incr, g_incr, b_incr;
101 static inline u16 gpuGouraudColor15bpp(u32 r, u32 g, u32 b)
103 r >>= GPU_GOURAUD_FIXED_BITS;
104 g >>= GPU_GOURAUD_FIXED_BITS;
105 b >>= GPU_GOURAUD_FIXED_BITS;
107 #ifndef GPU_GOURAUD_LOW_PRECISION
108 // High-precision Gouraud colors are 8-bit + fractional
109 r >>= 3; g >>= 3; b >>= 3;
112 return r | (g << 5) | (b << 10);
115 ///////////////////////////////////////////////////////////////////////////////
116 // GPU Pixel span operations generator gpuPixelSpanFn<>
117 // Oct 2016: Created/adapted from old gpuPixelFn by senquack:
118 // Original gpuPixelFn was used to draw lines one pixel at a time. I wrote
119 // new line algorithms that draw lines using horizontal/vertical/diagonal
120 // spans of pixels, necessitating new pixel-drawing function that could
121 // not only render spans of pixels, but gouraud-shade them as well.
122 // This speeds up line rendering and would allow tile-rendering (untextured
123 // rectangles) to use the same set of functions. Since tiles are always
124 // monochrome, they simply wouldn't use the extra set of 32 gouraud-shaded
125 // gpuPixelSpanFn functions (TODO?).
// gpuPixelSpanFn<CF> — draws a span of pixels for line rendering. 'data'
// carries either a flat 15bpp color or (Gouraud variants) a pointer to a
// GouraudColor struct; 'incr' is the per-pixel destination step and 'len'
// the pixel count. Returns the advanced pDst so the caller can chain spans.
// NOTE(review): this extract has gaps — the template<> header, braces, the
// flat/Gouraud split and the pixel-loop scaffolding are on missing lines —
// so the comments below annotate only the visible statements.
127 static le16_t* gpuPixelSpanFn(le16_t* pDst, uintptr_t data, ptrdiff_t incr, size_t len)
129 // Blend func can save an operation if it knows uSrc MSB is
130 // unset. For untextured prims, this is always true.
131 const bool skip_uSrc_mask = true;
// Gouraud state: pointer to caller's struct, current fixed-point channel
// values, and per-pixel increments.
134 struct GouraudColor * gcPtr;
136 s32 r_incr, g_incr, b_incr;
138 // Caller counts in bytes, we count in pixels
// Unpack starting colors and increments from the struct passed via 'data'.
142 gcPtr = (GouraudColor*)data;
143 r = gcPtr->r; r_incr = gcPtr->r_incr;
144 g = gcPtr->g; g_incr = gcPtr->g_incr;
145 b = gcPtr->b; b_incr = gcPtr->b_incr;
// ---- Flat-shaded pixel write ----
// No blending: store directly, with the mask (MSB) bit optionally forced
// on (CF_MASKSET) or tested against the destination (CF_MASKCHECK).
153 if (!CF_MASKCHECK && !CF_BLEND) {
154 if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
155 else { *pDst = u16_to_le16(col); }
156 } else if (CF_MASKCHECK && !CF_BLEND) {
// Skip the write when the destination's mask bit is already set.
157 if (!(le16_raw(*pDst) & HTOLE16(0x8000))) {
158 if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
159 else { *pDst = u16_to_le16(col); }
// Blending path: fetch destination, honor its mask bit, blend, write back.
162 uint_fast16_t uDst = le16_to_u16(*pDst);
163 if (CF_MASKCHECK) { if (uDst & 0x8000) goto endpixel; }
165 uint_fast16_t uSrc = col;
168 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
170 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
171 else { *pDst = u16_to_le16(uSrc); }
// ---- Gouraud-shaded pixel write: same mask/blend structure, but the
// color is re-packed from the stepped r/g/b accumulators each pixel ----
177 if (!CF_MASKCHECK && !CF_BLEND) {
178 col = gpuGouraudColor15bpp(r, g, b);
179 if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
180 else { *pDst = u16_to_le16(col); }
181 } else if (CF_MASKCHECK && !CF_BLEND) {
182 col = gpuGouraudColor15bpp(r, g, b);
183 if (!(le16_raw(*pDst) & HTOLE16(0x8000))) {
184 if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
185 else { *pDst = u16_to_le16(col); }
188 uint_fast16_t uDst = le16_to_u16(*pDst);
189 if (CF_MASKCHECK) { if (uDst & 0x8000) goto endpixel; }
190 col = gpuGouraudColor15bpp(r, g, b);
192 uint_fast16_t uSrc = col;
194 // Blend func can save an operation if it knows uSrc MSB is
195 // unset. For untextured prims, this is always true.
196 const bool skip_uSrc_mask = true;
199 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
201 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
202 else { *pDst = u16_to_le16(uSrc); }
215 // Note from senquack: Normally, I'd prefer to write a 'do {} while (--len)'
216 // loop, or even a for() loop, however, on MIPS platforms anything but the
217 // 'do {} while (len-- > 1)' tends to generate very unoptimal asm, with
218 // many unneeded MULs/ADDs/branches at the ends of these functions.
219 // If you change the loop structure above, be sure to compare the quality
220 // of the generated code!!
230 static le16_t* PixelSpanNULL(le16_t* pDst, uintptr_t data, ptrdiff_t incr, size_t len)
232 #ifdef ENABLE_GPU_LOG_SUPPORT
233 fprintf(stdout,"PixelSpanNULL()\n");
238 ///////////////////////////////////////////////////////////////////////////////
239 // PixelSpan (lines) innerloops driver
240 typedef le16_t* (*PSD)(le16_t* dst, uintptr_t data, ptrdiff_t incr, size_t len);
// 64-entry function-pointer table indexed by the bit layout documented
// below; each entry is a gpuPixelSpanFn<> instantiation (with the index
// fields shifted into the CF_* bit positions) or PixelSpanNULL.
// NOTE(review): the initializer's opening '{' and closing '};' fall on
// lines missing from this extract.
242 const PSD gpuPixelSpanDrivers[64] =
244 // Array index | 'CF' template field | Field value
245 // ------------+---------------------+----------------
246 // Bit 0 | CF_BLEND | off (0), on (1)
247 // Bit 1 | CF_MASKCHECK | off (0), on (1)
248 // Bit 3:2 | CF_BLENDMODE | 0..3
249 // Bit 4 | CF_MASKSET | off (0), on (1)
250 // Bit 5 | CF_GOURAUD | off (0), on (1)
252 // NULL entries are ones for which blending is disabled and blend-mode
253 // field is non-zero, which is obviously invalid.
// Flat-shaded:
256 gpuPixelSpanFn<0x00<<1>, gpuPixelSpanFn<0x01<<1>, gpuPixelSpanFn<0x02<<1>, gpuPixelSpanFn<0x03<<1>,
257 PixelSpanNULL, gpuPixelSpanFn<0x05<<1>, PixelSpanNULL, gpuPixelSpanFn<0x07<<1>,
258 PixelSpanNULL, gpuPixelSpanFn<0x09<<1>, PixelSpanNULL, gpuPixelSpanFn<0x0B<<1>,
259 PixelSpanNULL, gpuPixelSpanFn<0x0D<<1>, PixelSpanNULL, gpuPixelSpanFn<0x0F<<1>,
261 // Flat-shaded + PixelMSB (CF_MASKSET)
262 gpuPixelSpanFn<(0x00<<1)|0x100>, gpuPixelSpanFn<(0x01<<1)|0x100>, gpuPixelSpanFn<(0x02<<1)|0x100>, gpuPixelSpanFn<(0x03<<1)|0x100>,
263 PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x100>,
264 PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x100>,
265 PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x100>,
267 // Gouraud-shaded (CF_GOURAUD)
268 gpuPixelSpanFn<(0x00<<1)|0x80>, gpuPixelSpanFn<(0x01<<1)|0x80>, gpuPixelSpanFn<(0x02<<1)|0x80>, gpuPixelSpanFn<(0x03<<1)|0x80>,
269 PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x80>,
270 PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x80>,
271 PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x80>,
273 // Gouraud-shaded (CF_GOURAUD) + PixelMSB (CF_MASKSET)
274 gpuPixelSpanFn<(0x00<<1)|0x180>, gpuPixelSpanFn<(0x01<<1)|0x180>, gpuPixelSpanFn<(0x02<<1)|0x180>, gpuPixelSpanFn<(0x03<<1)|0x180>,
275 PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x180>,
276 PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x180>,
277 PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x180>
280 ///////////////////////////////////////////////////////////////////////////////
281 // GPU Tiles innerloops generator
// gpuTileSpanFn<CF> — fills one horizontal span of an untextured tile
// (rectangle) with the flat 15bpp color 'data', 'count' pixels long.
// NOTE(review): gaps in this extract hide the template<> header, braces,
// the if/else scaffolding around the ldata assignments, the uSrc setup,
// the 'endtile:' label and the per-pixel loop; comments annotate only
// the visible statements.
284 static void gpuTileSpanFn(le16_t *pDst, u32 count, u16 data)
// Fast path: no mask-check and no blending — plain fill loop, with the
// mask (MSB) bit optionally forced on each written pixel (CF_MASKSET).
288 if (!CF_MASKCHECK && !CF_BLEND) {
290 ldata = u16_to_le16(data | 0x8000);
292 ldata = u16_to_le16(data);
293 do { *pDst++ = ldata; } while (--count);
294 } else if (CF_MASKCHECK && !CF_BLEND) {
296 ldata = u16_to_le16(data | 0x8000);
298 ldata = u16_to_le16(data);
// Only write where the destination's mask bit is clear.
300 if (!(le16_raw(*pDst) & HTOLE16(0x8000)))
306 // Blend func can save an operation if it knows uSrc MSB is
307 // unset. For untextured prims, this is always true.
308 const bool skip_uSrc_mask = true;
310 uint_fast16_t uSrc, uDst;
// Blending path: read the destination only when mask-check or blending
// needs it, and skip pixels whose mask bit is set.
313 if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
314 if (CF_MASKCHECK) if (uDst&0x8000) { goto endtile; }
319 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
321 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
322 else { *pDst = u16_to_le16(uSrc); }
324 //senquack - Did not apply "Silent Hill" mask-bit fix to here.
325 // It is hard to tell from scarce documentation available and
326 // lack of comments in code, but I believe the tile-span
327 // functions here should not bother to preserve any source MSB,
328 // as they are not drawing from a texture.
336 static void TileNULL(le16_t *pDst, u32 count, u16 data)
338 #ifdef ENABLE_GPU_LOG_SUPPORT
339 fprintf(stdout,"TileNULL()\n");
343 ///////////////////////////////////////////////////////////////////////////////
344 // Tiles innerloops driver
345 typedef void (*PT)(le16_t *pDst, u32 count, u16 data);
347 // Template instantiation helper macros
348 #define TI(cf) gpuTileSpanFn<(cf)>
// NOTE(review): 'TN' (the TileNULL fallback) is referenced below, but its
// '#define TN TileNULL' line is not visible in this extract; likewise the
// closing '};' of the table initializer.
// Entries step CF by 2 (bit 0, CF_LIGHT, is unused for tiles); TN marks
// combinations with blend-mode bits set while blending is off.
350 #define TIBLOCK(ub) \
351 TI((ub)|0x00), TI((ub)|0x02), TI((ub)|0x04), TI((ub)|0x06), \
352 TN, TI((ub)|0x0a), TN, TI((ub)|0x0e), \
353 TN, TI((ub)|0x12), TN, TI((ub)|0x16), \
354 TN, TI((ub)|0x1a), TN, TI((ub)|0x1e)
// 32 entries: lower 16 without CF_MASKSET (bit 8 clear), upper 16 with it.
356 const PT gpuTileSpanDrivers[32] = {
357 TIBLOCK(0<<8), TIBLOCK(1<<8)
365 ///////////////////////////////////////////////////////////////////////////////
366 // GPU Sprites innerloops generator
// gpuSpriteSpanFn<CF> — draws one horizontal span of a textured sprite.
// pTxt points into the texture row, u0 is the starting texel coordinate
// (stepped in bytes for 16bpp mode), count is the pixel count.
// NOTE(review): gaps in this extract hide the template<> header, braces,
// the lighting-color setup (r5/g5/b5), the 16bpp u0_mask adjustment, the
// 'should_blend' declaration and the loop/'endsprite:' scaffolding; the
// comments below annotate only the visible statements.
369 static void gpuSpriteSpanFn(le16_t *pDst, u32 count, u8* pTxt, u32 u0)
371 // Blend func can save an operation if it knows uSrc MSB is unset.
372 // Untextured prims can always skip (source color always comes with MSB=0).
373 // For textured prims, the generic lighting funcs always return it unset. (bonus!)
374 const bool skip_uSrc_mask = MSB_PRESERVED ? (!CF_TEXTMODE) : (!CF_TEXTMODE) || CF_LIGHT;
376 uint_fast16_t uSrc, uDst, srcMSB;
// Texture-window wrap mask applied to the u coordinate.
378 u32 u0_mask = gpu_unai.TextureWindow[2];
387 if (CF_TEXTMODE==3) {
388 // Texture is accessed byte-wise, so adjust mask if 16bpp
// CLUT pointer is only needed for the paletted 4bpp/8bpp modes.
392 const le16_t *CBA_; if (CF_TEXTMODE!=3) CBA_ = gpu_unai.CBA;
// Per-pixel: read the destination only when mask-check/blend needs it,
// and skip pixels whose destination mask bit is set.
396 if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
397 if (CF_MASKCHECK) if (uDst&0x8000) { goto endsprite; }
// Fetch the source texel for the current texture mode.
399 if (CF_TEXTMODE==1) { // 4bpp (CLUT)
400 u8 rgb = pTxt[(u0 & u0_mask)>>1];
401 uSrc = le16_to_u16(CBA_[(rgb>>((u0&1)<<2))&0xf]);
403 if (CF_TEXTMODE==2) { // 8bpp (CLUT)
404 uSrc = le16_to_u16(CBA_[pTxt[u0 & u0_mask]]);
406 if (CF_TEXTMODE==3) { // 16bpp
407 uSrc = le16_to_u16(*(le16_t*)(&pTxt[u0 & u0_mask]));
// An all-zero texel is fully transparent: draw nothing for this pixel.
410 if (!uSrc) goto endsprite;
412 //senquack - save source MSB, as blending or lighting macros will not
413 // (Silent Hill gray rectangles mask bit bug)
414 if (CF_BLEND || CF_LIGHT) srcMSB = uSrc & 0x8000;
// Lighting: modulate the texel by the 5-bit color components
// (r5/g5/b5 are set up on lines not shown here).
417 uSrc = gpuLightingTXT(uSrc, r5, g5, b5);
// Blending is gated on the source texel's MSB (semi-transparency flag).
419 should_blend = MSB_PRESERVED ? uSrc & 0x8000 : srcMSB;
421 if (CF_BLEND && should_blend)
422 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
// Write back, re-attaching the saved source MSB when blending/lighting
// may have dropped it.
424 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
425 else if (!MSB_PRESERVED && (CF_BLEND || CF_LIGHT)) { *pDst = u16_to_le16(uSrc | srcMSB); }
426 else { *pDst = u16_to_le16(uSrc); }
// Advance u: 2 bytes per texel in 16bpp mode, 1 otherwise.
429 u0 += (CF_TEXTMODE==3) ? 2 : 1;
435 static void SpriteNULL(le16_t *pDst, u32 count, u8* pTxt, u32 u0)
437 #ifdef ENABLE_GPU_LOG_SUPPORT
438 fprintf(stdout,"SpriteNULL()\n");
442 ///////////////////////////////////////////////////////////////////////////////
444 ///////////////////////////////////////////////////////////////////////////////
445 // Sprite innerloops driver
446 typedef void (*PS)(le16_t *pDst, u32 count, u8* pTxt, u32 u0);
448 // Template instantiation helper macros
// NOTE(review): TI and TIBLOCK are redefined here without visible #undef
// lines (presumably on lines missing from this extract), as is the table's
// closing '};'. First two rows are all-TN because sprites are always
// textured (CF_TEXTMODE bits 6:5 must be nonzero).
449 #define TI(cf) gpuSpriteSpanFn<(cf)>
450 #define TN SpriteNULL
451 #define TIBLOCK(ub) \
452 TN, TN, TN, TN, TN, TN, TN, TN, \
453 TN, TN, TN, TN, TN, TN, TN, TN, \
454 TN, TN, TN, TN, TN, TN, TN, TN, \
455 TN, TN, TN, TN, TN, TN, TN, TN, \
456 TI((ub)|0x20), TI((ub)|0x21), TI((ub)|0x22), TI((ub)|0x23), TI((ub)|0x24), TI((ub)|0x25), TI((ub)|0x26), TI((ub)|0x27), \
457 TN, TN, TI((ub)|0x2a), TI((ub)|0x2b), TN, TN, TI((ub)|0x2e), TI((ub)|0x2f), \
458 TN, TN, TI((ub)|0x32), TI((ub)|0x33), TN, TN, TI((ub)|0x36), TI((ub)|0x37), \
459 TN, TN, TI((ub)|0x3a), TI((ub)|0x3b), TN, TN, TI((ub)|0x3e), TI((ub)|0x3f), \
460 TI((ub)|0x40), TI((ub)|0x41), TI((ub)|0x42), TI((ub)|0x43), TI((ub)|0x44), TI((ub)|0x45), TI((ub)|0x46), TI((ub)|0x47), \
461 TN, TN, TI((ub)|0x4a), TI((ub)|0x4b), TN, TN, TI((ub)|0x4e), TI((ub)|0x4f), \
462 TN, TN, TI((ub)|0x52), TI((ub)|0x53), TN, TN, TI((ub)|0x56), TI((ub)|0x57), \
463 TN, TN, TI((ub)|0x5a), TI((ub)|0x5b), TN, TN, TI((ub)|0x5e), TI((ub)|0x5f), \
464 TI((ub)|0x60), TI((ub)|0x61), TI((ub)|0x62), TI((ub)|0x63), TI((ub)|0x64), TI((ub)|0x65), TI((ub)|0x66), TI((ub)|0x67), \
465 TN, TN, TI((ub)|0x6a), TI((ub)|0x6b), TN, TN, TI((ub)|0x6e), TI((ub)|0x6f), \
466 TN, TN, TI((ub)|0x72), TI((ub)|0x73), TN, TN, TI((ub)|0x76), TI((ub)|0x77), \
467 TN, TN, TI((ub)|0x7a), TI((ub)|0x7b), TN, TN, TI((ub)|0x7e), TI((ub)|0x7f)
// 256 entries: two 128-entry halves without/with CF_MASKSET (bit 8).
469 const PS gpuSpriteSpanDrivers[256] = {
470 TIBLOCK(0<<8), TIBLOCK(1<<8)
477 ///////////////////////////////////////////////////////////////////////////////
478 // GPU Polygon innerloops generator
480 //senquack - Newer version with following changes:
481 // * Adapted to work with new poly routines in gpu_raster_polygon.h
482 // adapted from DrHell GPU. They are less glitchy and use 22.10
483 // fixed-point instead of original UNAI's 16.16.
484 // * Texture coordinates are no longer packed together into one
485 // unsigned int. This seems to lose too much accuracy (they each
486 // end up being only 8.7 fixed-point that way) and pixel-dropouts
487 // were noticeable both with original code and current DrHell
488 // adaptations. An example would be the sky in NFS3. Now, they are
489 // stored in separate ints, using separate masks.
490 // * Function is no longer INLINE, as it was always called
491 // through a function pointer.
492 // * Function now ensures the mask bit of source texture is preserved
493 // across calls to blending functions (Silent Hill rectangles fix)
494 // * November 2016: Large refactoring of blending/lighting when
495 // JohnnyF added dithering. See gpu_inner_quantization.h and
496 // relevant blend/light headers.
497 // (see README_senquack.txt)
// gpuPolySpanFn<CF> — draws one horizontal polygon span of 'count' pixels
// into pDst. 'gpu_unai' carries the per-primitive state read here (flat
// color, Gouraud color/step, texture base TBA, CLUT base CBA, u/v
// steppers and masks, blit mask). Three major paths: untextured flat,
// untextured Gouraud, and textured (CF_TEXTMODE != 0).
// NOTE(review): gaps in this extract hide the template<> header, braces,
// the compile-time path selection and loop scaffolding, several
// declarations (uSrc24, should_blend, r8/g8/b8, r5/g5/b5, l_gCol/l_gInc
// in the textured path, tu in the 4bpp path) and the endpoly* labels;
// comments below annotate only the visible statements.
499 static void gpuPolySpanFn(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count)
501 // Blend func can save an operation if it knows uSrc MSB is unset.
502 // Untextured prims can always skip this (src color MSB is always 0).
503 // For textured prims, the generic lighting funcs always return it unset. (bonus!)
504 const bool skip_uSrc_mask = MSB_PRESERVED ? (!CF_TEXTMODE) : (!CF_TEXTMODE) || CF_LIGHT;
// Low-res blit mask used by the CF_BLITMASK pixel-skipping speed hack.
507 u32 bMsk; if (CF_BLITMASK) bMsk = gpu_unai.blit_mask;
513 // UNTEXTURED, NO GOURAUD
514 const u16 pix15 = gpu_unai.PixelData;
516 uint_fast16_t uSrc, uDst;
518 // NOTE: Don't enable CF_BLITMASK pixel skipping (speed hack)
519 // on untextured polys. It seems to do more harm than good: see
520 // gravestone text at end of Medieval intro sequence. -senquack
521 //if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) { goto endpolynotextnogou; } }
// Read destination only when mask-check/blend needs it; honor mask bit.
523 if (CF_BLEND || CF_MASKCHECK) uDst = le16_to_u16(*pDst);
524 if (CF_MASKCHECK) { if (uDst&0x8000) { goto endpolynotextnogou; } }
529 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
531 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
532 else { *pDst = u16_to_le16(uSrc); }
540 // UNTEXTURED, GOURAUD
// Local copies of the packed Gouraud color and its per-pixel increment.
541 u32 l_gCol = gpu_unai.gCol;
542 u32 l_gInc = gpu_unai.gInc;
545 uint_fast16_t uDst, uSrc;
547 // See note in above loop regarding CF_BLITMASK
548 //if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) goto endpolynotextgou; }
550 if (CF_BLEND || CF_MASKCHECK) uDst = le16_to_u16(*pDst);
551 if (CF_MASKCHECK) { if (uDst&0x8000) goto endpolynotextgou; }
// Dithered path: work in 24bpp, then quantize down to 15bpp.
556 u32 uSrc24 = gpuLightingRGB24(l_gCol);
558 uSrc24 = gpuBlending24<CF_BLENDMODE>(uSrc24, uDst);
559 uSrc = gpuColorQuantization24<CF_DITHER>(uSrc24, pDst);
561 // GOURAUD, NO DITHER
563 uSrc = gpuLightingRGB(l_gCol);
566 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
569 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
570 else { *pDst = u16_to_le16(uSrc); }
// ---- TEXTURED path ----
583 uint_fast16_t uDst, uSrc, srcMSB;
585 //senquack - note: original UNAI code had gpu_unai.{u4/v4} packed into
586 // one 32-bit unsigned int, but this proved to lose too much accuracy
587 // (pixel dropouts noticeable in NFS3 sky), so now are separate vars.
588 u32 l_u_msk = gpu_unai.u_msk; u32 l_v_msk = gpu_unai.v_msk;
589 u32 l_u = gpu_unai.u & l_u_msk; u32 l_v = gpu_unai.v & l_v_msk;
590 s32 l_u_inc = gpu_unai.u_inc; s32 l_v_inc = gpu_unai.v_inc;
// Texture base; CLUT base only needed for paletted 4bpp/8bpp modes.
592 const le16_t* TBA_ = gpu_unai.TBA;
593 const le16_t* CBA_; if (CF_TEXTMODE!=3) CBA_ = gpu_unai.CBA;
602 l_gInc = gpu_unai.gInc;
603 l_gCol = gpu_unai.gCol;
// Per-pixel: optional blit-mask skip, then destination read/mask check.
619 if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) goto endpolytext; }
620 if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
621 if (CF_MASKCHECK) if (uDst&0x8000) { goto endpolytext; }
623 //senquack - adapted to work with new 22.10 fixed point routines:
624 // (UNAI originally used 16.16)
// Texel fetch; a zero texel is fully transparent and skips the pixel.
625 if (CF_TEXTMODE==1) { // 4bpp (CLUT)
627 u32 tv=(l_v<<1)&(0xff<<11);
628 u8 rgb=((u8*)TBA_)[tv+(tu>>1)];
629 uSrc=le16_to_u16(CBA_[(rgb>>((tu&1)<<2))&0xf]);
630 if (!uSrc) goto endpolytext;
632 if (CF_TEXTMODE==2) { // 8bpp (CLUT)
633 uSrc = le16_to_u16(CBA_[(((u8*)TBA_)[(l_u>>10)+((l_v<<1)&(0xff<<11))])]);
634 if (!uSrc) goto endpolytext;
636 if (CF_TEXTMODE==3) { // 16bpp
637 uSrc = le16_to_u16(TBA_[(l_u>>10)+((l_v)&(0xff<<10))]);
638 if (!uSrc) goto endpolytext;
641 // Save source MSB, as blending or lighting will not (Silent Hill)
642 if (CF_BLEND || CF_LIGHT) srcMSB = uSrc & 0x8000;
644 // When textured, only dither when LIGHT (texture blend) is enabled
645 // LIGHT && BLEND => dither
646 // LIGHT && !BLEND => dither
647 //!LIGHT && BLEND => no dither
648 //!LIGHT && !BLEND => no dither
// Dithered lighting path: 24bpp intermediate, quantized back to 15bpp.
650 if (CF_DITHER && CF_LIGHT) {
653 uSrc24 = gpuLightingTXT24Gouraud(uSrc, l_gCol);
655 uSrc24 = gpuLightingTXT24(uSrc, r8, g8, b8);
657 if (CF_BLEND && srcMSB)
658 uSrc24 = gpuBlending24<CF_BLENDMODE>(uSrc24, uDst);
660 uSrc = gpuColorQuantization24<CF_DITHER>(uSrc24, pDst);
// Non-dithered lighting path, 15bpp throughout.
665 uSrc = gpuLightingTXTGouraud(uSrc, l_gCol);
667 uSrc = gpuLightingTXT(uSrc, r5, g5, b5);
// Blending gated on the source texel's MSB (semi-transparency flag).
670 should_blend = MSB_PRESERVED ? uSrc & 0x8000 : srcMSB;
671 if (CF_BLEND && should_blend)
672 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
// Write back, re-attaching the saved source MSB when blending/lighting
// may have dropped it.
675 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
676 else if (!MSB_PRESERVED && (CF_BLEND || CF_LIGHT)) { *pDst = u16_to_le16(uSrc | srcMSB); }
677 else { *pDst = u16_to_le16(uSrc); }
// Step texture coordinates (with wrap masks) and Gouraud color.
680 l_u = (l_u + l_u_inc) & l_u_msk;
681 l_v = (l_v + l_v_inc) & l_v_msk;
682 if (CF_LIGHT && CF_GOURAUD) l_gCol += l_gInc;
688 static void PolyNULL(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count)
690 #ifdef ENABLE_GPU_LOG_SUPPORT
691 fprintf(stdout,"PolyNULL()\n");
695 ///////////////////////////////////////////////////////////////////////////////
696 // Polygon innerloops driver
697 typedef void (*PP)(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count);
699 // Template instantiation helper macros
// NOTE(review): TI is redefined here and TN is referenced below without a
// visible '#define TN PolyNULL' (presumably on lines missing from this
// extract), and the table's closing '};' is likewise not visible.
700 #define TI(cf) gpuPolySpanFn<(cf)>
// 256-entry blocks over CF bits 0..7; TN marks invalid combinations
// (blend-mode bits without blending, or Gouraud rows restricted to the
// lit/blended variants).
702 #define TIBLOCK(ub) \
703 TI((ub)|0x00), TI((ub)|0x01), TI((ub)|0x02), TI((ub)|0x03), TI((ub)|0x04), TI((ub)|0x05), TI((ub)|0x06), TI((ub)|0x07), \
704 TN, TN, TI((ub)|0x0a), TI((ub)|0x0b), TN, TN, TI((ub)|0x0e), TI((ub)|0x0f), \
705 TN, TN, TI((ub)|0x12), TI((ub)|0x13), TN, TN, TI((ub)|0x16), TI((ub)|0x17), \
706 TN, TN, TI((ub)|0x1a), TI((ub)|0x1b), TN, TN, TI((ub)|0x1e), TI((ub)|0x1f), \
707 TI((ub)|0x20), TI((ub)|0x21), TI((ub)|0x22), TI((ub)|0x23), TI((ub)|0x24), TI((ub)|0x25), TI((ub)|0x26), TI((ub)|0x27), \
708 TN, TN, TI((ub)|0x2a), TI((ub)|0x2b), TN, TN, TI((ub)|0x2e), TI((ub)|0x2f), \
709 TN, TN, TI((ub)|0x32), TI((ub)|0x33), TN, TN, TI((ub)|0x36), TI((ub)|0x37), \
710 TN, TN, TI((ub)|0x3a), TI((ub)|0x3b), TN, TN, TI((ub)|0x3e), TI((ub)|0x3f), \
711 TI((ub)|0x40), TI((ub)|0x41), TI((ub)|0x42), TI((ub)|0x43), TI((ub)|0x44), TI((ub)|0x45), TI((ub)|0x46), TI((ub)|0x47), \
712 TN, TN, TI((ub)|0x4a), TI((ub)|0x4b), TN, TN, TI((ub)|0x4e), TI((ub)|0x4f), \
713 TN, TN, TI((ub)|0x52), TI((ub)|0x53), TN, TN, TI((ub)|0x56), TI((ub)|0x57), \
714 TN, TN, TI((ub)|0x5a), TI((ub)|0x5b), TN, TN, TI((ub)|0x5e), TI((ub)|0x5f), \
715 TI((ub)|0x60), TI((ub)|0x61), TI((ub)|0x62), TI((ub)|0x63), TI((ub)|0x64), TI((ub)|0x65), TI((ub)|0x66), TI((ub)|0x67), \
716 TN, TN, TI((ub)|0x6a), TI((ub)|0x6b), TN, TN, TI((ub)|0x6e), TI((ub)|0x6f), \
717 TN, TN, TI((ub)|0x72), TI((ub)|0x73), TN, TN, TI((ub)|0x76), TI((ub)|0x77), \
718 TN, TN, TI((ub)|0x7a), TI((ub)|0x7b), TN, TN, TI((ub)|0x7e), TI((ub)|0x7f), \
719 TN, TI((ub)|0x81), TN, TI((ub)|0x83), TN, TI((ub)|0x85), TN, TI((ub)|0x87), \
720 TN, TN, TN, TI((ub)|0x8b), TN, TN, TN, TI((ub)|0x8f), \
721 TN, TN, TN, TI((ub)|0x93), TN, TN, TN, TI((ub)|0x97), \
722 TN, TN, TN, TI((ub)|0x9b), TN, TN, TN, TI((ub)|0x9f), \
723 TN, TI((ub)|0xa1), TN, TI((ub)|0xa3), TN, TI((ub)|0xa5), TN, TI((ub)|0xa7), \
724 TN, TN, TN, TI((ub)|0xab), TN, TN, TN, TI((ub)|0xaf), \
725 TN, TN, TN, TI((ub)|0xb3), TN, TN, TN, TI((ub)|0xb7), \
726 TN, TN, TN, TI((ub)|0xbb), TN, TN, TN, TI((ub)|0xbf), \
727 TN, TI((ub)|0xc1), TN, TI((ub)|0xc3), TN, TI((ub)|0xc5), TN, TI((ub)|0xc7), \
728 TN, TN, TN, TI((ub)|0xcb), TN, TN, TN, TI((ub)|0xcf), \
729 TN, TN, TN, TI((ub)|0xd3), TN, TN, TN, TI((ub)|0xd7), \
730 TN, TN, TN, TI((ub)|0xdb), TN, TN, TN, TI((ub)|0xdf), \
731 TN, TI((ub)|0xe1), TN, TI((ub)|0xe3), TN, TI((ub)|0xe5), TN, TI((ub)|0xe7), \
732 TN, TN, TN, TI((ub)|0xeb), TN, TN, TN, TI((ub)|0xef), \
733 TN, TN, TN, TI((ub)|0xf3), TN, TN, TN, TI((ub)|0xf7), \
734 TN, TN, TN, TI((ub)|0xfb), TN, TN, TN, TI((ub)|0xff)
// 2048 entries: 8 blocks of 256, indexed by CF bits 8..10 (CF_MASKSET,
// CF_DITHER, CF_BLITMASK) in the upper bits.
736 const PP gpuPolySpanDrivers[2048] = {
737 TIBLOCK(0<<8), TIBLOCK(1<<8), TIBLOCK(2<<8), TIBLOCK(3<<8),
738 TIBLOCK(4<<8), TIBLOCK(5<<8), TIBLOCK(6<<8), TIBLOCK(7<<8)
745 #endif /* __GPU_UNAI_GPU_INNER_H__ */