1 /***************************************************************************
2 * Copyright (C) 2010 PCSX4ALL Team *
3 * Copyright (C) 2010 Unai *
4 * Copyright (C) 2016 Senquack (dansilsby <AT> gmail <DOT> com) *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA. *
20 ***************************************************************************/
22 #ifndef __GPU_UNAI_GPU_INNER_H__
23 #define __GPU_UNAI_GPU_INNER_H__
25 ///////////////////////////////////////////////////////////////////////////////
26 // Inner loop driver instantiation file
28 ///////////////////////////////////////////////////////////////////////////////
29 // Option Masks (CF template parameter)
// Each inner-loop driver below is a template instantiated over a 'CF'
// bitfield; these accessors extract the individual option fields from CF.
// (CF_BLENDMODE and CF_TEXTMODE are 2-bit fields; all others are 1 bit.)
30 #define CF_LIGHT ((CF>> 0)&1) // Lighting
31 #define CF_BLEND ((CF>> 1)&1) // Blending
32 #define CF_MASKCHECK ((CF>> 2)&1) // Mask bit check
33 #define CF_BLENDMODE ((CF>> 3)&3) // Blend mode 0..3
34 #define CF_TEXTMODE ((CF>> 5)&3) // Texture mode 1..3 (0: texturing disabled)
35 #define CF_GOURAUD ((CF>> 7)&1) // Gouraud shading
36 #define CF_MASKSET ((CF>> 8)&1) // Mask bit set
37 #define CF_DITHER ((CF>> 9)&1) // Dithering
38 #define CF_BLITMASK ((CF>>10)&1) // blit_mask check (skip rendering pixels
39 // that wouldn't end up displayed on
40 // low-res screen using simple downscaler)
43 //#ifndef ENABLE_GPU_ARMV7
45 //#include "gpu_inner_blend_arm5.h"
48 //#include "gpu_inner_blend_arm7.h"
51 //#include "gpu_inner_blend.h"
54 #include "gpu_inner_blend.h"
55 #include "gpu_inner_quantization.h"
56 #include "gpu_inner_light.h"
59 #include "gpu_inner_blend_arm.h"
60 #include "gpu_inner_light_arm.h"
61 #define gpuBlending gpuBlendingARM
62 #define gpuLightingRGB gpuLightingRGBARM
63 #define gpuLightingTXT gpuLightingTXTARM
64 #define gpuLightingTXTGouraud gpuLightingTXTGouraudARM
66 #define gpuBlending gpuBlendingGeneric
67 #define gpuLightingRGB gpuLightingRGBGeneric
68 #define gpuLightingTXT gpuLightingTXTGeneric
69 #define gpuLightingTXTGouraud gpuLightingTXTGouraudGeneric
72 // Non-dithering lighting and blending functions preserve uSrc
73 // MSB. This saves a few operations and useless load/stores.
// (Used by the sprite/poly drivers below to decide whether the source
//  mask bit must be explicitly saved in srcMSB and OR'd back on write.)
74 #define MSB_PRESERVED (!CF_DITHER)
76 // If defined, Gouraud colors are fixed-point 5.11, otherwise they are 8.16
77 // This is only for debugging/verification of low-precision colors in C.
78 // Low-precision Gouraud is intended for use by SIMD-optimized inner drivers
79 // which get/use Gouraud colors in SIMD registers.
80 //#define GPU_GOURAUD_LOW_PRECISION
// How many bits of fixed-point precision GouraudColor uses
// NOTE(review): the extraction had both definitions unconditionally active
// (a redefinition error); the lost #else/#endif have been restored.
#ifdef GPU_GOURAUD_LOW_PRECISION
#define GPU_GOURAUD_FIXED_BITS 11
#else
#define GPU_GOURAUD_FIXED_BITS 16
#endif
89 // Used to pass Gouraud colors to gpuPixelSpanFn() (lines)
91 #ifdef GPU_GOURAUD_LOW_PRECISION
93 s16 r_incr, g_incr, b_incr;
96 s32 r_incr, g_incr, b_incr;
100 static inline u16 gpuGouraudColor15bpp(u32 r, u32 g, u32 b)
102 r >>= GPU_GOURAUD_FIXED_BITS;
103 g >>= GPU_GOURAUD_FIXED_BITS;
104 b >>= GPU_GOURAUD_FIXED_BITS;
106 #ifndef GPU_GOURAUD_LOW_PRECISION
107 // High-precision Gouraud colors are 8-bit + fractional
108 r >>= 3; g >>= 3; b >>= 3;
111 return r | (g << 5) | (b << 10);
114 ///////////////////////////////////////////////////////////////////////////////
115 // GPU Pixel span operations generator gpuPixelSpanFn<>
116 // Oct 2016: Created/adapted from old gpuPixelFn by senquack:
117 // Original gpuPixelFn was used to draw lines one pixel at a time. I wrote
118 // new line algorithms that draw lines using horizontal/vertical/diagonal
119 // spans of pixels, necessitating new pixel-drawing function that could
120 // not only render spans of pixels, but gouraud-shade them as well.
121 // This speeds up line rendering and would allow tile-rendering (untextured
122 // rectangles) to use the same set of functions. Since tiles are always
123 // monochrome, they simply wouldn't use the extra set of 32 gouraud-shaded
124 // gpuPixelSpanFn functions (TODO?).
// gpuPixelSpanFn<CF>: inner driver that renders a span of pixels for line
// drawing, flat- or Gouraud-shaded per the CF template bits (CF_GOURAUD,
// CF_BLEND, CF_MASKCHECK, CF_MASKSET, CF_BLENDMODE).
// 'data' carries either a flat 15bpp color or a GouraudColor* (see struct
// above); 'incr' advances pDst between pixels; 'len' is the pixel count.
// Returns the final pDst (per the PSD typedef below) so callers can chain.
// NOTE(review): this extraction dropped lines of the original function
// (the template<int CF> header, braces, the flat/gouraud branch structure
// and the pixel loop); the remaining code is annotated as-is -- do not
// assume it is complete.
126 static le16_t* gpuPixelSpanFn(le16_t* pDst, uintptr_t data, ptrdiff_t incr, size_t len)
128 // Blend func can save an operation if it knows uSrc MSB is
129 // unset. For untextured prims, this is always true.
130 const bool skip_uSrc_mask = true;
// Gouraud state: pointer to caller's accumulators plus local copies of the
// per-pixel increments.
133 struct GouraudColor * gcPtr;
135 s32 r_incr, g_incr, b_incr;
137 // Caller counts in bytes, we count in pixels
141 gcPtr = (GouraudColor*)data;
142 r = gcPtr->r; r_incr = gcPtr->r_incr;
143 g = gcPtr->g; g_incr = gcPtr->g_incr;
144 b = gcPtr->b; b_incr = gcPtr->b_incr;
// ---- Flat-shaded pixel: plain write / mask-checked write / blended write ----
152 if (!CF_MASKCHECK && !CF_BLEND) {
153 if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
154 else { *pDst = u16_to_le16(col); }
155 } else if (CF_MASKCHECK && !CF_BLEND) {
// Skip the write when the destination's mask (MSB) bit is set.
156 if (!(le16_raw(*pDst) & HTOLE16(0x8000))) {
157 if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
158 else { *pDst = u16_to_le16(col); }
// Blending path: read destination, honor its mask bit, then blend.
161 uint_fast16_t uDst = le16_to_u16(*pDst);
162 if (CF_MASKCHECK) { if (uDst & 0x8000) goto endpixel; }
164 uint_fast16_t uSrc = col;
167 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
169 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
170 else { *pDst = u16_to_le16(uSrc); }
// ---- Gouraud-shaded pixel: recompute 'col' from the r/g/b accumulators ----
176 if (!CF_MASKCHECK && !CF_BLEND) {
177 col = gpuGouraudColor15bpp(r, g, b);
178 if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
179 else { *pDst = u16_to_le16(col); }
180 } else if (CF_MASKCHECK && !CF_BLEND) {
181 col = gpuGouraudColor15bpp(r, g, b);
182 if (!(le16_raw(*pDst) & HTOLE16(0x8000))) {
183 if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
184 else { *pDst = u16_to_le16(col); }
187 uint_fast16_t uDst = le16_to_u16(*pDst);
188 if (CF_MASKCHECK) { if (uDst & 0x8000) goto endpixel; }
189 col = gpuGouraudColor15bpp(r, g, b);
191 uint_fast16_t uSrc = col;
193 // Blend func can save an operation if it knows uSrc MSB is
194 // unset. For untextured prims, this is always true.
195 const bool skip_uSrc_mask = true;
198 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
200 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
201 else { *pDst = u16_to_le16(uSrc); }
214 // Note from senquack: Normally, I'd prefer to write a 'do {} while (--len)'
215 // loop, or even a for() loop, however, on MIPS platforms anything but the
216 // 'do {} while (len-- > 1)' tends to generate very unoptimal asm, with
217 // many unneeded MULs/ADDs/branches at the ends of these functions.
218 // If you change the loop structure above, be sure to compare the quality
219 // of the generated code!!
229 static le16_t* PixelSpanNULL(le16_t* pDst, uintptr_t data, ptrdiff_t incr, size_t len)
231 #ifdef ENABLE_GPU_LOG_SUPPORT
232 fprintf(stdout,"PixelSpanNULL()\n");
237 ///////////////////////////////////////////////////////////////////////////////
238 // PixelSpan (lines) innerloops driver
239 typedef le16_t* (*PSD)(le16_t* dst, uintptr_t data, ptrdiff_t incr, size_t len);
241 const PSD gpuPixelSpanDrivers[64] =
243 // Array index | 'CF' template field | Field value
244 // ------------+---------------------+----------------
245 // Bit 0 | CF_BLEND | off (0), on (1)
246 // Bit 1 | CF_MASKCHECK | off (0), on (1)
247 // Bit 3:2 | CF_BLENDMODE | 0..3
248 // Bit 4 | CF_MASKSET | off (0), on (1)
249 // Bit 5 | CF_GOURAUD | off (0), on (1)
251 // NULL entries are ones for which blending is disabled and blend-mode
252 // field is non-zero, which is obviously invalid.
255 gpuPixelSpanFn<0x00<<1>, gpuPixelSpanFn<0x01<<1>, gpuPixelSpanFn<0x02<<1>, gpuPixelSpanFn<0x03<<1>,
256 PixelSpanNULL, gpuPixelSpanFn<0x05<<1>, PixelSpanNULL, gpuPixelSpanFn<0x07<<1>,
257 PixelSpanNULL, gpuPixelSpanFn<0x09<<1>, PixelSpanNULL, gpuPixelSpanFn<0x0B<<1>,
258 PixelSpanNULL, gpuPixelSpanFn<0x0D<<1>, PixelSpanNULL, gpuPixelSpanFn<0x0F<<1>,
260 // Flat-shaded + PixelMSB (CF_MASKSET)
261 gpuPixelSpanFn<(0x00<<1)|0x100>, gpuPixelSpanFn<(0x01<<1)|0x100>, gpuPixelSpanFn<(0x02<<1)|0x100>, gpuPixelSpanFn<(0x03<<1)|0x100>,
262 PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x100>,
263 PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x100>,
264 PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x100>,
266 // Gouraud-shaded (CF_GOURAUD)
267 gpuPixelSpanFn<(0x00<<1)|0x80>, gpuPixelSpanFn<(0x01<<1)|0x80>, gpuPixelSpanFn<(0x02<<1)|0x80>, gpuPixelSpanFn<(0x03<<1)|0x80>,
268 PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x80>,
269 PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x80>,
270 PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x80>,
272 // Gouraud-shaded (CF_GOURAUD) + PixelMSB (CF_MASKSET)
273 gpuPixelSpanFn<(0x00<<1)|0x180>, gpuPixelSpanFn<(0x01<<1)|0x180>, gpuPixelSpanFn<(0x02<<1)|0x180>, gpuPixelSpanFn<(0x03<<1)|0x180>,
274 PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x180>,
275 PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x180>,
276 PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x180>
279 ///////////////////////////////////////////////////////////////////////////////
280 // GPU Tiles innerloops generator
// gpuTileSpanFn<CF>: fills a horizontal span of 'count' pixels with the
// flat 15bpp color 'data', honoring CF_BLEND / CF_MASKCHECK / CF_MASKSET /
// CF_BLENDMODE. Tiles are untextured, so no lighting is involved.
// NOTE(review): extraction dropped lines of the original (template header,
// braces, parts of the loops and the 'endtile' label region); the remaining
// code is annotated as-is -- do not assume it is complete.
283 static void gpuTileSpanFn(le16_t *pDst, u32 count, u16 data)
// Fast path -- no mask check and no blending: precompose the little-endian
// pixel once and stream it out.
287 if (!CF_MASKCHECK && !CF_BLEND) {
289 ldata = u16_to_le16(data | 0x8000);
291 ldata = u16_to_le16(data);
292 do { *pDst++ = ldata; } while (--count);
293 } else if (CF_MASKCHECK && !CF_BLEND) {
295 ldata = u16_to_le16(data | 0x8000);
297 ldata = u16_to_le16(data);
// Only write where the destination's mask (MSB) bit is clear.
299 if (!(le16_raw(*pDst) & HTOLE16(0x8000)))
// Blending path: read destination, honor its mask bit, blend, write back.
305 // Blend func can save an operation if it knows uSrc MSB is
306 // unset. For untextured prims, this is always true.
307 const bool skip_uSrc_mask = true;
309 uint_fast16_t uSrc, uDst;
312 if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
313 if (CF_MASKCHECK) if (uDst&0x8000) { goto endtile; }
318 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
320 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
321 else { *pDst = u16_to_le16(uSrc); }
323 //senquack - Did not apply "Silent Hill" mask-bit fix to here.
324 // It is hard to tell from scarce documentation available and
325 // lack of comments in code, but I believe the tile-span
326 // functions here should not bother to preserve any source MSB,
327 // as they are not drawing from a texture.
335 static void TileNULL(le16_t *pDst, u32 count, u16 data)
337 #ifdef ENABLE_GPU_LOG_SUPPORT
338 fprintf(stdout,"TileNULL()\n");
342 ///////////////////////////////////////////////////////////////////////////////
343 // Tiles innerloops driver
344 typedef void (*PT)(le16_t *pDst, u32 count, u16 data);
346 // Template instantiation helper macros
347 #define TI(cf) gpuTileSpanFn<(cf)>
349 #define TIBLOCK(ub) \
350 TI((ub)|0x00), TI((ub)|0x02), TI((ub)|0x04), TI((ub)|0x06), \
351 TN, TI((ub)|0x0a), TN, TI((ub)|0x0e), \
352 TN, TI((ub)|0x12), TN, TI((ub)|0x16), \
353 TN, TI((ub)|0x1a), TN, TI((ub)|0x1e)
355 const PT gpuTileSpanDrivers[32] = {
356 TIBLOCK(0<<8), TIBLOCK(1<<8)
364 ///////////////////////////////////////////////////////////////////////////////
365 // GPU Sprites innerloops generator
// gpuSpriteSpanFn<CF>: renders one horizontal span of a sprite. Fetches
// texels (4bpp/8bpp CLUT or raw 16bpp, per CF_TEXTMODE) from 'pTxt' starting
// at texture coordinate 'u0', then applies lighting, blending and mask-bit
// handling per the CF bits.
// NOTE(review): extraction dropped lines of the original (template header,
// braces, the pixel loop, the 16bpp mask adjustment and lighting setup);
// the remaining code is annotated as-is -- do not assume it is complete.
368 static void gpuSpriteSpanFn(le16_t *pDst, u32 count, u8* pTxt, u32 u0)
370 // Blend func can save an operation if it knows uSrc MSB is unset.
371 // Untextured prims can always skip (source color always comes with MSB=0).
372 // For textured prims, the generic lighting funcs always return it unset. (bonus!)
373 const bool skip_uSrc_mask = MSB_PRESERVED ? (!CF_TEXTMODE) : (!CF_TEXTMODE) || CF_LIGHT;
375 uint_fast16_t uSrc, uDst, srcMSB;
// Wrap mask applied to the u coordinate (from gpu_unai.TextureWindow[2]).
377 u32 u0_mask = gpu_unai.TextureWindow[2];
386 if (CF_TEXTMODE==3) {
387 // Texture is accessed byte-wise, so adjust mask if 16bpp
// CLUT pointer is only needed for the paletted (4bpp/8bpp) modes.
391 const le16_t *CBA_; if (CF_TEXTMODE!=3) CBA_ = gpu_unai.CBA;
// Per-pixel: honor destination mask bit, fetch texel, skip fully
// transparent texels (raw value 0).
395 if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
396 if (CF_MASKCHECK) if (uDst&0x8000) { goto endsprite; }
398 if (CF_TEXTMODE==1) { // 4bpp (CLUT)
399 u8 rgb = pTxt[(u0 & u0_mask)>>1];
400 uSrc = le16_to_u16(CBA_[(rgb>>((u0&1)<<2))&0xf]);
402 if (CF_TEXTMODE==2) { // 8bpp (CLUT)
403 uSrc = le16_to_u16(CBA_[pTxt[u0 & u0_mask]]);
405 if (CF_TEXTMODE==3) { // 16bpp
406 uSrc = le16_to_u16(*(le16_t*)(&pTxt[u0 & u0_mask]));
409 if (!uSrc) goto endsprite;
411 //senquack - save source MSB, as blending or lighting macros will not
412 // (Silent Hill gray rectangles mask bit bug)
413 if (CF_BLEND || CF_LIGHT) srcMSB = uSrc & 0x8000;
416 uSrc = gpuLightingTXT(uSrc, r5, g5, b5);
// Blend only where the source texel's mask bit requested semi-transparency.
418 should_blend = MSB_PRESERVED ? uSrc & 0x8000 : srcMSB;
420 if (CF_BLEND && should_blend)
421 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
// Write back, restoring the saved source MSB when lighting/blending may
// have clobbered it (see MSB_PRESERVED above).
423 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
424 else if (!MSB_PRESERVED && (CF_BLEND || CF_LIGHT)) { *pDst = u16_to_le16(uSrc | srcMSB); }
425 else { *pDst = u16_to_le16(uSrc); }
// Advance texture coordinate: 16bpp mode steps 2 bytes per pixel.
428 u0 += (CF_TEXTMODE==3) ? 2 : 1;
434 static void SpriteNULL(le16_t *pDst, u32 count, u8* pTxt, u32 u0)
436 #ifdef ENABLE_GPU_LOG_SUPPORT
437 fprintf(stdout,"SpriteNULL()\n");
441 ///////////////////////////////////////////////////////////////////////////////
443 ///////////////////////////////////////////////////////////////////////////////
444 // Sprite innerloops driver
445 typedef void (*PS)(le16_t *pDst, u32 count, u8* pTxt, u32 u0);
447 // Template instantiation helper macros
448 #define TI(cf) gpuSpriteSpanFn<(cf)>
449 #define TN SpriteNULL
450 #define TIBLOCK(ub) \
451 TN, TN, TN, TN, TN, TN, TN, TN, \
452 TN, TN, TN, TN, TN, TN, TN, TN, \
453 TN, TN, TN, TN, TN, TN, TN, TN, \
454 TN, TN, TN, TN, TN, TN, TN, TN, \
455 TI((ub)|0x20), TI((ub)|0x21), TI((ub)|0x22), TI((ub)|0x23), TI((ub)|0x24), TI((ub)|0x25), TI((ub)|0x26), TI((ub)|0x27), \
456 TN, TN, TI((ub)|0x2a), TI((ub)|0x2b), TN, TN, TI((ub)|0x2e), TI((ub)|0x2f), \
457 TN, TN, TI((ub)|0x32), TI((ub)|0x33), TN, TN, TI((ub)|0x36), TI((ub)|0x37), \
458 TN, TN, TI((ub)|0x3a), TI((ub)|0x3b), TN, TN, TI((ub)|0x3e), TI((ub)|0x3f), \
459 TI((ub)|0x40), TI((ub)|0x41), TI((ub)|0x42), TI((ub)|0x43), TI((ub)|0x44), TI((ub)|0x45), TI((ub)|0x46), TI((ub)|0x47), \
460 TN, TN, TI((ub)|0x4a), TI((ub)|0x4b), TN, TN, TI((ub)|0x4e), TI((ub)|0x4f), \
461 TN, TN, TI((ub)|0x52), TI((ub)|0x53), TN, TN, TI((ub)|0x56), TI((ub)|0x57), \
462 TN, TN, TI((ub)|0x5a), TI((ub)|0x5b), TN, TN, TI((ub)|0x5e), TI((ub)|0x5f), \
463 TI((ub)|0x60), TI((ub)|0x61), TI((ub)|0x62), TI((ub)|0x63), TI((ub)|0x64), TI((ub)|0x65), TI((ub)|0x66), TI((ub)|0x67), \
464 TN, TN, TI((ub)|0x6a), TI((ub)|0x6b), TN, TN, TI((ub)|0x6e), TI((ub)|0x6f), \
465 TN, TN, TI((ub)|0x72), TI((ub)|0x73), TN, TN, TI((ub)|0x76), TI((ub)|0x77), \
466 TN, TN, TI((ub)|0x7a), TI((ub)|0x7b), TN, TN, TI((ub)|0x7e), TI((ub)|0x7f)
468 const PS gpuSpriteSpanDrivers[256] = {
469 TIBLOCK(0<<8), TIBLOCK(1<<8)
476 ///////////////////////////////////////////////////////////////////////////////
477 // GPU Polygon innerloops generator
479 //senquack - Newer version with following changes:
480 // * Adapted to work with new poly routings in gpu_raster_polygon.h
481 // adapted from DrHell GPU. They are less glitchy and use 22.10
482 // fixed-point instead of original UNAI's 16.16.
483 // * Texture coordinates are no longer packed together into one
484 // unsigned int. This seems to lose too much accuracy (they each
485 // end up being only 8.7 fixed-point that way) and pixel-droupouts
486 // were noticeable both with original code and current DrHell
487 // adaptations. An example would be the sky in NFS3. Now, they are
488 // stored in separate ints, using separate masks.
489 // * Function is no longer INLINE, as it was always called
490 // through a function pointer.
491 // * Function now ensures the mask bit of source texture is preserved
492 // across calls to blending functions (Silent Hill rectangles fix)
493 // * November 2016: Large refactoring of blending/lighting when
494 // JohnnyF added dithering. See gpu_inner_quantization.h and
495 // relevant blend/light headers.
496 // (see README_senquack.txt)
// gpuPolySpanFn<CF>: polygon span inner driver. Three major variants exist
// per CF: untextured flat, untextured Gouraud, and textured (4/8/16bpp),
// each with optional blending, dithering, mask-check/set and blit-mask
// pixel skipping.
// NOTE(review): extraction dropped lines of the original (the template<int
// CF> header, braces, the per-variant loops, lighting setup and the
// endpoly* label lines); the remaining code is annotated as-is -- do not
// assume it is complete.
498 static void gpuPolySpanFn(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count)
500 // Blend func can save an operation if it knows uSrc MSB is unset.
501 // Untextured prims can always skip this (src color MSB is always 0).
502 // For textured prims, the generic lighting funcs always return it unset. (bonus!)
503 const bool skip_uSrc_mask = MSB_PRESERVED ? (!CF_TEXTMODE) : (!CF_TEXTMODE) || CF_LIGHT;
506 u32 bMsk; if (CF_BLITMASK) bMsk = gpu_unai.blit_mask;
512 // UNTEXTURED, NO GOURAUD
513 const u16 pix15 = gpu_unai.PixelData;
515 uint_fast16_t uSrc, uDst;
517 // NOTE: Don't enable CF_BLITMASK pixel skipping (speed hack)
518 // on untextured polys. It seems to do more harm than good: see
519 // gravestone text at end of Medieval intro sequence. -senquack
520 //if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) { goto endpolynotextnogou; } }
522 if (CF_BLEND || CF_MASKCHECK) uDst = le16_to_u16(*pDst);
523 if (CF_MASKCHECK) { if (uDst&0x8000) { goto endpolynotextnogou; } }
528 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
530 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
531 else { *pDst = u16_to_le16(uSrc); }
// ---- Second variant: untextured with per-pixel Gouraud interpolation ----
539 // UNTEXTURED, GOURAUD
540 u32 l_gCol = gpu_unai.gCol;
541 u32 l_gInc = gpu_unai.gInc;
544 uint_fast16_t uDst, uSrc;
546 // See note in above loop regarding CF_BLITMASK
547 //if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) goto endpolynotextgou; }
549 if (CF_BLEND || CF_MASKCHECK) uDst = le16_to_u16(*pDst);
550 if (CF_MASKCHECK) { if (uDst&0x8000) goto endpolynotextgou; }
// Dithered path works in 24bpp and quantizes down to 15bpp at the end.
555 u32 uSrc24 = gpuLightingRGB24(l_gCol);
557 uSrc24 = gpuBlending24<CF_BLENDMODE>(uSrc24, uDst);
558 uSrc = gpuColorQuantization24<CF_DITHER>(uSrc24, pDst);
560 // GOURAUD, NO DITHER
562 uSrc = gpuLightingRGB(l_gCol);
565 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
568 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
569 else { *pDst = u16_to_le16(uSrc); }
// ---- Third variant: textured (4bpp/8bpp CLUT or 16bpp) ----
582 uint_fast16_t uDst, uSrc, srcMSB;
584 //senquack - note: original UNAI code had gpu_unai.{u4/v4} packed into
585 // one 32-bit unsigned int, but this proved to lose too much accuracy
586 // (pixel drouputs noticeable in NFS3 sky), so now are separate vars.
587 u32 l_u_msk = gpu_unai.u_msk; u32 l_v_msk = gpu_unai.v_msk;
588 u32 l_u = gpu_unai.u & l_u_msk; u32 l_v = gpu_unai.v & l_v_msk;
589 s32 l_u_inc = gpu_unai.u_inc; s32 l_v_inc = gpu_unai.v_inc;
591 const le16_t* TBA_ = gpu_unai.TBA;
592 const le16_t* CBA_; if (CF_TEXTMODE!=3) CBA_ = gpu_unai.CBA;
601 l_gInc = gpu_unai.gInc;
602 l_gCol = gpu_unai.gCol;
// Per-pixel: optional blit-mask skip, destination mask check, texel fetch
// (transparent texels -- raw 0 -- are skipped entirely).
618 if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) goto endpolytext; }
619 if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
620 if (CF_MASKCHECK) if (uDst&0x8000) { goto endpolytext; }
622 //senquack - adapted to work with new 22.10 fixed point routines:
623 // (UNAI originally used 16.16)
624 if (CF_TEXTMODE==1) { // 4bpp (CLUT)
626 u32 tv=(l_v<<1)&(0xff<<11);
627 u8 rgb=((u8*)TBA_)[tv+(tu>>1)];
628 uSrc=le16_to_u16(CBA_[(rgb>>((tu&1)<<2))&0xf]);
629 if (!uSrc) goto endpolytext;
631 if (CF_TEXTMODE==2) { // 8bpp (CLUT)
632 uSrc = le16_to_u16(CBA_[(((u8*)TBA_)[(l_u>>10)+((l_v<<1)&(0xff<<11))])]);
633 if (!uSrc) goto endpolytext;
635 if (CF_TEXTMODE==3) { // 16bpp
636 uSrc = le16_to_u16(TBA_[(l_u>>10)+((l_v)&(0xff<<10))]);
637 if (!uSrc) goto endpolytext;
640 // Save source MSB, as blending or lighting will not (Silent Hill)
641 if (CF_BLEND || CF_LIGHT) srcMSB = uSrc & 0x8000;
643 // When textured, only dither when LIGHT (texture blend) is enabled
644 // LIGHT && BLEND => dither
645 // LIGHT && !BLEND => dither
646 //!LIGHT && BLEND => no dither
647 //!LIGHT && !BLEND => no dither
649 if (CF_DITHER && CF_LIGHT) {
652 uSrc24 = gpuLightingTXT24Gouraud(uSrc, l_gCol);
654 uSrc24 = gpuLightingTXT24(uSrc, r8, g8, b8);
656 if (CF_BLEND && srcMSB)
657 uSrc24 = gpuBlending24<CF_BLENDMODE>(uSrc24, uDst);
659 uSrc = gpuColorQuantization24<CF_DITHER>(uSrc24, pDst);
// Non-dithered lighting path (Gouraud-modulated or flat-lit texels).
664 uSrc = gpuLightingTXTGouraud(uSrc, l_gCol);
666 uSrc = gpuLightingTXT(uSrc, r5, g5, b5);
// Blend only where the source texel's mask bit requested semi-transparency.
669 should_blend = MSB_PRESERVED ? uSrc & 0x8000 : srcMSB;
670 if (CF_BLEND && should_blend)
671 uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
// Write back, restoring the saved source MSB when lighting/blending may
// have clobbered it (see MSB_PRESERVED above).
674 if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
675 else if (!MSB_PRESERVED && (CF_BLEND || CF_LIGHT)) { *pDst = u16_to_le16(uSrc | srcMSB); }
676 else { *pDst = u16_to_le16(uSrc); }
// Step texture coordinates (with wrap masks) and Gouraud color.
679 l_u = (l_u + l_u_inc) & l_u_msk;
680 l_v = (l_v + l_v_inc) & l_v_msk;
681 if (CF_LIGHT && CF_GOURAUD) l_gCol += l_gInc;
687 static void PolyNULL(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count)
689 #ifdef ENABLE_GPU_LOG_SUPPORT
690 fprintf(stdout,"PolyNULL()\n");
694 ///////////////////////////////////////////////////////////////////////////////
695 // Polygon innerloops driver
696 typedef void (*PP)(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count);
698 // Template instantiation helper macros
699 #define TI(cf) gpuPolySpanFn<(cf)>
701 #define TIBLOCK(ub) \
702 TI((ub)|0x00), TI((ub)|0x01), TI((ub)|0x02), TI((ub)|0x03), TI((ub)|0x04), TI((ub)|0x05), TI((ub)|0x06), TI((ub)|0x07), \
703 TN, TN, TI((ub)|0x0a), TI((ub)|0x0b), TN, TN, TI((ub)|0x0e), TI((ub)|0x0f), \
704 TN, TN, TI((ub)|0x12), TI((ub)|0x13), TN, TN, TI((ub)|0x16), TI((ub)|0x17), \
705 TN, TN, TI((ub)|0x1a), TI((ub)|0x1b), TN, TN, TI((ub)|0x1e), TI((ub)|0x1f), \
706 TI((ub)|0x20), TI((ub)|0x21), TI((ub)|0x22), TI((ub)|0x23), TI((ub)|0x24), TI((ub)|0x25), TI((ub)|0x26), TI((ub)|0x27), \
707 TN, TN, TI((ub)|0x2a), TI((ub)|0x2b), TN, TN, TI((ub)|0x2e), TI((ub)|0x2f), \
708 TN, TN, TI((ub)|0x32), TI((ub)|0x33), TN, TN, TI((ub)|0x36), TI((ub)|0x37), \
709 TN, TN, TI((ub)|0x3a), TI((ub)|0x3b), TN, TN, TI((ub)|0x3e), TI((ub)|0x3f), \
710 TI((ub)|0x40), TI((ub)|0x41), TI((ub)|0x42), TI((ub)|0x43), TI((ub)|0x44), TI((ub)|0x45), TI((ub)|0x46), TI((ub)|0x47), \
711 TN, TN, TI((ub)|0x4a), TI((ub)|0x4b), TN, TN, TI((ub)|0x4e), TI((ub)|0x4f), \
712 TN, TN, TI((ub)|0x52), TI((ub)|0x53), TN, TN, TI((ub)|0x56), TI((ub)|0x57), \
713 TN, TN, TI((ub)|0x5a), TI((ub)|0x5b), TN, TN, TI((ub)|0x5e), TI((ub)|0x5f), \
714 TI((ub)|0x60), TI((ub)|0x61), TI((ub)|0x62), TI((ub)|0x63), TI((ub)|0x64), TI((ub)|0x65), TI((ub)|0x66), TI((ub)|0x67), \
715 TN, TN, TI((ub)|0x6a), TI((ub)|0x6b), TN, TN, TI((ub)|0x6e), TI((ub)|0x6f), \
716 TN, TN, TI((ub)|0x72), TI((ub)|0x73), TN, TN, TI((ub)|0x76), TI((ub)|0x77), \
717 TN, TN, TI((ub)|0x7a), TI((ub)|0x7b), TN, TN, TI((ub)|0x7e), TI((ub)|0x7f), \
718 TN, TI((ub)|0x81), TN, TI((ub)|0x83), TN, TI((ub)|0x85), TN, TI((ub)|0x87), \
719 TN, TN, TN, TI((ub)|0x8b), TN, TN, TN, TI((ub)|0x8f), \
720 TN, TN, TN, TI((ub)|0x93), TN, TN, TN, TI((ub)|0x97), \
721 TN, TN, TN, TI((ub)|0x9b), TN, TN, TN, TI((ub)|0x9f), \
722 TN, TI((ub)|0xa1), TN, TI((ub)|0xa3), TN, TI((ub)|0xa5), TN, TI((ub)|0xa7), \
723 TN, TN, TN, TI((ub)|0xab), TN, TN, TN, TI((ub)|0xaf), \
724 TN, TN, TN, TI((ub)|0xb3), TN, TN, TN, TI((ub)|0xb7), \
725 TN, TN, TN, TI((ub)|0xbb), TN, TN, TN, TI((ub)|0xbf), \
726 TN, TI((ub)|0xc1), TN, TI((ub)|0xc3), TN, TI((ub)|0xc5), TN, TI((ub)|0xc7), \
727 TN, TN, TN, TI((ub)|0xcb), TN, TN, TN, TI((ub)|0xcf), \
728 TN, TN, TN, TI((ub)|0xd3), TN, TN, TN, TI((ub)|0xd7), \
729 TN, TN, TN, TI((ub)|0xdb), TN, TN, TN, TI((ub)|0xdf), \
730 TN, TI((ub)|0xe1), TN, TI((ub)|0xe3), TN, TI((ub)|0xe5), TN, TI((ub)|0xe7), \
731 TN, TN, TN, TI((ub)|0xeb), TN, TN, TN, TI((ub)|0xef), \
732 TN, TN, TN, TI((ub)|0xf3), TN, TN, TN, TI((ub)|0xf7), \
733 TN, TN, TN, TI((ub)|0xfb), TN, TN, TN, TI((ub)|0xff)
735 const PP gpuPolySpanDrivers[2048] = {
736 TIBLOCK(0<<8), TIBLOCK(1<<8), TIBLOCK(2<<8), TIBLOCK(3<<8),
737 TIBLOCK(4<<8), TIBLOCK(5<<8), TIBLOCK(6<<8), TIBLOCK(7<<8)
744 #endif /* __GPU_UNAI_GPU_INNER_H__ */