| 1 | /*************************************************************************** |
| 2 | * Copyright (C) 2010 PCSX4ALL Team * |
| 3 | * Copyright (C) 2010 Unai * |
| 4 | * Copyright (C) 2016 Senquack (dansilsby <AT> gmail <DOT> com) * |
| 5 | * * |
| 6 | * This program is free software; you can redistribute it and/or modify * |
| 7 | * it under the terms of the GNU General Public License as published by * |
| 8 | * the Free Software Foundation; either version 2 of the License, or * |
| 9 | * (at your option) any later version. * |
| 10 | * * |
| 11 | * This program is distributed in the hope that it will be useful, * |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of * |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * |
| 14 | * GNU General Public License for more details. * |
| 15 | * * |
| 16 | * You should have received a copy of the GNU General Public License * |
| 17 | * along with this program; if not, write to the * |
| 18 | * Free Software Foundation, Inc., * |
| 19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA. * |
| 20 | ***************************************************************************/ |
| 21 | |
| 22 | #ifndef __GPU_UNAI_GPU_INNER_H__ |
| 23 | #define __GPU_UNAI_GPU_INNER_H__ |
| 24 | |
| 25 | /////////////////////////////////////////////////////////////////////////////// |
| 26 | // Inner loop driver instantiation file |
| 27 | |
| 28 | /////////////////////////////////////////////////////////////////////////////// |
// Option Masks (CF template parameter)
| 30 | #define CF_LIGHT ((CF>> 0)&1) // Lighting |
| 31 | #define CF_BLEND ((CF>> 1)&1) // Blending |
| 32 | #define CF_MASKCHECK ((CF>> 2)&1) // Mask bit check |
| 33 | #define CF_BLENDMODE ((CF>> 3)&3) // Blend mode 0..3 |
| 34 | #define CF_TEXTMODE ((CF>> 5)&3) // Texture mode 1..3 (0: texturing disabled) |
| 35 | #define CF_GOURAUD ((CF>> 7)&1) // Gouraud shading |
| 36 | #define CF_MASKSET ((CF>> 8)&1) // Mask bit set |
| 37 | #define CF_DITHER ((CF>> 9)&1) // Dithering |
| 38 | #define CF_BLITMASK ((CF>>10)&1) // blit_mask check (skip rendering pixels |
| 39 | // that wouldn't end up displayed on |
| 40 | // low-res screen using simple downscaler) |
| 41 | |
| 42 | //#ifdef __arm__ |
| 43 | //#ifndef ENABLE_GPU_ARMV7 |
| 44 | /* ARMv5 */ |
| 45 | //#include "gpu_inner_blend_arm5.h" |
| 46 | //#else |
| 47 | /* ARMv7 optimized */ |
| 48 | //#include "gpu_inner_blend_arm7.h" |
| 49 | //#endif |
| 50 | //#else |
| 51 | //#include "gpu_inner_blend.h" |
| 52 | //#endif |
| 53 | |
| 54 | #include "gpu_inner_blend.h" |
| 55 | #include "gpu_inner_quantization.h" |
| 56 | #include "gpu_inner_light.h" |
| 57 | |
| 58 | #include "arm_features.h" |
| 59 | #include "compiler_features.h" |
| 60 | #ifdef __arm__ |
| 61 | #include "gpu_arm.h" |
| 62 | #include "gpu_inner_blend_arm.h" |
| 63 | #include "gpu_inner_light_arm.h" |
| 64 | #define gpuBlending gpuBlendingARM |
| 65 | #define gpuLightingTXT gpuLightingTXTARM |
| 66 | #else |
| 67 | #define gpuBlending gpuBlendingGeneric |
| 68 | #define gpuLightingTXT gpuLightingTXTGeneric |
| 69 | #endif |
| 70 | |
| 71 | // Non-dithering lighting and blending functions preserve uSrc |
| 72 | // MSB. This saves a few operations and useless load/stores. |
| 73 | #define MSB_PRESERVED (!CF_DITHER) |
| 74 | |
| 75 | // If defined, Gouraud colors are fixed-point 5.11, otherwise they are 8.16 |
| 76 | // This is only for debugging/verification of low-precision colors in C. |
| 77 | // Low-precision Gouraud is intended for use by SIMD-optimized inner drivers |
| 78 | // which get/use Gouraud colors in SIMD registers. |
| 79 | //#define GPU_GOURAUD_LOW_PRECISION |
| 80 | |
| 81 | // How many bits of fixed-point precision GouraudColor uses |
| 82 | #ifdef GPU_GOURAUD_LOW_PRECISION |
| 83 | #define GPU_GOURAUD_FIXED_BITS 11 |
| 84 | #else |
| 85 | #define GPU_GOURAUD_FIXED_BITS 16 |
| 86 | #endif |
| 87 | |
| 88 | // Used to pass Gouraud colors to gpuPixelSpanFn() (lines) |
struct GouraudColor {
#ifdef GPU_GOURAUD_LOW_PRECISION
	// 5.11 fixed-point color channels and per-pixel increments
	u16 r, g, b;
	s16 r_incr, g_incr, b_incr;
#else
	// 8.16 fixed-point color channels and per-pixel increments
	u32 r, g, b;
	s32 r_incr, g_incr, b_incr;
#endif
};
| 98 | |
| 99 | static inline u16 gpuGouraudColor15bpp(u32 r, u32 g, u32 b) |
| 100 | { |
| 101 | r >>= GPU_GOURAUD_FIXED_BITS; |
| 102 | g >>= GPU_GOURAUD_FIXED_BITS; |
| 103 | b >>= GPU_GOURAUD_FIXED_BITS; |
| 104 | |
| 105 | #ifndef GPU_GOURAUD_LOW_PRECISION |
| 106 | // High-precision Gouraud colors are 8-bit + fractional |
| 107 | r >>= 3; g >>= 3; b >>= 3; |
| 108 | #endif |
| 109 | |
| 110 | return r | (g << 5) | (b << 10); |
| 111 | } |
| 112 | |
| 113 | /////////////////////////////////////////////////////////////////////////////// |
| 114 | // GPU Pixel span operations generator gpuPixelSpanFn<> |
| 115 | // Oct 2016: Created/adapted from old gpuPixelFn by senquack: |
| 116 | // Original gpuPixelFn was used to draw lines one pixel at a time. I wrote |
| 117 | // new line algorithms that draw lines using horizontal/vertical/diagonal |
| 118 | // spans of pixels, necessitating new pixel-drawing function that could |
| 119 | // not only render spans of pixels, but gouraud-shade them as well. |
| 120 | // This speeds up line rendering and would allow tile-rendering (untextured |
| 121 | // rectangles) to use the same set of functions. Since tiles are always |
| 122 | // monochrome, they simply wouldn't use the extra set of 32 gouraud-shaded |
| 123 | // gpuPixelSpanFn functions (TODO?). |
// Draw a span of 'len' pixels for line rendering.
//   pDst : first destination pixel in the framebuffer
//   data : 15bpp source color (flat) or, when CF_GOURAUD, a pointer to a
//          GouraudColor struct holding fixed-point channels + increments
//   incr : byte distance between successive destination pixels (allows
//          horizontal, vertical and diagonal spans)
//   len  : number of pixels to draw (must be >= 1)
// Returns pDst advanced past the last pixel processed. When CF_GOURAUD,
// the updated channel values are written back through the struct pointer
// so the caller can continue the gradient in a following span.
template<int CF>
static le16_t* gpuPixelSpanFn(le16_t* pDst, uintptr_t data, ptrdiff_t incr, size_t len)
{
	// Blend func can save an operation if it knows uSrc MSB is
	// unset. For untextured prims, this is always true.
	const bool skip_uSrc_mask = true;

	u16 col;
	struct GouraudColor * gcPtr;
	u32 r, g, b;
	s32 r_incr, g_incr, b_incr;

	// Caller counts in bytes, we count in pixels
	incr /= 2;

	if (CF_GOURAUD) {
		gcPtr = (GouraudColor*)data;
		r = gcPtr->r; r_incr = gcPtr->r_incr;
		g = gcPtr->g; g_incr = gcPtr->g_incr;
		b = gcPtr->b; b_incr = gcPtr->b_incr;
	} else {
		col = (u16)data;
	}

	do {
		if (!CF_GOURAUD)
		{   // NO GOURAUD
			// Fast paths write directly; the general path below reads the
			// destination first for mask-check and/or blending.
			if (!CF_MASKCHECK && !CF_BLEND) {
				if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
				else            { *pDst = u16_to_le16(col); }
			} else if (CF_MASKCHECK && !CF_BLEND) {
				// Skip pixels whose mask (MSB) bit is already set
				if (!(le16_raw(*pDst) & HTOLE16(0x8000))) {
					if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
					else            { *pDst = u16_to_le16(col); }
				}
			} else {
				uint_fast16_t uDst = le16_to_u16(*pDst);
				if (CF_MASKCHECK) { if (uDst & 0x8000) goto endpixel; }

				uint_fast16_t uSrc = col;

				if (CF_BLEND)
					uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

				if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
				else            { *pDst = u16_to_le16(uSrc); }
			}

		} else
		{   // GOURAUD
			// Same three paths as above, but the color is re-derived from the
			// fixed-point channel accumulators for every pixel.
			if (!CF_MASKCHECK && !CF_BLEND) {
				col = gpuGouraudColor15bpp(r, g, b);
				if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
				else            { *pDst = u16_to_le16(col); }
			} else if (CF_MASKCHECK && !CF_BLEND) {
				col = gpuGouraudColor15bpp(r, g, b);
				if (!(le16_raw(*pDst) & HTOLE16(0x8000))) {
					if (CF_MASKSET) { *pDst = u16_to_le16(col | 0x8000); }
					else            { *pDst = u16_to_le16(col); }
				}
			} else {
				uint_fast16_t uDst = le16_to_u16(*pDst);
				if (CF_MASKCHECK) { if (uDst & 0x8000) goto endpixel; }
				col = gpuGouraudColor15bpp(r, g, b);

				uint_fast16_t uSrc = col;

				// Blend func can save an operation if it knows uSrc MSB is
				// unset. For untextured prims, this is always true.
				// (shadows the identical file-scope constant above; harmless)
				const bool skip_uSrc_mask = true;

				if (CF_BLEND)
					uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

				if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
				else            { *pDst = u16_to_le16(uSrc); }
			}
		}

endpixel:
		// Advance the gradient even for pixels skipped by the mask check
		if (CF_GOURAUD) {
			r += r_incr;
			g += g_incr;
			b += b_incr;
		}
		pDst += incr;
	} while (len-- > 1);

	// Note from senquack: Normally, I'd prefer to write a 'do {} while (--len)'
	// loop, or even a for() loop, however, on MIPS platforms anything but the
	// 'do {} while (len-- > 1)' tends to generate very unoptimal asm, with
	// many unneeded MULs/ADDs/branches at the ends of these functions.
	// If you change the loop structure above, be sure to compare the quality
	// of the generated code!!

	// Persist the final gradient state for the caller's next span
	if (CF_GOURAUD) {
		gcPtr->r = r;
		gcPtr->g = g;
		gcPtr->b = b;
	}
	return pDst;
}
| 227 | |
// Placeholder driver for invalid CF combinations (non-zero blend mode while
// blending is disabled): draws nothing and returns the dst pointer unchanged.
static le16_t* PixelSpanNULL(le16_t* pDst, uintptr_t data, ptrdiff_t incr, size_t len)
{
#ifdef ENABLE_GPU_LOG_SUPPORT
	fprintf(stdout,"PixelSpanNULL()\n");
#endif
	return pDst;
}
| 235 | |
| 236 | /////////////////////////////////////////////////////////////////////////////// |
| 237 | // PixelSpan (lines) innerloops driver |
| 238 | typedef le16_t* (*PSD)(le16_t* dst, uintptr_t data, ptrdiff_t incr, size_t len); |
| 239 | |
// 64-entry dispatch table; the renderer indexes it with the bit layout below
// and each entry is a gpuPixelSpanFn instantiation with the matching CF mask.
const PSD gpuPixelSpanDrivers[64] =
{
	// Array index | 'CF' template field | Field value
	// ------------+---------------------+----------------
	// Bit 0       | CF_BLEND            | off (0), on (1)
	// Bit 1       | CF_MASKCHECK        | off (0), on (1)
	// Bit 3:2     | CF_BLENDMODE        | 0..3
	// Bit 4       | CF_MASKSET          | off (0), on (1)
	// Bit 5       | CF_GOURAUD          | off (0), on (1)
	//
	// NULL entries are ones for which blending is disabled and blend-mode
	// field is non-zero, which is obviously invalid.

	// Flat-shaded
	gpuPixelSpanFn<0x00<<1>, gpuPixelSpanFn<0x01<<1>, gpuPixelSpanFn<0x02<<1>, gpuPixelSpanFn<0x03<<1>,
	PixelSpanNULL, gpuPixelSpanFn<0x05<<1>, PixelSpanNULL, gpuPixelSpanFn<0x07<<1>,
	PixelSpanNULL, gpuPixelSpanFn<0x09<<1>, PixelSpanNULL, gpuPixelSpanFn<0x0B<<1>,
	PixelSpanNULL, gpuPixelSpanFn<0x0D<<1>, PixelSpanNULL, gpuPixelSpanFn<0x0F<<1>,

	// Flat-shaded + PixelMSB (CF_MASKSET)
	gpuPixelSpanFn<(0x00<<1)|0x100>, gpuPixelSpanFn<(0x01<<1)|0x100>, gpuPixelSpanFn<(0x02<<1)|0x100>, gpuPixelSpanFn<(0x03<<1)|0x100>,
	PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x100>,
	PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x100>,
	PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x100>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x100>,

	// Gouraud-shaded (CF_GOURAUD)
	gpuPixelSpanFn<(0x00<<1)|0x80>, gpuPixelSpanFn<(0x01<<1)|0x80>, gpuPixelSpanFn<(0x02<<1)|0x80>, gpuPixelSpanFn<(0x03<<1)|0x80>,
	PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x80>,
	PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x80>,
	PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x80>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x80>,

	// Gouraud-shaded (CF_GOURAUD) + PixelMSB (CF_MASKSET)
	gpuPixelSpanFn<(0x00<<1)|0x180>, gpuPixelSpanFn<(0x01<<1)|0x180>, gpuPixelSpanFn<(0x02<<1)|0x180>, gpuPixelSpanFn<(0x03<<1)|0x180>,
	PixelSpanNULL, gpuPixelSpanFn<(0x05<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x07<<1)|0x180>,
	PixelSpanNULL, gpuPixelSpanFn<(0x09<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x0B<<1)|0x180>,
	PixelSpanNULL, gpuPixelSpanFn<(0x0D<<1)|0x180>, PixelSpanNULL, gpuPixelSpanFn<(0x0F<<1)|0x180>
};
| 277 | |
| 278 | /////////////////////////////////////////////////////////////////////////////// |
| 279 | // GPU Tiles innerloops generator |
| 280 | |
// Draw one horizontal span of a monochrome tile (untextured rectangle):
// 'count' consecutive pixels of 15bpp color 'data' starting at pDst.
// CF selects mask-check / mask-set / blending behavior at compile time.
template<int CF>
static inline void gpuTileSpanFn(le16_t *pDst, u16 data, u32 count)
{
	le16_t ldata;

	if (!CF_MASKCHECK && !CF_BLEND) {
		// Fast fill: precompute the little-endian pixel once
		if (CF_MASKSET)
			ldata = u16_to_le16(data | 0x8000);
		else
			ldata = u16_to_le16(data);
		do { *pDst++ = ldata; } while (--count);
	} else if (CF_MASKCHECK && !CF_BLEND) {
		// Fill, skipping pixels whose mask (MSB) bit is already set
		if (CF_MASKSET)
			ldata = u16_to_le16(data | 0x8000);
		else
			ldata = u16_to_le16(data);
		do {
			if (!(le16_raw(*pDst) & HTOLE16(0x8000)))
				*pDst = ldata;
			pDst++;
		} while (--count);
	} else
	{
		// Blending path: must read each destination pixel
		// Blend func can save an operation if it knows uSrc MSB is
		// unset. For untextured prims, this is always true.
		const bool skip_uSrc_mask = true;

		uint_fast16_t uSrc, uDst;
		do
		{
			if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
			if (CF_MASKCHECK) if (uDst&0x8000) { goto endtile; }

			uSrc = data;

			if (CF_BLEND)
				uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

			if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
			else            { *pDst = u16_to_le16(uSrc); }

			//senquack - Did not apply "Silent Hill" mask-bit fix to here.
			// It is hard to tell from scarce documentation available and
			// lack of comments in code, but I believe the tile-span
			// functions here should not bother to preserve any source MSB,
			// as they are not drawing from a texture.
endtile:
			pDst++;
		}
		while (--count);
	}
}
| 333 | |
// Draw a whole tile: one gpuTileSpanFn span per scanline from inn.y0 to
// inn.y1, honoring interlace/progressive-interlace line skipping.
// NOTE(review): the ilace/progressive state is read from the global
// gpu_unai.inn rather than the 'inn' parameter — presumably callers always
// pass gpu_unai.inn so the two alias; verify before changing either.
template<int CF>
static noinline void gpuTileDriverFn(le16_t *pDst, u16 data, u32 count,
	const gpu_unai_inner_t &inn)
{
	const int li=gpu_unai.inn.ilace_mask;
	const int pi=(ProgressiveInterlaceEnabled()?(gpu_unai.inn.ilace_mask+1):0);
	const int pif=(ProgressiveInterlaceEnabled()?(gpu_unai.prog_ilace_flag?(gpu_unai.inn.ilace_mask+1):0):1);
	const int y1 = inn.y1;
	int y0 = inn.y0;

	for (; y0 < y1; ++y0) {
		// Skip scanlines excluded by the interlace masks
		if (!(y0&li) && (y0&pi) != pif)
			gpuTileSpanFn<CF>(pDst, data, count);
		pDst += FRAME_WIDTH;
	}
}
| 350 | |
| 351 | #ifdef __arm__ |
| 352 | |
// ARM dispatch wrapper: route the CF combinations that have hand-written
// asm tile drivers to them, and fall back to the generic C++ template for
// everything else. (st0..st3 = semi-transparency/blend modes 0..3.)
template<int CF>
static void TileAsm(le16_t *pDst, u16 data, u32 count, const gpu_unai_inner_t &inn)
{
	switch (CF) {
	case 0x02: tile_driver_st0_asm(pDst, data, count, &inn); return;
	case 0x0a: tile_driver_st1_asm(pDst, data, count, &inn); return;
	case 0x1a: tile_driver_st3_asm(pDst, data, count, &inn); return;
#ifdef HAVE_ARMV6
	case 0x12: tile_driver_st2_asm(pDst, data, count, &inn); return;
#endif
	}
	gpuTileDriverFn<CF>(pDst, data, count, inn);
}
| 366 | |
| 367 | #endif |
| 368 | |
// Placeholder driver for invalid tile CF combinations; draws nothing.
static void TileNULL(le16_t *pDst, u16 data, u32 count, const gpu_unai_inner_t &inn)
{
#ifdef ENABLE_GPU_LOG_SUPPORT
	fprintf(stdout,"TileNULL()\n");
#endif
}
| 375 | |
| 376 | /////////////////////////////////////////////////////////////////////////////// |
| 377 | // Tiles innerloops driver |
| 378 | typedef void (*PT)(le16_t *pDst, u16 data, u32 count, const gpu_unai_inner_t &inn); |
| 379 | |
// Template instantiation helper macros
//   TI  - generic C++ template driver
//   TN  - NULL driver (invalid CF combination)
//   TA  - ARM asm driver when building for ARM, else generic
//   TA6 - ARM asm driver only when ARMv6 asm is available, else generic
#define TI(cf) gpuTileDriverFn<(cf)>
#define TN TileNULL
#ifdef __arm__
#define TA(cf)  TileAsm<(cf)>
#else
#define TA(cf)  TI(cf)
#endif
#ifdef HAVE_ARMV6
#define TA6(cf) TileAsm<(cf)>
#else
#define TA6(cf) TI(cf)
#endif
// 16 entries per block: table index i maps to CF value (i&0xf)<<1 within a
// block, and the second block adds 0x100 (CF_MASKSET) via 'ub'.
#define TIBLOCK(ub) \
	TI((ub)|0x00), TA6((ub)|0x02), TI((ub)|0x04), TI((ub)|0x06), \
	TN, TA ((ub)|0x0a), TN, TI((ub)|0x0e), \
	TN, TA6((ub)|0x12), TN, TI((ub)|0x16), \
	TN, TA ((ub)|0x1a), TN, TI((ub)|0x1e)

const PT gpuTileDrivers[32] = {
	TIBLOCK(0<<8), TIBLOCK(1<<8)
};

#undef TI
#undef TN
#undef TA
#undef TA6
#undef TIBLOCK
| 408 | |
| 409 | |
| 410 | /////////////////////////////////////////////////////////////////////////////// |
| 411 | // GPU Sprites innerloops generator |
| 412 | |
| 413 | typedef void (*PS)(le16_t *pPixel, u32 count, const u8 *pTxt, |
| 414 | const gpu_unai_inner_t &inn); |
| 415 | |
// Draw a textured sprite: 'count' pixels per scanline from inn.y0 to inn.y1,
// reading texels from pTxt_base with integer (non-interpolated) u/v stepping.
// CF selects texture mode (4/8/16bpp), lighting, blending and mask handling.
template<int CF>
static noinline void gpuSpriteDriverFn(le16_t *pPixel, u32 count, const u8 *pTxt_base,
	const gpu_unai_inner_t &inn)
{
	// Blend func can save an operation if it knows uSrc MSB is unset.
	// Untextured prims can always skip (source color always comes with MSB=0).
	// For textured prims, the generic lighting funcs always return it unset. (bonus!)
	const bool skip_uSrc_mask = MSB_PRESERVED ? (!CF_TEXTMODE) : (!CF_TEXTMODE) || CF_LIGHT;

	uint_fast16_t uSrc, uDst, srcMSB;
	bool should_blend;
	// Texture window masks arrive in 22.10 fixed point; sprites step by
	// whole texels, so drop the fractional bits.
	u32 u0_mask = inn.u_msk >> 10;

	u8 r5, g5, b5;
	if (CF_LIGHT) {
		r5 = inn.r5;
		g5 = inn.g5;
		b5 = inn.b5;
	}

	// CLUT pointer is only meaningful for the paletted 4/8bpp modes
	const le16_t *CBA_; if (CF_TEXTMODE!=3) CBA_ = inn.CBA;
	const u32 v0_mask = inn.v_msk >> 10;
	s32 y0 = inn.y0, y1 = inn.y1, li = inn.ilace_mask;
	u32 u0_ = inn.u, v0 = inn.v;

	if (CF_TEXTMODE==3) {
		// Texture is accessed byte-wise, so adjust to 16bpp
		u0_ <<= 1;
		u0_mask <<= 1;
	}

	for (; y0 < y1; ++y0, pPixel += FRAME_WIDTH, ++v0)
	{
		if (y0 & li) continue;  // interlace: skip masked scanlines
		// 2048 bytes = one texture-page row
		const u8 *pTxt = pTxt_base + ((v0 & v0_mask) * 2048);
		le16_t *pDst = pPixel;
		u32 u0 = u0_;
		u32 count1 = count;
		do
		{
			if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
			if (CF_MASKCHECK) if (uDst&0x8000) { goto endsprite; }

			if (CF_TEXTMODE==1) { // 4bpp (CLUT)
				u8 rgb = pTxt[(u0 & u0_mask)>>1];
				uSrc = le16_to_u16(CBA_[(rgb>>((u0&1)<<2))&0xf]);
			}
			if (CF_TEXTMODE==2) { // 8bpp (CLUT)
				uSrc = le16_to_u16(CBA_[pTxt[u0 & u0_mask]]);
			}
			if (CF_TEXTMODE==3) { // 16bpp
				uSrc = le16_to_u16(*(le16_t*)(&pTxt[u0 & u0_mask]));
			}

			// Texel value 0 is fully transparent
			if (!uSrc) goto endsprite;

			//senquack - save source MSB, as blending or lighting macros will not
			// (Silent Hill gray rectangles mask bit bug)
			if (CF_BLEND || CF_LIGHT) srcMSB = uSrc & 0x8000;

			if (CF_LIGHT)
				uSrc = gpuLightingTXT(uSrc, r5, g5, b5);

			// Semi-transparency only applies to texels with the MSB set
			should_blend = MSB_PRESERVED ? uSrc & 0x8000 : srcMSB;

			if (CF_BLEND && should_blend)
				uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

			if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
			else if (!MSB_PRESERVED && (CF_BLEND || CF_LIGHT)) { *pDst = u16_to_le16(uSrc | srcMSB); }
			else { *pDst = u16_to_le16(uSrc); }

endsprite:
			// 16bpp mode steps 2 bytes per texel, paletted modes 1
			u0 += (CF_TEXTMODE==3) ? 2 : 1;
			pDst++;
		}
		while (--count1);
	}
}
| 495 | |
| 496 | #ifdef __arm__ |
| 497 | |
// ARM dispatch wrapper for sprites: use a hand-written asm driver when the
// sprite's u/v range stays within the texture window masks (so the asm can
// skip per-texel masking), otherwise fall back to the generic template.
template<int CF>
static void SpriteMaybeAsm(le16_t *pPixel, u32 count, const u8 *pTxt_base,
	const gpu_unai_inner_t &inn)
{
#if 1
	s32 lines = inn.y1 - inn.y0;
	// Last texel coordinates reached by this sprite
	u32 u1m = inn.u + count - 1, v1m = inn.v + lines - 1;
	if (u1m == (u1m & (inn.u_msk >> 10)) && v1m == (v1m & (inn.v_msk >> 10))) {
		// Neither u nor v wraps: fastest asm paths
		const u8 *pTxt = pTxt_base + inn.v * 2048;
		switch (CF) {
		case 0x20: sprite_driver_4bpp_asm (pPixel, pTxt + inn.u / 2, count, &inn); return;
		case 0x40: sprite_driver_8bpp_asm (pPixel, pTxt + inn.u, count, &inn); return;
		case 0x60: sprite_driver_16bpp_asm(pPixel, pTxt + inn.u * 2, count, &inn); return;
		}
	}
	if (v1m == (v1m & (inn.v_msk >> 10))) {
		// Only v stays in range: asm drivers that still mask u per texel
		const u8 *pTxt = pTxt_base + inn.v * 2048;
		switch (CF) {
		case 0x20: sprite_driver_4bpp_l0_std_asm(pPixel, pTxt, count, &inn); return;
		case 0x22: sprite_driver_4bpp_l0_st0_asm(pPixel, pTxt, count, &inn); return;
		case 0x40: sprite_driver_8bpp_l0_std_asm(pPixel, pTxt, count, &inn); return;
		case 0x42: sprite_driver_8bpp_l0_st0_asm(pPixel, pTxt, count, &inn); return;
#ifdef HAVE_ARMV6
		case 0x21: sprite_driver_4bpp_l1_std_asm(pPixel, pTxt, count, &inn); return;
		case 0x23: sprite_driver_4bpp_l1_st0_asm(pPixel, pTxt, count, &inn); return;
		case 0x2b: sprite_driver_4bpp_l1_st1_asm(pPixel, pTxt, count, &inn); return;
		case 0x41: sprite_driver_8bpp_l1_std_asm(pPixel, pTxt, count, &inn); return;
		case 0x43: sprite_driver_8bpp_l1_st0_asm(pPixel, pTxt, count, &inn); return;
		case 0x4b: sprite_driver_8bpp_l1_st1_asm(pPixel, pTxt, count, &inn); return;
#endif
		}
	}
#endif
	gpuSpriteDriverFn<CF>(pPixel, count, pTxt_base, inn);
}
| 533 | #endif // __arm__ |
| 534 | |
// Placeholder driver for invalid sprite CF combinations; draws nothing.
static void SpriteNULL(le16_t *pPixel, u32 count, const u8 *pTxt_base,
	const gpu_unai_inner_t &inn)
{
#ifdef ENABLE_GPU_LOG_SUPPORT
	fprintf(stdout,"SpriteNULL()\n");
#endif
}
| 542 | |
| 543 | /////////////////////////////////////////////////////////////////////////////// |
| 544 | |
| 545 | /////////////////////////////////////////////////////////////////////////////// |
| 546 | // Sprite innerloops driver |
| 547 | |
// Template instantiation helper macros
//   TI  - generic C++ template driver
//   TN  - NULL driver (invalid CF combination, e.g. untextured sprite)
//   TA  - ARM asm driver when building for ARM, else generic
//   TA6 - ARM asm driver only when ARMv6 asm is available, else generic
#define TI(cf) gpuSpriteDriverFn<(cf)>
#define TN SpriteNULL
#ifdef __arm__
#define TA(cf) SpriteMaybeAsm<(cf)>
#else
#define TA(cf) TI(cf)
#endif
#ifdef HAVE_ARMV6
#define TA6(cf) SpriteMaybeAsm<(cf)>
#else
#define TA6(cf) TI(cf)
#endif
// 128 entries per block: table index is the low byte of CF directly
// (0x20/0x40/0x60 = 4/8/16bpp texture mode, bit0 light, bit1 blend, bits
// 3:2 blend mode); 'ub' adds 0x100 (CF_MASKSET) for the second block.
#define TIBLOCK(ub) \
	TN, TN, TN, TN, TN, TN, TN, TN, \
	TN, TN, TN, TN, TN, TN, TN, TN, \
	TN, TN, TN, TN, TN, TN, TN, TN, \
	TN, TN, TN, TN, TN, TN, TN, TN, \
	TA((ub)|0x20), TA6((ub)|0x21),TA6((ub)|0x22),TA6((ub)|0x23),TI((ub)|0x24), TI((ub)|0x25), TI((ub)|0x26), TI((ub)|0x27), \
	TN, TN, TI((ub)|0x2a), TA6((ub)|0x2b),TN, TN, TI((ub)|0x2e), TI((ub)|0x2f), \
	TN, TN, TI((ub)|0x32), TI((ub)|0x33), TN, TN, TI((ub)|0x36), TI((ub)|0x37), \
	TN, TN, TI((ub)|0x3a), TI((ub)|0x3b), TN, TN, TI((ub)|0x3e), TI((ub)|0x3f), \
	TA((ub)|0x40), TA6((ub)|0x41),TA6((ub)|0x42),TA6((ub)|0x43),TI((ub)|0x44), TI((ub)|0x45), TI((ub)|0x46), TI((ub)|0x47), \
	TN, TN, TI((ub)|0x4a), TA6((ub)|0x4b),TN, TN, TI((ub)|0x4e), TI((ub)|0x4f), \
	TN, TN, TI((ub)|0x52), TI((ub)|0x53), TN, TN, TI((ub)|0x56), TI((ub)|0x57), \
	TN, TN, TI((ub)|0x5a), TI((ub)|0x5b), TN, TN, TI((ub)|0x5e), TI((ub)|0x5f), \
	TA((ub)|0x60), TI((ub)|0x61), TI((ub)|0x62), TI((ub)|0x63), TI((ub)|0x64), TI((ub)|0x65), TI((ub)|0x66), TI((ub)|0x67), \
	TN, TN, TI((ub)|0x6a), TI((ub)|0x6b), TN, TN, TI((ub)|0x6e), TI((ub)|0x6f), \
	TN, TN, TI((ub)|0x72), TI((ub)|0x73), TN, TN, TI((ub)|0x76), TI((ub)|0x77), \
	TN, TN, TI((ub)|0x7a), TI((ub)|0x7b), TN, TN, TI((ub)|0x7e), TI((ub)|0x7f)

const PS gpuSpriteDrivers[256] = {
	TIBLOCK(0<<8), TIBLOCK(1<<8)
};

#undef TI
#undef TN
#undef TIBLOCK
#undef TA
#undef TA6
| 588 | |
| 589 | /////////////////////////////////////////////////////////////////////////////// |
| 590 | // GPU Polygon innerloops generator |
| 591 | |
| 592 | //senquack - Newer version with following changes: |
// * Adapted to work with new poly routines in gpu_raster_polygon.h
| 594 | // adapted from DrHell GPU. They are less glitchy and use 22.10 |
| 595 | // fixed-point instead of original UNAI's 16.16. |
| 596 | // * Texture coordinates are no longer packed together into one |
| 597 | // unsigned int. This seems to lose too much accuracy (they each |
| 598 | // end up being only 8.7 fixed-point that way) and pixel-droupouts |
| 599 | // were noticeable both with original code and current DrHell |
| 600 | // adaptations. An example would be the sky in NFS3. Now, they are |
| 601 | // stored in separate ints, using separate masks. |
| 602 | // * Function is no longer INLINE, as it was always called |
| 603 | // through a function pointer. |
| 604 | // * Function now ensures the mask bit of source texture is preserved |
| 605 | // across calls to blending functions (Silent Hill rectangles fix) |
| 606 | // * November 2016: Large refactoring of blending/lighting when |
| 607 | // JohnnyF added dithering. See gpu_inner_quantization.h and |
| 608 | // relevant blend/light headers. |
| 609 | // (see README_senquack.txt) |
// Rasterize one horizontal polygon span of 'count' pixels starting at pDst.
// All per-span state (texture base, CLUT, u/v start + increments in 22.10
// fixed point, Gouraud color state, blit mask) comes from gpu_unai.inn.
// CF selects texturing, lighting, Gouraud, dithering, blending and mask
// handling at compile time.
template<int CF>
static noinline void gpuPolySpanFn(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count)
{
	// Blend func can save an operation if it knows uSrc MSB is unset.
	// Untextured prims can always skip this (src color MSB is always 0).
	// For textured prims, the generic lighting funcs always return it unset. (bonus!)
	const bool skip_uSrc_mask = MSB_PRESERVED ? (!CF_TEXTMODE) : (!CF_TEXTMODE) || CF_LIGHT;
	bool should_blend;

	u32 bMsk; if (CF_BLITMASK) bMsk = gpu_unai.inn.blit_mask;

	if (!CF_TEXTMODE)
	{
		if (!CF_GOURAUD)
		{
			// UNTEXTURED, NO GOURAUD
			const u16 pix15 = gpu_unai.inn.PixelData;
			do {
				uint_fast16_t uSrc, uDst;

				// NOTE: Don't enable CF_BLITMASK pixel skipping (speed hack)
				// on untextured polys. It seems to do more harm than good: see
				// gravestone text at end of Medieval intro sequence. -senquack
				//if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) { goto endpolynotextnogou; } }

				if (CF_BLEND || CF_MASKCHECK) uDst = le16_to_u16(*pDst);
				if (CF_MASKCHECK) { if (uDst&0x8000) { goto endpolynotextnogou; } }

				uSrc = pix15;

				if (CF_BLEND)
					uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);

				if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
				else            { *pDst = u16_to_le16(uSrc); }

endpolynotextnogou:
				pDst++;
			} while(--count);
		}
		else
		{
			// UNTEXTURED, GOURAUD
			gcol_t l_gCol = gpu_unai.inn.gCol;
			gcol_t l_gInc = gpu_unai.inn.gInc;

			do {
				uint_fast16_t uDst, uSrc;

				// See note in above loop regarding CF_BLITMASK
				//if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) goto endpolynotextgou; }

				if (CF_BLEND || CF_MASKCHECK) uDst = le16_to_u16(*pDst);
				if (CF_MASKCHECK) { if (uDst&0x8000) goto endpolynotextgou; }

				if (CF_DITHER) {
					// GOURAUD, DITHER
					// Work in 24bpp and quantize down at the end

					u32 uSrc24 = gpuLightingRGB24(l_gCol);
					if (CF_BLEND)
						uSrc24 = gpuBlending24<CF_BLENDMODE>(uSrc24, uDst);
					uSrc = gpuColorQuantization24<CF_DITHER>(uSrc24, pDst);
				} else {
					// GOURAUD, NO DITHER

					uSrc = gpuLightingRGB(l_gCol);

					if (CF_BLEND)
						uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
				}

				if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
				else            { *pDst = u16_to_le16(uSrc); }

endpolynotextgou:
				// Advance gradient even for pixels skipped by mask check
				pDst++;
				l_gCol.raw += l_gInc.raw;
			}
			while (--count);
		}
	}
	else
	{
		// TEXTURED

		uint_fast16_t uDst, uSrc, srcMSB;

		//senquack - note: original UNAI code had gpu_unai.{u4/v4} packed into
		// one 32-bit unsigned int, but this proved to lose too much accuracy
		// (pixel dropouts noticeable in NFS3 sky), so now are separate vars.
		u32 l_u_msk = gpu_unai.inn.u_msk; u32 l_v_msk = gpu_unai.inn.v_msk;
		u32 l_u = gpu_unai.inn.u & l_u_msk; u32 l_v = gpu_unai.inn.v & l_v_msk;
		s32 l_u_inc = gpu_unai.inn.u_inc; s32 l_v_inc = gpu_unai.inn.v_inc;
		// Pre-scale v (22.10) by 2 and mask only its integer bits so that
		// (l_v & l_v_msk) directly yields the byte offset of the texture row
		// (2048 bytes per texture-page row).
		l_v <<= 1;
		l_v_inc <<= 1;
		l_v_msk = (l_v_msk & (0xff<<10)) << 1;

		const le16_t* TBA_ = gpu_unai.inn.TBA;
		// CLUT pointer only meaningful for paletted 4/8bpp modes
		const le16_t* CBA_; if (CF_TEXTMODE!=3) CBA_ = gpu_unai.inn.CBA;

		u8 r5, g5, b5;   // 5-bit light color (non-dithering path)
		u8 r8, g8, b8;   // 8-bit light color (dithering path)

		gcol_t l_gInc, l_gCol;

		if (CF_LIGHT) {
			if (CF_GOURAUD) {
				l_gInc = gpu_unai.inn.gInc;
				l_gCol = gpu_unai.inn.gCol;
			} else {
				if (CF_DITHER) {
					r8 = gpu_unai.inn.r8;
					g8 = gpu_unai.inn.g8;
					b8 = gpu_unai.inn.b8;
				} else {
					r5 = gpu_unai.inn.r5;
					g5 = gpu_unai.inn.g5;
					b5 = gpu_unai.inn.b5;
				}
			}
		}

		do
		{
			// Downscaler speed hack: skip pixels not visible on low-res screen
			if (CF_BLITMASK) { if ((bMsk>>((((uintptr_t)pDst)>>1)&7))&1) goto endpolytext; }
			if (CF_MASKCHECK || CF_BLEND) { uDst = le16_to_u16(*pDst); }
			if (CF_MASKCHECK) if (uDst&0x8000) { goto endpolytext; }

			//senquack - adapted to work with new 22.10 fixed point routines:
			// (UNAI originally used 16.16)
			if (CF_TEXTMODE==1) { // 4bpp (CLUT)
				u32 tu=(l_u>>10);
				u32 tv=l_v&l_v_msk;
				u8 rgb=((u8*)TBA_)[tv+(tu>>1)];
				uSrc=le16_to_u16(CBA_[(rgb>>((tu&1)<<2))&0xf]);
				if (!uSrc) goto endpolytext;  // texel 0 is transparent
			}
			if (CF_TEXTMODE==2) { // 8bpp (CLUT)
				u32 tv=l_v&l_v_msk;
				uSrc = le16_to_u16(CBA_[((u8*)TBA_)[tv+(l_u>>10)]]);
				if (!uSrc) goto endpolytext;
			}
			if (CF_TEXTMODE==3) { // 16bpp
				u32 tv=(l_v&l_v_msk)>>1;
				uSrc = le16_to_u16(TBA_[tv+(l_u>>10)]);
				if (!uSrc) goto endpolytext;
			}

			// Save source MSB, as blending or lighting will not (Silent Hill)
			if (CF_BLEND || CF_LIGHT) srcMSB = uSrc & 0x8000;

			// When textured, only dither when LIGHT (texture blend) is enabled
			// LIGHT && BLEND => dither
			// LIGHT && !BLEND => dither
			//!LIGHT && BLEND => no dither
			//!LIGHT && !BLEND => no dither

			if (CF_DITHER && CF_LIGHT) {
				u32 uSrc24;
				if ( CF_GOURAUD)
					uSrc24 = gpuLightingTXT24Gouraud(uSrc, l_gCol);
				if (!CF_GOURAUD)
					uSrc24 = gpuLightingTXT24(uSrc, r8, g8, b8);

				if (CF_BLEND && srcMSB)
					uSrc24 = gpuBlending24<CF_BLENDMODE>(uSrc24, uDst);

				uSrc = gpuColorQuantization24<CF_DITHER>(uSrc24, pDst);
			} else
			{
				if (CF_LIGHT) {
					if ( CF_GOURAUD)
						uSrc = gpuLightingTXTGouraud(uSrc, l_gCol);
					if (!CF_GOURAUD)
						uSrc = gpuLightingTXT(uSrc, r5, g5, b5);
				}

				// Semi-transparency only applies to texels with MSB set
				should_blend = MSB_PRESERVED ? uSrc & 0x8000 : srcMSB;
				if (CF_BLEND && should_blend)
					uSrc = gpuBlending<CF_BLENDMODE, skip_uSrc_mask>(uSrc, uDst);
			}

			if (CF_MASKSET) { *pDst = u16_to_le16(uSrc | 0x8000); }
			else if (!MSB_PRESERVED && (CF_BLEND || CF_LIGHT)) { *pDst = u16_to_le16(uSrc | srcMSB); }
			else { *pDst = u16_to_le16(uSrc); }
endpolytext:
			// Step texture coords / gradient even for skipped pixels
			pDst++;
			l_u = (l_u + l_u_inc) & l_u_msk;
			l_v += l_v_inc;
			if (CF_LIGHT && CF_GOURAUD)
				l_gCol.raw += l_gInc.raw;
		}
		while (--count);
	}
}
| 805 | |
#ifdef __arm__
// Forward a polygon span to a hand-optimized ARM ASM routine when one exists
// for this CF flag combination; otherwise fall back to the generic C++
// template renderer. CF is a compile-time constant, so the chain below is
// folded by the compiler into a single direct call.
template<int CF>
static void PolySpanMaybeAsm(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count)
{
	if      (CF == 0x02) poly_untex_st0_asm  (pDst, &gpu_unai.inn, count);
	else if (CF == 0x0a) poly_untex_st1_asm  (pDst, &gpu_unai.inn, count);
	else if (CF == 0x1a) poly_untex_st3_asm  (pDst, &gpu_unai.inn, count);
	else if (CF == 0x20) poly_4bpp_asm       (pDst, &gpu_unai.inn, count);
	else if (CF == 0x22) poly_4bpp_l0_st0_asm(pDst, &gpu_unai.inn, count);
	else if (CF == 0x40) poly_8bpp_asm       (pDst, &gpu_unai.inn, count);
	else if (CF == 0x42) poly_8bpp_l0_st0_asm(pDst, &gpu_unai.inn, count);
#ifdef HAVE_ARMV6
	// These variants use ARMv6-only instructions.
	else if (CF == 0x12) poly_untex_st2_asm  (pDst, &gpu_unai.inn, count);
	else if (CF == 0x21) poly_4bpp_l1_std_asm(pDst, &gpu_unai.inn, count);
	else if (CF == 0x23) poly_4bpp_l1_st0_asm(pDst, &gpu_unai.inn, count);
	else if (CF == 0x41) poly_8bpp_l1_std_asm(pDst, &gpu_unai.inn, count);
	else if (CF == 0x43) poly_8bpp_l1_st0_asm(pDst, &gpu_unai.inn, count);
#endif
	else                 gpuPolySpanFn<CF>(gpu_unai, pDst, count);
}
#endif
| 829 | |
| 830 | static void PolyNULL(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count) |
| 831 | { |
| 832 | #ifdef ENABLE_GPU_LOG_SUPPORT |
| 833 | fprintf(stdout,"PolyNULL()\n"); |
| 834 | #endif |
| 835 | } |
| 836 | |
| 837 | /////////////////////////////////////////////////////////////////////////////// |
| 838 | // Polygon innerloops driver |
| 839 | typedef void (*PP)(const gpu_unai_t &gpu_unai, le16_t *pDst, u32 count); |
| 840 | |
| 841 | // Template instantiation helper macros |
| 842 | #define TI(cf) gpuPolySpanFn<(cf)> |
| 843 | #define TN PolyNULL |
| 844 | #ifdef __arm__ |
| 845 | #define TA(cf) PolySpanMaybeAsm<(cf)> |
| 846 | #else |
| 847 | #define TA(cf) TI(cf) |
| 848 | #endif |
| 849 | #ifdef HAVE_ARMV6 |
| 850 | #define TA6(cf) PolySpanMaybeAsm<(cf)> |
| 851 | #else |
| 852 | #define TA6(cf) TI(cf) |
| 853 | #endif |
| 854 | #define TIBLOCK(ub) \ |
| 855 | TI((ub)|0x00), TI((ub)|0x01), TA6((ub)|0x02),TI((ub)|0x03), TI((ub)|0x04), TI((ub)|0x05), TI((ub)|0x06), TI((ub)|0x07), \ |
| 856 | TN, TN, TA((ub)|0x0a), TI((ub)|0x0b), TN, TN, TI((ub)|0x0e), TI((ub)|0x0f), \ |
| 857 | TN, TN, TA6((ub)|0x12),TI((ub)|0x13), TN, TN, TI((ub)|0x16), TI((ub)|0x17), \ |
| 858 | TN, TN, TA((ub)|0x1a), TI((ub)|0x1b), TN, TN, TI((ub)|0x1e), TI((ub)|0x1f), \ |
| 859 | TA((ub)|0x20), TA6((ub)|0x21),TA6((ub)|0x22),TA6((ub)|0x23),TI((ub)|0x24), TI((ub)|0x25), TI((ub)|0x26), TI((ub)|0x27), \ |
| 860 | TN, TN, TI((ub)|0x2a), TI((ub)|0x2b), TN, TN, TI((ub)|0x2e), TI((ub)|0x2f), \ |
| 861 | TN, TN, TI((ub)|0x32), TI((ub)|0x33), TN, TN, TI((ub)|0x36), TI((ub)|0x37), \ |
| 862 | TN, TN, TI((ub)|0x3a), TI((ub)|0x3b), TN, TN, TI((ub)|0x3e), TI((ub)|0x3f), \ |
| 863 | TA((ub)|0x40), TA6((ub)|0x41),TA6((ub)|0x42),TA6((ub)|0x43),TI((ub)|0x44), TI((ub)|0x45), TI((ub)|0x46), TI((ub)|0x47), \ |
| 864 | TN, TN, TI((ub)|0x4a), TI((ub)|0x4b), TN, TN, TI((ub)|0x4e), TI((ub)|0x4f), \ |
| 865 | TN, TN, TI((ub)|0x52), TI((ub)|0x53), TN, TN, TI((ub)|0x56), TI((ub)|0x57), \ |
| 866 | TN, TN, TI((ub)|0x5a), TI((ub)|0x5b), TN, TN, TI((ub)|0x5e), TI((ub)|0x5f), \ |
| 867 | TI((ub)|0x60), TI((ub)|0x61), TI((ub)|0x62), TI((ub)|0x63), TI((ub)|0x64), TI((ub)|0x65), TI((ub)|0x66), TI((ub)|0x67), \ |
| 868 | TN, TN, TI((ub)|0x6a), TI((ub)|0x6b), TN, TN, TI((ub)|0x6e), TI((ub)|0x6f), \ |
| 869 | TN, TN, TI((ub)|0x72), TI((ub)|0x73), TN, TN, TI((ub)|0x76), TI((ub)|0x77), \ |
| 870 | TN, TN, TI((ub)|0x7a), TI((ub)|0x7b), TN, TN, TI((ub)|0x7e), TI((ub)|0x7f), \ |
| 871 | TN, TI((ub)|0x81), TN, TI((ub)|0x83), TN, TI((ub)|0x85), TN, TI((ub)|0x87), \ |
| 872 | TN, TN, TN, TI((ub)|0x8b), TN, TN, TN, TI((ub)|0x8f), \ |
| 873 | TN, TN, TN, TI((ub)|0x93), TN, TN, TN, TI((ub)|0x97), \ |
| 874 | TN, TN, TN, TI((ub)|0x9b), TN, TN, TN, TI((ub)|0x9f), \ |
| 875 | TN, TI((ub)|0xa1), TN, TI((ub)|0xa3), TN, TI((ub)|0xa5), TN, TI((ub)|0xa7), \ |
| 876 | TN, TN, TN, TI((ub)|0xab), TN, TN, TN, TI((ub)|0xaf), \ |
| 877 | TN, TN, TN, TI((ub)|0xb3), TN, TN, TN, TI((ub)|0xb7), \ |
| 878 | TN, TN, TN, TI((ub)|0xbb), TN, TN, TN, TI((ub)|0xbf), \ |
| 879 | TN, TI((ub)|0xc1), TN, TI((ub)|0xc3), TN, TI((ub)|0xc5), TN, TI((ub)|0xc7), \ |
| 880 | TN, TN, TN, TI((ub)|0xcb), TN, TN, TN, TI((ub)|0xcf), \ |
| 881 | TN, TN, TN, TI((ub)|0xd3), TN, TN, TN, TI((ub)|0xd7), \ |
| 882 | TN, TN, TN, TI((ub)|0xdb), TN, TN, TN, TI((ub)|0xdf), \ |
| 883 | TN, TI((ub)|0xe1), TN, TI((ub)|0xe3), TN, TI((ub)|0xe5), TN, TI((ub)|0xe7), \ |
| 884 | TN, TN, TN, TI((ub)|0xeb), TN, TN, TN, TI((ub)|0xef), \ |
| 885 | TN, TN, TN, TI((ub)|0xf3), TN, TN, TN, TI((ub)|0xf7), \ |
| 886 | TN, TN, TN, TI((ub)|0xfb), TN, TN, TN, TI((ub)|0xff) |
| 887 | |
| 888 | const PP gpuPolySpanDrivers[2048] = { |
| 889 | TIBLOCK(0<<8), TIBLOCK(1<<8), TIBLOCK(2<<8), TIBLOCK(3<<8), |
| 890 | TIBLOCK(4<<8), TIBLOCK(5<<8), TIBLOCK(6<<8), TIBLOCK(7<<8) |
| 891 | }; |
| 892 | |
| 893 | #undef TI |
| 894 | #undef TN |
| 895 | #undef TIBLOCK |
| 896 | #undef TA |
| 897 | #undef TA6 |
| 898 | |
| 899 | #endif /* __GPU_UNAI_GPU_INNER_H__ */ |