--- /dev/null
+#ifndef COMMON_H
+#define COMMON_H
+
+/* Shared build header: fixed-width integer aliases plus the system and
+   project includes used by every translation unit in the renderer. */
+
+typedef signed char s8;
+typedef unsigned char u8;
+typedef signed short s16;
+typedef unsigned short u16;
+typedef signed int s32;
+typedef unsigned int u32;
+typedef signed long long int s64;
+typedef unsigned long long int u64;
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+
+#include "vector_ops.h"
+#include "psx_gpu.h"
+
+#endif
+
--- /dev/null
+/*
+ * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "common.h"
+
+/* Profiling/statistics counters, incremented throughout the rasterizer. */
+u32 span_pixels = 0;
+u32 span_pixel_blocks = 0;
+u32 span_pixel_blocks_unaligned = 0;
+u32 spans = 0;
+u32 triangles = 0;
+u32 sprites = 0;
+u32 sprites_4bpp = 0;
+u32 sprites_8bpp = 0;
+u32 sprites_16bpp = 0;
+u32 sprite_blocks = 0;
+u32 sprites_untextured = 0;
+u32 lines = 0;
+u32 trivial_rejects = 0;
+u32 texels_4bpp = 0;
+u32 texels_8bpp = 0;
+u32 texels_16bpp = 0;
+u32 texel_blocks_4bpp = 0;
+u32 texel_blocks_8bpp = 0;
+u32 texel_blocks_16bpp = 0;
+u32 texel_blocks_untextured = 0;
+u32 blend_blocks = 0;
+u32 untextured_pixels = 0;
+u32 blend_pixels = 0;
+u32 transparent_pixels = 0;
+u32 render_buffer_flushes = 0;
+u32 state_changes = 0;
+u32 left_split_triangles = 0;
+u32 flat_triangles = 0;
+u32 clipped_triangles = 0;
+u32 zero_block_spans = 0;
+u32 texture_cache_loads = 0;
+u32 false_modulated_triangles = 0;
+u32 false_modulated_sprites = 0;
+
+/* Per-height lookup filled in elsewhere; each entry packs an edge reciprocal
+   in the upper bits (extracted with >> 12) and a 5-bit shift amount in the
+   low bits -- see compute_edge_delta_x2/x3 below. */
+u32 reciprocal_table[512];
+
+
+/* Fixed-point configuration: interpolants carry FIXED_BITS (12) fractional
+   bits in a signed 32-bit container. */
+typedef s32 fixed_type;
+
+#define EDGE_STEP_BITS 32
+#define FIXED_BITS 12
+
+/* Integer -> fixed point, biased to the center of the integer cell. */
+#define fixed_center(value) \
+ ((((fixed_type)(value)) << FIXED_BITS) + (1 << (FIXED_BITS - 1))) \
+
+/* Plain integer -> fixed point. */
+#define int_to_fixed(value) \
+ (((fixed_type)(value)) << FIXED_BITS) \
+
+/* Fixed point -> integer (truncating toward negative infinity). */
+#define fixed_to_int(value) \
+ ((value) >> FIXED_BITS) \
+
+/* Fixed point <-> double, mainly useful for debugging/printing. */
+#define fixed_to_double(value) \
+ ((value) / (double)(1 << FIXED_BITS)) \
+
+#define double_to_fixed(value) \
+ (fixed_type)(((value) * (double)(1 << FIXED_BITS))) \
+
+/* Signatures of the four per-block pipeline stages; concrete functions are
+   selected per primitive/render state. */
+typedef void (setup_blocks_function_type)(psx_gpu_struct *psx_gpu);
+typedef void (texture_blocks_function_type)(psx_gpu_struct *psx_gpu);
+typedef void (shade_blocks_function_type)(psx_gpu_struct *psx_gpu);
+typedef void (blend_blocks_function_type)(psx_gpu_struct *psx_gpu);
+
+/* Sprite renderer entry point: draws a width x height sprite at (x, y) with
+   texture origin (u, v) and flat color `color`. */
+typedef void (setup_sprite_function_type)(psx_gpu_struct *psx_gpu, s32 x,
+ s32 y, s32 u, s32 v, s32 width, s32 height, u32 color);
+
+/* One entry of the render-pipeline dispatch table: the four stage functions
+   used to process the currently batched blocks.
+   NOTE(review): setup_blocks is untyped (void *), presumably because setup
+   functions come in more than one signature and are cast at the call
+   site -- confirm against the callers before changing it. */
+struct render_block_handler_struct
+{
+  void *setup_blocks;
+  texture_blocks_function_type *texture_blocks;
+  shade_blocks_function_type *shade_blocks;
+  blend_blocks_function_type *blend_blocks;
+};
+
+#ifndef PANDORA_BUILD
+
+/* Computes a fixed-point reciprocal of denominator, returned together with
+ * the shift (stored through _shift) needed to scale products back down:
+ * result ~= 2^(62 - clz(denominator)) / denominator.
+ * The divide operands are assembled by writing raw IEEE-754 bit patterns.
+ * NOTE(review): the u64<->double punning through casted pointers violates
+ * strict aliasing; relies on -fno-strict-aliasing or a forgiving compiler.
+ * NOTE(review): numerator_b is assembled from numerator_u64 but the divide
+ * below uses the plain `numerator` double instead -- confirm this is
+ * intentional before touching it. */
+u32 fixed_reciprocal(u32 denominator, u32 *_shift)
+{
+  u32 shift = __builtin_clz(denominator);
+  u32 denominator_normalized = denominator << shift;
+
+  double numerator = (1ULL << 62) + denominator_normalized;
+  double numerator_b;
+
+  double denominator_normalized_dp_b;
+  u64 denominator_normalized_dp_u64;
+
+  u32 reciprocal;
+  double reciprocal_dp;
+
+  // Bit pattern for a double with (unbiased) exponent 62 and the top bits
+  // of the normalized denominator as mantissa.
+  u64 numerator_u64 = (denominator_normalized >> 10) |
+   ((u64)(62 + 1023) << 52);
+  *((u64 *)(&numerator_b)) = numerator_u64;
+
+  // Bit pattern for the denominator scaled into [0.5, 1.0) * 2^31
+  // (biased exponent 1022 + 31).
+  denominator_normalized_dp_u64 =
+   (u64)(denominator_normalized << 21) |
+   ((u64)((denominator_normalized >> 11) + ((1022 + 31) << 20)) << 32);
+  *((u64 *)(&denominator_normalized_dp_b)) = denominator_normalized_dp_u64;
+
+  // Implement with a DP divide
+  reciprocal_dp = numerator / denominator_normalized_dp_b;
+  reciprocal = reciprocal_dp;
+
+  // Round-off guard: snap 0x80000001 back down to 0x80000000.
+  if(reciprocal == 0x80000001)
+    reciprocal = 0x80000000;
+
+  *_shift = 62 - shift;
+  return reciprocal;
+}
+
+/* Models a hardware reciprocal-estimate instruction: quantize the input to
+ * 1/512 steps, take the reciprocal at the step midpoint, then round that
+ * reciprocal to the nearest multiple of 1/256. */
+double reciprocal_estimate(double a)
+{
+  /* Index of the 1/512-wide bucket containing a (truncated toward zero). */
+  int bucket = (int)(a * 512.0);
+
+  /* Reciprocal evaluated at the bucket midpoint. */
+  double recip = 1.0 / (((double)bucket + 0.5) / 512.0);
+
+  /* Quantize to units of 1/256, rounding to nearest. */
+  int quantized = (int)(256.0 * recip + 0.5);
+
+  return (double)quantized / 256.0;
+}
+
+/* Integer front-end for reciprocal_estimate(): treats the normalized input
+ * (top bit must be set) as the mantissa of a double in [0.5, 1.0), runs the
+ * estimate, and returns the result's mantissa with the top bit forced back
+ * on. Returns 0xFFFFFFFF when the input is not normalized. The volatile
+ * double/u64 overlay forces the bit-level stores and reloads to actually
+ * happen in order. */
+u32 reciprocal_estimate_u32(u32 value)
+{
+  u64 dp_value_u64;
+  volatile double dp_value;
+  volatile u64 *dp_value_ptr = (volatile u64 *)&dp_value;
+
+  // Input not normalized: estimate saturates.
+  if((value >> 31) == 0)
+    return 0xFFFFFFFF;
+
+  // Biased exponent 0x3FE (value in [0.5, 1.0)) with the low 31 bits of
+  // `value` as the mantissa's top bits.
+  dp_value_u64 = (0x3FEULL << (31 + 21)) | ((u64)(value & 0x7FFFFFFF) << 21);
+
+  *dp_value_ptr = dp_value_u64;
+
+  dp_value = reciprocal_estimate(dp_value);
+  dp_value_u64 = *dp_value_ptr;
+
+  return (0x80000000 | ((dp_value_u64 >> 21) & 0x7FFFFFFF));
+}
+
+/* Fixed-point reciprocal matching fixed_reciprocal()'s contract, built from
+ * a hardware-style estimate refined with three Newton-Raphson iterations.
+ * Each step computes r = r * (2 - d * r); the two's-complement negate of
+ * the 0.31-format product is the fixed-point form of (2 - d * r).
+ * *_shift receives the usual 62 - clz(value) scaling shift. */
+u32 fixed_reciprocal_nr(u32 value, u32 *_shift)
+{
+  u32 shift = __builtin_clz(value);
+  u32 value_normalized = value << shift;
+
+  *_shift = 62 - shift;
+
+  // NOTE(review): the -2 bias presumably keeps the refined result from
+  // overshooting on exact powers of two -- confirm before changing.
+  value_normalized -= 2;
+
+  u32 reciprocal_normalized = reciprocal_estimate_u32(value_normalized) >> 1;
+
+  // Three Newton-Raphson refinement steps.
+  u32 temp = -(((u64)value_normalized * (u32)reciprocal_normalized) >> 31);
+  reciprocal_normalized = (((u64)reciprocal_normalized * temp) >> 31);
+  temp = -(((u64)value_normalized * (u32)reciprocal_normalized) >> 31);
+  reciprocal_normalized = (((u64)reciprocal_normalized * temp) >> 31);
+  temp = -(((u64)value_normalized * (u32)reciprocal_normalized) >> 31);
+  reciprocal_normalized = (((u64)reciprocal_normalized * temp) >> 31);
+
+  return reciprocal_normalized;
+}
+
+#endif
+
+
+/* Twice the signed area of triangle (x0,y0)-(x1,y1)-(x2,y2); the sign
+ * encodes the winding direction (cross product of the two edge vectors). */
+s32 triangle_signed_area_x2(s32 x0, s32 y0, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+  s32 edge_ab_x = x1 - x0;
+  s32 edge_ab_y = y1 - y0;
+  s32 edge_bc_x = x2 - x1;
+  s32 edge_bc_y = y2 - y1;
+
+  return (edge_ab_x * edge_bc_y) - (edge_bc_x * edge_ab_y);
+}
+
+/* Builds a 32-bit dirty mask covering the texture-cache regions touched by
+ * the VRAM rectangle (x1, y1)-(x2, y2). Each bit maps to one 64-texel-wide
+ * column; the low 16 bits cover the upper 256-line half of VRAM and the
+ * high 16 bits the lower half.
+ *
+ * Fixes versus the original: the down-right column mask is computed with a
+ * 64-bit shift so that coverage_x == 31 no longer left-shifts a 32-bit
+ * value by 32 (undefined behavior, C11 6.5.7), and the up-left mask's
+ * negative-x guard now runs BEFORE the shift so a negative shift count
+ * (also UB) is never evaluated. Results are unchanged for all inputs that
+ * previously avoided the UB. */
+u32 texture_region_mask(s32 x1, s32 y1, s32 x2, s32 y2)
+{
+  s32 coverage_x, coverage_y;
+
+  u32 mask_up_left;
+  u32 mask_down_right;
+
+  // Region column/row of the bottom-right corner, clamped to the grid.
+  coverage_x = x2 >> 6;
+  coverage_y = y2 >> 8;
+
+  if(coverage_x < 0)
+    coverage_x = 0;
+
+  if(coverage_x > 31)
+    coverage_x = 31;
+
+  // All columns up to and including coverage_x; the 64-bit shift stays
+  // defined even when coverage_x + 1 == 32.
+  mask_down_right = (u32)(~(0xFFFFFFFFULL << (coverage_x + 1))) & 0xFFFF;
+
+  // Rectangle reaches into the lower half of VRAM.
+  if(coverage_y >= 1)
+    mask_down_right |= mask_down_right << 16;
+
+  coverage_x = x1 >> 6;
+
+  // All columns from coverage_x upward; guard first so a negative count is
+  // never used as a shift amount.
+  if(coverage_x < 0)
+    mask_up_left = 0xFFFF0000;
+  else
+    mask_up_left = 0xFFFF0000 << coverage_x;
+
+  coverage_y = y1 >> 8;
+  if(coverage_y <= 0)
+    mask_up_left |= mask_up_left >> 16;
+
+  return mask_up_left & mask_down_right;
+}
+
+/* Marks every texture-cache page overlapped by the given VRAM rectangle as
+ * dirty in all three cache tag sets, and returns the region mask used. */
+u32 invalidate_texture_cache_region(psx_gpu_struct *psx_gpu, u32 x1, u32 y1,
+ u32 x2, u32 y2)
+{
+  u32 region_mask = texture_region_mask(x1, y1, x2, y2);
+
+  psx_gpu->dirty_textures_8bpp_mask |= region_mask;
+  psx_gpu->dirty_textures_8bpp_alternate_mask |= region_mask;
+  psx_gpu->dirty_textures_4bpp_mask |= region_mask;
+
+  return region_mask;
+}
+
+/* Same as invalidate_texture_cache_region(), but restricted to the pages
+ * covered by the current viewport mask. Returns the restricted mask. */
+u32 invalidate_texture_cache_region_viewport(psx_gpu_struct *psx_gpu, u32 x1,
+ u32 y1, u32 x2, u32 y2)
+{
+  u32 dirty_mask = texture_region_mask(x1, y1, x2, y2);
+
+  dirty_mask &= psx_gpu->viewport_mask;
+
+  psx_gpu->dirty_textures_8bpp_mask |= dirty_mask;
+  psx_gpu->dirty_textures_8bpp_alternate_mask |= dirty_mask;
+  psx_gpu->dirty_textures_4bpp_mask |= dirty_mask;
+
+  return dirty_mask;
+}
+
+
+void update_texture_8bpp_cache_slice(psx_gpu_struct *psx_gpu,
+ u32 texture_page);
+
+#ifndef PANDORA_BUILD
+
+/* Expands the current 4bpp texture page from VRAM into the byte-per-texel
+   texture cache. The page is walked as a 16x16 grid of tiles, each tile as
+   16 rows of 4 VRAM words; every 16-bit VRAM word holds four 4-bit texels
+   that are unpacked to one byte each. Clears the page's 4bpp dirty bits. */
+void update_texture_4bpp_cache(psx_gpu_struct *psx_gpu)
+{
+  u32 current_texture_page = psx_gpu->current_texture_page;
+  u8 *texture_page_ptr = psx_gpu->texture_page_ptr;
+  u16 *vram_ptr = psx_gpu->vram_ptr;
+
+  u32 texel_block;
+  u32 tile_x, tile_y;
+  u32 sub_x, sub_y;
+
+  // Seek to the page: bit 4 selects the 256-line VRAM half, low bits the
+  // 64-word column.
+  vram_ptr += (current_texture_page >> 4) * 256 * 1024;
+  vram_ptr += (current_texture_page & 0xF) * 64;
+
+  texture_cache_loads++;
+
+  tile_y = 16;
+  tile_x = 16;
+  sub_x = 4;
+  sub_y = 16;
+
+  psx_gpu->dirty_textures_4bpp_mask &= ~(psx_gpu->current_texture_mask);
+
+  while(tile_y)
+  {
+    while(tile_x)
+    {
+      while(sub_y)
+      {
+        while(sub_x)
+        {
+          // Unpack one VRAM word into four cache bytes.
+          texel_block = *vram_ptr;
+          texture_page_ptr[0] = texel_block & 0xF;
+          texture_page_ptr[1] = (texel_block >> 4) & 0xF;
+          texture_page_ptr[2] = (texel_block >> 8) & 0xF;
+          texture_page_ptr[3] = texel_block >> 12;
+
+          vram_ptr++;
+          texture_page_ptr += 4;
+
+          sub_x--;
+        }
+
+        // Back to the row start, then down one VRAM line (1024 words).
+        vram_ptr -= 4;
+        sub_x = 4;
+
+        sub_y--;
+        vram_ptr += 1024;
+      }
+
+      sub_y = 16;
+
+      // Rewind the 16 lines just walked and advance to the next tile.
+      vram_ptr -= (1024 * 16) - 4;
+      tile_x--;
+    }
+
+    tile_x = 16;
+
+    // Next tile row: down 16 lines, back across the 16 tiles just done.
+    vram_ptr += (16 * 1024) - (4 * 16);
+    tile_y--;
+  }
+}
+
+/* Copies one 8bpp texture page from VRAM into the texture cache using
+   128-bit vector loads/stores: 16 rows of 8 tiles, each tile 16 lines of
+   8 VRAM words. If the requested page differs from the current page in
+   bit 0, the copy targets the second half of the cache buffer. */
+void update_texture_8bpp_cache_slice(psx_gpu_struct *psx_gpu,
+ u32 texture_page)
+{
+  u16 *texture_page_ptr = psx_gpu->texture_page_ptr;
+  u16 *vram_ptr = psx_gpu->vram_ptr;
+
+  u32 tile_x, tile_y;
+  u32 sub_y;
+
+  vec_8x16u texels;
+
+  texture_cache_loads++;
+
+  // Seek to the page, as in update_texture_4bpp_cache().
+  vram_ptr += (texture_page >> 4) * 256 * 1024;
+  vram_ptr += (texture_page & 0xF) * 64;
+
+  // Odd/even page mismatch selects the cache's second slice.
+  if((texture_page ^ psx_gpu->current_texture_page) & 0x1)
+    texture_page_ptr += (8 * 16) * 8;
+
+  tile_x = 8;
+  tile_y = 16;
+
+  sub_y = 16;
+
+  while(tile_y)
+  {
+    while(tile_x)
+    {
+      while(sub_y)
+      {
+        // One 128-bit (8 texel) row copy per iteration.
+        load_128b(texels, vram_ptr);
+        store_128b(texels, texture_page_ptr);
+
+        texture_page_ptr += 8;
+        vram_ptr += 1024;
+
+        sub_y--;
+      }
+
+      sub_y = 16;
+
+      // Rewind the 16 lines and move to the next tile column.
+      vram_ptr -= (1024 * 16);
+      vram_ptr += 8;
+
+      tile_x--;
+    }
+
+    tile_x = 8;
+
+    // Next tile row: rewind the 8 columns, advance 16 VRAM lines.
+    vram_ptr -= (8 * 8);
+    vram_ptr += (16 * 1024);
+
+    texture_page_ptr += (8 * 16) * 8;
+    tile_y--;
+  }
+}
+
+#endif
+
+
+/* Refreshes the 8bpp texture cache for the current texture page and, if
+ * anything else in the covered mask is dirty, the horizontally adjacent
+ * page as well. Only pages flagged dirty and covered by the current
+ * texture mask are reloaded. */
+void update_texture_8bpp_cache(psx_gpu_struct *psx_gpu)
+{
+  u32 page = psx_gpu->current_texture_page;
+  u32 pending =
+   psx_gpu->dirty_textures_8bpp_mask & psx_gpu->current_texture_mask;
+
+  psx_gpu->dirty_textures_8bpp_mask &= ~pending;
+
+  if(pending & (1 << page))
+  {
+    update_texture_8bpp_cache_slice(psx_gpu, page);
+    pending &= ~(1 << page);
+  }
+
+  if(pending)
+  {
+    // Wrap within the same 16-page row when crossing the right edge.
+    u32 neighbor_page = ((page + 1) & 0xF) | (page & 0x10);
+
+    update_texture_8bpp_cache_slice(psx_gpu, neighbor_page);
+  }
+}
+
+void setup_blocks_shaded_untextured_undithered_unswizzled_indirect(
+ psx_gpu_struct *psx_gpu);
+
+/* Runs the texture, shade, and blend stages of the current block handler
+ * over every queued pixel block, then empties the queue. A no-op when the
+ * queue is already empty. */
+void flush_render_block_buffer(psx_gpu_struct *psx_gpu)
+{
+  render_block_handler_struct *handler;
+
+  if(psx_gpu->num_blocks == 0)
+    return;
+
+  handler = psx_gpu->render_block_handler;
+
+  handler->texture_blocks(psx_gpu);
+  handler->shade_blocks(psx_gpu);
+  handler->blend_blocks(psx_gpu);
+
+  span_pixel_blocks += psx_gpu->num_blocks;
+  render_buffer_flushes++;
+
+  psx_gpu->num_blocks = 0;
+}
+
+
+void compute_all_gradients(psx_gpu_struct *psx_gpu, vertex_struct *a,
+ vertex_struct *b, vertex_struct *c);
+
+#ifndef PANDORA_BUILD
+
+/* Loads one vertex's attributes into the gradient-setup vector registers:
+   a uvrg lane vector plus duplicated x/y vectors, with the b component
+   spliced into fixed lanes (layouts described inside). */
+#define setup_gradient_calculation_input(set, vertex) \
+  /* First type is: uvrg bxxx xxxx */\
+  /* Second type is: yyyy ybyy uvrg */\
+  /* Since x_a and y_c are the same the same variable is used for both. */\
+  x##set##_a_y##set##_c.e[0] = vertex->u; \
+  x##set##_a_y##set##_c.e[1] = vertex->v; \
+  x##set##_a_y##set##_c.e[2] = vertex->r; \
+  x##set##_a_y##set##_c.e[3] = vertex->g; \
+  dup_4x16b(x##set##_b, vertex->x); \
+  dup_4x16b(x##set##_c, vertex->x); \
+  dup_4x16b(y##set##_a, vertex->y); \
+  dup_4x16b(y##set##_b, vertex->y); \
+  x##set##_b.e[0] = vertex->b; \
+  y##set##_b.e[1] = vertex->b \
+
+
+/* Computes the screen-space gradients (d/dx and d/dy) of the interpolants
+   u, v, r, g, b for triangle a-b-c via the area-reciprocal method, entirely
+   in the vector_ops SIMD primitives, then writes the interpolant bases,
+   per-pixel deltas, and per-block span step tables into psx_gpu. */
+void compute_all_gradients(psx_gpu_struct *psx_gpu, vertex_struct *a,
+ vertex_struct *b, vertex_struct *c)
+{
+  u32 triangle_area = psx_gpu->triangle_area;
+  u32 winding_mask_scalar;
+
+  u32 triangle_area_shift;
+  u64 triangle_area_reciprocal =
+   fixed_reciprocal(triangle_area, &triangle_area_shift);
+  triangle_area_shift = -(triangle_area_shift - FIXED_BITS);
+
+  // ((x1 - x0) * (y2 - y1)) - ((x2 - x1) * (y1 - y0)) =
+  // ( d0 * d1 ) - ( d2 * d3 ) =
+  // ( m0 ) - ( m1 ) = gradient
+
+  // This is split to do 12 elements at a time over three sets: a, b, and c.
+  // Technically we only need to do 10 elements (uvrgb_x and uvrgb_y), so
+  // two of the slots are unused.
+
+  // Inputs are all 16-bit signed. The m0/m1 results are 32-bit signed, as
+  // is g.
+
+  vec_4x16s x0_a_y0_c, x0_b, x0_c;
+  vec_4x16s y0_a, y0_b;
+  vec_4x16s x1_a_y1_c, x1_b, x1_c;
+  vec_4x16s y1_a, y1_b;
+  vec_4x16s x2_a_y2_c, x2_b, x2_c;
+  vec_4x16s y2_a, y2_b;
+
+  vec_4x32u uvrg_base;
+  vec_4x32u b_base;
+  vec_4x32u const_0x8000;
+
+  vec_4x16s d0_a_d3_c, d0_b, d0_c;
+  vec_4x16s d1_a, d1_b, d1_c_d2_a;
+  vec_4x16s d2_b, d2_c;
+  vec_4x16s d3_a, d3_b;
+
+  vec_4x32s m0_a, m0_b, m0_c;
+  vec_4x32s m1_a, m1_b, m1_c;
+
+  vec_4x32u gradient_area_a, gradient_area_c;
+  vec_2x32u gradient_area_b;
+
+  vec_4x32u gradient_area_sign_a, gradient_area_sign_c;
+  vec_2x32u gradient_area_sign_b;
+  vec_4x32u winding_mask;
+
+  vec_2x64u gradient_wide_a0, gradient_wide_a1;
+  vec_2x64u gradient_wide_c0, gradient_wide_c1;
+  vec_2x64u gradient_wide_b;
+
+  vec_4x32u gradient_a, gradient_c;
+  vec_2x32u gradient_b;
+  vec_16x8s gradient_shift;
+
+  setup_gradient_calculation_input(0, a);
+  setup_gradient_calculation_input(1, b);
+  setup_gradient_calculation_input(2, c);
+
+  // Bases start at vertex a's attributes, widened to fixed point with a
+  // half-texel (0x8000) center bias.
+  dup_4x32b(const_0x8000, 0x8000);
+  shl_long_4x16b(uvrg_base, x0_a_y0_c, 16);
+  shl_long_4x16b(b_base, x0_b, 16);
+
+  add_4x32b(uvrg_base, uvrg_base, const_0x8000);
+  add_4x32b(b_base, b_base, const_0x8000);
+
+  // Can probably pair these, but it'll require careful register allocation
+  sub_4x16b(d0_a_d3_c, x1_a_y1_c, x0_a_y0_c);
+  sub_4x16b(d1_c_d2_a, x2_a_y2_c, x1_a_y1_c);
+
+  sub_4x16b(d0_b, x1_b, x0_b);
+  sub_4x16b(d0_c, x1_c, x0_c);
+
+  sub_4x16b(d1_a, y2_a, y1_a);
+  sub_4x16b(d1_b, y2_b, y1_b);
+
+  sub_4x16b(d2_b, x2_b, x1_b);
+  sub_4x16b(d2_c, x2_c, x1_c);
+
+  sub_4x16b(d3_a, y1_a, y0_a);
+  sub_4x16b(d3_b, y1_b, y0_b);
+
+  mul_long_4x16b(m0_a, d0_a_d3_c, d1_a);
+  mul_long_4x16b(m0_b, d0_b, d1_b);
+  mul_long_4x16b(m0_c, d0_c, d1_c_d2_a);
+
+  mul_long_4x16b(m1_a, d1_c_d2_a, d3_a);
+  mul_long_4x16b(m1_b, d2_b, d3_b);
+  mul_long_4x16b(m1_c, d2_c, d0_a_d3_c);
+
+  sub_4x32b(gradient_area_a, m0_a, m1_a);
+  sub_2x32b(gradient_area_b, m0_b.low, m1_b.low);
+  sub_4x32b(gradient_area_c, m0_c, m1_c);
+
+  // Record each area term's sign, then continue on magnitudes.
+  cmpltz_4x32b(gradient_area_sign_a, gradient_area_a);
+  cmpltz_2x32b(gradient_area_sign_b, gradient_area_b);
+  cmpltz_4x32b(gradient_area_sign_c, gradient_area_c);
+
+  abs_4x32b(gradient_area_a, gradient_area_a);
+  abs_2x32b(gradient_area_b, gradient_area_b);
+  abs_4x32b(gradient_area_c, gradient_area_c);
+
+  // Triangle winding flips the sign of every gradient.
+  winding_mask_scalar = -psx_gpu->triangle_winding;
+
+  dup_4x32b(winding_mask, winding_mask_scalar);
+  eor_4x32b(gradient_area_sign_a, gradient_area_sign_a, winding_mask);
+  eor_2x32b(gradient_area_sign_b, gradient_area_sign_b, winding_mask);
+  eor_4x32b(gradient_area_sign_c, gradient_area_sign_c, winding_mask);
+
+  // Multiply the magnitudes by the fixed-point area reciprocal (64-bit
+  // intermediate results).
+  mul_scalar_long_2x32b(gradient_wide_a0,
+   vector_cast(vec_2x32s, gradient_area_a.low),
+   (s64)triangle_area_reciprocal);
+  mul_scalar_long_2x32b(gradient_wide_a1,
+   vector_cast(vec_2x32s, gradient_area_a.high),
+   (s64)triangle_area_reciprocal);
+  mul_scalar_long_2x32b(gradient_wide_b,
+   vector_cast(vec_2x32s, gradient_area_b),
+   (s64)triangle_area_reciprocal);
+  mul_scalar_long_2x32b(gradient_wide_c0,
+   vector_cast(vec_2x32s, gradient_area_c.low),
+   (s64)triangle_area_reciprocal);
+  mul_scalar_long_2x32b(gradient_wide_c1,
+   vector_cast(vec_2x32s, gradient_area_c.high),
+   (s64)triangle_area_reciprocal);
+
+  // Scale back down by the reciprocal's shift and narrow to 32 bits.
+  dup_16x8b(gradient_shift, triangle_area_shift);
+  shl_reg_2x64b(gradient_wide_a0, gradient_wide_a0,
+   vector_cast(vec_2x64u, gradient_shift));
+  shl_reg_2x64b(gradient_wide_a1, gradient_wide_a1,
+   vector_cast(vec_2x64u, gradient_shift));
+  shl_reg_2x64b(gradient_wide_b, gradient_wide_b,
+   vector_cast(vec_2x64u, gradient_shift));
+  shl_reg_2x64b(gradient_wide_c0, gradient_wide_c0,
+   vector_cast(vec_2x64u, gradient_shift));
+  shl_reg_2x64b(gradient_wide_c1, gradient_wide_c1,
+   vector_cast(vec_2x64u, gradient_shift));
+
+  mov_narrow_2x64b(gradient_a.low, gradient_wide_a0);
+  mov_narrow_2x64b(gradient_a.high, gradient_wide_a1);
+  mov_narrow_2x64b(gradient_b, gradient_wide_b);
+  mov_narrow_2x64b(gradient_c.low, gradient_wide_c0);
+  mov_narrow_2x64b(gradient_c.high, gradient_wide_c1);
+
+  shl_4x32b(gradient_a, gradient_a, 4);
+  shl_2x32b(gradient_b, gradient_b, 4);
+  shl_4x32b(gradient_c, gradient_c, 4);
+
+  // Reapply the recorded signs (two's complement via eor + sub).
+  eor_4x32b(gradient_a, gradient_a, gradient_area_sign_a);
+  eor_2x32b(gradient_b, gradient_b, gradient_area_sign_b);
+  eor_4x32b(gradient_c, gradient_c, gradient_area_sign_c);
+
+  sub_4x32b(gradient_a, gradient_a, gradient_area_sign_a);
+  sub_2x32b(gradient_b, gradient_b, gradient_area_sign_b);
+  sub_4x32b(gradient_c, gradient_c, gradient_area_sign_c);
+
+  // Re-reference the bases to x = 0 by subtracting gradient * a->x.
+  u32 left_adjust = a->x;
+  mls_scalar_4x32b(uvrg_base, gradient_a, left_adjust);
+  mls_scalar_2x32b(b_base.low, gradient_b, left_adjust);
+
+  vec_4x32u uvrg_dx2;
+  vec_2x32u b_dx2;
+
+  vec_4x32u uvrg_dx3;
+  vec_2x32u b_dx3;
+
+  vec_4x32u zero;
+
+  eor_4x32b(zero, zero, zero);
+  add_4x32b(uvrg_dx2, gradient_a, gradient_a);
+  add_2x32b(b_dx2, gradient_b, gradient_b);
+  add_4x32b(uvrg_dx3, gradient_a, uvrg_dx2);
+  add_2x32b(b_dx3, gradient_b, b_dx2);
+
+  // Can be done with vst4, assuming that the zero, dx, dx2, and dx3 are
+  // lined up properly
+  psx_gpu->u_block_span.e[0] = zero.e[0];
+  psx_gpu->u_block_span.e[1] = gradient_a.e[0];
+  psx_gpu->u_block_span.e[2] = uvrg_dx2.e[0];
+  psx_gpu->u_block_span.e[3] = uvrg_dx3.e[0];
+
+  psx_gpu->v_block_span.e[0] = zero.e[1];
+  psx_gpu->v_block_span.e[1] = gradient_a.e[1];
+  psx_gpu->v_block_span.e[2] = uvrg_dx2.e[1];
+  psx_gpu->v_block_span.e[3] = uvrg_dx3.e[1];
+
+  psx_gpu->r_block_span.e[0] = zero.e[2];
+  psx_gpu->r_block_span.e[1] = gradient_a.e[2];
+  psx_gpu->r_block_span.e[2] = uvrg_dx2.e[2];
+  psx_gpu->r_block_span.e[3] = uvrg_dx3.e[2];
+
+  psx_gpu->g_block_span.e[0] = zero.e[3];
+  psx_gpu->g_block_span.e[1] = gradient_a.e[3];
+  psx_gpu->g_block_span.e[2] = uvrg_dx2.e[3];
+  psx_gpu->g_block_span.e[3] = uvrg_dx3.e[3];
+
+  psx_gpu->b_block_span.e[0] = zero.e[0];
+  psx_gpu->b_block_span.e[1] = gradient_b.e[0];
+  psx_gpu->b_block_span.e[2] = b_dx2.e[0];
+  psx_gpu->b_block_span.e[3] = b_dx3.e[0];
+
+  psx_gpu->uvrg = uvrg_base;
+  psx_gpu->b = b_base.e[0];
+
+  psx_gpu->uvrg_dx = gradient_a;
+  psx_gpu->uvrg_dy = gradient_c;
+  psx_gpu->b_dy = gradient_b.e[1];
+}
+#endif
+
+/* Debug helper: byte-compares two vectors and prints both lane sets on a
+   mismatch (8-byte vectors print 2 lanes, larger ones print 4). */
+#define vector_check(_a, _b) \
+  if(memcmp(&_a, &_b, sizeof(_b))) \
+  { \
+    if(sizeof(_b) == 8) \
+    { \
+      printf("mismatch on %s vs %s: (%x %x) vs (%x %x)\n", \
+       #_a, #_b, _a.e[0], _a.e[1], _b.e[0], _b.e[1]); \
+    } \
+    else \
+    { \
+      printf("mismatch on %s vs %s: (%x %x %x %x) vs (%x %x %x %x)\n", \
+       #_a, #_b, _a.e[0], _a.e[1], _a.e[2], _a.e[3], _b.e[0], _b.e[1], \
+       _b.e[2], _b.e[3]); \
+    } \
+  } \
+
+/* Debug helper: compares two scalars and prints both on a mismatch. */
+#define scalar_check(_a, _b) \
+  if(_a != _b) \
+    printf("mismatch on %s %s: %x vs %x\n", #_a, #_b, _a, _b) \
+
+
+/* Extra locals for triangles whose minor side is split into two edges (the
+   "alternate" edge): a 64-bit x stepper for that edge plus the y midpoint
+   where spans switch over to it. */
+#define setup_spans_prologue_alternate_yes() \
+  vec_2x64s alternate_x; \
+  vec_2x64s alternate_dx_dy; \
+  vec_4x32s alternate_x_32; \
+  vec_2x32s alternate_x_16; \
+ \
+  vec_4x16u alternate_select; \
+  vec_4x16s y_mid_point; \
+ \
+  s32 y_b = v_b->y; \
+  s64 edge_alt; \
+  s32 edge_dx_dy_alt; \
+  u32 edge_shift_alt \
+
+#define setup_spans_prologue_alternate_no() \
+
+/* Common span-setup locals: edge position/step vectors, viewport clamp
+   vectors, the three vertex coordinates, and the interpolant bases and
+   y-deltas pulled from psx_gpu. */
+#define setup_spans_prologue(alternate_active) \
+  edge_data_struct *span_edge_data; \
+  vec_4x32u *span_uvrg_offset; \
+  u32 *span_b_offset; \
+ \
+  s32 clip; \
+ \
+  vec_2x64s edges_xy; \
+  vec_2x32s edges_dx_dy; \
+  vec_2x32u edge_shifts; \
+ \
+  vec_2x64s left_x, right_x; \
+  vec_2x64s left_dx_dy, right_dx_dy; \
+  vec_4x32s left_x_32, right_x_32; \
+  vec_8x16s left_right_x_16; \
+  vec_4x16s y_x4; \
+  vec_8x16s left_edge; \
+  vec_8x16s right_edge; \
+  vec_4x16u span_shift; \
+ \
+  vec_2x32u c_0x01; \
+  vec_4x16u c_0x04; \
+  vec_4x16u c_0xFFFE; \
+  vec_4x16u c_0x07; \
+ \
+  vec_2x32s x_starts; \
+  vec_2x32s x_ends; \
+ \
+  s32 x_a = v_a->x; \
+  s32 x_b = v_b->x; \
+  s32 x_c = v_c->x; \
+  s32 y_a = v_a->y; \
+  s32 y_c = v_c->y; \
+ \
+  vec_4x32u uvrg = psx_gpu->uvrg; \
+  vec_4x32u uvrg_dy = psx_gpu->uvrg_dy; \
+  u32 b = psx_gpu->b; \
+  u32 b_dy = psx_gpu->b_dy; \
+ \
+  dup_2x32b(c_0x01, 0x01); \
+  setup_spans_prologue_alternate_##alternate_active() \
+
+/* Second half of the prologue, run after clipping: span output pointers
+   and the viewport clamp / rounding constants. */
+#define setup_spans_prologue_b() \
+  span_edge_data = psx_gpu->span_edge_data; \
+  span_uvrg_offset = psx_gpu->span_uvrg_offset; \
+  span_b_offset = psx_gpu->span_b_offset; \
+ \
+  vec_8x16u c_0x0001; \
+ \
+  dup_8x16b(c_0x0001, 0x0001); \
+  dup_8x16b(left_edge, psx_gpu->viewport_start_x); \
+  dup_8x16b(right_edge, psx_gpu->viewport_end_x); \
+  add_8x16b(right_edge, right_edge, c_0x0001); \
+  dup_4x16b(c_0x04, 0x04); \
+  dup_4x16b(c_0x07, 0x07); \
+  dup_4x16b(c_0xFFFE, 0xFFFE); \
+
+
+
+/* Edge stepping setup when exactly two edges are active (flat triangles of
+   equal edge height): converts the edge widths and the shared height into
+   per-line x increments using the packed reciprocal_table entry (upper
+   bits are the reciprocal, extracted with >> 12; low 5 bits the shift,
+   isolated by clearing the 0xE0 bits). */
+#define compute_edge_delta_x2() \
+{ \
+  vec_2x32s heights; \
+  vec_2x32s height_reciprocals; \
+  vec_2x32s heights_b; \
+  vec_4x32u widths; \
+ \
+  u32 edge_shift = reciprocal_table[height]; \
+ \
+  dup_2x32b(heights, height); \
+  sub_2x32b(widths, x_ends, x_starts); \
+ \
+  dup_2x32b(edge_shifts, edge_shift); \
+  sub_2x32b(heights_b, heights, c_0x01); \
+  shr_2x32b(height_reciprocals, edge_shifts, 12); \
+ \
+  mla_2x32b(heights_b, x_starts, heights); \
+  bic_immediate_4x16b(vector_cast(vec_4x16u, edge_shifts), 0xE0); \
+  mul_2x32b(edges_dx_dy, widths, height_reciprocals); \
+  mul_long_2x32b(edges_xy, heights_b, height_reciprocals); \
+} \
+
+/* Same idea for three active edges: two edges live in the vector registers
+   while the minor-b "alternate" edge is computed in scalar form. */
+#define compute_edge_delta_x3(start_c, height_a, height_b) \
+{ \
+  vec_2x32s heights; \
+  vec_2x32s height_reciprocals; \
+  vec_2x32s heights_b; \
+  vec_2x32u widths; \
+ \
+  u32 width_alt; \
+  s32 height_b_alt; \
+  u32 height_reciprocal_alt; \
+ \
+  heights.e[0] = height_a; \
+  heights.e[1] = height_b; \
+ \
+  edge_shifts.e[0] = reciprocal_table[height_a]; \
+  edge_shifts.e[1] = reciprocal_table[height_b]; \
+  edge_shift_alt = reciprocal_table[height_minor_b]; \
+ \
+  sub_2x32b(widths, x_ends, x_starts); \
+  width_alt = x_c - start_c; \
+ \
+  shr_2x32b(height_reciprocals, edge_shifts, 12); \
+  height_reciprocal_alt = edge_shift_alt >> 12; \
+ \
+  bic_immediate_4x16b(vector_cast(vec_4x16u, edge_shifts), 0xE0); \
+  edge_shift_alt &= 0x1F; \
+ \
+  sub_2x32b(heights_b, heights, c_0x01); \
+  height_b_alt = height_minor_b - 1; \
+ \
+  mla_2x32b(heights_b, x_starts, heights); \
+  height_b_alt += height_minor_b * start_c; \
+ \
+  mul_long_2x32b(edges_xy, heights_b, height_reciprocals); \
+  edge_alt = (s64)height_b_alt * height_reciprocal_alt; \
+ \
+  mul_2x32b(edges_dx_dy, widths, height_reciprocals); \
+  edge_dx_dy_alt = width_alt * height_reciprocal_alt; \
+} \
+
+
+
+/* Step the four per-span y coordinates one block (4 lines) up or down. */
+#define setup_spans_adjust_y_up() \
+  sub_4x32b(y_x4, y_x4, c_0x04) \
+
+#define setup_spans_adjust_y_down() \
+  add_4x32b(y_x4, y_x4, c_0x04) \
+
+/* Step the interpolant bases one line along y. */
+#define setup_spans_adjust_interpolants_up() \
+  sub_4x32b(uvrg, uvrg, uvrg_dy); \
+  b -= b_dy \
+
+#define setup_spans_adjust_interpolants_down() \
+  add_4x32b(uvrg, uvrg, uvrg_dy); \
+  b += b_dy \
+
+
+/* Advance the interpolants by `clip` whole lines when the triangle start
+   is clipped against the viewport. */
+#define setup_spans_clip_interpolants_increment() \
+  mla_scalar_4x32b(uvrg, uvrg_dy, clip); \
+  b += b_dy * clip \
+
+#define setup_spans_clip_interpolants_decrement() \
+  mls_scalar_4x32b(uvrg, uvrg_dy, clip); \
+  b -= b_dy * clip \
+
+/* The alternate edge stepper is advanced the same number of lines. */
+#define setup_spans_clip_alternate_yes() \
+  edge_alt += edge_dx_dy_alt * (s64)(clip) \
+
+#define setup_spans_clip_alternate_no() \
+
+/* Clip `clip` lines off the triangle start: advance the edge positions,
+   the alternate edge (if any), and the interpolants accordingly. */
+#define setup_spans_clip(direction, alternate_active) \
+{ \
+  clipped_triangles++; \
+  mla_scalar_long_2x32b(edges_xy, edges_dx_dy, (s64)clip); \
+  setup_spans_clip_alternate_##alternate_active(); \
+  setup_spans_clip_interpolants_##direction(); \
+} \
+
+
+
+/* Widens the packed edge x/dx values to 64 bits, applies each edge's
+   fractional shift, then seeds two interleaved (even/odd line) steppers
+   for the left and right edges. */
+#define setup_spans_adjust_edges_alternate_no(left_index, right_index) \
+{ \
+  vec_2x64u edge_shifts_64; \
+  vec_2x64s edges_dx_dy_64; \
+ \
+  mov_wide_2x32b(edge_shifts_64, edge_shifts); \
+  shl_variable_2x64b(edges_xy, edges_xy, edge_shifts_64); \
+ \
+  mov_wide_2x32b(edges_dx_dy_64, edges_dx_dy); \
+  shl_variable_2x64b(edges_dx_dy_64, edges_dx_dy_64, edge_shifts_64); \
+ \
+  left_x.e[0] = edges_xy.e[left_index]; \
+  right_x.e[0] = edges_xy.e[right_index]; \
+ \
+  left_dx_dy.e[0] = edges_dx_dy_64.e[left_index]; \
+  left_dx_dy.e[1] = edges_dx_dy_64.e[left_index]; \
+  right_dx_dy.e[0] = edges_dx_dy_64.e[right_index]; \
+  right_dx_dy.e[1] = edges_dx_dy_64.e[right_index]; \
+ \
+  add_1x64b(left_x.high, left_x.low, left_dx_dy.low); \
+  add_1x64b(right_x.high, right_x.low, right_dx_dy.low); \
+ \
+  add_2x64b(left_dx_dy, left_dx_dy, left_dx_dy); \
+  add_2x64b(right_dx_dy, right_dx_dy, right_dx_dy); \
+} \
+
+/* As above, plus the alternate-edge stepper and the y midpoint at which
+   spans switch from the minor-a edge to the minor-b edge. */
+#define setup_spans_adjust_edges_alternate_yes(left_index, right_index) \
+{ \
+  setup_spans_adjust_edges_alternate_no(left_index, right_index); \
+  s64 edge_dx_dy_alt_64; \
+ \
+  dup_4x16b(y_mid_point, y_b); \
+ \
+  edge_alt <<= edge_shift_alt; \
+  edge_dx_dy_alt_64 = (s64)edge_dx_dy_alt << edge_shift_alt; \
+ \
+  alternate_x.e[0] = edge_alt; \
+  alternate_dx_dy.e[0] = edge_dx_dy_alt_64; \
+  alternate_dx_dy.e[1] = edge_dx_dy_alt_64; \
+ \
+  add_1x64b(alternate_x.high, alternate_x.low, alternate_dx_dy.low); \
+  add_2x64b(alternate_dx_dy, alternate_dx_dy, alternate_dx_dy); \
+} \
+
+
+
+/* Per-line mask: true for lines past the y midpoint, i.e. lines that use
+   the alternate (minor-b) edge rather than the minor-a edge. */
+#define setup_spans_y_select_up() \
+  cmplt_4x16b(alternate_select, y_x4, y_mid_point) \
+
+#define setup_spans_y_select_down() \
+  cmpgt_4x16b(alternate_select, y_x4, y_mid_point) \
+
+#define setup_spans_y_select_alternate_yes(direction) \
+  setup_spans_y_select_##direction() \
+
+#define setup_spans_y_select_alternate_no(direction) \
+
+/* Substitute the alternate edge's x values into whichever side (left or
+   right) the split minor edge belongs to, where the mask selects it. */
+#define setup_spans_alternate_select_left() \
+  bit_4x16b(left_right_x_16.low, alternate_x_16, alternate_select) \
+
+#define setup_spans_alternate_select_right() \
+  bit_4x16b(left_right_x_16.high, alternate_x_16, alternate_select) \
+
+#define setup_spans_alternate_select_none() \
+
+/* Steps the alternate-edge stepper across four lines and narrows the four
+   x positions to 16 bits. */
+#define setup_spans_increment_alternate_yes() \
+  shr_narrow_2x64b(alternate_x_32.low, alternate_x, 32); \
+  add_2x64b(alternate_x, alternate_x, alternate_dx_dy); \
+  shr_narrow_2x64b(alternate_x_32.high, alternate_x, 32); \
+  add_2x64b(alternate_x, alternate_x, alternate_dx_dy); \
+  mov_narrow_4x32b(alternate_x_16, alternate_x_32) \
+
+#define setup_spans_increment_alternate_no() \
+
+
+/* Emits a block of four spans: stores the interpolant bases for each line,
+   steps the left/right (and alternate) edge positions across four lines,
+   clamps the span extents to the viewport, and records left x, 8-pixel
+   block count, right-edge pixel mask, and y for each span. */
+#define setup_spans_set_x4(alternate, direction, alternate_active) \
+{ \
+  span_uvrg_offset[0] = uvrg; \
+  span_b_offset[0] = b; \
+  setup_spans_adjust_interpolants_##direction(); \
+ \
+  span_uvrg_offset[1] = uvrg; \
+  span_b_offset[1] = b; \
+  setup_spans_adjust_interpolants_##direction(); \
+ \
+  span_uvrg_offset[2] = uvrg; \
+  span_b_offset[2] = b; \
+  setup_spans_adjust_interpolants_##direction(); \
+ \
+  span_uvrg_offset[3] = uvrg; \
+  span_b_offset[3] = b; \
+  setup_spans_adjust_interpolants_##direction(); \
+ \
+  span_uvrg_offset += 4; \
+  span_b_offset += 4; \
+ \
+  shr_narrow_2x64b(left_x_32.low, left_x, 32); \
+  shr_narrow_2x64b(right_x_32.low, right_x, 32); \
+ \
+  add_2x64b(left_x, left_x, left_dx_dy); \
+  add_2x64b(right_x, right_x, right_dx_dy); \
+ \
+  shr_narrow_2x64b(left_x_32.high, left_x, 32); \
+  shr_narrow_2x64b(right_x_32.high, right_x, 32); \
+ \
+  add_2x64b(left_x, left_x, left_dx_dy); \
+  add_2x64b(right_x, right_x, right_dx_dy); \
+ \
+  mov_narrow_4x32b(left_right_x_16.low, left_x_32); \
+  mov_narrow_4x32b(left_right_x_16.high, right_x_32); \
+ \
+  setup_spans_increment_alternate_##alternate_active(); \
+  setup_spans_y_select_alternate_##alternate_active(direction); \
+  setup_spans_alternate_select_##alternate(); \
+ \
+  max_8x16b(left_right_x_16, left_right_x_16, left_edge); \
+  min_8x16b(left_right_x_16, left_right_x_16, right_edge); \
+ \
+  sub_4x16b(left_right_x_16.high, left_right_x_16.high, left_right_x_16.low); \
+  add_4x16b(left_right_x_16.high, left_right_x_16.high, c_0x07); \
+  and_4x16b(span_shift, left_right_x_16.high, c_0x07); \
+  shl_variable_4x16b(span_shift, c_0xFFFE, span_shift); \
+  shr_4x16b(left_right_x_16.high, left_right_x_16.high, 3); \
+ \
+  u32 i; \
+  for(i = 0; i < 4; i++) \
+  { \
+    span_edge_data[i].left_x = left_right_x_16.low.e[i]; \
+    span_edge_data[i].num_blocks = left_right_x_16.high.e[i]; \
+    span_edge_data[i].right_mask = span_shift.e[i]; \
+    span_edge_data[i].y = y_x4.e[i]; \
+  } \
+ \
+  span_edge_data += 4; \
+ \
+  setup_spans_adjust_y_##direction(); \
+} \
+
+
+
+/* Re-references the alternate edge to the split point (vertex b) by
+   backing out the minor-a portion of its travel. */
+#define setup_spans_alternate_adjust_yes() \
+  edge_alt -= edge_dx_dy_alt * (s64)height_minor_a \
+
+#define setup_spans_alternate_adjust_no() \
+
+
+/* Walks spans downward from y_a: clips against the viewport bottom and
+   top, then emits spans in blocks of four until `height` is exhausted. */
+#define setup_spans_down(left_index, right_index, alternate, alternate_active) \
+  setup_spans_alternate_adjust_##alternate_active(); \
+  if(y_c > psx_gpu->viewport_end_y) \
+    height -= y_c - psx_gpu->viewport_end_y - 1; \
+ \
+  clip = psx_gpu->viewport_start_y - y_a; \
+  if(clip > 0) \
+  { \
+    height -= clip; \
+    y_a += clip; \
+    setup_spans_clip(increment, alternate_active); \
+  } \
+ \
+  setup_spans_prologue_b(); \
+ \
+  if(height > 0) \
+  { \
+    y_x4.e[0] = y_a; \
+    y_x4.e[1] = y_a + 1; \
+    y_x4.e[2] = y_a + 2; \
+    y_x4.e[3] = y_a + 3; \
+    setup_spans_adjust_edges_alternate_##alternate_active(left_index, \
+     right_index); \
+ \
+    psx_gpu->num_spans = height; \
+    do \
+    { \
+      setup_spans_set_x4(alternate, down, alternate_active); \
+      height -= 4; \
+    } while(height > 0); \
+  } \
+
+
+
+/* One pre-step of the alternate edge before walking upward. */
+#define setup_spans_alternate_pre_increment_yes() \
+  edge_alt += edge_dx_dy_alt \
+
+#define setup_spans_alternate_pre_increment_no() \
+
+#define setup_spans_up_decrement_height_yes() \
+  height-- \
+
+#define setup_spans_up_decrement_height_no() \
+  {} \
+
+/* Walks spans upward from y_a: clips against the viewport top and bottom,
+   pre-steps the edges one line, then emits spans in blocks of four until
+   `height` is exhausted. */
+#define setup_spans_up(left_index, right_index, alternate, alternate_active) \
+  setup_spans_alternate_adjust_##alternate_active(); \
+  y_a--; \
+ \
+  if(y_c < psx_gpu->viewport_start_y) \
+    height -= psx_gpu->viewport_start_y - y_c; \
+  else \
+    setup_spans_up_decrement_height_##alternate_active(); \
+ \
+  clip = y_a - psx_gpu->viewport_end_y; \
+  if(clip > 0) \
+  { \
+    height -= clip; \
+    y_a -= clip; \
+    setup_spans_clip(decrement, alternate_active); \
+  } \
+ \
+  setup_spans_prologue_b(); \
+ \
+  if(height > 0) \
+  { \
+    y_x4.e[0] = y_a; \
+    y_x4.e[1] = y_a - 1; \
+    y_x4.e[2] = y_a - 2; \
+    y_x4.e[3] = y_a - 3; \
+    add_wide_2x32b(edges_xy, edges_xy, edges_dx_dy); \
+    setup_spans_alternate_pre_increment_##alternate_active(); \
+    setup_spans_adjust_edges_alternate_##alternate_active(left_index, \
+     right_index); \
+    setup_spans_adjust_interpolants_up(); \
+ \
+    psx_gpu->num_spans = height; \
+    while(height > 0) \
+    { \
+      setup_spans_set_x4(alternate, up, alternate_active); \
+      height -= 4; \
+    } \
+  } \
+
+
+/* edges_xy/edges_dx_dy lane assignment: lane 0 is the left edge, lane 1
+   the right edge. */
+#define index_left 0
+#define index_right 1
+
+/* Entry template for triangles whose both non-major edges run upward:
+   vertex a is the bottom, c the top, with the split at b on the `minor`
+   side. */
+#define setup_spans_up_up(minor, major) \
+  setup_spans_prologue(yes); \
+  s32 height_minor_a = y_a - y_b; \
+  s32 height_minor_b = y_b - y_c; \
+  s32 height = y_a - y_c; \
+ \
+  dup_2x32b(x_starts, x_a); \
+  x_ends.e[0] = x_c; \
+  x_ends.e[1] = x_b; \
+ \
+  compute_edge_delta_x3(x_b, height, height_minor_a); \
+  setup_spans_up(index_##major, index_##minor, minor, yes) \
+
+
+
+void setup_spans_up_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+void setup_spans_up_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+void setup_spans_down_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+void setup_spans_down_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+void setup_spans_up_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+void setup_spans_up_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+void setup_spans_down_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+void setup_spans_down_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+void setup_spans_up_down(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c);
+
+
+#ifndef PANDORA_BUILD
+
+/* Upward triangle with the split (minor) edge on the left. */
+void setup_spans_up_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+  setup_spans_up_up(left, right);
+}
+
+/* Upward triangle with the split (minor) edge on the right. */
+void setup_spans_up_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+  setup_spans_up_up(right, left);
+}
+
+// Mirror of setup_spans_up_up for triangles rendered top-down: heights are
+// positive when y_a <= y_b <= y_c. Emits spans via setup_spans_down().
+#define setup_spans_down_down(minor, major) \
+ setup_spans_prologue(yes); \
+ s32 height_minor_a = y_b - y_a; \
+ s32 height_minor_b = y_c - y_b; \
+ s32 height = y_c - y_a; \
+ \
+ dup_2x32b(x_starts, x_a); \
+ x_ends.e[0] = x_c; \
+ x_ends.e[1] = x_b; \
+ \
+ compute_edge_delta_x3(x_b, height, height_minor_a); \
+ setup_spans_down(index_##major, index_##minor, minor, yes) \
+
+// Downward triangle with the split (minor) vertex on the left edge.
+void setup_spans_down_left(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+ setup_spans_down_down(left, right);
+}
+
+// Downward triangle with the split (minor) vertex on the right edge.
+void setup_spans_down_right(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+ setup_spans_down_down(right, left);
+}
+
+// Upward triangle with a flat (horizontal) edge: only two edge deltas are
+// needed and no alternate edge, hence the "none"/"no" arguments.
+#define setup_spans_up_flat() \
+ s32 height = y_a - y_c; \
+ \
+ flat_triangles++; \
+ compute_edge_delta_x2(); \
+ setup_spans_up(index_left, index_right, none, no) \
+
+// Upward triangle whose flat edge is at the bottom (shared by A and B);
+// both spans converge on the apex x_c.
+void setup_spans_up_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+ setup_spans_prologue(no);
+ x_starts.e[0] = x_a;
+ x_starts.e[1] = x_b;
+ dup_2x32b(x_ends, x_c);
+
+ setup_spans_up_flat();
+}
+
+// Upward triangle whose flat edge is at the top (shared by B and C);
+// both spans originate from the apex x_a.
+void setup_spans_up_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+ setup_spans_prologue(no);
+ dup_2x32b(x_starts, x_a);
+ x_ends.e[0] = x_b;
+ x_ends.e[1] = x_c;
+
+ setup_spans_up_flat();
+}
+
+// Downward counterpart of setup_spans_up_flat.
+#define setup_spans_down_flat() \
+ s32 height = y_c - y_a; \
+ \
+ flat_triangles++; \
+ compute_edge_delta_x2(); \
+ setup_spans_down(index_left, index_right, none, no) \
+
+// Downward triangle whose flat edge is at the top (shared by A and B);
+// both spans converge on the apex x_c.
+void setup_spans_down_a(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+ setup_spans_prologue(no);
+ x_starts.e[0] = x_a;
+ x_starts.e[1] = x_b;
+ dup_2x32b(x_ends, x_c);
+
+ setup_spans_down_flat();
+}
+
+// Downward triangle whose flat edge is at the bottom (shared by B and C);
+// both spans originate from the apex x_a.
+void setup_spans_down_b(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+ setup_spans_prologue(no);
+ dup_2x32b(x_starts, x_a);
+ x_ends.e[0] = x_b;
+ x_ends.e[1] = x_c;
+
+ setup_spans_down_flat();
+}
+
+// Triangle whose long edge crosses vertex A vertically: from the heights,
+// v_b has the smallest y and v_c the largest, with v_a in between. Spans
+// are generated in two passes — upward from A toward B using the alternate
+// edge, then downward from A toward C after re-seeding the interpolants.
+void setup_spans_up_down(psx_gpu_struct *psx_gpu, vertex_struct *v_a,
+ vertex_struct *v_b, vertex_struct *v_c)
+{
+ setup_spans_prologue(no);
+
+ s32 y_b = v_b->y;
+ s64 edge_alt;
+ s32 edge_dx_dy_alt;
+ u32 edge_shift_alt;
+
+ s32 middle_y = y_a;
+ s32 height_minor_a = y_a - y_b;
+ s32 height_minor_b = y_c - y_a;
+ s32 height_major = y_c - y_b;
+
+ vec_2x64s edges_xy_b;
+ vec_2x32s edges_dx_dy_b;
+ vec_2x32u edge_shifts_b;
+
+ vec_2x32s height_increment;
+
+ x_starts.e[0] = x_a;
+ x_starts.e[1] = x_c;
+ dup_2x32b(x_ends, x_b);
+
+ compute_edge_delta_x3(x_a, height_minor_a, height_major);
+
+ // Pre-advance the major edge by the downward section's height so the
+ // first (upward) pass starts from the correct position.
+ height_increment.e[0] = 0;
+ height_increment.e[1] = height_minor_b;
+
+ mla_long_2x32b(edges_xy, edges_dx_dy, height_increment);
+
+ // Save the edge set for the second (downward) pass; the alternate edge
+ // produced by compute_edge_delta_x3 replaces slot 0 there.
+ edges_xy_b.e[0] = edge_alt;
+ edges_xy_b.e[1] = edges_xy.e[1];
+
+ edge_shifts_b = edge_shifts;
+ edge_shifts_b.e[0] = edge_shift_alt;
+
+ neg_2x32b(edges_dx_dy_b, edges_dx_dy);
+ edges_dx_dy_b.e[0] = edge_dx_dy_alt;
+
+ y_a--;
+
+ // Clip the upward section against the viewport top and bottom.
+ if(y_b < psx_gpu->viewport_start_y)
+ height_minor_a -= psx_gpu->viewport_start_y - y_b;
+
+ clip = y_a - psx_gpu->viewport_end_y;
+ if(clip > 0)
+ {
+ height_minor_a -= clip;
+ y_a -= clip;
+ setup_spans_clip(decrement, no);
+ }
+
+ setup_spans_prologue_b();
+
+ if(height_minor_a > 0)
+ {
+ y_x4.e[0] = y_a;
+ y_x4.e[1] = y_a - 1;
+ y_x4.e[2] = y_a - 2;
+ y_x4.e[3] = y_a - 3;
+ add_wide_2x32b(edges_xy, edges_xy, edges_dx_dy);
+ setup_spans_adjust_edges_alternate_no(index_left, index_right);
+ setup_spans_adjust_interpolants_up();
+
+ psx_gpu->num_spans = height_minor_a;
+ while(height_minor_a > 0)
+ {
+ setup_spans_set_x4(none, up, no);
+ height_minor_a -= 4;
+ }
+
+ // The 4-at-a-time loop overshoots by up to 3 spans, leaving
+ // height_minor_a <= 0; adding it rewinds the output pointers to the
+ // exact end of the spans actually emitted.
+ span_edge_data += height_minor_a;
+ span_uvrg_offset += height_minor_a;
+ span_b_offset += height_minor_a;
+ }
+
+ // Switch to the saved downward edge set and restart interpolation at A.
+ edges_xy = edges_xy_b;
+ edges_dx_dy = edges_dx_dy_b;
+ edge_shifts = edge_shifts_b;
+
+ uvrg = psx_gpu->uvrg;
+ b = psx_gpu->b;
+
+ y_a = middle_y;
+
+ // Clip the downward section against the viewport bottom and top.
+ if(y_c > psx_gpu->viewport_end_y)
+ height_minor_b -= y_c - psx_gpu->viewport_end_y - 1;
+
+ clip = psx_gpu->viewport_start_y - y_a;
+ if(clip > 0)
+ {
+ height_minor_b -= clip;
+ y_a += clip;
+ setup_spans_clip(increment, no);
+ }
+
+ if(height_minor_b > 0)
+ {
+ y_x4.e[0] = y_a;
+ y_x4.e[1] = y_a + 1;
+ y_x4.e[2] = y_a + 2;
+ y_x4.e[3] = y_a + 3;
+ setup_spans_adjust_edges_alternate_no(index_left, index_right);
+
+ psx_gpu->num_spans += height_minor_b;
+ do
+ {
+ setup_spans_set_x4(none, down, no);
+ height_minor_b -= 4;
+ } while(height_minor_b > 0);
+ }
+
+ left_split_triangles++;
+}
+
+#endif
+
+
+// Identity wrapper for a dither table entry (no transformation applied).
+#define dither_table_entry_normal(value) \
+ (value) \
+
+// Indirect rendering defers mask-bit handling to the shading pass, so no
+// msb mask is materialized here.
+#define setup_blocks_load_msb_mask_indirect() \
+
+// Direct rendering writes pixels immediately, so replicate the mask-bit
+// (bit 15) setting across an 8x16 vector for OR-ing into output pixels.
+#define setup_blocks_load_msb_mask_direct() \
+ vec_8x16u msb_mask; \
+ dup_8x16b(msb_mask, psx_gpu->mask_msb); \
+
+
+// Per-triangle locals for shaded+textured block setup: 16.16 fixed-point
+// u/v/r/g/b accumulators, their per-4- and per-8-pixel step vectors, and
+// the texture coordinate wrap masks.
+#define setup_blocks_variables_shaded_textured(target) \
+ vec_4x32u u_block; \
+ vec_4x32u v_block; \
+ vec_4x32u r_block; \
+ vec_4x32u g_block; \
+ vec_4x32u b_block; \
+ vec_4x32u uvrg_dx = psx_gpu->uvrg_dx; \
+ vec_4x32u uvrg_dx4; \
+ vec_4x32u uvrg_dx8; \
+ vec_4x32u uvrg; \
+ u32 b_dx = psx_gpu->b_block_span.e[1]; \
+ u32 b_dx4 = b_dx << 2; \
+ u32 b_dx8 = b_dx << 3; \
+ u32 b; \
+ \
+ vec_16x8u texture_mask; \
+ shl_4x32b(uvrg_dx4, uvrg_dx, 2); \
+ shl_4x32b(uvrg_dx8, uvrg_dx, 3); \
+ dup_8x8b(texture_mask.low, psx_gpu->texture_mask_width); \
+ dup_8x8b(texture_mask.high, psx_gpu->texture_mask_height) \
+
+// Per-triangle locals for shaded, untextured block setup: r/g/b gradient
+// accumulators packed as one rgb vector (r,g from uvrg_dx.high, b from the
+// b span), plus small byte constants used for the 5:5:5 pack and dither.
+#define setup_blocks_variables_shaded_untextured(target) \
+ vec_4x32u r_block; \
+ vec_4x32u g_block; \
+ vec_4x32u b_block; \
+ vec_4x32u rgb_dx; \
+ vec_4x32u rgb_dx4; \
+ vec_4x32u rgb_dx8; \
+ vec_4x32u rgb; \
+ \
+ vec_8x8u d64_0x07; \
+ vec_8x8u d64_1; \
+ vec_8x8u d64_4; \
+ vec_8x8u d64_128; \
+ \
+ dup_8x8b(d64_0x07, 0x07); \
+ dup_8x8b(d64_1, 1); \
+ dup_8x8b(d64_4, 4); \
+ dup_8x8b(d64_128, 128); \
+ \
+ rgb_dx.low = psx_gpu->uvrg_dx.high; \
+ rgb_dx.e[2] = psx_gpu->b_block_span.e[1]; \
+ shl_4x32b(rgb_dx4, rgb_dx, 2); \
+ shl_4x32b(rgb_dx8, rgb_dx, 3) \
+
+// Per-triangle locals for unshaded (flat-color) textured block setup: only
+// u/v accumulators and steps are needed, plus the texture wrap masks.
+#define setup_blocks_variables_unshaded_textured(target) \
+ vec_4x32u u_block; \
+ vec_4x32u v_block; \
+ vec_2x32u uv_dx = psx_gpu->uvrg_dx.low; \
+ vec_2x32u uv_dx4; \
+ vec_2x32u uv_dx8; \
+ vec_2x32u uv = psx_gpu->uvrg.low; \
+ \
+ vec_16x8u texture_mask; \
+ shl_2x32b(uv_dx4, uv_dx, 2); \
+ shl_2x32b(uv_dx8, uv_dx, 3); \
+ dup_8x8b(texture_mask.low, psx_gpu->texture_mask_width); \
+ dup_8x8b(texture_mask.high, psx_gpu->texture_mask_height) \
+
+
+// Direct mode bakes the mask bit into the flat color up front, since the
+// same color vector is stored for every block.
+#define setup_blocks_variables_unshaded_untextured_direct() \
+ or_8x16b(colors, colors, msb_mask) \
+
+#define setup_blocks_variables_unshaded_untextured_indirect() \
+
+// Flat-color untextured setup: convert the 24-bit triangle color to a
+// replicated 5:5:5 pixel vector once per primitive.
+#define setup_blocks_variables_unshaded_untextured(target) \
+ u32 color = psx_gpu->triangle_color; \
+ vec_8x16u colors; \
+ \
+ u32 color_r = color & 0xFF; \
+ u32 color_g = (color >> 8) & 0xFF; \
+ u32 color_b = (color >> 16) & 0xFF; \
+ \
+ color = (color_r >> 3) | ((color_g >> 3) << 5) | \
+ ((color_b >> 3) << 10); \
+ dup_8x16b(colors, color); \
+ setup_blocks_variables_unshaded_untextured_##target() \
+
+// Textured dithering keeps offsets in 16-bit lanes, pre-shifted left by 4
+// to line up with the 16.16 color fraction used during modulation.
+#define setup_blocks_span_initialize_dithered_textured() \
+ vec_8x16u dither_offsets; \
+ shl_long_8x8b(dither_offsets, dither_offsets_short, 4) \
+
+// Untextured dithering biases the signed offsets by +4 so later saturating
+// math can subtract 4 back out without underflow.
+#define setup_blocks_span_initialize_dithered_untextured() \
+ vec_8x8u dither_offsets; \
+ add_8x8b(dither_offsets, dither_offsets_short, d64_4) \
+
+// Select the dither table row for this span's y and rotate it so the
+// pattern is phase-aligned with the span's starting x.
+#define setup_blocks_span_initialize_dithered(texturing) \
+ u32 dither_row = psx_gpu->dither_table[y & 0x3]; \
+ u32 dither_shift = (span_edge_data->left_x & 0x3) * 8; \
+ vec_8x8s dither_offsets_short; \
+ \
+ dither_row = \
+ (dither_row >> dither_shift) | (dither_row << (32 - dither_shift)); \
+ dup_2x32b(vector_cast(vec_2x32u, dither_offsets_short), dither_row); \
+ setup_blocks_span_initialize_dithered_##texturing() \
+
+#define setup_blocks_span_initialize_undithered(texturing) \
+
+
+// At each span start: seed u/v/r/g/b from the span's interpolants advanced
+// to left_x, then spread each across 4 lanes offset by the per-pixel block
+// span deltas so lane i holds the value for pixel i.
+#define setup_blocks_span_initialize_shaded_textured() \
+{ \
+ vec_4x32u block_span; \
+ u32 offset = span_edge_data->left_x; \
+ \
+ uvrg = *span_uvrg_offset; \
+ mla_scalar_4x32b(uvrg, uvrg_dx, offset); \
+ b = *span_b_offset; \
+ b += b_dx * offset; \
+ \
+ dup_4x32b(u_block, uvrg.e[0]); \
+ dup_4x32b(v_block, uvrg.e[1]); \
+ dup_4x32b(r_block, uvrg.e[2]); \
+ dup_4x32b(g_block, uvrg.e[3]); \
+ dup_4x32b(b_block, b); \
+ \
+ block_span = psx_gpu->u_block_span; \
+ add_4x32b(u_block, u_block, block_span); \
+ block_span = psx_gpu->v_block_span; \
+ add_4x32b(v_block, v_block, block_span); \
+ block_span = psx_gpu->r_block_span; \
+ add_4x32b(r_block, r_block, block_span); \
+ block_span = psx_gpu->g_block_span; \
+ add_4x32b(g_block, g_block, block_span); \
+ block_span = psx_gpu->b_block_span; \
+ add_4x32b(b_block, b_block, block_span); \
+}
+
+// At each span start: seed r/g/b (r,g from the span's uvrg high half, b
+// from the b span) advanced to left_x, then spread across 4 lanes with the
+// per-pixel block span deltas.
+#define setup_blocks_span_initialize_shaded_untextured() \
+{ \
+ vec_4x32u block_span; \
+ u32 offset = span_edge_data->left_x; \
+ \
+ rgb.low = span_uvrg_offset->high; \
+ rgb.high.e[0] = *span_b_offset; \
+ mla_scalar_4x32b(rgb, rgb_dx, offset); \
+ \
+ dup_4x32b(r_block, rgb.e[0]); \
+ dup_4x32b(g_block, rgb.e[1]); \
+ dup_4x32b(b_block, rgb.e[2]); \
+ \
+ block_span = psx_gpu->r_block_span; \
+ add_4x32b(r_block, r_block, block_span); \
+ block_span = psx_gpu->g_block_span; \
+ add_4x32b(g_block, g_block, block_span); \
+ block_span = psx_gpu->b_block_span; \
+ add_4x32b(b_block, b_block, block_span); \
+} \
+
+// At each span start: seed only u/v advanced to left_x, spread across 4
+// lanes with the per-pixel block span deltas.
+#define setup_blocks_span_initialize_unshaded_textured() \
+{ \
+ vec_4x32u block_span; \
+ u32 offset = span_edge_data->left_x; \
+ \
+ uv = span_uvrg_offset->low; \
+ mla_scalar_2x32b(uv, uv_dx, offset); \
+ \
+ dup_4x32b(u_block, uv.e[0]); \
+ dup_4x32b(v_block, uv.e[1]); \
+ \
+ block_span = psx_gpu->u_block_span; \
+ add_4x32b(u_block, u_block, block_span); \
+ block_span = psx_gpu->v_block_span; \
+ add_4x32b(v_block, v_block, block_span); \
+} \
+
+// Flat color needs no per-span initialization.
+#define setup_blocks_span_initialize_unshaded_untextured() \
+
+
+// Interleave the low nibbles of u into v and vice versa, producing the
+// swizzled layout used by the 4bpp texture cache.
+#define setup_blocks_texture_swizzled() \
+{ \
+ vec_8x8u u_saved = u; \
+ sli_8x8b(u, v, 4); \
+ sri_8x8b(v, u_saved, 4); \
+} \
+
+#define setup_blocks_texture_unswizzled() \
+
+// Emit one 8-pixel block for a shaded, textured span: extract the integer
+// parts of u/v/r/g/b for pixels 0-3 (low) and 4-7 (high, via the +4-pixel
+// step), advance the accumulators by 8 pixels, wrap u/v with the texture
+// masks, then queue the block for the texture/shade passes.
+#define setup_blocks_store_shaded_textured(swizzling, dithering, target, \
+ edge_type) \
+{ \
+ vec_8x16u u_whole; \
+ vec_8x16u v_whole; \
+ vec_8x16u r_whole; \
+ vec_8x16u g_whole; \
+ vec_8x16u b_whole; \
+ \
+ vec_8x8u u; \
+ vec_8x8u v; \
+ vec_8x8u r; \
+ vec_8x8u g; \
+ vec_8x8u b; \
+ vec_8x16u uv; \
+ \
+ vec_4x32u dx4; \
+ vec_4x32u dx8; \
+ \
+ shr_narrow_4x32b(u_whole.low, u_block, 16); \
+ shr_narrow_4x32b(v_whole.low, v_block, 16); \
+ shr_narrow_4x32b(r_whole.low, r_block, 16); \
+ shr_narrow_4x32b(g_whole.low, g_block, 16); \
+ shr_narrow_4x32b(b_whole.low, b_block, 16); \
+ \
+ dup_4x32b(dx4, uvrg_dx4.e[0]); \
+ add_high_narrow_4x32b(u_whole.high, u_block, dx4); \
+ dup_4x32b(dx4, uvrg_dx4.e[1]); \
+ add_high_narrow_4x32b(v_whole.high, v_block, dx4); \
+ dup_4x32b(dx4, uvrg_dx4.e[2]); \
+ add_high_narrow_4x32b(r_whole.high, r_block, dx4); \
+ dup_4x32b(dx4, uvrg_dx4.e[3]); \
+ add_high_narrow_4x32b(g_whole.high, g_block, dx4); \
+ dup_4x32b(dx4, b_dx4); \
+ add_high_narrow_4x32b(b_whole.high, b_block, dx4); \
+ \
+ mov_narrow_8x16b(u, u_whole); \
+ mov_narrow_8x16b(v, v_whole); \
+ mov_narrow_8x16b(r, r_whole); \
+ mov_narrow_8x16b(g, g_whole); \
+ mov_narrow_8x16b(b, b_whole); \
+ \
+ dup_4x32b(dx8, uvrg_dx8.e[0]); \
+ add_4x32b(u_block, u_block, dx8); \
+ dup_4x32b(dx8, uvrg_dx8.e[1]); \
+ add_4x32b(v_block, v_block, dx8); \
+ dup_4x32b(dx8, uvrg_dx8.e[2]); \
+ add_4x32b(r_block, r_block, dx8); \
+ dup_4x32b(dx8, uvrg_dx8.e[3]); \
+ add_4x32b(g_block, g_block, dx8); \
+ dup_4x32b(dx8, b_dx8); \
+ add_4x32b(b_block, b_block, dx8); \
+ \
+ and_8x8b(u, u, texture_mask.low); \
+ and_8x8b(v, v, texture_mask.high); \
+ setup_blocks_texture_##swizzling(); \
+ \
+ zip_8x16b(uv, u, v); \
+ block->uv = uv; \
+ block->r = r; \
+ block->g = g; \
+ block->b = b; \
+ block->dither_offsets = vector_cast(vec_8x16u, dither_offsets); \
+ block->fb_ptr = fb_ptr; \
+} \
+
+// Emit one 8-pixel block for a flat-color textured span: same u/v handling
+// as the shaded variant, but no per-pixel color interpolation.
+#define setup_blocks_store_unshaded_textured(swizzling, dithering, target, \
+ edge_type) \
+{ \
+ vec_8x16u u_whole; \
+ vec_8x16u v_whole; \
+ \
+ vec_8x8u u; \
+ vec_8x8u v; \
+ vec_8x16u uv; \
+ \
+ vec_4x32u dx4; \
+ vec_4x32u dx8; \
+ \
+ shr_narrow_4x32b(u_whole.low, u_block, 16); \
+ shr_narrow_4x32b(v_whole.low, v_block, 16); \
+ \
+ dup_4x32b(dx4, uv_dx4.e[0]); \
+ add_high_narrow_4x32b(u_whole.high, u_block, dx4); \
+ dup_4x32b(dx4, uv_dx4.e[1]); \
+ add_high_narrow_4x32b(v_whole.high, v_block, dx4); \
+ \
+ mov_narrow_8x16b(u, u_whole); \
+ mov_narrow_8x16b(v, v_whole); \
+ \
+ dup_4x32b(dx8, uv_dx8.e[0]); \
+ add_4x32b(u_block, u_block, dx8); \
+ dup_4x32b(dx8, uv_dx8.e[1]); \
+ add_4x32b(v_block, v_block, dx8); \
+ \
+ and_8x8b(u, u, texture_mask.low); \
+ and_8x8b(v, v, texture_mask.high); \
+ setup_blocks_texture_##swizzling(); \
+ \
+ zip_8x16b(uv, u, v); \
+ block->uv = uv; \
+ block->dither_offsets = vector_cast(vec_8x16u, dither_offsets); \
+ block->fb_ptr = fb_ptr; \
+} \
+
+// Apply the dither offsets (pre-biased by +4 at span init) with saturating
+// adds, then saturating-subtract the +4 bias back out.
+#define setup_blocks_store_shaded_untextured_dithered() \
+ addq_8x8b(r, r, dither_offsets); \
+ addq_8x8b(g, g, dither_offsets); \
+ addq_8x8b(b, b, dither_offsets); \
+ \
+ subq_8x8b(r, r, d64_4); \
+ subq_8x8b(g, g, d64_4); \
+ subq_8x8b(b, b, d64_4) \
+
+#define setup_blocks_store_shaded_untextured_undithered() \
+
+
+// Indirect mode queues finished pixels in the block for the blend pass.
+#define setup_blocks_store_untextured_pixels_indirect_full(_pixels) \
+ block->pixels = _pixels; \
+ block->fb_ptr = fb_ptr \
+
+#define setup_blocks_store_untextured_pixels_indirect_edge(_pixels) \
+ block->pixels = _pixels; \
+ block->fb_ptr = fb_ptr \
+
+// Widen the red byte lanes into 16-bit pixels (r * 1) as the pack seed.
+#define setup_blocks_store_shaded_untextured_seed_pixels_indirect() \
+ mul_long_8x8b(pixels, r, d64_1) \
+
+
+// Direct mode writes straight to the framebuffer; a full block needs no
+// masking.
+#define setup_blocks_store_untextured_pixels_direct_full(_pixels) \
+ store_8x16b(_pixels, fb_ptr) \
+
+// Edge (rightmost) block: merge with existing framebuffer pixels using the
+// span's right_mask so pixels past the span end are left untouched.
+#define setup_blocks_store_untextured_pixels_direct_edge(_pixels) \
+{ \
+ vec_8x16u fb_pixels; \
+ vec_8x16u draw_mask; \
+ vec_8x16u test_mask = psx_gpu->test_mask; \
+ \
+ load_8x16b(fb_pixels, fb_ptr); \
+ dup_8x16b(draw_mask, span_edge_data->right_mask); \
+ tst_8x16b(draw_mask, draw_mask, test_mask); \
+ bif_8x16b(fb_pixels, _pixels, draw_mask); \
+ store_8x16b(fb_pixels, fb_ptr); \
+} \
+
+// Direct mode seeds with the msb mask so the mask bit is set in the packed
+// result, then accumulates r on top.
+#define setup_blocks_store_shaded_untextured_seed_pixels_direct() \
+ pixels = msb_mask; \
+ mla_long_8x8b(pixels, r, d64_1) \
+
+
+// Emit one 8-pixel block for a Gouraud-shaded, untextured span: take the
+// integer parts of the r/g/b accumulators for pixels 0-3 (low) and 4-7
+// (high, via the +4-pixel step), advance the accumulators by 8 pixels,
+// optionally dither, pack to 5:5:5 (pixels = r>>3 | (g&~7)<<2 | (b&~7)<<7
+// built via the widening multiply-accumulates), and store either into the
+// block queue (indirect) or straight to VRAM (direct).
+// Fix: the final mla_long_8x8b(pixels, b, d64_128) was missing its
+// terminating semicolon, leaving two statements juxtaposed when the macro
+// expands; every sibling use of these ops is semicolon-terminated.
+#define setup_blocks_store_shaded_untextured(swizzling, dithering, target, \
+ edge_type) \
+{ \
+ vec_8x16u r_whole; \
+ vec_8x16u g_whole; \
+ vec_8x16u b_whole; \
+ \
+ vec_8x8u r; \
+ vec_8x8u g; \
+ vec_8x8u b; \
+ \
+ vec_4x32u dx4; \
+ vec_4x32u dx8; \
+ \
+ vec_8x16u pixels; \
+ \
+ shr_narrow_4x32b(r_whole.low, r_block, 16); \
+ shr_narrow_4x32b(g_whole.low, g_block, 16); \
+ shr_narrow_4x32b(b_whole.low, b_block, 16); \
+ \
+ dup_4x32b(dx4, rgb_dx4.e[0]); \
+ add_high_narrow_4x32b(r_whole.high, r_block, dx4); \
+ dup_4x32b(dx4, rgb_dx4.e[1]); \
+ add_high_narrow_4x32b(g_whole.high, g_block, dx4); \
+ dup_4x32b(dx4, rgb_dx4.e[2]); \
+ add_high_narrow_4x32b(b_whole.high, b_block, dx4); \
+ \
+ mov_narrow_8x16b(r, r_whole); \
+ mov_narrow_8x16b(g, g_whole); \
+ mov_narrow_8x16b(b, b_whole); \
+ \
+ dup_4x32b(dx8, rgb_dx8.e[0]); \
+ add_4x32b(r_block, r_block, dx8); \
+ dup_4x32b(dx8, rgb_dx8.e[1]); \
+ add_4x32b(g_block, g_block, dx8); \
+ dup_4x32b(dx8, rgb_dx8.e[2]); \
+ add_4x32b(b_block, b_block, dx8); \
+ \
+ setup_blocks_store_shaded_untextured_##dithering(); \
+ \
+ shr_8x8b(r, r, 3); \
+ bic_8x8b(g, g, d64_0x07); \
+ bic_8x8b(b, b, d64_0x07); \
+ \
+ setup_blocks_store_shaded_untextured_seed_pixels_##target(); \
+ mla_long_8x8b(pixels, g, d64_4); \
+ mla_long_8x8b(pixels, b, d64_128); \
+ \
+ setup_blocks_store_untextured_pixels_##target##_##edge_type(pixels); \
+} \
+
+// Flat color: the pre-packed replicated color vector is the whole block.
+#define setup_blocks_store_unshaded_untextured(swizzling, dithering, target, \
+ edge_type) \
+ setup_blocks_store_untextured_pixels_##target##_##edge_type(colors) \
+
+
+// Textured indirect blocks carry the raw mask bits; the shade pass expands
+// them per pixel.
+#define setup_blocks_store_draw_mask_textured_indirect(_block, bits) \
+ (_block)->draw_mask_bits = bits \
+
+// Untextured indirect blocks expand the bitmask to a per-pixel vector mask
+// immediately (tst against test_mask selects each pixel's bit).
+#define setup_blocks_store_draw_mask_untextured_indirect(_block, bits) \
+{ \
+ vec_8x16u bits_mask; \
+ vec_8x16u test_mask = psx_gpu->test_mask; \
+ dup_8x16b(bits_mask, bits); \
+ tst_8x16b(bits_mask, bits_mask, test_mask); \
+ (_block)->draw_mask = bits_mask; \
+} \
+
+// Direct untextured writes apply the mask at store time instead.
+#define setup_blocks_store_draw_mask_untextured_direct(_block, bits) \
+
+
+// Reserve space in the block queue; if this span would overflow it, flush
+// the queued blocks first and restart the queue with just this span.
+#define setup_blocks_add_blocks_indirect() \
+ num_blocks += span_num_blocks; \
+ \
+ if(num_blocks > MAX_BLOCKS) \
+ { \
+ psx_gpu->num_blocks = num_blocks - span_num_blocks; \
+ flush_render_block_buffer(psx_gpu); \
+ num_blocks = span_num_blocks; \
+ block = psx_gpu->blocks; \
+ } \
+
+#define setup_blocks_add_blocks_direct() \
+
+
+// Generates one setup_blocks_* function per combination of shading,
+// texturing, dithering, swizzling and target. The function walks every
+// span produced by span setup, splits each into 8-pixel blocks, emits all
+// interior blocks as "full" and the last block as "edge" (with the span's
+// right_mask), and maintains the pixel statistics counters. fb_ptr assumes
+// a 1024-pixel-wide VRAM row.
+#define setup_blocks_builder(shading, texturing, dithering, sw, target) \
+void setup_blocks_##shading##_##texturing##_##dithering##_##sw##_##target( \
+ psx_gpu_struct *psx_gpu) \
+{ \
+ setup_blocks_load_msb_mask_##target(); \
+ setup_blocks_variables_##shading##_##texturing(target); \
+ \
+ edge_data_struct *span_edge_data = psx_gpu->span_edge_data; \
+ vec_4x32u *span_uvrg_offset = psx_gpu->span_uvrg_offset; \
+ u32 *span_b_offset = psx_gpu->span_b_offset; \
+ \
+ block_struct *block = psx_gpu->blocks + psx_gpu->num_blocks; \
+ \
+ u32 num_spans = psx_gpu->num_spans; \
+ \
+ u16 *fb_ptr; \
+ u32 y; \
+ \
+ u32 num_blocks = psx_gpu->num_blocks; \
+ u32 span_num_blocks; \
+ \
+ while(num_spans) \
+ { \
+ span_num_blocks = span_edge_data->num_blocks; \
+ if(span_num_blocks) \
+ { \
+ y = span_edge_data->y; \
+ fb_ptr = psx_gpu->vram_ptr + span_edge_data->left_x + (y * 1024); \
+ \
+ setup_blocks_span_initialize_##shading##_##texturing(); \
+ setup_blocks_span_initialize_##dithering(texturing); \
+ \
+ setup_blocks_add_blocks_##target(); \
+ \
+ s32 pixel_span = span_num_blocks * 8; \
+ pixel_span -= __builtin_popcount(span_edge_data->right_mask & 0xFF); \
+ span_pixels += pixel_span; \
+ span_pixel_blocks_unaligned += (pixel_span + 7) / 8; \
+ \
+ span_num_blocks--; \
+ while(span_num_blocks) \
+ { \
+ setup_blocks_store_##shading##_##texturing(sw, dithering, target, \
+ full); \
+ setup_blocks_store_draw_mask_##texturing##_##target(block, 0x00); \
+ \
+ fb_ptr += 8; \
+ block++; \
+ span_num_blocks--; \
+ } \
+ \
+ setup_blocks_store_##shading##_##texturing(sw, dithering, target, edge); \
+ setup_blocks_store_draw_mask_##texturing##_##target(block, \
+ span_edge_data->right_mask); \
+ \
+ block++; \
+ } \
+ else \
+ { \
+ zero_block_spans++; \
+ } \
+ \
+ num_spans--; \
+ span_edge_data++; \
+ span_uvrg_offset++; \
+ span_b_offset++; \
+ } \
+ \
+ psx_gpu->num_blocks = num_blocks; \
+} \
+
+// Prototypes for the setup_blocks_* variants instantiated below (or
+// supplied in assembly under PANDORA_BUILD).
+void setup_blocks_shaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
+ *psx_gpu);
+
+void setup_blocks_shaded_untextured_dithered_unswizzled_indirect(psx_gpu_struct
+ *psx_gpu);
+void setup_blocks_shaded_untextured_undithered_unswizzled_indirect(
+ psx_gpu_struct *psx_gpu);
+void setup_blocks_shaded_untextured_dithered_unswizzled_direct(psx_gpu_struct
+ *psx_gpu);
+void setup_blocks_shaded_untextured_undithered_unswizzled_direct(
+ psx_gpu_struct *psx_gpu);
+
+void setup_blocks_unshaded_textured_dithered_unswizzled_indirect(psx_gpu_struct
+ *psx_gpu);
+void setup_blocks_unshaded_untextured_undithered_unswizzled_indirect(
+ psx_gpu_struct *psx_gpu);
+void setup_blocks_unshaded_untextured_undithered_unswizzled_direct(
+ psx_gpu_struct *psx_gpu);
+
+void setup_blocks_shaded_textured_dithered_swizzled_indirect(psx_gpu_struct
+ *psx_gpu);
+void setup_blocks_unshaded_textured_dithered_swizzled_indirect(psx_gpu_struct
+ *psx_gpu);
+
+// Instantiate the C implementations of each needed variant; PANDORA_BUILD
+// replaces these with hand-written assembly.
+//setup_blocks_builder(unshaded, untextured, undithered, unswizzled, direct);
+
+#ifndef PANDORA_BUILD
+
+setup_blocks_builder(shaded, textured, dithered, swizzled, indirect);
+setup_blocks_builder(shaded, textured, dithered, unswizzled, indirect);
+
+setup_blocks_builder(unshaded, textured, dithered, unswizzled, indirect);
+setup_blocks_builder(unshaded, textured, dithered, swizzled, indirect);
+
+setup_blocks_builder(shaded, untextured, undithered, unswizzled, indirect);
+setup_blocks_builder(shaded, untextured, dithered, unswizzled, indirect);
+setup_blocks_builder(shaded, untextured, undithered, unswizzled, direct);
+setup_blocks_builder(shaded, untextured, dithered, unswizzled, direct);
+
+setup_blocks_builder(unshaded, untextured, undithered, unswizzled, indirect);
+setup_blocks_builder(unshaded, untextured, undithered, unswizzled, direct);
+
+#endif
+
+void texture_blocks_untextured(psx_gpu_struct *psx_gpu);
+void texture_blocks_4bpp(psx_gpu_struct *psx_gpu);
+void texture_blocks_8bpp(psx_gpu_struct *psx_gpu);
+void texture_blocks_16bpp(psx_gpu_struct *psx_gpu);
+
+#ifndef PANDORA_BUILD
+
+// Untextured primitives have no texels to fetch; this pass only maintains
+// the untextured-block statistics counter (sprites are excluded from it).
+void texture_blocks_untextured(psx_gpu_struct *psx_gpu)
+{
+  u32 is_sprite = (psx_gpu->primitive_type == PRIMITIVE_TYPE_SPRITE);
+
+  if(!is_sprite)
+    texel_blocks_untextured += psx_gpu->num_blocks;
+}
+
+// Resolve 4bpp texels for every queued block: fetch one byte per pixel
+// from the (cache-expanded) texture page, then translate it through the
+// 16-entry CLUT using two 16-way table lookups (low/high pixel bytes).
+void texture_blocks_4bpp(psx_gpu_struct *psx_gpu)
+{
+ block_struct *block = psx_gpu->blocks;
+ u32 num_blocks = psx_gpu->num_blocks;
+ texel_blocks_4bpp += num_blocks;
+
+ vec_8x8u texels_low;
+ vec_8x8u texels_high;
+ vec_8x8u texels;
+ vec_8x16u pixels;
+
+ vec_8x16u clut_a;
+ vec_8x16u clut_b;
+ vec_16x8u clut_low;
+ vec_16x8u clut_high;
+
+ u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr;
+ u16 *clut_ptr = psx_gpu->clut_ptr;
+
+ // Can be done with one deinterleaving load on NEON
+ load_8x16b(clut_a, clut_ptr);
+ load_8x16b(clut_b, clut_ptr + 8);
+ unzip_16x8b(clut_low, clut_high, clut_a, clut_b);
+
+ // Rebuild the expanded 4bpp cache if this page was written since the
+ // last update.
+ if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask)
+ update_texture_4bpp_cache(psx_gpu);
+
+ while(num_blocks)
+ {
+ texels.e[0] = texture_ptr_8bpp[block->uv.e[0]];
+ texels.e[1] = texture_ptr_8bpp[block->uv.e[1]];
+ texels.e[2] = texture_ptr_8bpp[block->uv.e[2]];
+ texels.e[3] = texture_ptr_8bpp[block->uv.e[3]];
+ texels.e[4] = texture_ptr_8bpp[block->uv.e[4]];
+ texels.e[5] = texture_ptr_8bpp[block->uv.e[5]];
+ texels.e[6] = texture_ptr_8bpp[block->uv.e[6]];
+ texels.e[7] = texture_ptr_8bpp[block->uv.e[7]];
+
+ tbl_16(texels_low, texels, clut_low);
+ tbl_16(texels_high, texels, clut_high);
+
+ // Can be done with an interleaving store on NEON
+ zip_8x16b(pixels, texels_low, texels_high);
+
+ block->texels = pixels;
+
+ num_blocks--;
+ block++;
+ }
+}
+
+// Resolve 8bpp texels for every queued block: each block->uv entry indexes
+// the (cache-expanded) 8bpp texture page, and the fetched byte is
+// translated to a 16-bit pixel through the 256-entry CLUT.
+void texture_blocks_8bpp(psx_gpu_struct *psx_gpu)
+{
+  u32 remaining = psx_gpu->num_blocks;
+  block_struct *block = psx_gpu->blocks;
+
+  texel_blocks_8bpp += remaining;
+
+  // Rebuild the expanded 8bpp cache if this page was written since the
+  // last update.
+  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask)
+    update_texture_8bpp_cache(psx_gpu);
+
+  u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr;
+
+  for(; remaining != 0; remaining--, block++)
+  {
+    vec_8x16u texels;
+    u32 i;
+
+    for(i = 0; i < 8; i++)
+      texels.e[i] = psx_gpu->clut_ptr[texture_ptr_8bpp[block->uv.e[i]]];
+
+    block->texels = texels;
+  }
+}
+
+// Resolve 16bpp texels for every queued block: uv packs the coordinate at
+// a 256-texel stride (v in the high byte), so adding (offset & 0xFF00) * 3
+// widens the stride to the 1024-texel VRAM row before the direct fetch.
+void texture_blocks_16bpp(psx_gpu_struct *psx_gpu)
+{
+  u32 remaining = psx_gpu->num_blocks;
+  block_struct *block = psx_gpu->blocks;
+  u16 *texture_ptr_16bpp = psx_gpu->texture_page_ptr;
+
+  texel_blocks_16bpp += remaining;
+
+  for(; remaining != 0; remaining--, block++)
+  {
+    vec_8x16u texels;
+    u32 i;
+
+    for(i = 0; i < 8; i++)
+    {
+      u32 offset = block->uv.e[i];
+      offset += (offset & 0xFF00) * 3;
+
+      texels.e[i] = texture_ptr_16bpp[offset];
+    }
+
+    block->texels = texels;
+  }
+}
+
+#endif
+
+
+// Indirect shading leaves mask-bit handling to the blend pass.
+#define shade_blocks_load_msb_mask_indirect() \
+
+// Direct shading needs the replicated mask-setting bit to OR into pixels.
+#define shade_blocks_load_msb_mask_direct() \
+ vec_8x16u msb_mask; \
+ dup_8x16b(msb_mask, psx_gpu->mask_msb); \
+
+// Indirect: stash the per-pixel mask and shaded pixels in the block queue.
+#define shade_blocks_store_indirect(_draw_mask, _pixels) \
+ block->draw_mask = _draw_mask; \
+ block->pixels = _pixels \
+
+// Direct: merge the shaded pixels with the framebuffer under the draw
+// mask (mask-set pixels keep the framebuffer value) and store.
+#define shade_blocks_store_direct(_draw_mask, _pixels) \
+{ \
+ vec_8x16u fb_pixels; \
+ or_8x16b(_pixels, _pixels, msb_mask); \
+ load_8x16b(fb_pixels, block->fb_ptr); \
+ bif_8x16b(fb_pixels, _pixels, _draw_mask); \
+ store_8x16b(fb_pixels, block->fb_ptr); \
+} \
+
+
+// Shaded primitives carry per-block colors, so nothing to load up front.
+#define shade_blocks_textured_modulated_shaded_primitive_load() \
+
+// Flat-color primitives load the modulation color once; 0x808080 would
+// modulate to identity, so count those as "false modulated" for stats.
+#define shade_blocks_textured_modulated_unshaded_primitive_load() \
+{ \
+ u32 color = psx_gpu->triangle_color; \
+ dup_8x8b(colors_r, color); \
+ dup_8x8b(colors_g, color >> 8); \
+ dup_8x8b(colors_b, color >> 16); \
+ if(psx_gpu->triangle_color == 0x808080) \
+ false_modulated_triangles++; \
+} \
+
+// Shaded primitives refresh the modulation colors from each block.
+#define shade_blocks_textured_modulated_shaded_block_load() \
+ colors_r = block->r; \
+ colors_g = block->g; \
+ colors_b = block->b \
+
+#define shade_blocks_textured_modulated_unshaded_block_load() \
+
+// Modulate one color component; dithered variants seed the product with
+// the block's dither offsets before the widening multiply-accumulate.
+#define shade_blocks_textured_modulate_dithered(component) \
+ pixels_##component = block->dither_offsets; \
+ mla_long_8x8b(pixels_##component, texels_##component, colors_##component) \
+
+#define shade_blocks_textured_modulate_undithered(component) \
+ mul_long_8x8b(pixels_##component, texels_##component, colors_##component) \
+
+// Generates one shade_blocks_*_textured_modulated_* function: for every
+// queued block it unpacks the 5:5:5 texels, modulates each component with
+// the block (shaded) or primitive (unshaded) color, repacks to 5:5:5 while
+// preserving the texel mask bit, masks out fully-transparent (zero) texels
+// and masked pixels, and stores via the target's store macro.
+#define shade_blocks_textured_modulated_builder(shading, dithering, target) \
+void shade_blocks_##shading##_textured_modulated_##dithering##_##target( \
+ psx_gpu_struct *psx_gpu) \
+{ \
+ block_struct *block = psx_gpu->blocks; \
+ u32 num_blocks = psx_gpu->num_blocks; \
+ vec_8x16u texels; \
+ \
+ vec_8x8u texels_r; \
+ vec_8x8u texels_g; \
+ vec_8x8u texels_b; \
+ \
+ vec_8x8u colors_r; \
+ vec_8x8u colors_g; \
+ vec_8x8u colors_b; \
+ \
+ vec_8x8u pixels_r_low; \
+ vec_8x8u pixels_g_low; \
+ vec_8x8u pixels_b_low; \
+ vec_8x16u pixels; \
+ \
+ vec_8x16u pixels_r; \
+ vec_8x16u pixels_g; \
+ vec_8x16u pixels_b; \
+ \
+ vec_8x16u draw_mask; \
+ vec_8x16u zero_mask; \
+ \
+ vec_8x8u d64_0x07; \
+ vec_8x8u d64_0x1F; \
+ vec_8x8u d64_1; \
+ vec_8x8u d64_4; \
+ vec_8x8u d64_128; \
+ \
+ vec_8x16u d128_0x8000; \
+ \
+ vec_8x16u test_mask = psx_gpu->test_mask; \
+ u32 draw_mask_bits; \
+ shade_blocks_load_msb_mask_##target(); \
+ \
+ dup_8x8b(d64_0x07, 0x07); \
+ dup_8x8b(d64_0x1F, 0x1F); \
+ dup_8x8b(d64_1, 1); \
+ dup_8x8b(d64_4, 4); \
+ dup_8x8b(d64_128, 128); \
+ \
+ dup_8x16b(d128_0x8000, 0x8000); \
+ \
+ shade_blocks_textured_modulated_##shading##_primitive_load(); \
+ \
+ while(num_blocks) \
+ { \
+ draw_mask_bits = block->draw_mask_bits; \
+ dup_8x16b(draw_mask, draw_mask_bits); \
+ tst_8x16b(draw_mask, draw_mask, test_mask); \
+ \
+ shade_blocks_textured_modulated_##shading##_block_load(); \
+ \
+ texels = block->texels; \
+ \
+ mov_narrow_8x16b(texels_r, texels); \
+ shr_narrow_8x16b(texels_g, texels, 5); \
+ shr_narrow_8x16b(texels_b, texels, 7); \
+ \
+ and_8x8b(texels_r, texels_r, d64_0x1F); \
+ and_8x8b(texels_g, texels_g, d64_0x1F); \
+ shr_8x8b(texels_b, texels_b, 3); \
+ \
+ shade_blocks_textured_modulate_##dithering(r); \
+ shade_blocks_textured_modulate_##dithering(g); \
+ shade_blocks_textured_modulate_##dithering(b); \
+ \
+ cmpeqz_8x16b(zero_mask, texels); \
+ and_8x16b(pixels, texels, d128_0x8000); \
+ \
+ shrq_narrow_signed_8x16b(pixels_r_low, pixels_r, 4); \
+ shrq_narrow_signed_8x16b(pixels_g_low, pixels_g, 4); \
+ shrq_narrow_signed_8x16b(pixels_b_low, pixels_b, 4); \
+ \
+ or_8x16b(zero_mask, draw_mask, zero_mask); \
+ \
+ shr_8x8b(pixels_r_low, pixels_r_low, 3); \
+ bic_8x8b(pixels_g_low, pixels_g_low, d64_0x07); \
+ bic_8x8b(pixels_b_low, pixels_b_low, d64_0x07); \
+ \
+ mla_long_8x8b(pixels, pixels_r_low, d64_1); \
+ mla_long_8x8b(pixels, pixels_g_low, d64_4); \
+ mla_long_8x8b(pixels, pixels_b_low, d64_128); \
+ \
+ shade_blocks_store_##target(zero_mask, pixels); \
+ \
+ num_blocks--; \
+ block++; \
+ } \
+} \
+
+// Prototypes for the modulated-texture shading variants instantiated
+// below (or supplied in assembly under PANDORA_BUILD).
+void shade_blocks_shaded_textured_modulated_dithered_direct(psx_gpu_struct
+ *psx_gpu);
+void shade_blocks_shaded_textured_modulated_undithered_direct(psx_gpu_struct
+ *psx_gpu);
+void shade_blocks_unshaded_textured_modulated_dithered_direct(psx_gpu_struct
+ *psx_gpu);
+void shade_blocks_unshaded_textured_modulated_undithered_direct(psx_gpu_struct
+ *psx_gpu);
+
+void shade_blocks_shaded_textured_modulated_dithered_indirect(psx_gpu_struct
+ *psx_gpu);
+void shade_blocks_shaded_textured_modulated_undithered_indirect(psx_gpu_struct
+ *psx_gpu);
+void shade_blocks_unshaded_textured_modulated_dithered_indirect(psx_gpu_struct
+ *psx_gpu);
+void shade_blocks_unshaded_textured_modulated_undithered_indirect(psx_gpu_struct
+ *psx_gpu);
+
+// Instantiate the C implementations of each modulated shading variant.
+#ifndef PANDORA_BUILD
+
+shade_blocks_textured_modulated_builder(shaded, dithered, direct);
+shade_blocks_textured_modulated_builder(shaded, undithered, direct);
+shade_blocks_textured_modulated_builder(unshaded, dithered, direct);
+shade_blocks_textured_modulated_builder(unshaded, undithered, direct);
+
+shade_blocks_textured_modulated_builder(shaded, dithered, indirect);
+shade_blocks_textured_modulated_builder(shaded, undithered, indirect);
+shade_blocks_textured_modulated_builder(unshaded, dithered, indirect);
+shade_blocks_textured_modulated_builder(unshaded, undithered, indirect);
+
+#endif
+
+
+// Generates shade_blocks_textured_unmodulated_{indirect,direct}: texels
+// pass through unchanged, with masking for fully-transparent (zero)
+// texels and the per-pixel draw mask, then target-specific store.
+#define shade_blocks_textured_unmodulated_builder(target) \
+void shade_blocks_textured_unmodulated_##target(psx_gpu_struct *psx_gpu) \
+{ \
+ block_struct *block = psx_gpu->blocks; \
+ u32 num_blocks = psx_gpu->num_blocks; \
+ vec_8x16u draw_mask; \
+ vec_8x16u test_mask = psx_gpu->test_mask; \
+ u32 draw_mask_bits; \
+ \
+ vec_8x16u pixels; \
+ shade_blocks_load_msb_mask_##target(); \
+ \
+ while(num_blocks) \
+ { \
+ vec_8x16u zero_mask; \
+ \
+ draw_mask_bits = block->draw_mask_bits; \
+ dup_8x16b(draw_mask, draw_mask_bits); \
+ tst_8x16b(draw_mask, draw_mask, test_mask); \
+ \
+ pixels = block->texels; \
+ \
+ cmpeqz_8x16b(zero_mask, pixels); \
+ or_8x16b(zero_mask, draw_mask, zero_mask); \
+ \
+ shade_blocks_store_##target(zero_mask, pixels); \
+ \
+ num_blocks--; \
+ block++; \
+ } \
+} \
+
+// Prototypes and C instantiations for the unmodulated texture variants.
+void shade_blocks_textured_unmodulated_indirect(psx_gpu_struct *psx_gpu);
+void shade_blocks_textured_unmodulated_direct(psx_gpu_struct *psx_gpu);
+
+#ifndef PANDORA_BUILD
+
+shade_blocks_textured_unmodulated_builder(indirect)
+shade_blocks_textured_unmodulated_builder(direct)
+
+#endif
+
+
+void shade_blocks_unshaded_untextured_indirect(psx_gpu_struct *psx_gpu);
+void shade_blocks_unshaded_untextured_direct(psx_gpu_struct *psx_gpu);
+
+#ifndef PANDORA_BUILD
+
+// Flat-color untextured indirect: nothing to do — setup_blocks already
+// stored the final pixels and draw masks into each block.
+void shade_blocks_unshaded_untextured_indirect(psx_gpu_struct *psx_gpu)
+{
+}
+
+// Flat-color untextured direct: every block shares the same color, so the
+// pixel vector is loaded once from the first block and stored to each
+// block's framebuffer pointer under that block's draw mask.
+void shade_blocks_unshaded_untextured_direct(psx_gpu_struct *psx_gpu)
+{
+ block_struct *block = psx_gpu->blocks;
+ u32 num_blocks = psx_gpu->num_blocks;
+
+ vec_8x16u pixels = block->pixels;
+ shade_blocks_load_msb_mask_direct();
+
+ while(num_blocks)
+ {
+ shade_blocks_store_direct(block->draw_mask, pixels);
+
+ num_blocks--;
+ block++;
+ }
+}
+
+#endif
+
+// Shaded untextured blocks are fully resolved during setup_blocks (the
+// packed pixels are computed there), so no shading pass is needed.
+void shade_blocks_shaded_untextured(psx_gpu_struct *psx_gpu)
+{
+}
+
+
+// When mask evaluation is on, framebuffer pixels with bit 15 set (negative
+// as signed) are protected: fold them into the draw mask.
+#define blend_blocks_mask_evaluate_on() \
+ vec_8x16u mask_pixels; \
+ cmpltz_8x16b(mask_pixels, framebuffer_pixels); \
+ or_8x16b(draw_mask, draw_mask, mask_pixels) \
+
+#define blend_blocks_mask_evaluate_off() \
+
+// 50% source + 50% destination blend on packed 5:5:5 pixels without
+// unpacking: the XOR/0x0421 correction accounts for the carry lost from
+// each component's low bit when halving, and msb bits are stripped first.
+#define blend_blocks_average() \
+{ \
+ vec_8x16u pixels_no_msb; \
+ vec_8x16u fb_pixels_no_msb; \
+ \
+ vec_8x16u d128_0x0421; \
+ vec_8x16u d128_0x8000; \
+ \
+ dup_8x16b(d128_0x0421, 0x0421); \
+ dup_8x16b(d128_0x8000, 0x8000); \
+ \
+ eor_8x16b(blend_pixels, pixels, framebuffer_pixels); \
+ bic_8x16b(pixels_no_msb, pixels, d128_0x8000); \
+ and_8x16b(blend_pixels, blend_pixels, d128_0x0421); \
+ sub_8x16b(blend_pixels, pixels_no_msb, blend_pixels); \
+ bic_8x16b(fb_pixels_no_msb, framebuffer_pixels, d128_0x8000); \
+ average_8x16b(blend_pixels, fb_pixels_no_msb, blend_pixels); \
+} \
+
+// Additive blend (dst + src) on packed 5:5:5 pixels: split into the r|b
+// (0x7C1F) and g (0x03E0) fields so each can be added and saturated to its
+// own field maximum independently (byte-wise min for r|b, 16-bit min for g).
+#define blend_blocks_add() \
+{ \
+ vec_8x16u pixels_rb, pixels_g; \
+ vec_8x16u fb_rb, fb_g; \
+ \
+ vec_8x16u d128_0x7C1F; \
+ vec_8x16u d128_0x03E0; \
+ \
+ dup_8x16b(d128_0x7C1F, 0x7C1F); \
+ dup_8x16b(d128_0x03E0, 0x03E0); \
+ \
+ and_8x16b(pixels_rb, pixels, d128_0x7C1F); \
+ and_8x16b(pixels_g, pixels, d128_0x03E0); \
+ \
+ and_8x16b(fb_rb, framebuffer_pixels, d128_0x7C1F); \
+ and_8x16b(fb_g, framebuffer_pixels, d128_0x03E0); \
+ \
+ add_8x16b(fb_rb, fb_rb, pixels_rb); \
+ add_8x16b(fb_g, fb_g, pixels_g); \
+ \
+ min_16x8b(vector_cast(vec_16x8u, fb_rb), vector_cast(vec_16x8u, fb_rb), \
+ vector_cast(vec_16x8u, d128_0x7C1F)); \
+ min_8x16b(fb_g, fb_g, d128_0x03E0); \
+ \
+ or_8x16b(blend_pixels, fb_rb, fb_g); \
+} \
+
+// Subtractive blend (dst - src) on packed 5:5:5 pixels: same r|b / g field
+// split as blend_blocks_add, using saturating subtracts to clamp at zero.
+#define blend_blocks_subtract() \
+{ \
+ vec_8x16u pixels_rb, pixels_g; \
+ vec_8x16u fb_rb, fb_g; \
+ \
+ vec_8x16u d128_0x7C1F; \
+ vec_8x16u d128_0x03E0; \
+ \
+ dup_8x16b(d128_0x7C1F, 0x7C1F); \
+ dup_8x16b(d128_0x03E0, 0x03E0); \
+ \
+ and_8x16b(pixels_rb, pixels, d128_0x7C1F); \
+ and_8x16b(pixels_g, pixels, d128_0x03E0); \
+ \
+ and_8x16b(fb_rb, framebuffer_pixels, d128_0x7C1F); \
+ and_8x16b(fb_g, framebuffer_pixels, d128_0x03E0); \
+ \
+ subs_16x8b(vector_cast(vec_16x8u, fb_rb), \
+ vector_cast(vec_16x8u, fb_rb), vector_cast(vec_16x8u, pixels_rb)); \
+ subs_8x16b(fb_g, fb_g, pixels_g); \
+ \
+ or_8x16b(blend_pixels, fb_rb, fb_g); \
+} \
+
// Semi-transparency "add fourth" blend: result = bg + fg / 4, saturated.
// Shifting the whole packed pixel right by 2 and masking with 0x1C07
// (red/blue) and 0x00E0 (green) keeps the top 3 bits of each component,
// i.e. the component value divided by 4, without cross-component bleed.
#define blend_blocks_add_fourth() \
{ \
  vec_8x16u pixels_rb, pixels_g; \
  vec_8x16u pixels_fourth; \
  vec_8x16u fb_rb, fb_g; \
  \
  vec_8x16u d128_0x7C1F; \
  vec_8x16u d128_0x1C07; \
  vec_8x16u d128_0x03E0; \
  vec_8x16u d128_0x00E0; \
  \
  dup_8x16b(d128_0x7C1F, 0x7C1F); \
  dup_8x16b(d128_0x1C07, 0x1C07); \
  dup_8x16b(d128_0x03E0, 0x03E0); \
  dup_8x16b(d128_0x00E0, 0x00E0); \
  \
  shr_8x16b(pixels_fourth, vector_cast(vec_8x16s, pixels), 2); \
  \
  and_8x16b(fb_rb, framebuffer_pixels, d128_0x7C1F); \
  and_8x16b(fb_g, framebuffer_pixels, d128_0x03E0); \
  \
  and_8x16b(pixels_rb, pixels_fourth, d128_0x1C07); \
  and_8x16b(pixels_g, pixels_fourth, d128_0x00E0); \
  \
  add_8x16b(fb_rb, fb_rb, pixels_rb); \
  add_8x16b(fb_g, fb_g, pixels_g); \
  \
  min_16x8b(vector_cast(vec_16x8u, fb_rb), vector_cast(vec_16x8u, fb_rb), \
   vector_cast(vec_16x8u, d128_0x7C1F)); \
  min_8x16b(fb_g, fb_g, d128_0x03E0); \
  \
  or_8x16b(blend_pixels, fb_rb, fb_g); \
} \
+
// Textured pixels only blend where the texel's MSB (semi-transparency bit)
// is set: blend_mask is all-ones for such lanes, so bif keeps the blended
// value there and inserts the raw texel everywhere else. The blended value
// has its MSB forced on so the result stays marked.
#define blend_blocks_blended_combine_textured() \
{ \
  vec_8x16u blend_mask; \
  cmpltz_8x16b(blend_mask, pixels); \
  \
  or_immediate_8x16b(blend_pixels, blend_pixels, 0x8000); \
  bif_8x16b(blend_pixels, pixels, blend_mask); \
} \

// Untextured pixels blend unconditionally — nothing to combine.
#define blend_blocks_blended_combine_untextured() \


// Dispatch helpers: pick the blend arithmetic, then the per-texturing
// combine step. These expand inside blend_blocks_builder only.
#define blend_blocks_body_blend(blend_mode, texturing) \
{ \
  blend_blocks_##blend_mode(); \
  blend_blocks_blended_combine_##texturing(); \
} \

#define blend_blocks_body_average(texturing) \
  blend_blocks_body_blend(average, texturing) \

#define blend_blocks_body_add(texturing) \
  blend_blocks_body_blend(add, texturing) \

#define blend_blocks_body_subtract(texturing) \
  blend_blocks_body_blend(subtract, texturing) \

#define blend_blocks_body_add_fourth(texturing) \
  blend_blocks_body_blend(add_fourth, texturing) \

// Unblended: pass the source pixels through untouched.
#define blend_blocks_body_unblended(texturing) \
  blend_pixels = pixels \
+
+
// Generates one blend_blocks_<texturing>_<blend_mode>_<mask_evaluate>
// function: for every queued block it loads the destination framebuffer
// pixels, optionally folds the mask bit into the draw mask, applies the
// selected blend arithmetic, forces the configured MSB (mask_msb), and
// writes back only the lanes allowed by draw_mask. `blend_blocks` is the
// global statistics counter (see file top), not a local.
#define blend_blocks_builder(texturing, blend_mode, mask_evaluate) \
void \
 blend_blocks_##texturing##_##blend_mode##_##mask_evaluate(psx_gpu_struct \
 *psx_gpu) \
{ \
  block_struct *block = psx_gpu->blocks; \
  u32 num_blocks = psx_gpu->num_blocks; \
  vec_8x16u draw_mask; \
  vec_8x16u pixels; \
  vec_8x16u blend_pixels; \
  vec_8x16u framebuffer_pixels; \
  vec_8x16u msb_mask; \
  \
  u16 *fb_ptr; \
  \
  dup_8x16b(msb_mask, psx_gpu->mask_msb); \
  \
  while(num_blocks) \
  { \
    pixels = block->pixels; \
    draw_mask = block->draw_mask; \
    fb_ptr = block->fb_ptr; \
    \
    load_8x16b(framebuffer_pixels, fb_ptr); \
    \
    blend_blocks_mask_evaluate_##mask_evaluate(); \
    blend_blocks_body_##blend_mode(texturing); \
    \
    or_8x16b(blend_pixels, blend_pixels, msb_mask); \
    bif_8x16b(framebuffer_pixels, blend_pixels, draw_mask); \
    store_8x16b(framebuffer_pixels, fb_ptr); \
    \
    blend_blocks++; \
    num_blocks--; \
    block++; \
  } \
} \
+
+void blend_blocks_textured_average_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_textured_average_on(psx_gpu_struct *psx_gpu);
+void blend_blocks_textured_add_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_textured_add_on(psx_gpu_struct *psx_gpu);
+void blend_blocks_textured_subtract_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_textured_subtract_on(psx_gpu_struct *psx_gpu);
+void blend_blocks_textured_add_fourth_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_textured_add_fourth_on(psx_gpu_struct *psx_gpu);
+
+void blend_blocks_untextured_average_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_untextured_average_on(psx_gpu_struct *psx_gpu);
+void blend_blocks_untextured_add_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_untextured_add_on(psx_gpu_struct *psx_gpu);
+void blend_blocks_untextured_subtract_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_untextured_subtract_on(psx_gpu_struct *psx_gpu);
+void blend_blocks_untextured_add_fourth_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_untextured_add_fourth_on(psx_gpu_struct *psx_gpu);
+
+void blend_blocks_textured_unblended_off(psx_gpu_struct *psx_gpu);
+void blend_blocks_textured_unblended_on(psx_gpu_struct *psx_gpu);
+
#ifndef PANDORA_BUILD

// Deliberate no-op: with mask evaluation off, unblended textured blocks
// are presumably written directly in the shade stage, so the blend stage
// has nothing to do. NOTE(review): confirm the direct-target shade path
// always covers this combination.
void blend_blocks_textured_unblended_off(psx_gpu_struct *psx_gpu)
{
}

// Instantiate the full set of C blend functions (the Pandora build uses
// hand-written assembly instead).
blend_blocks_builder(textured, average, off);
blend_blocks_builder(textured, average, on);
blend_blocks_builder(textured, add, off);
blend_blocks_builder(textured, add, on);
blend_blocks_builder(textured, subtract, off);
blend_blocks_builder(textured, subtract, on);
blend_blocks_builder(textured, add_fourth, off);
blend_blocks_builder(textured, add_fourth, on);

blend_blocks_builder(untextured, average, off);
blend_blocks_builder(untextured, average, on);
blend_blocks_builder(untextured, add, off);
blend_blocks_builder(untextured, add, on);
blend_blocks_builder(untextured, subtract, off);
blend_blocks_builder(untextured, subtract, on);
blend_blocks_builder(untextured, add_fourth, off);
blend_blocks_builder(untextured, add_fourth, on);

blend_blocks_builder(textured, unblended, on);

#endif
+
+
// Swap two vertex pointers and flip the triangle winding parity; used by
// render_triangle while sorting vertexes (each swap reverses the winding).
#define vertex_swap(_a, _b) \
{ \
  vertex_struct *temp_vertex = _a; \
  _a = _b; \
  _b = temp_vertex; \
  triangle_winding ^= 1; \
} \
+
+
// Setup blocks parametric-variables:
// SHADE TEXTURE_MAP SWIZZLING
// 0 0 x
// 0 1 0
// 0 1 1
// 1 0 x
// 1 1 0
// 1 1 1
// 8 inputs, 6 combinations
//
// The macros below resolve render-state parameters to the name of one
// concrete setup_blocks_* implementation; they are only expanded inside
// the render handler table initializer. "direct" targets write straight
// to the framebuffer, "indirect" queue blocks for later shade/blend.

// Untextured output ignores dithering when unshaded (flat color).
#define setup_blocks_switch_untextured_unshaded(dithering, target) \
  setup_blocks_unshaded_untextured_undithered_unswizzled_##target \

#define setup_blocks_switch_untextured_shaded(dithering, target) \
  setup_blocks_shaded_untextured_##dithering##_unswizzled_##target \

#define setup_blocks_switch_untextured(shading, texture_mode, dithering, \
 target) \
  setup_blocks_switch_untextured_##shading(dithering, target) \

// Paletted modes use the swizzled texture cache layout; 16bpp does not.
#define setup_blocks_switch_texture_mode_4bpp(shading) \
  setup_blocks_##shading##_textured_dithered_swizzled_indirect \

#define setup_blocks_switch_texture_mode_8bpp(shading) \
  setup_blocks_##shading##_textured_dithered_swizzled_indirect \

#define setup_blocks_switch_texture_mode_16bpp(shading) \
  setup_blocks_##shading##_textured_dithered_unswizzled_indirect \

#define setup_blocks_switch_textured(shading, texture_mode, dithering, target) \
  setup_blocks_switch_texture_mode_##texture_mode(shading) \

// Blending or mask evaluation forces the indirect (queued) path, since
// the blend stage must read the framebuffer before writing.
#define setup_blocks_switch_blended(shading, texturing, texture_mode, \
 dithering, mask_evaluate) \
  setup_blocks_switch_##texturing(shading, texture_mode, dithering, indirect) \

#define setup_blocks_switch_unblended_on(shading, texturing, texture_mode, \
 dithering) \
  setup_blocks_switch_##texturing(shading, texture_mode, dithering, indirect) \

#define setup_blocks_switch_unblended_off(shading, texturing, texture_mode, \
 dithering) \
  setup_blocks_switch_##texturing(shading, texture_mode, dithering, direct) \

#define setup_blocks_switch_unblended(shading, texturing, texture_mode, \
 dithering, mask_evaluate) \
  setup_blocks_switch_unblended_##mask_evaluate(shading, texturing, \
   texture_mode, dithering) \

#define setup_blocks_switch(shading, texturing, texture_mode, dithering, \
 blending, mask_evaluate) \
  setup_blocks_switch_##blending(shading, texturing, texture_mode, \
   dithering, mask_evaluate) \
+
+
// Texture blocks: resolve (texturing, texture_mode) to the texel-fetch
// implementation used by the handler table.

#define texture_blocks_switch_untextured(texture_mode) \
  texture_blocks_untextured \

#define texture_blocks_switch_textured(texture_mode) \
  texture_blocks_##texture_mode \

#define texture_blocks_switch(texturing, texture_mode) \
  texture_blocks_switch_##texturing(texture_mode) \
+
+
// Shade blocks parametric-variables:
// SHADE TEXTURE_MAP MODULATE_TEXELS dither_mode
// 0 0 x x
// 0 1 0 0
// 0 1 0 1
// x 1 1 x
// 1 0 x 0
// 1 0 x 1
// 1 1 0 0
// 1 1 0 1
// 16 inputs, 8 combinations
//
// Resolve render-state parameters to a shade_blocks_* implementation.
// "direct" writes to the framebuffer, "indirect" leaves pixels queued for
// the blend stage; blending always forces indirect.

#define shade_blocks_switch_unshaded_untextured(modulation, dithering, target) \
  shade_blocks_unshaded_untextured_##target \

#define shade_blocks_switch_unshaded_textured_unmodulated(dithering, target) \
  shade_blocks_textured_unmodulated_##target \

#define shade_blocks_switch_unshaded_textured_modulated(dithering, target) \
  shade_blocks_unshaded_textured_modulated_##dithering##_##target \

#define shade_blocks_switch_unshaded_textured(modulation, dithering, target) \
  shade_blocks_switch_unshaded_textured_##modulation(dithering, target) \

#define shade_blocks_switch_unshaded(texturing, modulation, dithering, target) \
  shade_blocks_switch_unshaded_##texturing(modulation, dithering, target) \

#define shade_blocks_switch_shaded_untextured(modulation, dithering, target) \
  shade_blocks_shaded_untextured \

// Unmodulated texels ignore shading entirely, so shaded and unshaded
// resolve to the same unmodulated implementation.
#define shade_blocks_switch_shaded_textured_unmodulated(dithering, target) \
  shade_blocks_textured_unmodulated_##target \

#define shade_blocks_switch_shaded_textured_modulated(dithering, target) \
  shade_blocks_shaded_textured_modulated_##dithering##_##target \

#define shade_blocks_switch_shaded_textured(modulation, dithering, target) \
  shade_blocks_switch_shaded_textured_##modulation(dithering, target) \

#define shade_blocks_switch_shaded(texturing, modulation, dithering, target) \
  shade_blocks_switch_shaded_##texturing(modulation, dithering, target) \

#define shade_blocks_switch_mask_off(shading, texturing, modulation, \
 dithering) \
  shade_blocks_switch_##shading(texturing, modulation, dithering, direct) \

#define shade_blocks_switch_mask_on(shading, texturing, modulation, \
 dithering) \
  shade_blocks_switch_##shading(texturing, modulation, dithering, indirect) \

#define shade_blocks_switch_blended(shading, texturing, modulation, dithering, \
 mask_evaluate) \
  shade_blocks_switch_##shading(texturing, modulation, dithering, indirect) \

#define shade_blocks_switch_unblended(shading, texturing, modulation, \
 dithering, mask_evaluate) \
  shade_blocks_switch_mask_##mask_evaluate(shading, texturing, modulation, \
   dithering) \

#define shade_blocks_switch(shading, texturing, modulation, dithering, \
 blending, mask_evaluate) \
  shade_blocks_switch_##blending(shading, texturing, modulation, dithering, \
   mask_evaluate) \
+
+
// Blend blocks parametric-variables:
// TEXTURE_MAP BLEND BM_A BM_B mask_evaluate
// x 0 x x 0
// x 0 x x 1
// 0 1 0 0 0
// 0 1 0 0 1
// 0 1 0 1 0
// 0 1 0 1 1
// 0 1 1 0 0
// 0 1 1 0 1
// 0 1 1 1 0
// 0 1 1 1 1
// 1 1 0 0 0
// 1 1 0 0 1
// 1 1 0 1 0
// 1 1 0 1 1
// 1 1 1 0 0
// 1 1 1 0 1
// 1 1 1 1 0
// 1 1 1 1 1
// 32 inputs, 18 combinations
//
// Resolve (texturing, blending, blend_mode, mask_evaluate) to one of the
// blend_blocks_* functions generated by blend_blocks_builder above.
// Unblended always resolves to the textured variant regardless of
// texturing (the untextured unblended case never reaches the blend stage).

#define blend_blocks_switch_unblended(texturing, blend_mode, mask_evaluate) \
  blend_blocks_textured_unblended_##mask_evaluate \

#define blend_blocks_switch_blended(texturing, blend_mode, mask_evaluate) \
  blend_blocks_##texturing##_##blend_mode##_##mask_evaluate \

#define blend_blocks_switch(texturing, blending, blend_mode, mask_evaluate) \
  blend_blocks_switch_##blending(texturing, blend_mode, mask_evaluate) \
+
+
// The render_blocks_switch_block_* macros below expand, by cartesian
// product over every render-state parameter, into the initializer of
// render_triangle_block_handlers[]. Each table entry bundles the four
// stage functions (setup, texture, shade, blend) for one render state.
// The expansion order defines the table index bit layout, so it must
// match how render_triangle packs render_state from the RENDER_FLAGS_*
// bits plus render_state_base.

#define render_blocks_switch_block_modulation(texture_mode, blend_mode, \
 mask_evaluate, shading, dithering, texturing, blending, modulation) \
{ \
  setup_blocks_switch(shading, texturing, texture_mode, dithering, blending, \
   mask_evaluate), \
  texture_blocks_switch(texturing, texture_mode), \
  shade_blocks_switch(shading, texturing, modulation, dithering, blending, \
   mask_evaluate), \
  blend_blocks_switch(texturing, blending, blend_mode, mask_evaluate) \
} \

#define render_blocks_switch_block_blending(texture_mode, blend_mode, \
 mask_evaluate, shading, dithering, texturing, blending) \
  render_blocks_switch_block_modulation(texture_mode, blend_mode, \
   mask_evaluate, shading, dithering, texturing, blending, modulated), \
  render_blocks_switch_block_modulation(texture_mode, blend_mode, \
   mask_evaluate, shading, dithering, texturing, blending, unmodulated) \

#define render_blocks_switch_block_texturing(texture_mode, blend_mode, \
 mask_evaluate, shading, dithering, texturing) \
  render_blocks_switch_block_blending(texture_mode, blend_mode, \
   mask_evaluate, shading, dithering, texturing, unblended), \
  render_blocks_switch_block_blending(texture_mode, blend_mode, \
   mask_evaluate, shading, dithering, texturing, blended) \

#define render_blocks_switch_block_dithering(texture_mode, blend_mode, \
 mask_evaluate, shading, dithering) \
  render_blocks_switch_block_texturing(texture_mode, blend_mode, \
   mask_evaluate, shading, dithering, untextured), \
  render_blocks_switch_block_texturing(texture_mode, blend_mode, \
   mask_evaluate, shading, dithering, textured) \

#define render_blocks_switch_block_shading(texture_mode, blend_mode, \
 mask_evaluate, shading) \
  render_blocks_switch_block_dithering(texture_mode, blend_mode, \
   mask_evaluate, shading, undithered), \
  render_blocks_switch_block_dithering(texture_mode, blend_mode, \
   mask_evaluate, shading, dithered) \

#define render_blocks_switch_block_mask_evaluate(texture_mode, blend_mode, \
 mask_evaluate) \
  render_blocks_switch_block_shading(texture_mode, blend_mode, mask_evaluate, \
   unshaded), \
  render_blocks_switch_block_shading(texture_mode, blend_mode, mask_evaluate, \
   shaded) \

#define render_blocks_switch_block_blend_mode(texture_mode, blend_mode) \
  render_blocks_switch_block_mask_evaluate(texture_mode, blend_mode, off), \
  render_blocks_switch_block_mask_evaluate(texture_mode, blend_mode, on) \

#define render_blocks_switch_block_texture_mode(texture_mode) \
  render_blocks_switch_block_blend_mode(texture_mode, average), \
  render_blocks_switch_block_blend_mode(texture_mode, add), \
  render_blocks_switch_block_blend_mode(texture_mode, subtract), \
  render_blocks_switch_block_blend_mode(texture_mode, add_fourth) \

// Texture mode index 3 (reserved on the hardware) is mapped to 4bpp here.
// NOTE(review): some documentation treats the reserved mode as 15bpp —
// confirm this duplication is the intended behavior.
#define render_blocks_switch_block() \
  render_blocks_switch_block_texture_mode(4bpp), \
  render_blocks_switch_block_texture_mode(8bpp), \
  render_blocks_switch_block_texture_mode(16bpp), \
  render_blocks_switch_block_texture_mode(4bpp) \


// Handler table indexed by the packed render_state computed in
// render_triangle.
render_block_handler_struct render_triangle_block_handlers[] =
{
  render_blocks_switch_block()
};
+
// Redefine the leaf macro so the same cartesian expansion now yields a
// human-readable description string per render state; the resulting
// render_block_flag_strings[] parallels render_triangle_block_handlers[]
// index-for-index and is used for debugging/statistics output.
#undef render_blocks_switch_block_modulation

#define render_blocks_switch_block_modulation(texture_mode, blend_mode, \
 mask_evaluate, shading, dithering, texturing, blending, modulation) \
  "render flags:\n" \
  "texture mode: " #texture_mode "\n" \
  "blend mode: " #blend_mode "\n" \
  "mask evaluation: " #mask_evaluate "\n" \
  #shading "\n" \
  #dithering "\n" \
  #texturing "\n" \
  #blending "\n" \
  #modulation "\n" \

char *render_block_flag_strings[] =
{
  render_blocks_switch_block()
};
+
+
// y-direction classification for each triangle edge; the three directions
// plus the winding bit are packed into render_triangle's switch key.
#define triangle_y_direction_up 1
#define triangle_y_direction_flat 2
#define triangle_y_direction_down 0

#define triangle_winding_positive 0
#define triangle_winding_negative 1

// Declares direction_variable in the caller's scope and sets it from the
// sign of value: negative -> up (1), zero -> flat (2), positive -> down (0).
// The sign is extracted by shifting the value's MSB down as unsigned.
// FIX: parenthesize the second use of `value` (the first use already was);
// note `value` is still evaluated twice, so callers must pass a
// side-effect-free expression.
#define triangle_set_direction(direction_variable, value) \
  u32 direction_variable = (u32)(value) >> 31; \
  if((value) == 0) \
    direction_variable = 2 \

// Builds one case label of render_triangle's direction/winding switch,
// matching the key layout (a | b << 2 | c << 4 | winding << 6).
#define triangle_case(direction_a, direction_b, direction_c, winding) \
  case (triangle_y_direction_##direction_a | \
   (triangle_y_direction_##direction_b << 2) | \
   (triangle_y_direction_##direction_c << 4) | \
   (triangle_winding_##winding << 6)) \
+
+psx_gpu_struct __attribute__((aligned(64))) psx_gpu_alt;
+
// Rasterize one triangle: reject degenerate/oversized cases, sort the
// vertexes, build spans for its edges, then dispatch the queued spans to
// the stage functions selected by the packed render state.
void render_triangle(psx_gpu_struct *psx_gpu, vertex_struct *vertexes,
 u32 flags)
{
  s32 y_top, y_bottom;
  s32 triangle_area;
  u32 triangle_winding = 0;

  vertex_struct *a = &(vertexes[0]);
  vertex_struct *b = &(vertexes[1]);
  vertex_struct *c = &(vertexes[2]);

  // Twice the signed area; sign encodes the input winding.
  triangle_area = triangle_signed_area_x2(a->x, a->y, b->x, b->y, c->x, c->y);

  triangles++;

  // Zero area: nothing to draw.
  if(triangle_area == 0)
  {
    trivial_rejects++;
    return;
  }

  // Sort a/b/c by y (each vertex_swap also flips triangle_winding).
  if(b->y < a->y)
    vertex_swap(a, b);

  if(c->y < b->y)
  {
    vertex_swap(b, c);

    if(b->y < a->y)
      vertex_swap(a, b);
  }

  y_bottom = c->y;
  y_top = a->y;

  // Reject triangles taller than the 512-line limit.
  if((y_bottom - y_top) >= 512)
  {
    trivial_rejects++;
    return;
  }

  // Normalize to positive area; swapping a/c flips the winding back.
  if(triangle_area < 0)
  {
    triangle_area = -triangle_area;
    triangle_winding ^= 1;
    vertex_swap(a, c);
  }

  // Re-sort by x so spans are set up left-to-right.
  if(b->x < a->x)
    vertex_swap(a, b);

  if(c->x < b->x)
  {
    vertex_swap(b, c);

    if(b->x < a->x)
      vertex_swap(a, b);
  }

  // Reject triangles wider than the 1024-pixel limit.
  if((c->x - a->x) >= 1024)
  {
    trivial_rejects++;
    return;
  }

  // Also rejects triangles fully outside the viewport.
  if(invalidate_texture_cache_region_viewport(psx_gpu, a->x, y_top, c->x,
   y_bottom) == 0)
  {
    trivial_rejects++;
    return;
  }

  psx_gpu->num_spans = 0;
  psx_gpu->triangle_area = triangle_area;
  psx_gpu->triangle_winding = triangle_winding;

  s32 y_delta_a = b->y - a->y;
  s32 y_delta_b = c->y - b->y;
  s32 y_delta_c = c->y - a->y;

  // Classify each edge's y direction (up/flat/down) from its delta.
  triangle_set_direction(y_direction_a, y_delta_a);
  triangle_set_direction(y_direction_b, y_delta_b);
  triangle_set_direction(y_direction_c, y_delta_c);

  compute_all_gradients(psx_gpu, a, b, c);

  // Pick the span-setup routine from the packed edge directions and
  // winding; keys built by triangle_case. Unlisted keys are impossible
  // shapes for x-sorted vertexes and fall through with no spans.
  switch(y_direction_a | (y_direction_b << 2) | (y_direction_c << 4) |
   (triangle_winding << 6))
  {
    triangle_case(up, up, up, negative):
    triangle_case(up, up, flat, negative):
    triangle_case(up, up, down, negative):
      setup_spans_up_right(psx_gpu, a, b, c);
      break;

    triangle_case(flat, up, up, negative):
    triangle_case(flat, up, flat, negative):
    triangle_case(flat, up, down, negative):
      setup_spans_up_a(psx_gpu, a, b, c);
      break;

    triangle_case(down, up, up, negative):
      setup_spans_up_down(psx_gpu, a, c, b);
      break;

    triangle_case(down, up, flat, negative):
      setup_spans_down_a(psx_gpu, a, c, b);
      break;

    triangle_case(down, up, down, negative):
      setup_spans_down_right(psx_gpu, a, c, b);
      break;

    triangle_case(down, flat, up, negative):
    triangle_case(down, flat, flat, negative):
    triangle_case(down, flat, down, negative):
      setup_spans_down_b(psx_gpu, a, b, c);
      break;

    triangle_case(down, down, up, negative):
    triangle_case(down, down, flat, negative):
    triangle_case(down, down, down, negative):
      setup_spans_down_left(psx_gpu, a, b, c);
      break;

    triangle_case(up, up, up, positive):
    triangle_case(up, up, flat, positive):
    triangle_case(up, up, down, positive):
      setup_spans_up_left(psx_gpu, a, b, c);
      break;

    triangle_case(up, flat, up, positive):
    triangle_case(up, flat, flat, positive):
    triangle_case(up, flat, down, positive):
      setup_spans_up_b(psx_gpu, a, b, c);
      break;

    triangle_case(up, down, up, positive):
      setup_spans_up_right(psx_gpu, a, c, b);
      break;

    triangle_case(up, down, flat, positive):
      setup_spans_up_a(psx_gpu, a, c, b);
      break;

    triangle_case(up, down, down, positive):
      setup_spans_up_down(psx_gpu, a, b, c);
      break;

    triangle_case(flat, down, up, positive):
    triangle_case(flat, down, flat, positive):
    triangle_case(flat, down, down, positive):
      setup_spans_down_a(psx_gpu, a, b, c);
      break;

    triangle_case(down, down, up, positive):
    triangle_case(down, down, flat, positive):
    triangle_case(down, down, down, positive):
      setup_spans_down_right(psx_gpu, a, b, c);
      break;
  }

  spans += psx_gpu->num_spans;

  // Pack the render state: primitive flags plus the global state base;
  // this indexes render_triangle_block_handlers[].
  u32 render_state = flags &
   (RENDER_FLAGS_MODULATE_TEXELS | RENDER_FLAGS_BLEND |
   RENDER_FLAGS_TEXTURE_MAP | RENDER_FLAGS_SHADE);
  render_state |= psx_gpu->render_state_base;

  // Any state or primitive-type change must flush blocks queued under the
  // previous handler before switching.
  if((psx_gpu->render_state != render_state) ||
   (psx_gpu->primitive_type != PRIMITIVE_TYPE_TRIANGLE))
  {
    psx_gpu->render_state = render_state;
    flush_render_block_buffer(psx_gpu);
    state_changes++;
  }

  psx_gpu->primitive_type = PRIMITIVE_TYPE_TRIANGLE;

  psx_gpu->render_block_handler =
   &(render_triangle_block_handlers[render_state]);
  ((setup_blocks_function_type *)psx_gpu->render_block_handler->setup_blocks)
   (psx_gpu);
}
+
+
+void texture_sprite_blocks_8bpp(psx_gpu_struct *psx_gpu);
+
+#ifndef PANDORA_BUILD
+
+void texture_sprite_blocks_8bpp(psx_gpu_struct *psx_gpu)
+{
+ block_struct *block = psx_gpu->blocks;
+ u32 num_blocks = psx_gpu->num_blocks;
+
+ vec_8x16u texels;
+ vec_8x8u texel_indexes;
+
+ u16 *clut_ptr = psx_gpu->clut_ptr;
+ u32 i;
+
+ while(num_blocks)
+ {
+ texel_indexes = block->r;
+
+ for(i = 0; i < 8; i++)
+ {
+ texels.e[i] = clut_ptr[texel_indexes.e[i]];
+ }
+
+ block->texels = texels;
+
+ num_blocks--;
+ block++;
+ }
+}
+
+#endif
+
+
// 4bpp sprite setup: load the 16-entry CLUT and split it into low/high
// byte tables for tbl_16 lookups, then refresh the 4bpp texture cache if
// the current page is dirty.
#define setup_sprite_tiled_initialize_4bpp() \
  u16 *clut_ptr = psx_gpu->clut_ptr; \
  vec_8x16u clut_a, clut_b; \
  vec_16x8u clut_low, clut_high; \
  \
  load_8x16b(clut_a, clut_ptr); \
  load_8x16b(clut_b, clut_ptr + 8); \
  unzip_16x8b(clut_low, clut_high, clut_a, clut_b); \
  \
  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_4bpp_mask) \
    update_texture_4bpp_cache(psx_gpu) \

// 8bpp sprite setup only needs the cache refresh (CLUT lookup happens in
// the texture stage).
#define setup_sprite_tiled_initialize_8bpp() \
  if(psx_gpu->current_texture_mask & psx_gpu->dirty_textures_8bpp_mask) \
    update_texture_8bpp_cache(psx_gpu) \


// Fetch eight texel bytes from the swizzled texture cache at the given
// offset within the current tile; texture_mask wraps per the page masks.
#define setup_sprite_tile_fetch_texel_block_8bpp(offset) \
  texture_block_ptr = psx_gpu->texture_page_ptr + \
   ((texture_offset + offset) & texture_mask); \
  \
  load_64b(texels, texture_block_ptr) \
+
+
// Placeholder hooks, currently no-ops.
#define setup_sprite_tile_setup_block_yes(side, offset, texture_mode) \

#define setup_sprite_tile_setup_block_no(side, offset, texture_mode) \

// Reserve tile_num_blocks entries in the block queue, flushing first if
// the queue would overflow. Updates the global sprite_blocks statistic.
#define setup_sprite_tile_add_blocks(tile_num_blocks) \
  num_blocks += tile_num_blocks; \
  sprite_blocks += tile_num_blocks; \
  \
  if(num_blocks > MAX_BLOCKS) \
  { \
    flush_render_block_buffer(psx_gpu); \
    num_blocks = tile_num_blocks; \
    block = psx_gpu->blocks; \
  } \
+
// Emit blocks for one full-width (two 8-pixel halves) 4bpp tile column:
// two blocks per row, texels resolved through the split CLUT tables.
// 0x10 advances one row inside a 16x16 tile; the final +0xF00 rewinds the
// row component to the top for the next tile in the column.
#define setup_sprite_tile_full_4bpp(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  vec_8x16u pixels; \
  setup_sprite_tile_add_blocks(sub_tile_height * 2); \
  \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    tbl_16(texels_low, texels, clut_low); \
    tbl_16(texels_high, texels, clut_high); \
    zip_8x16b(pixels, texels_low, texels_high); \
    \
    block->texels = pixels; \
    block->draw_mask_bits = left_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
    \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    tbl_16(texels_low, texels, clut_low); \
    tbl_16(texels_high, texels, clut_high); \
    zip_8x16b(pixels, texels_low, texels_high); \
    \
    block->texels = pixels; \
    block->draw_mask_bits = right_mask_bits; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
    \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \

// Same as above but for a column where only one 8-pixel half is covered:
// one block per row, using the edge-selected mask bits.
#define setup_sprite_tile_half_4bpp(edge) \
{ \
  vec_8x8u texels_low, texels_high; \
  vec_8x16u pixels; \
  setup_sprite_tile_add_blocks(sub_tile_height); \
  \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    tbl_16(texels_low, texels, clut_low); \
    tbl_16(texels_high, texels, clut_high); \
    zip_8x16b(pixels, texels_low, texels_high); \
    \
    block->texels = pixels; \
    block->draw_mask_bits = edge##_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
    \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
+
+
// Emit blocks for one full-width 8bpp tile column: two blocks per row.
// Raw texel indexes are stored in block->r; the texture stage performs
// the CLUT lookup later.
#define setup_sprite_tile_full_8bpp(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height * 2); \
  \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    block->r = texels; \
    block->draw_mask_bits = left_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
    \
    setup_sprite_tile_fetch_texel_block_8bpp(8); \
    block->r = texels; \
    block->draw_mask_bits = right_mask_bits; \
    block->fb_ptr = fb_ptr + 8; \
    block++; \
    \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
+
// Emit blocks for an 8bpp tile column where only one 8-pixel half is
// covered: exactly one block per row, using the edge-selected mask bits.
// FIX: reserve sub_tile_height blocks, not sub_tile_height * 2 — this
// variant emits one block per row (the full variant emits two; compare
// setup_sprite_tile_half_4bpp). The previous overcount inflated
// psx_gpu->num_blocks, leaving uninitialized entries in the block queue
// and causing premature buffer flushes.
#define setup_sprite_tile_half_8bpp(edge) \
{ \
  setup_sprite_tile_add_blocks(sub_tile_height); \
  \
  while(sub_tile_height) \
  { \
    setup_sprite_tile_fetch_texel_block_8bpp(0); \
    block->r = texels; \
    block->draw_mask_bits = edge##_mask_bits; \
    block->fb_ptr = fb_ptr; \
    block++; \
    \
    fb_ptr += 1024; \
    texture_offset += 0x10; \
    sub_tile_height--; \
  } \
  texture_offset += 0xF00; \
  psx_gpu->num_blocks = num_blocks; \
} \
+
+
// Pre/post adjustments when a column covers only one half of a tile:
// a right-half column starts 8 texels/pixels in; a left-half column needs
// no offset. Full columns always restart at the column's base offset.
#define setup_sprite_tile_column_edge_pre_adjust_half_right() \
  texture_offset = texture_offset_base + 8; \
  fb_ptr += 8 \

#define setup_sprite_tile_column_edge_pre_adjust_half_left() \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_pre_adjust_half(edge) \
  setup_sprite_tile_column_edge_pre_adjust_half_##edge() \

#define setup_sprite_tile_column_edge_pre_adjust_full(edge) \
  texture_offset = texture_offset_base \

#define setup_sprite_tile_column_edge_post_adjust_half_right() \
  fb_ptr -= 8 \

#define setup_sprite_tile_column_edge_post_adjust_half_left() \

#define setup_sprite_tile_column_edge_post_adjust_half(edge) \
  setup_sprite_tile_column_edge_post_adjust_half_##edge() \

#define setup_sprite_tile_column_edge_post_adjust_full(edge) \


// Render one column when the sprite fits in a single 16-texel tile row.
#define setup_sprite_tile_column_height_single(edge_mode, edge, texture_mode) \
do \
{ \
  sub_tile_height = column_data; \
  setup_sprite_tile_column_edge_pre_adjust_##edge_mode(edge); \
  setup_sprite_tile_##edge_mode##_##texture_mode(edge); \
  setup_sprite_tile_column_edge_post_adjust_##edge_mode(edge); \
} while(0) \

// Render one column spanning several tile rows: a partial first tile,
// full 16-row middle tiles, then a partial last tile. Heights are packed
// in column_data by setup_sprite_column_data_multi.
#define setup_sprite_tile_column_height_multi(edge_mode, edge, texture_mode) \
do \
{ \
  u32 tiles_remaining = column_data >> 16; \
  sub_tile_height = column_data & 0xFF; \
  setup_sprite_tile_column_edge_pre_adjust_##edge_mode(edge); \
  setup_sprite_tile_##edge_mode##_##texture_mode(edge); \
  tiles_remaining -= 1; \
  \
  while(tiles_remaining) \
  { \
    sub_tile_height = 16; \
    setup_sprite_tile_##edge_mode##_##texture_mode(edge); \
    tiles_remaining--; \
  } \
  \
  sub_tile_height = (column_data >> 8) & 0xFF; \
  setup_sprite_tile_##edge_mode##_##texture_mode(edge); \
  setup_sprite_tile_column_edge_post_adjust_##edge_mode(edge); \
} while(0) \


// Pack column heights: low byte = rows in the first tile, next byte =
// rows in the last tile, high half = number of tiles in the column.
#define setup_sprite_column_data_single() \
  column_data = height \

#define setup_sprite_column_data_multi() \
  column_data = 16 - offset_v; \
  column_data |= ((height_rounded & 0xF) + 1) << 8; \
  column_data |= (tile_height - 1) << 16 \
+
+
// Sprite occupying a single tile column: both edge masks apply to the one
// column.
#define setup_sprite_tile_column_width_single(texture_mode, multi_height, \
 edge_mode, edge) \
{ \
  setup_sprite_column_data_##multi_height(); \
  left_mask_bits = left_block_mask | right_block_mask; \
  right_mask_bits = left_mask_bits >> 8; \
  \
  setup_sprite_tile_column_height_##multi_height(edge_mode, edge, \
   texture_mode); \
} \

// Step to the next 16-texel tile column; the wrap rewinds the offset when
// the column index overflows within the swizzled page layout.
#define setup_sprite_tiled_advance_column() \
  texture_offset_base += 0x100; \
  if((texture_offset_base & 0xF00) == 0) \
    texture_offset_base -= (0x100 + 0xF00) \

// Sprite spanning several tile columns: masked left edge column, unmasked
// interior columns, masked right edge column. Note the edge argument is
// crossed on purpose: the leftmost column's covered half is its *right*
// half, and vice versa for the rightmost column.
#define setup_sprite_tile_column_width_multi(texture_mode, multi_height, \
 left_mode, right_mode) \
{ \
  setup_sprite_column_data_##multi_height(); \
  s32 fb_ptr_advance_column = 16 - (1024 * height); \
  \
  tile_width -= 2; \
  left_mask_bits = left_block_mask; \
  right_mask_bits = left_mask_bits >> 8; \
  \
  setup_sprite_tile_column_height_##multi_height(left_mode, right, \
   texture_mode); \
  fb_ptr += fb_ptr_advance_column; \
  \
  left_mask_bits = 0x00; \
  right_mask_bits = 0x00; \
  \
  while(tile_width) \
  { \
    setup_sprite_tiled_advance_column(); \
    setup_sprite_tile_column_height_##multi_height(full, none, texture_mode); \
    fb_ptr += fb_ptr_advance_column; \
    tile_width--; \
  } \
  \
  left_mask_bits = right_block_mask; \
  right_mask_bits = left_mask_bits >> 8; \
  \
  setup_sprite_tiled_advance_column(); \
  setup_sprite_tile_column_height_##multi_height(right_mode, left, \
   texture_mode); \
} \
+
+
// Generates setup_sprite_<texture_mode>: renders a sprite through the
// swizzled 16x16-tile texture cache. Expands the sprite to tile-aligned
// columns/rows, builds per-edge coverage masks, and dispatches on a
// 4-bit control_mask: bit 0 = single tile column, bit 1 = single tile
// row, bit 2 = left half of the first column fully masked, bit 3 = right
// half of the last column fully masked. Cases 0xD/0xF fall back to the
// default — NOTE(review): presumably unreachable given the rounding
// arithmetic; confirm.
#define setup_sprite_tiled_builder(texture_mode) \
void setup_sprite_##texture_mode(psx_gpu_struct *psx_gpu, s32 x, s32 y, \
 s32 u, s32 v, s32 width, s32 height, u32 color) \
{ \
  s32 offset_u = u & 0xF; \
  s32 offset_v = v & 0xF; \
  \
  s32 width_rounded = offset_u + width + 15; \
  s32 height_rounded = offset_v + height + 15; \
  s32 tile_height = height_rounded / 16; \
  s32 tile_width = width_rounded / 16; \
  u32 offset_u_right = width_rounded & 0xF; \
  \
  u32 left_block_mask = ~(0xFFFF << offset_u); \
  u32 right_block_mask = 0xFFFE << offset_u_right; \
  \
  u32 left_mask_bits; \
  u32 right_mask_bits; \
  \
  u32 sub_tile_height; \
  u32 column_data; \
  \
  u32 texture_mask = (psx_gpu->texture_mask_width & 0xF) | \
   ((psx_gpu->texture_mask_height & 0xF) << 4) | \
   ((psx_gpu->texture_mask_width >> 4) << 8) | \
   ((psx_gpu->texture_mask_height >> 4) << 12); \
  u32 texture_offset = ((v & 0xF) << 4) | ((u & 0xF0) << 4) | \
   ((v & 0xF0) << 8); \
  u32 texture_offset_base = texture_offset; \
  u32 control_mask; \
  \
  u16 *fb_ptr = psx_gpu->vram_ptr + (y * 1024) + (x - offset_u); \
  u32 num_blocks = psx_gpu->num_blocks; \
  block_struct *block = psx_gpu->blocks + num_blocks; \
  \
  u16 *texture_block_ptr; \
  vec_8x8u texels; \
  \
  setup_sprite_tiled_initialize_##texture_mode(); \
  \
  control_mask = tile_width == 1; \
  control_mask |= (tile_height == 1) << 1; \
  control_mask |= ((left_block_mask & 0xFF) == 0xFF) << 2; \
  control_mask |= (((right_block_mask >> 8) & 0xFF) == 0xFF) << 3; \
  \
  sprites_##texture_mode++; \
  \
  switch(control_mask) \
  { \
    default: \
    case 0x0: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, full); \
      break; \
    \
    case 0x1: \
      setup_sprite_tile_column_width_single(texture_mode, multi, full, none); \
      break; \
    \
    case 0x2: \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, full); \
      break; \
    \
    case 0x3: \
      setup_sprite_tile_column_width_single(texture_mode, single, full, none); \
      break; \
    \
    case 0x4: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, full); \
      break; \
    \
    case 0x5: \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, right); \
      break; \
    \
    case 0x6: \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, full); \
      break; \
    \
    case 0x7: \
      setup_sprite_tile_column_width_single(texture_mode, single, half, right);\
      break; \
    \
    case 0x8: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, full, half); \
      break; \
    \
    case 0x9: \
      setup_sprite_tile_column_width_single(texture_mode, multi, half, left); \
      break; \
    \
    case 0xA: \
      setup_sprite_tile_column_width_multi(texture_mode, single, full, half); \
      break; \
    \
    case 0xB: \
      setup_sprite_tile_column_width_single(texture_mode, single, half, left); \
      break; \
    \
    case 0xC: \
      setup_sprite_tile_column_width_multi(texture_mode, multi, half, half); \
      break; \
    \
    case 0xE: \
      setup_sprite_tile_column_width_multi(texture_mode, single, half, half); \
      break; \
  } \
} \
+
+
+void setup_sprite_4bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
+ s32 width, s32 height, u32 color);
+void setup_sprite_8bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
+ s32 width, s32 height, u32 color);
+void setup_sprite_16bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u, s32 v,
+ s32 width, s32 height, u32 color);
+
+#ifndef PANDORA_BUILD
// Instantiate the C fallback sprite renderers for paletted texture modes.
setup_sprite_tiled_builder(4bpp);
setup_sprite_tiled_builder(8bpp);
+
+/* C reference implementation of the 16bpp sprite blitter (the Pandora build
+   uses a NEON version instead; see the surrounding #ifndef PANDORA_BUILD).
+   16bpp sprites need no CLUT lookup, so texels are copied straight from the
+   texture page into 8-pixel block_structs. Set bits in draw_mask_bits
+   appear to mark pixels that are NOT drawn (edge blocks) -- confirm against
+   the block flush code. */
+void setup_sprite_16bpp(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u,
+ s32 v, s32 width, s32 height, u32 color)
+{
+  // Expand the span so it starts and ends on 8-pixel block boundaries;
+  // left_offset is how far u sticks into the first block.
+  u32 left_offset = u & 0x7;
+  u32 width_rounded = width + left_offset + 7;
+
+  // fb_ptr is backed up to the block-aligned start; 1024 is the VRAM row
+  // pitch in pixels.
+  u16 *fb_ptr = psx_gpu->vram_ptr + (y * 1024) + (x - left_offset);
+  u32 right_width = width_rounded & 0x7;
+  u32 block_width = width_rounded / 8;
+  u32 fb_ptr_pitch = (1024 + 8) - (block_width * 8);
+
+  // Edge masks: left_mask_bits covers the pixels before u in the first
+  // block, right_mask_bits the pixels past the sprite in the last block.
+  u32 left_mask_bits = ~(0xFF << left_offset);
+  u32 right_mask_bits = 0xFE << right_width;
+
+  u32 texture_offset_base = u + (v * 1024);
+  u32 texture_mask =
+   psx_gpu->texture_mask_width | (psx_gpu->texture_mask_height * 1024);
+
+  u32 blocks_remaining;
+  u32 num_blocks = psx_gpu->num_blocks;
+  block_struct *block = psx_gpu->blocks + num_blocks;
+
+  u16 *texture_page_ptr = psx_gpu->texture_page_ptr;
+  u16 *texture_block_ptr;
+
+  // Texel fetches are done 8 at a time from an aligned address.
+  texture_offset_base &= ~0x7;
+
+  sprites_16bpp++;
+
+  if(block_width == 1)
+  {
+    // Sprite fits in one block per row: both edge masks apply at once.
+    u32 mask_bits = left_mask_bits | right_mask_bits;
+
+    while(height)
+    {
+      num_blocks++;
+      sprite_blocks++;
+
+      // Flush when the block buffer would overflow.
+      if(num_blocks > MAX_BLOCKS)
+      {
+        flush_render_block_buffer(psx_gpu);
+        num_blocks = 1;
+        block = psx_gpu->blocks;
+      }
+
+      texture_block_ptr =
+       texture_page_ptr + (texture_offset_base & texture_mask);
+
+      load_128b(block->texels, texture_block_ptr);
+      block->draw_mask_bits = mask_bits;
+      block->fb_ptr = fb_ptr;
+
+      block++;
+
+      // Advance one VRAM row in both texture space and framebuffer.
+      texture_offset_base += 1024;
+      fb_ptr += 1024;
+
+      height--;
+      psx_gpu->num_blocks = num_blocks;
+    }
+  }
+  else
+  {
+    u32 texture_offset;
+
+    while(height)
+    {
+      // Interior blocks between the left and right edge blocks.
+      blocks_remaining = block_width - 2;
+      num_blocks += block_width;
+      sprite_blocks += block_width;
+
+      if(num_blocks > MAX_BLOCKS)
+      {
+        flush_render_block_buffer(psx_gpu);
+        num_blocks = block_width;
+        block = psx_gpu->blocks;
+      }
+
+      texture_offset = texture_offset_base;
+      texture_offset_base += 1024;
+
+      // Left edge block.
+      texture_block_ptr = texture_page_ptr + (texture_offset & texture_mask);
+      load_128b(block->texels, texture_block_ptr);
+
+      block->draw_mask_bits = left_mask_bits;
+      block->fb_ptr = fb_ptr;
+
+      texture_offset += 8;
+      fb_ptr += 8;
+      block++;
+
+      // Interior blocks draw all 8 pixels (mask 0).
+      while(blocks_remaining)
+      {
+        texture_block_ptr = texture_page_ptr + (texture_offset & texture_mask);
+        load_128b(block->texels, texture_block_ptr);
+
+        block->draw_mask_bits = 0;
+        block->fb_ptr = fb_ptr;
+
+        texture_offset += 8;
+        fb_ptr += 8;
+        block++;
+
+        blocks_remaining--;
+      }
+
+      // Right edge block.
+      texture_block_ptr = texture_page_ptr + (texture_offset & texture_mask);
+      load_128b(block->texels, texture_block_ptr);
+
+      block->draw_mask_bits = right_mask_bits;
+      block->fb_ptr = fb_ptr;
+
+      fb_ptr += fb_ptr_pitch;
+      block++;
+
+      height--;
+      psx_gpu->num_blocks = num_blocks;
+    }
+  }
+}
+
+#endif
+
+/* Render an untextured (solid color) sprite. Every block in a row gets the
+   same replicated 15bpp color; only the rightmost block needs a per-pixel
+   draw mask for the partial span. */
+void setup_sprite_untextured(psx_gpu_struct *psx_gpu, s32 x, s32 y, s32 u,
+ s32 v, s32 width, s32 height, u32 color)
+{
+  // Pixels used in the last (possibly partial) 8-pixel block: 1..8.
+  u32 right_width = ((width - 1) & 0x7) + 1;
+  u32 right_mask_bits = (0xFF << right_width);
+  u16 *fb_ptr = psx_gpu->vram_ptr + (y * 1024) + x;
+  u32 block_width = (width + 7) / 8;
+  u32 fb_ptr_pitch = 1024 - ((block_width - 1) * 8);
+  u32 blocks_remaining;
+  u32 num_blocks = psx_gpu->num_blocks;
+  block_struct *block = psx_gpu->blocks + num_blocks;
+
+  u32 color_r = color & 0xFF;
+  u32 color_g = (color >> 8) & 0xFF;
+  u32 color_b = (color >> 16) & 0xFF;
+  vec_8x16u colors;
+  vec_8x16u right_mask;
+  vec_8x16u test_mask = psx_gpu->test_mask;
+  vec_8x16u zero_mask;
+
+  sprites_untextured++;
+
+  // Convert 8:8:8 to 5:5:5 and replicate across all 8 lanes.
+  color = (color_r >> 3) | ((color_g >> 3) << 5) | ((color_b >> 3) << 10);
+
+  dup_8x16b(colors, color);
+  dup_8x16b(zero_mask, 0x00);
+  // Expand right_mask_bits to a per-lane mask: lane N is masked when bit N
+  // of right_mask_bits is set (test_mask holds 0x01..0x80).
+  dup_8x16b(right_mask, right_mask_bits);
+  tst_8x16b(right_mask, right_mask, test_mask);
+
+  while(height)
+  {
+    blocks_remaining = block_width - 1;
+    num_blocks += block_width;
+    sprite_blocks += block_width;
+
+    // Flush if this row would overflow the block buffer.
+    if(num_blocks > MAX_BLOCKS)
+    {
+      flush_render_block_buffer(psx_gpu);
+      num_blocks = block_width;
+      block = psx_gpu->blocks;
+    }
+
+    // Full blocks: draw all 8 pixels (zero draw mask).
+    while(blocks_remaining)
+    {
+      block->pixels = colors;
+      block->draw_mask = zero_mask;
+      block->fb_ptr = fb_ptr;
+
+      fb_ptr += 8;
+      block++;
+      blocks_remaining--;
+    }
+
+    // Last block of the row: apply the right-edge mask.
+    block->pixels = colors;
+    block->draw_mask = right_mask;
+    block->fb_ptr = fb_ptr;
+
+    block++;
+    fb_ptr += fb_ptr_pitch;
+
+    height--;
+    psx_gpu->num_blocks = num_blocks;
+  }
+}
+
+
+
+// Pick the block setup function: textured sprites use the per-depth
+// setup_sprite_*bpp routines, untextured ones share a single routine.
+#define setup_sprite_blocks_switch_textured(texture_mode) \
+  setup_sprite_##texture_mode \
+
+#define setup_sprite_blocks_switch_untextured(texture_mode) \
+  setup_sprite_untextured \
+
+#define setup_sprite_blocks_switch(texturing, texture_mode) \
+  setup_sprite_blocks_switch_##texturing(texture_mode) \
+
+
+// Pick the texture stage. 4bpp and 16bpp map to the untextured (no-op)
+// stage -- presumably their setup routines already emit final texels;
+// only 8bpp needs a separate texture pass. NOTE(review): confirm.
+#define texture_sprite_blocks_switch_4bpp() \
+  texture_blocks_untextured \
+
+#define texture_sprite_blocks_switch_8bpp() \
+  texture_sprite_blocks_8bpp \
+
+#define texture_sprite_blocks_switch_16bpp() \
+  texture_blocks_untextured \
+
+#define texture_sprite_blocks_switch_untextured(texture_mode) \
+  texture_blocks_untextured \
+
+#define texture_sprite_blocks_switch_textured(texture_mode) \
+  texture_sprite_blocks_switch_##texture_mode() \
+
+// One table entry: the pipeline stage functions (setup, texture, shade,
+// blend) for one combination of render flags.
+#define render_sprite_blocks_switch_block_modulation(texture_mode, blend_mode, \
+ mask_evaluate, shading, dithering, texturing, blending, modulation) \
+{ \
+  setup_sprite_blocks_switch(texturing, texture_mode), \
+  texture_sprite_blocks_switch_##texturing(texture_mode), \
+  shade_blocks_switch(unshaded, texturing, modulation, undithered, blending, \
+   mask_evaluate), \
+  blend_blocks_switch(texturing, blending, blend_mode, mask_evaluate) \
+} \
+
+// The following macros expand the cross product of every option axis;
+// their nesting order determines the table's index layout and must match
+// the render_state bit layout.
+#define render_sprite_blocks_switch_block_blending(texture_mode, blend_mode, \
+ mask_evaluate, shading, dithering, texturing, blending) \
+  render_sprite_blocks_switch_block_modulation(texture_mode, blend_mode, \
+   mask_evaluate, shading, dithering, texturing, blending, modulated), \
+  render_sprite_blocks_switch_block_modulation(texture_mode, blend_mode, \
+   mask_evaluate, shading, dithering, texturing, blending, unmodulated) \
+
+#define render_sprite_blocks_switch_block_texturing(texture_mode, blend_mode, \
+ mask_evaluate, shading, dithering, texturing) \
+  render_sprite_blocks_switch_block_blending(texture_mode, blend_mode, \
+   mask_evaluate, shading, dithering, texturing, unblended), \
+  render_sprite_blocks_switch_block_blending(texture_mode, blend_mode, \
+   mask_evaluate, shading, dithering, texturing, blended) \
+
+#define render_sprite_blocks_switch_block_dithering(texture_mode, blend_mode, \
+ mask_evaluate, shading, dithering) \
+  render_sprite_blocks_switch_block_texturing(texture_mode, blend_mode, \
+   mask_evaluate, shading, dithering, untextured), \
+  render_sprite_blocks_switch_block_texturing(texture_mode, blend_mode, \
+   mask_evaluate, shading, dithering, textured) \
+
+#define render_sprite_blocks_switch_block_shading(texture_mode, blend_mode, \
+ mask_evaluate, shading) \
+  render_sprite_blocks_switch_block_dithering(texture_mode, blend_mode, \
+   mask_evaluate, shading, undithered), \
+  render_sprite_blocks_switch_block_dithering(texture_mode, blend_mode, \
+   mask_evaluate, shading, dithered) \
+
+#define render_sprite_blocks_switch_block_mask_evaluate(texture_mode, \
+ blend_mode, mask_evaluate) \
+  render_sprite_blocks_switch_block_shading(texture_mode, blend_mode, \
+   mask_evaluate, unshaded), \
+  render_sprite_blocks_switch_block_shading(texture_mode, blend_mode, \
+   mask_evaluate, shaded) \
+
+#define render_sprite_blocks_switch_block_blend_mode(texture_mode, blend_mode) \
+  render_sprite_blocks_switch_block_mask_evaluate(texture_mode, blend_mode, \
+   off), \
+  render_sprite_blocks_switch_block_mask_evaluate(texture_mode, blend_mode, \
+   on) \
+
+#define render_sprite_blocks_switch_block_texture_mode(texture_mode) \
+  render_sprite_blocks_switch_block_blend_mode(texture_mode, average), \
+  render_sprite_blocks_switch_block_blend_mode(texture_mode, add), \
+  render_sprite_blocks_switch_block_blend_mode(texture_mode, subtract), \
+  render_sprite_blocks_switch_block_blend_mode(texture_mode, add_fourth) \
+
+// Fourth texture-mode slot repeats 4bpp, presumably as a safe filler for
+// an unused encoding. NOTE(review): confirm.
+#define render_sprite_blocks_switch_block() \
+  render_sprite_blocks_switch_block_texture_mode(4bpp), \
+  render_sprite_blocks_switch_block_texture_mode(8bpp), \
+  render_sprite_blocks_switch_block_texture_mode(16bpp), \
+  render_sprite_blocks_switch_block_texture_mode(4bpp) \
+
+
+// Sprite handler table, indexed directly by render_state in
+// render_sprite().
+render_block_handler_struct render_sprite_block_handlers[] =
+{
+  render_sprite_blocks_switch_block()
+};
+
+
+/* Clip a sprite against the viewport, update render state, and dispatch to
+   the matching setup_sprite_* handler. u/v are the top-left texture
+   coordinates; color is 24-bit BGR. */
+void render_sprite(psx_gpu_struct *psx_gpu, s32 x, s32 y, u32 u, u32 v,
+ s32 width, s32 height, u32 flags, u32 color)
+{
+  s32 x_right = x + width - 1;
+  s32 y_bottom = y + height - 1;
+
+  // Reject sprites entirely outside the viewport (this call also handles
+  // texture cache invalidation for the touched region).
+  if(invalidate_texture_cache_region_viewport(psx_gpu, x, y, x_right,
+   y_bottom) == 0)
+  {
+    return;
+  }
+
+  // Clip left/top edges; texture coordinates advance with the clip.
+  if(x < psx_gpu->viewport_start_x)
+  {
+    u32 clip = psx_gpu->viewport_start_x - x;
+    x += clip;
+    u += clip;
+    width -= clip;
+  }
+
+  if(y < psx_gpu->viewport_start_y)
+  {
+    s32 clip = psx_gpu->viewport_start_y - y;
+    y += clip;
+    v += clip;
+    height -= clip;
+  }
+
+  // Clip right/bottom edges.
+  if(x_right > psx_gpu->viewport_end_x)
+    width -= x_right - psx_gpu->viewport_end_x;
+
+  if(y_bottom > psx_gpu->viewport_end_y)
+    height -= y_bottom - psx_gpu->viewport_end_y;
+
+  if((width <= 0) || (height <= 0))
+    return;
+
+  sprites++;
+
+  span_pixels += width * height;
+  spans += height;
+
+  // Sprites ignore shading/dithering: keep only the flags that select a
+  // sprite handler, plus the persistent base state minus the dither bit.
+  u32 render_state = flags &
+   (RENDER_FLAGS_MODULATE_TEXELS | RENDER_FLAGS_BLEND |
+   RENDER_FLAGS_TEXTURE_MAP);
+  render_state |=
+   (psx_gpu->render_state_base & ~RENDER_STATE_DITHER);
+
+  // Any state or primitive-type change flushes queued blocks.
+  if((psx_gpu->render_state != render_state) ||
+   (psx_gpu->primitive_type != PRIMITIVE_TYPE_SPRITE))
+  {
+    psx_gpu->render_state = render_state;
+    flush_render_block_buffer(psx_gpu);
+    state_changes++;
+  }
+
+  psx_gpu->primitive_type = PRIMITIVE_TYPE_SPRITE;
+
+  color &= 0xFFFFFF;
+
+  if(psx_gpu->triangle_color != color)
+  {
+    flush_render_block_buffer(psx_gpu);
+    psx_gpu->triangle_color = color;
+  }
+
+  // 0x808080 is presumably the identity modulation color, so take the
+  // modulate-texels fast path. NOTE(review): this bit is set after
+  // psx_gpu->render_state was stored above, so only the handler index
+  // sees it -- confirm that is intentional.
+  if(color == 0x808080)
+    render_state |= RENDER_FLAGS_MODULATE_TEXELS;
+
+  render_block_handler_struct *render_block_handler =
+   &(render_sprite_block_handlers[render_state]);
+  psx_gpu->render_block_handler = render_block_handler;
+
+  ((setup_sprite_function_type *)render_block_handler->setup_blocks)
+   (psx_gpu, x, y, u, v, width, height, color);
+}
+
+// Per-pixel helpers for line rasterization. These expand inside
+// render_line(), which declares the locals they use (vram_ptr, color_*,
+// current_*, gradient_*).
+
+// Mask evaluation: when enabled, a pixel may only be drawn if the existing
+// framebuffer pixel's mask bit (bit 15) is clear. The previous check was
+// inverted -- it drew only over already-masked pixels.
+#define draw_pixel_line_mask_evaluate_yes() \
+  if((*vram_ptr & 0x8000) == 0) \
+
+#define draw_pixel_line_mask_evaluate_no() \
+
+
+// Gouraud shading: take the current interpolated color, step gradients.
+#define draw_pixel_line_shaded() \
+{ \
+  color_r = fixed_to_int(current_r); \
+  color_g = fixed_to_int(current_g); \
+  color_b = fixed_to_int(current_b); \
+ \
+  current_r += gradient_r; \
+  current_g += gradient_g; \
+  current_b += gradient_b; \
+} \
+
+// Flat shading: unpack the constant 24-bit color.
+#define draw_pixel_line_unshaded() \
+{ \
+  color_r = color & 0xFF; \
+  color_g = (color >> 8) & 0xFF; \
+  color_b = (color >> 16) & 0xFF; \
+} \
+
+
+// Position-dependent dither offset in -4..3, applied before the 8->5 bit
+// truncation, with clamping to 0..255.
+#define draw_pixel_line_dithered(_x, _y) \
+{ \
+  u32 dither_xor = _x ^ _y; \
+  s32 dither_offset = (dither_xor >> 1) & 0x1; \
+  dither_offset |= (_y & 0x1) << 1; \
+  dither_offset |= (dither_xor & 0x1) << 2; \
+  dither_offset -= 4; \
+ \
+  color_r += dither_offset; \
+  color_g += dither_offset; \
+  color_b += dither_offset; \
+ \
+  if(color_r < 0) \
+    color_r = 0; \
+ \
+  if(color_g < 0) \
+    color_g = 0; \
+ \
+  if(color_b < 0) \
+    color_b = 0; \
+ \
+  if(color_r > 255) \
+    color_r = 255; \
+ \
+  if(color_g > 255) \
+    color_g = 255; \
+ \
+  if(color_b > 255) \
+    color_b = 255; \
+} \
+
+#define draw_pixel_line_undithered(_x, _y) \
+
+
+// Blend equations over 5-bit components (the caller shifts down first);
+// fb_* are the destination pixel's components. No trailing semicolon --
+// the expansion site supplies it.
+#define draw_pixel_line_average() \
+  color_r = (color_r + fb_r) / 2; \
+  color_g = (color_g + fb_g) / 2; \
+  color_b = (color_b + fb_b) / 2 \
+
+#define draw_pixel_line_add() \
+  color_r += fb_r; \
+  color_g += fb_g; \
+  color_b += fb_b; \
+ \
+  if(color_r > 31) \
+    color_r = 31; \
+ \
+  if(color_g > 31) \
+    color_g = 31; \
+ \
+  if(color_b > 31) \
+    color_b = 31 \
+
+#define draw_pixel_line_subtract() \
+  color_r = fb_r - color_r; \
+  color_g = fb_g - color_g; \
+  color_b = fb_b - color_b; \
+ \
+  if(color_r < 0) \
+    color_r = 0; \
+ \
+  if(color_g < 0) \
+    color_g = 0; \
+ \
+  if(color_b < 0) \
+    color_b = 0 \
+
+#define draw_pixel_line_add_fourth() \
+  color_r = fb_r + (color_r / 4); \
+  color_g = fb_g + (color_g / 4); \
+  color_b = fb_b + (color_b / 4); \
+ \
+  if(color_r > 31) \
+    color_r = 31; \
+ \
+  if(color_g > 31) \
+    color_g = 31; \
+ \
+  if(color_b > 31) \
+    color_b = 31 \
+
+
+// Load the destination pixel's 5:5:5 components, then apply blend_mode.
+#define draw_pixel_line_blended(blend_mode) \
+  s32 fb_pixel = *vram_ptr; \
+  s32 fb_r = fb_pixel & 0x1F; \
+  s32 fb_g = (fb_pixel >> 5) & 0x1F; \
+  s32 fb_b = (fb_pixel >> 10) & 0x1F; \
+ \
+  draw_pixel_line_##blend_mode() \
+
+#define draw_pixel_line_unblended(blend_mode) \
+
+
+// Draw one line pixel: viewport test, mask test, shade, dither, convert to
+// 5-bit, blend, then store with the forced mask MSB.
+#define draw_pixel_line(_x, _y, shading, blending, dithering, mask_evaluate, \
+ blend_mode) \
+  if((_x >= psx_gpu->viewport_start_x) && (_y >= psx_gpu->viewport_start_y) && \
+   (_x <= psx_gpu->viewport_end_x) && (_y <= psx_gpu->viewport_end_y)) \
+  { \
+    draw_pixel_line_mask_evaluate_##mask_evaluate() \
+    { \
+      draw_pixel_line_##shading(); \
+      draw_pixel_line_##dithering(_x, _y); \
+ \
+      color_r >>= 3; \
+      color_g >>= 3; \
+      color_b >>= 3; \
+ \
+      draw_pixel_line_##blending(blend_mode); \
+ \
+      *vram_ptr = color_r | (color_g << 5) | (color_b << 10) | \
+       psx_gpu->mask_msb; \
+    } \
+  } \
+
+// Minor-axis stepping primitives, parameterized on direction.
+#define update_increment(value) \
+  value++ \
+
+#define update_decrement(value) \
+  value-- \
+
+#define update_vram_row_increment(value) \
+  vram_ptr += 1024 \
+
+#define update_vram_row_decrement(value) \
+  vram_ptr -= 1024 \
+
+#define compare_increment(a, b) \
+  (a <= b) \
+
+#define compare_decrement(a, b) \
+  (a >= b) \
+
+// Set up Gouraud gradients along the major axis. Guard against a zero
+// divisor (zero-length line); previously this divided by zero. The
+// gradients stay at their caller-initialized value of 0 in that case.
+#define set_line_gradients(minor) \
+{ \
+  s32 gradient_divisor = delta_##minor; \
+  if(gradient_divisor != 0) \
+  { \
+    gradient_r = int_to_fixed(vertex_b->r - vertex_a->r) / gradient_divisor; \
+    gradient_g = int_to_fixed(vertex_b->g - vertex_a->g) / gradient_divisor; \
+    gradient_b = int_to_fixed(vertex_b->b - vertex_a->b) / gradient_divisor; \
+  } \
+  current_r = fixed_center(vertex_a->r); \
+  current_g = fixed_center(vertex_a->g); \
+  current_b = fixed_center(vertex_a->b); \
+}
+
+// Major-axis x: step x every pixel, step y when the Bresenham error
+// accumulator wraps. error starts at delta_x (half of error_wrap) for
+// midpoint rounding.
+#define draw_line_span_horizontal(direction, shading, blending, dithering, \
+ mask_evaluate, blend_mode) \
+do \
+{ \
+  error_step = delta_y * 2; \
+  error_wrap = delta_x * 2; \
+  error = delta_x; \
+ \
+  current_y = y_a; \
+  set_line_gradients(x); \
+ \
+  for(current_x = x_a; current_x <= x_b; current_x++) \
+  { \
+    draw_pixel_line(current_x, current_y, shading, blending, dithering, \
+     mask_evaluate, blend_mode); \
+    error += error_step; \
+    vram_ptr++; \
+ \
+    if(error >= error_wrap) \
+    { \
+      update_##direction(current_y); \
+      update_vram_row_##direction(); \
+      error -= error_wrap; \
+    } \
+  } \
+} while(0) \
+
+// Major-axis y: same scheme with the axes swapped. NOTE(review): this
+// variant compares with '>' where the horizontal one uses '>=' -- confirm
+// the asymmetry is intended.
+#define draw_line_span_vertical(direction, shading, blending, dithering, \
+ mask_evaluate, blend_mode) \
+do \
+{ \
+  error_step = delta_x * 2; \
+  error_wrap = delta_y * 2; \
+  error = delta_y; \
+ \
+  current_x = x_a; \
+  set_line_gradients(y); \
+ \
+  for(current_y = y_a; compare_##direction(current_y, y_b); \
+   update_##direction(current_y)) \
+  { \
+    draw_pixel_line(current_x, current_y, shading, blending, dithering, \
+     mask_evaluate, blend_mode); \
+    error += error_step; \
+    update_vram_row_##direction(); \
+ \
+    if(error > error_wrap) \
+    { \
+      vram_ptr++; \
+      current_x++; \
+      error -= error_wrap; \
+    } \
+  } \
+} while(0) \
+
+
+// Choose the y stepping direction from delta_y's sign and the major axis
+// from delta_x vs |delta_y|; reject lines 512 pixels or taller.
+#define render_line_body(shading, blending, dithering, mask_evaluate, \
+ blend_mode) \
+  if(delta_y < 0) \
+  { \
+    delta_y *= -1; \
+ \
+    if(delta_y >= 512) \
+      return; \
+ \
+    if(delta_x > delta_y) \
+    { \
+      draw_line_span_horizontal(decrement, shading, blending, dithering, \
+       mask_evaluate, blend_mode); \
+    } \
+    else \
+    { \
+      draw_line_span_vertical(decrement, shading, blending, dithering, \
+       mask_evaluate, blend_mode); \
+    } \
+  } \
+  else \
+  { \
+    if(delta_y >= 512) \
+      return; \
+ \
+    if(delta_x > delta_y) \
+    { \
+      draw_line_span_horizontal(increment, shading, blending, dithering, \
+       mask_evaluate, blend_mode); \
+    } \
+    else \
+    { \
+      draw_line_span_vertical(increment, shading, blending, dithering, \
+       mask_evaluate, blend_mode); \
+    } \
+  } \
+
+
+/* Rasterize a line between vertexes[0] and vertexes[1] with a
+   Bresenham-style stepper. Lines bypass the block pipeline and write VRAM
+   directly, so queued blocks are flushed first. */
+void render_line(psx_gpu_struct *psx_gpu, vertex_struct *vertexes, u32 flags,
+ u32 color)
+{
+  // Locals referenced by the draw_pixel_line / draw_line_span macro
+  // expansions below.
+  s32 color_r, color_g, color_b;
+  u32 triangle_winding = 0;  // note: unused in this function
+
+  fixed_type gradient_r = 0;
+  fixed_type gradient_g = 0;
+  fixed_type gradient_b = 0;
+  fixed_type current_r = 0;
+  fixed_type current_g = 0;
+  fixed_type current_b = 0;
+
+  s32 y_a, y_b;
+  s32 x_a, x_b;
+
+  s32 delta_x, delta_y;
+
+  s32 current_x;
+  s32 current_y;
+
+  u32 error_step;
+  u32 error;
+  u32 error_wrap;
+
+  u16 *vram_ptr;
+
+  flush_render_block_buffer(psx_gpu);
+  psx_gpu->primitive_type = PRIMITIVE_TYPE_LINE;
+
+  vertex_struct *vertex_a = &(vertexes[0]);
+  vertex_struct *vertex_b = &(vertexes[1]);
+
+  u32 control_mask;
+
+  lines++;
+
+  // Order the vertexes left to right so x always increments.
+  if(vertex_a->x >= vertex_b->x)
+  {
+    vertex_swap(vertex_a, vertex_b);
+  }
+
+  x_a = vertex_a->x;
+  x_b = vertex_b->x;
+
+  y_a = vertex_a->y;
+  y_b = vertex_b->y;
+
+  delta_x = x_b - x_a;
+  delta_y = y_b - y_a;
+
+  // Reject overlong lines (height is checked inside render_line_body).
+  if(delta_x >= 1024)
+    return;
+
+  // Lines are never textured.
+  flags &= ~RENDER_FLAGS_TEXTURE_MAP;
+
+  vram_ptr = psx_gpu->vram_ptr + (y_a * 1024) + x_a;
+
+  // control_mask bit layout: bit 0 = shaded, bit 1 = blended,
+  // bit 2 = dithered, bit 3 = mask evaluate, bits 4-5 = blend mode.
+  control_mask = 0x0;
+
+  if(flags & RENDER_FLAGS_SHADE)
+    control_mask |= 0x1;
+
+  if(flags & RENDER_FLAGS_BLEND)
+  {
+    control_mask |= 0x2;
+    control_mask |= ((psx_gpu->render_state_base >> 6) & 0x3) << 4;
+  }
+
+  if(psx_gpu->render_state_base & RENDER_STATE_DITHER)
+    control_mask |= 0x4;
+
+  if(psx_gpu->render_state_base & RENDER_STATE_MASK_EVALUATE)
+    control_mask |= 0x8;
+
+  // Dispatch to the matching specialized render_line_body instantiation.
+  // Blend mode cases only exist for blended variants (bit 1 set).
+  switch(control_mask)
+  {
+    case 0x0:
+      render_line_body(unshaded, unblended, undithered, no, none);
+      break;
+
+    case 0x1:
+      render_line_body(shaded, unblended, undithered, no, none);
+      break;
+
+    case 0x2:
+      render_line_body(unshaded, blended, undithered, no, average);
+      break;
+
+    case 0x3:
+      render_line_body(shaded, blended, undithered, no, average);
+      break;
+
+    case 0x4:
+      render_line_body(unshaded, unblended, dithered, no, none);
+      break;
+
+    case 0x5:
+      render_line_body(shaded, unblended, dithered, no, none);
+      break;
+
+    case 0x6:
+      render_line_body(unshaded, blended, dithered, no, average);
+      break;
+
+    case 0x7:
+      render_line_body(shaded, blended, dithered, no, average);
+      break;
+
+    case 0x8:
+      render_line_body(unshaded, unblended, undithered, yes, none);
+      break;
+
+    case 0x9:
+      render_line_body(shaded, unblended, undithered, yes, none);
+      break;
+
+    case 0xA:
+      render_line_body(unshaded, blended, undithered, yes, average);
+      break;
+
+    case 0xB:
+      render_line_body(shaded, blended, undithered, yes, average);
+      break;
+
+    case 0xC:
+      render_line_body(unshaded, unblended, dithered, yes, none);
+      break;
+
+    case 0xD:
+      render_line_body(shaded, unblended, dithered, yes, none);
+      break;
+
+    case 0xE:
+      render_line_body(unshaded, blended, dithered, yes, average);
+      break;
+
+    case 0xF:
+      render_line_body(shaded, blended, dithered, yes, average);
+      break;
+
+    case 0x12:
+      render_line_body(unshaded, blended, undithered, no, add);
+      break;
+
+    case 0x13:
+      render_line_body(shaded, blended, undithered, no, add);
+      break;
+
+    case 0x16:
+      render_line_body(unshaded, blended, dithered, no, add);
+      break;
+
+    case 0x17:
+      render_line_body(shaded, blended, dithered, no, add);
+      break;
+
+    case 0x1A:
+      render_line_body(unshaded, blended, undithered, yes, add);
+      break;
+
+    case 0x1B:
+      render_line_body(shaded, blended, undithered, yes, add);
+      break;
+
+    case 0x1E:
+      render_line_body(unshaded, blended, dithered, yes, add);
+      break;
+
+    case 0x1F:
+      render_line_body(shaded, blended, dithered, yes, add);
+      break;
+
+    case 0x22:
+      render_line_body(unshaded, blended, undithered, no, subtract);
+      break;
+
+    case 0x23:
+      render_line_body(shaded, blended, undithered, no, subtract);
+      break;
+
+    case 0x26:
+      render_line_body(unshaded, blended, dithered, no, subtract);
+      break;
+
+    case 0x27:
+      render_line_body(shaded, blended, dithered, no, subtract);
+      break;
+
+    case 0x2A:
+      render_line_body(unshaded, blended, undithered, yes, subtract);
+      break;
+
+    case 0x2B:
+      render_line_body(shaded, blended, undithered, yes, subtract);
+      break;
+
+    case 0x2E:
+      render_line_body(unshaded, blended, dithered, yes, subtract);
+      break;
+
+    case 0x2F:
+      render_line_body(shaded, blended, dithered, yes, subtract);
+      break;
+
+    case 0x32:
+      render_line_body(unshaded, blended, undithered, no, add_fourth);
+      break;
+
+    case 0x33:
+      render_line_body(shaded, blended, undithered, no, add_fourth);
+      break;
+
+    case 0x36:
+      render_line_body(unshaded, blended, dithered, no, add_fourth);
+      break;
+
+    case 0x37:
+      render_line_body(shaded, blended, dithered, no, add_fourth);
+      break;
+
+    case 0x3A:
+      render_line_body(unshaded, blended, undithered, yes, add_fourth);
+      break;
+
+    case 0x3B:
+      render_line_body(shaded, blended, undithered, yes, add_fourth);
+      break;
+
+    case 0x3E:
+      render_line_body(unshaded, blended, dithered, yes, add_fourth);
+      break;
+
+    case 0x3F:
+      render_line_body(shaded, blended, dithered, yes, add_fourth);
+      break;
+  }
+}
+
+
+/* Fill a width x height rectangle of VRAM with a solid color (24-bit BGR
+   input, stored as 5:5:5). The C path writes directly; the Pandora build
+   calls an assembly implementation. */
+void render_block_fill(psx_gpu_struct *psx_gpu, u32 color, u32 x, u32 y,
+ u32 width, u32 height)
+{
+  invalidate_texture_cache_region(psx_gpu, x, y, x + width - 1, y + height - 1);
+
+#ifndef PANDORA_BUILD
+  u32 r = color & 0xFF;
+  u32 g = (color >> 8) & 0xFF;
+  u32 b = (color >> 16) & 0xFF;
+  u32 color_16bpp = (r >> 3) | ((g >> 3) << 5) | ((b >> 3) << 10);
+
+  u16 *vram_ptr = psx_gpu->vram_ptr + x + (y * 1024);
+  u32 draw_x, draw_y;
+
+  for(draw_y = 0; draw_y < height; draw_y++)
+  {
+    for(draw_x = 0; draw_x < width; draw_x++)
+    {
+      vram_ptr[draw_x] = color_16bpp;
+    }
+
+    // 1024 is the VRAM row pitch in pixels.
+    vram_ptr += 1024;
+  }
+#else
+  // Assembly fill; declared locally to keep it out of the public header.
+  void render_block_fill_body(psx_gpu_struct *psx_gpu, u32 color, u32 x, u32 y,
+   u32 width, u32 height);
+
+  render_block_fill_body(psx_gpu, color, x, y, width, height);
+#endif
+}
+
+/* Copy a width x height rectangle of 16bpp pixels into VRAM at (x, y),
+   reading rows of 'pitch' pixels from source. Rows are copied element by
+   element, front to back -- this ordering matters when the source overlaps
+   the destination (render_block_move passes VRAM itself as the source). */
+void render_block_copy(psx_gpu_struct *psx_gpu, u16 *source, u32 x, u32 y,
+ u32 width, u32 height, u32 pitch)
+{
+  u16 *dest_row = psx_gpu->vram_ptr + x + (y * 1024);
+  u32 row, column;
+
+  invalidate_texture_cache_region(psx_gpu, x, y, x + width - 1, y + height - 1);
+
+  for(row = 0; row < height; row++)
+  {
+    for(column = 0; column < width; column++)
+      dest_row[column] = source[column];
+
+    source += pitch;
+    dest_row += 1024;
+  }
+}
+
+/* VRAM-to-VRAM rectangle move, implemented as a block copy whose source
+   row pitch is the VRAM width (1024 pixels). */
+void render_block_move(psx_gpu_struct *psx_gpu, u32 source_x, u32 source_y,
+ u32 dest_x, u32 dest_y, u32 width, u32 height)
+{
+  u16 *source = psx_gpu->vram_ptr + source_x + (source_y * 1024);
+
+  render_block_copy(psx_gpu, source, dest_x, dest_y, width, height, 1024);
+}
+
+
+/* Precompute fixed-point reciprocals for every triangle height (1..511).
+   Each table entry packs the rounded-up 2^50 reciprocal of the
+   top-bit-normalized height in the upper bits and the matching shift
+   amount in the low 12 bits. */
+void initialize_reciprocal_table(void)
+{
+  u32 height;
+
+  for(height = 1; height < 512; height++)
+  {
+    s32 shift = __builtin_clz(height);
+    u32 normalized = height << shift;
+    // Ceiling division so the reciprocal never underestimates.
+    u32 reciprocal = ((1ULL << 50) + (normalized - 1)) / normalized;
+
+    reciprocal_table[height] = (reciprocal << 12) | (32 - (50 - shift));
+  }
+}
+
+
+#define dither_table_row(a, b, c, d) \
+ ((a & 0xFF) | ((b & 0xFF) << 8) | ((c & 0xFF) << 16) | ((d & 0xFF) << 24)) \
+
+/* One-time setup of a psx_gpu_struct: allocates the pixel compare/count
+   side buffers, resets render state, clears VRAM, and builds the lookup
+   tables. */
+void initialize_psx_gpu(psx_gpu_struct *psx_gpu)
+{
+  // One bit per lane (0x01..0x80); used to expand scalar pixel masks into
+  // per-lane vector masks via tst_8x16b.
+  vec_8x16u test_mask =
+   { { { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 } } };
+
+  psx_gpu->test_mask = test_mask;
+
+  psx_gpu->pixel_count_mode = 0;
+  psx_gpu->pixel_compare_mode = 0;
+
+  // Verification/debug side buffers. NOTE(review): malloc results are not
+  // checked; an allocation failure here crashes later.
+  psx_gpu->vram_pixel_counts_a = malloc(sizeof(u8) * 1024 * 512);
+  psx_gpu->vram_pixel_counts_b = malloc(sizeof(u8) * 1024 * 512);
+  memset(psx_gpu->vram_pixel_counts_a, 0, sizeof(u8) * 1024 * 512);
+  memset(psx_gpu->vram_pixel_counts_b, 0, sizeof(u8) * 1024 * 512);
+  psx_gpu->compare_vram = malloc(sizeof(u16) * 1024 * 512);
+
+  // Mark every texture cache page dirty so first use reloads it.
+  psx_gpu->dirty_textures_4bpp_mask = 0xFFFFFFFF;
+  psx_gpu->dirty_textures_8bpp_mask = 0xFFFFFFFF;
+  psx_gpu->dirty_textures_8bpp_alternate_mask = 0xFFFFFFFF;
+  psx_gpu->viewport_mask = 0;
+  psx_gpu->current_texture_page = 0;
+  psx_gpu->current_texture_mask = 0;
+  psx_gpu->last_8bpp_texture_page = 0;
+
+  psx_gpu->clut_settings = 0;
+  psx_gpu->texture_settings = 0;
+  psx_gpu->render_state = 0;
+  psx_gpu->render_state_base = 0;
+  psx_gpu->num_blocks = 0;
+
+  psx_gpu->vram_ptr = psx_gpu->_vram;
+
+  psx_gpu->texture_page_ptr = psx_gpu->vram_ptr;
+  psx_gpu->clut_ptr = psx_gpu->vram_ptr;
+
+  psx_gpu->mask_msb = 0;
+
+  memset(psx_gpu->vram_ptr, 0, sizeof(u16) * 1024 * 512);
+
+  initialize_reciprocal_table();
+
+  // Dither offset table, one row per y & 3 (see derivation below).
+  // 00 01 10 11
+  // 00 0  4  1  5
+  // 01 6  2  7  3
+  // 10 1  5  0  4
+  // 11 7  3  6  2
+  // (minus ones(4) * 4)
+
+  // d0: (1 3 5 7): x1 ^ y1
+  // d1: (2 3 6 7): y0
+  // d2: (4 5 6 7): x0 ^ y0
+
+
+  psx_gpu->dither_table[0] = dither_table_row(-4, 0, -3, 1);
+  psx_gpu->dither_table[1] = dither_table_row(2, -2, 3, -1);
+  psx_gpu->dither_table[2] = dither_table_row(-3, 1, -4, 0);
+  psx_gpu->dither_table[3] = dither_table_row(3, -1, 2, -2);
+
+  psx_gpu->primitive_type = PRIMITIVE_TYPE_UNKNOWN;
+}
+
+/* Wall-clock time in microseconds, via gettimeofday(). */
+u64 get_us(void)
+{
+  struct timeval tv;
+
+  gettimeofday(&tv, NULL);
+  return ((u64)tv.tv_sec * 1000000) + tv.tv_usec;
+}
+
+#ifdef PANDORA_BUILD
+
+// Read the raw ARM CP15 cycle counter (CCNT, c9/c13/0); only meaningful
+// after init_counter() has enabled it.
+u32 get_counter()
+{
+  u32 counter;
+  __asm__ volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(counter));
+
+  return counter;
+}
+
+/* Enable the ARM performance monitor cycle counter so get_counter() can
+   read it (requires user-mode PMU access to be granted). */
+void init_counter(void)
+{
+  u32 value;
+  // Read-modify-write the performance monitor control register (PMCR).
+  asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(value));
+  value |= 5; // master enable, ccnt reset
+  value &= ~8; // ccnt divider 0
+  asm volatile("mcr p15, 0, %0, c9, c12, 0" :: "r"(value));
+  // Enable the cycle counter. Use an unsigned constant: the previous
+  // (1 << 31) overflows a signed int, which is undefined behavior in C.
+  asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(1U << 31));
+}
+
+/* Pandora-only microbenchmark: repeatedly runs the 8bpp sprite texture
+   stage over a full 64-entry block buffer and reports average cycles per
+   block using the CP15 cycle counter. */
+void triangle_benchmark(psx_gpu_struct *psx_gpu)
+{
+  u32 i;
+
+  u32 ticks;
+  u32 ticks_elapsed;
+
+  const u32 iterations = 500000;
+
+  psx_gpu->num_blocks = 64;
+  psx_gpu->clut_ptr = psx_gpu->vram_ptr;
+
+  // Zero the color portion of every block so the stage reads known data.
+  for(i = 0; i < 64; i++)
+  {
+    memset(&(psx_gpu->blocks[i].r), 0, 16);
+  }
+
+  init_counter();
+
+  ticks = get_counter();
+
+  for(i = 0; i < iterations; i++)
+  {
+    texture_sprite_blocks_8bpp(psx_gpu);
+  }
+
+  ticks_elapsed = get_counter() - ticks;
+
+  // 64 blocks per iteration.
+  printf("benchmark: %lf cycles\n", (double)ticks_elapsed / (iterations * 64));
+}
+
+#endif
+
--- /dev/null
+/*
+ * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef PSX_GPU_H
+#define PSX_GPU_H
+
+// Primitive classes tracked in psx_gpu_struct.primitive_type; a change of
+// primitive type forces a render block flush.
+typedef enum
+{
+  PRIMITIVE_TYPE_TRIANGLE = 0,
+  PRIMITIVE_TYPE_SPRITE = 1,
+  PRIMITIVE_TYPE_LINE = 2,
+  PRIMITIVE_TYPE_UNKNOWN = 3
+} primitive_type_enum;
+
+// Texture color depths supported by the PSX GPU.
+typedef enum
+{
+  TEXTURE_MODE_4BPP = 0,
+  TEXTURE_MODE_8BPP = 1,
+  TEXTURE_MODE_16BPP = 2
+} texture_mode_enum;
+
+// Semi-transparency blend equations.
+typedef enum
+{
+  BLEND_MODE_AVERAGE = 0,
+  BLEND_MODE_ADD = 1,
+  BLEND_MODE_SUBTRACT = 2,
+  BLEND_MODE_ADD_FOURTH = 3
+} blend_mode_enum;
+
+// Per-primitive flags passed to the render_* entry points; the low bits
+// also participate in indexing the render handler tables.
+typedef enum
+{
+  RENDER_FLAGS_MODULATE_TEXELS = 0x1,
+  RENDER_FLAGS_BLEND = 0x2,
+  RENDER_FLAGS_TEXTURE_MAP = 0x4,
+  RENDER_FLAGS_QUAD = 0x8,
+  RENDER_FLAGS_SHADE = 0x10,
+} render_flags_enum;
+
+// Persistent state bits kept in psx_gpu_struct.render_state_base.
+typedef enum
+{
+  RENDER_STATE_DITHER = 0x8,
+  RENDER_STATE_MASK_EVALUATE = 0x20,
+} render_state_enum;
+
+// Per-span edge description produced by triangle setup.
+typedef struct
+{
+  u16 left_x;
+  u16 num_blocks;
+  u16 right_mask;
+  u16 y;
+} edge_data_struct;
+
+// One 8-pixel render block: the unit of work queued in
+// psx_gpu_struct.blocks and consumed when the block buffer is flushed.
+// 64 bytes total
+typedef struct
+{
+  // 16 bytes
+  // Reused across pipeline stages: texture coordinates, then fetched
+  // texels, then the per-pixel draw mask.
+  union
+  {
+    vec_8x16u uv;
+    vec_8x16u texels;
+    vec_8x16u draw_mask;
+  };
+
+  // 24 bytes
+  // Per-channel colors during shading, or final packed pixels.
+  union
+  {
+    struct
+    {
+      vec_8x8u r;
+      vec_8x8u g;
+      vec_8x8u b;
+    };
+
+    vec_8x16u pixels;
+  };
+
+  // 8 bytes
+  u32 draw_mask_bits;
+  u16 *fb_ptr;
+
+  // 16 bytes
+  vec_8x16u dither_offsets;
+} block_struct;
+
+#define MAX_SPANS 512
+#define MAX_BLOCKS 64
+#define MAX_BLOCKS_PER_ROW 128
+
+#define SPAN_DATA_BLOCKS_SIZE 32
+
+typedef struct render_block_handler_struct render_block_handler_struct;
+
+// Complete renderer state. NOTE(review): the member layout appears to be
+// mirrored by hand-maintained byte offsets used from assembly -- keep
+// them in sync when changing this struct.
+typedef struct
+{
+  // 144 bytes
+  // Per-lane bit constants (0x01..0x80) for expanding scalar masks.
+  vec_8x16u test_mask;
+
+  // Current triangle interpolants (u, v, r, g) and their x/y steps.
+  vec_4x32u uvrg;
+  vec_4x32u uvrg_dx;
+  vec_4x32u uvrg_dy;
+
+  vec_4x32u u_block_span;
+  vec_4x32u v_block_span;
+  vec_4x32u r_block_span;
+  vec_4x32u g_block_span;
+  vec_4x32u b_block_span;
+
+  // 72 bytes
+  u32 b;
+  u32 b_dy;
+
+  u32 triangle_area;
+
+  u32 texture_window_settings;
+  u32 current_texture_mask;
+  u32 viewport_mask;
+  u32 dirty_textures_4bpp_mask;
+  u32 dirty_textures_8bpp_mask;
+  u32 dirty_textures_8bpp_alternate_mask;
+
+  u32 triangle_color;
+  u32 primitive_color;
+
+  u32 dither_table[4];
+
+  struct render_block_handler_struct *render_block_handler;
+  void *texture_page_ptr;
+  u16 *clut_ptr;
+  u16 *vram_ptr;
+
+  // 26 bytes
+  u16 render_state_base;
+  u16 render_state;
+
+  u16 num_spans;
+  u16 num_blocks;
+
+  s16 offset_x;
+  s16 offset_y;
+
+  u16 clut_settings;
+  u16 texture_settings;
+
+  s16 viewport_start_x;
+  s16 viewport_start_y;
+  s16 viewport_end_x;
+  s16 viewport_end_y;
+
+  // OR'd into every written pixel (bit 15 force-set mode).
+  u16 mask_msb;
+
+  // 8 bytes
+  u8 triangle_winding;
+
+  u8 display_area_draw_enable;
+
+  u8 current_texture_page;
+  u8 last_8bpp_texture_page;
+
+  u8 texture_mask_width;
+  u8 texture_mask_height;
+  u8 texture_window_x;
+  u8 texture_window_y;
+
+  u8 primitive_type;
+
+  // Align up to 64 byte boundary to keep the upcoming buffers cache line
+  // aligned
+  u8 reserved_a[1];
+
+  // 8KB
+  block_struct blocks[MAX_BLOCKS_PER_ROW];
+
+  // 14336 bytes
+  vec_4x32u span_uvrg_offset[MAX_SPANS];
+  edge_data_struct span_edge_data[MAX_SPANS];
+  u32 span_b_offset[MAX_SPANS];
+
+  // 1024x512 16bpp VRAM plus decoded texture caches.
+  u16 _vram[1024 * 512];
+  u8 texture_4bpp_cache[32][256 * 256];
+  u8 texture_8bpp_even_cache[16][256 * 256];
+  u8 texture_8bpp_odd_cache[16][256 * 256];
+
+  // Debug/verification state (see initialize_psx_gpu).
+  u32 pixel_count_mode;
+  u32 pixel_compare_mode;
+
+  u8 *vram_pixel_counts_a;
+  u8 *vram_pixel_counts_b;
+  u16 *compare_vram;
+} psx_gpu_struct;
+
+// One input vertex: texture coordinates, color, and signed screen
+// position. 16-byte aligned for vector loads.
+typedef struct __attribute__((aligned(16)))
+{
+  u8 u;
+  u8 v;
+
+  u8 r;
+  u8 g;
+  u8 b;
+
+  u8 reserved[3];
+
+  s16 x;
+  s16 y;
+} vertex_struct;
+
+void render_block_fill(psx_gpu_struct *psx_gpu, u32 color, u32 x, u32 y,
+ u32 width, u32 height);
+void render_block_copy(psx_gpu_struct *psx_gpu, u16 *source, u32 x, u32 y,
+ u32 width, u32 height, u32 pitch);
+void render_block_move(psx_gpu_struct *psx_gpu, u32 source_x, u32 source_y,
+ u32 dest_x, u32 dest_y, u32 width, u32 height);
+
+void render_triangle(psx_gpu_struct *psx_gpu, vertex_struct *vertexes,
+ u32 flags);
+void render_sprite(psx_gpu_struct *psx_gpu, s32 x, s32 y, u32 u, u32 v,
+ s32 width, s32 height, u32 flags, u32 color);
+void render_line(psx_gpu_struct *gpu, vertex_struct *vertexes, u32 flags,
+ u32 color);
+
+u32 texture_region_mask(s32 x1, s32 y1, s32 x2, s32 y2);
+
+void flush_render_block_buffer(psx_gpu_struct *psx_gpu);
+
+void initialize_psx_gpu(psx_gpu_struct *psx_gpu);
+void gpu_parse(psx_gpu_struct *psx_gpu, u32 *list, u32 size);
+
+void triangle_benchmark(psx_gpu_struct *psx_gpu);
+
+#endif
+
--- /dev/null
+/*
+ * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+@ Batching limits for the span/block rasterizer.
+#define MAX_SPANS 512
+#define MAX_BLOCKS 64
+#define MAX_BLOCKS_PER_ROW 128
+
+@ Byte offsets of psx_gpu_struct members as consumed by this assembly file.
+@ These MUST be kept in sync by hand with the struct layout in psx_gpu.h --
+@ there is no automatic check, so any struct change requires updating them.
+#define psx_gpu_test_mask_offset 0
+#define psx_gpu_uvrg_offset 16
+#define psx_gpu_uvrg_dx_offset 32
+#define psx_gpu_uvrg_dy_offset 48
+#define psx_gpu_u_block_span_offset 64
+#define psx_gpu_v_block_span_offset 80
+#define psx_gpu_r_block_span_offset 96
+#define psx_gpu_g_block_span_offset 112
+#define psx_gpu_b_block_span_offset 128
+
+#define psx_gpu_b_dx_offset 132
+
+#define psx_gpu_b_offset 144
+#define psx_gpu_b_dy_offset 148
+#define psx_gpu_triangle_area_offset 152
+#define psx_gpu_texture_window_settings_offset 156
+#define psx_gpu_current_texture_mask_offset 160
+#define psx_gpu_viewport_mask_offset 164
+#define psx_gpu_dirty_textures_4bpp_mask_offset 168
+#define psx_gpu_dirty_textures_8bpp_mask_offset 172
+#define psx_gpu_dirty_textures_8bpp_alternate_mask_offset 176
+#define psx_gpu_triangle_color_offset 180
+#define psx_gpu_primitive_color_offset 184
+#define psx_gpu_dither_table_offset 188
+#define psx_gpu_render_block_handler_offset 204
+#define psx_gpu_texture_page_ptr_offset 208
+#define psx_gpu_clut_ptr_offset 212
+#define psx_gpu_vram_ptr_offset 216
+
+@ 16-bit members from here down (loaded with ldrh/strh below).
+#define psx_gpu_render_state_base_offset 220
+#define psx_gpu_render_state_offset 222
+#define psx_gpu_num_spans_offset 224
+#define psx_gpu_num_blocks_offset 226
+#define psx_gpu_offset_x_offset 228
+#define psx_gpu_offset_y_offset 230
+#define psx_gpu_clut_settings_offset 232
+#define psx_gpu_texture_settings_offset 234
+#define psx_gpu_viewport_start_x_offset 236
+#define psx_gpu_viewport_start_y_offset 238
+#define psx_gpu_viewport_end_x_offset 240
+#define psx_gpu_viewport_end_y_offset 242
+#define psx_gpu_mask_msb_offset 244
+
+@ 8-bit members (loaded with ldrb below).
+#define psx_gpu_triangle_winding_offset 246
+#define psx_gpu_display_area_draw_enable_offset 247
+#define psx_gpu_current_texture_page_offset 248
+#define psx_gpu_last_8bpp_texture_page_offset 249
+#define psx_gpu_texture_mask_width_offset 250
+#define psx_gpu_texture_mask_height_offset 251
+#define psx_gpu_texture_window_x_offset 252
+#define psx_gpu_texture_window_y_offset 253
+#define psx_gpu_primitive_type_offset 254
+
+#define psx_gpu_reserved_a_offset 255
+
+@ Offsets of the large trailing arrays (blocks, per-span uvrg / edge / b data).
+#define psx_gpu_blocks_offset 0x0100
+#define psx_gpu_span_uvrg_offset_offset 0x2100
+#define psx_gpu_span_edge_data_offset 0x4100
+#define psx_gpu_span_b_offset_offset 0x5100
+
+#define psx_gpu__vram_offset 0x005900
+
+@ Byte offsets within one edge_data_struct entry (8 bytes per span).
+#define edge_data_left_x_offset 0
+#define edge_data_num_blocks_offset 2
+#define edge_data_right_mask_offset 4
+#define edge_data_y_offset 6
+
+@ ---------------------------------------------------------------------------
+@ ARM core register aliases for compute_all_gradients. psx_gpu/v_a/v_b/v_c
+@ mirror the AAPCS argument registers r0-r3. Many aliases deliberately share
+@ a physical register (e.g. x0_x1 overlays x1) because the packed value
+@ replaces the scalar once it has been consumed.
+@ ---------------------------------------------------------------------------
+#define psx_gpu r0
+#define v_a r1
+#define v_b r2
+#define v_c r3
+
+#define x0 r4
+#define x1 r5
+#define x2 r6
+#define x0_x1 r5
+#define x1_x2 r6
+#define y0 r7
+#define y1 r8
+#define y2 r9
+#define y0_y1 r7
+#define y1_y2 r8
+#define b0 r9
+#define b1 r10
+#define b2 r11
+#define b0_b1 r10
+#define b1_b2 r11
+
+
+#define area_r_s r5
+
+#define g_bx0 r2
+#define g_bx r3
+#define g_bx2 r4
+#define g_bx3 r5
+#define b_base r6
+#define g_by r8
+
+#define gs_bx r7
+#define gs_by r10
+
+#define ga_bx g_bx
+#define ga_by g_by
+
+#define gw_bx_h g_bx
+#define gw_by_h g_by
+
+@ The low 32 bits of both 64-bit b-gradient products are discarded, so the
+@ two low-half aliases can safely share r11.
+#define gw_bx_l r11
+#define gw_by_l gw_bx_l
+
+#define store_a r0
+#define store_b r1
+#define store_inc r5
+
+
+@ ---------------------------------------------------------------------------
+@ NEON register aliases. Each q register also has d-register views of its
+@ halves (qN = d(2N):d(2N+1)); overlapping aliases below follow that mapping.
+@ ---------------------------------------------------------------------------
+#define v0 q0
+#define uvrgb0 d0
+#define x0_y0 d1
+
+#define v1 q1
+#define uvrgb1 d2
+#define x1_y1 d3
+
+#define v2 q2
+#define uvrgb2 d4
+#define x2_y2 d5
+
+#define x0_ab q3
+#define uvrg_xxxx0 q3
+#define uvrg0 d6
+#define xxxx0 d7
+
+#define x1_ab q4
+#define uvrg_xxxx1 q4
+#define uvrg1 d8
+#define xxxx1 d9
+
+#define x2_ab q5
+#define uvrg_xxxx2 q5
+#define uvrg2 d10
+#define xxxx2 d11
+
+#define y0_ab q6
+#define yyyy_uvrg0 q6
+#define yyyy0 d12
+#define uvrg0b d13
+
+#define y1_ab q7
+#define yyyy_uvrg1 q7
+#define yyyy1 d14
+#define uvrg1b d15
+
+#define y2_ab q8
+#define yyyy_uvrg2 q8
+#define yyyy2 d16
+#define uvrg2b d17
+
+#define d0_ab q9
+#define d0_a d18
+#define d0_b d19
+
+#define d1_ab q10
+#define d1_a d20
+#define d1_b d21
+
+#define d2_ab q11
+#define d2_a d22
+#define d2_b d23
+
+#define d3_ab q12
+#define d3_a d24
+#define d3_b d25
+
+#define ga_uvrg_x q1
+#define ga_uvrg_y q4
+
+#define dx x0_x1
+#define dy y0_y1
+#define db b0_b1
+
+#define uvrg_base q11
+
+#define gs_uvrg_x q5
+#define gs_uvrg_y q6
+
+#define g_uvrg_x q1
+#define ga_uv_x d2
+#define g_uv_x d2
+#define ga_rg_x d3
+#define g_rg_x d3
+
+#define g_uvrg_y q4
+#define ga_uv_y d8
+#define g_uv_y d8
+#define ga_rg_y d9
+#define g_rg_y d9
+
+#define gw_uv_x q1
+#define gw_rg_x q2
+#define gw_uv_y q4
+#define gw_rg_y q3
+
+#define w_mask q9
+#define w_mask_l d18
+
+#define r_shift q10
+
+#define uvrg_dx0 q0
+#define uvrg_dx0l d0
+#define uvrg_dx0h d1
+
+#define uvrg_dx1 q1
+#define uvrg_dx1l d2
+#define uvrg_dx1h d3
+
+#define uvrg_dx2 q2
+#define uvrg_dx2l d4
+#define uvrg_dx2h d5
+
+#define uvrg_dx3 q3
+#define uvrg_dx3l d6
+#define uvrg_dx3h d7
+
+
+.align 4
+
+@ Declares a global assembly entry point; the trailing backslash keeps the
+@ label on the line where the macro is invoked.
+#define function(name) \
+ .global name; \
+ name: \
+
+@ compute_all_gradients(psx_gpu, v_a, v_b, v_c):
+@ Computes the per-pixel x/y interpolation gradients for u, v, r, g (NEON)
+@ and b (ARM scalar), dividing the edge cross products by the triangle area
+@ via a double-precision reciprocal built by hand so that the vdiv.f64
+@ executes concurrently with the integer/NEON setup. Results (uvrg base,
+@ per-x step table, per-y step, and the b base/dx/dy table) are stored back
+@ into psx_gpu starting at psx_gpu_uvrg_offset. ARM and NEON instructions
+@ are deliberately interleaved for dual-issue; do not reorder casually.
+@ r0: psx_gpu
+@ r1: v_a
+@ r2: v_b
+@ r3: v_c
+
+function(compute_all_gradients)
+ // First compute the triangle area reciprocal and shift. The division will
+ // happen concurrently with much of the work which follows.
+ @ r12 = psx_gpu->triangle_area
+ ldr r12, [ psx_gpu, #psx_gpu_triangle_area_offset ]
+ stmdb sp!, { r4 - r11, lr }
+
+ @ load exponent of 62 into upper half of double
+ movw r4, #0
+ clz r14, r12 @ r14 = shift
+
+ movt r4, #((62 + 1023) << 4)
+ mov r12, r12, lsl r14 @ r12 = triangle_area_normalized
+
+ @ load area normalized into lower half of double
+ mov r5, r12, lsr #10
+ vmov.f64 d30, r5, r4 @ d30 = (1 << 62) + ta_n
+
+ movt r4, #((1022 + 31) << 4)
+ mov r5, r12, lsl #20
+
+ add r4, r4, r12, lsr #11
+ vmov.f64 d31, r5, r4
+
+ vdiv.f64 d30, d30, d31 @ d30 = ((1 << 62) + ta_n) / ta_n
+
+ // ((x1 - x0) * (y2 - y1)) - ((x2 - x1) * (y1 - y0)) =
+ // ( d0 * d1 ) - ( d2 * d3 ) =
+ // ( m0 ) - ( m1 ) = gradient
+
+ // This is split to do 12 elements at a time over three sets: a, b, and c.
+ // Technically we only need to do 10 elements (uvrgb_x and uvrgb_y), so
+ // two of the slots are unused.
+
+ // Inputs are all 16-bit signed. The m0/m1 results are 32-bit signed, as
+ // is g.
+
+ // First type is: uvrg bxxx xxxx
+ // Second type is: yyyy ybyy uvrg
+ // Since x_a and y_c are the same, the same variable is used for both.
+
+ @ x0 is loaded sign-extended because it is later used as a full 32-bit
+ @ operand (vdup/mls below); x1/x2/y* only ever contribute their low
+ @ halfwords to the packed pairs, so plain ldrh suffices for them.
+ vld1.u32 { v0 }, [ v_a, : 128 ] @ v0 = { uvrg0, b0, x0, y0 }
+ ldrsh x0, [ v_a, #8 ] @ load x0
+
+ vld1.u32 { v1 }, [ v_b, : 128 ] @ v1 = { uvrg1, b1, x1, y1 }
+ ldrh x1, [ v_b, #8 ] @ load x1
+
+ vld1.u32 { v2 }, [ v_c, : 128 ] @ v2 = { uvrg2, b2, x2, y2 }
+ ldrh x2, [ v_c, #8 ] @ load x2
+
+ vmovl.u8 uvrg_xxxx0, uvrgb0 @ uvrg_xxxx0 = { uv0, rg0, b0-, -- }
+ ldrh y0, [ v_a, #10 ] @ load y0
+
+ vmovl.u8 uvrg_xxxx1, uvrgb1 @ uvrg_xxxx1 = { uv1, rg1, b1-, -- }
+ ldrh y1, [ v_b, #10 ] @ load y1
+
+ vmovl.u8 uvrg_xxxx2, uvrgb2 @ uvrg_xxxx2 = { uv2, rg2, b2-, -- }
+ ldrh y2, [ v_c, #10 ] @ load y2
+
+ vmov.u8 uvrg0b, uvrg0 @ uvrg0b = { uv0, rg0 }
+ vdup.u16 xxxx0, x0_y0[0] @ xxxx0 = { xx0, xx0 }
+
+ orr x1_x2, x1, x2, lsl #16 @ x1_x2 = { x1, x2 }
+ pkhbt x0_x1, x0, x1, lsl #16 @ x0_x1 = { x0, x1 }
+
+ vmov.u8 uvrg1b, uvrg1 @ uvrg1b = { uv1, rg1 }
+ vdup.u16 xxxx1, x1_y1[0] @ xxxx1 = { xx1, xx1 }
+
+ vmov.u8 uvrg2b, uvrg2 @ uvrg2b = { uv2, rg2 }
+ vdup.u16 xxxx2, x2_y2[0] @ xxxx2 = { xx2, xx2 }
+
+ ldrb b2, [ v_c, #4 ] @ load b2
+ orr y0_y1, y0, y1, lsl #16 @ y0_y1 = { y0, y1 }
+
+ ldrb b1, [ v_b, #4 ] @ load b1
+ orr y1_y2, y1, y2, lsl #16 @ y1_y2 = { y1, y2 }
+
+ vdup.u16 yyyy0, x0_y0[1] @ yyyy0 = { yy0, yy0 }
+ vsub.s16 d0_ab, x1_ab, x0_ab
+
+ ldrb b0, [ v_a, #4 ] @ load b0
+ orr b1_b2, b1, b2, lsl #16 @ b1_b2 = { b1, b2 }
+
+ vdup.u16 yyyy1, x1_y1[1] @ yyyy1 = { yy1, yy1 }
+ vsub.s16 d2_ab, x2_ab, x1_ab
+
+ vdup.u16 yyyy2, x2_y2[1] @ yyyy2 = { yy2, yy2 }
+ vsub.s16 d1_ab, y2_ab, y1_ab
+
+ orr b0_b1, b0, b1, lsl #16 @ b0_b1 = { b0, b1 }
+ ssub16 dx, x1_x2, x0_x1 @ dx = { x1 - x0, x2 - x1 }
+
+ ssub16 dy, y1_y2, y0_y1 @ dy = { y1 - y0, y2 - y1 }
+ ssub16 db, b1_b2, b0_b1 @ db = { b1 - b0, b2 - b1 }
+
+ vsub.s16 d3_ab, y1_ab, y0_ab
+ smusdx ga_by, dx, db @ ga_by = ((x1 - x0) * (b2 - b1)) -
+ @ ((x2 - x1) * (b1 - b0))
+ vmull.s16 ga_uvrg_x, d0_a, d1_a
+ smusdx ga_bx, db, dy @ ga_bx = ((b1 - b0) * (y2 - y1)) -
+ @ ((b2 - b1) * (y1 - y0))
+ vmlsl.s16 ga_uvrg_x, d2_a, d3_a
+ movs gs_bx, ga_bx, asr #31
+
+ vmull.s16 ga_uvrg_y, d0_b, d1_b
+ rsbmi ga_bx, ga_bx, #0
+
+ vmlsl.s16 ga_uvrg_y, d2_b, d3_b
+ movs gs_by, ga_by, asr #31
+
+ vshr.u64 d0, d30, #22
+ mov b_base, b0, lsl #16
+
+ rsbmi ga_by, ga_by, #0
+ vclt.s32 gs_uvrg_x, ga_uvrg_x, #0 @ gs_uvrg_x = ga_uvrg_x < 0
+
+ @ r12 = psx_gpu->triangle_winding_offset
+ ldrb r12, [ psx_gpu, #psx_gpu_triangle_winding_offset ]
+ vclt.s32 gs_uvrg_y, ga_uvrg_y, #0 @ gs_uvrg_y = ga_uvrg_y < 0
+
+ add b_base, b_base, #0x8000
+ rsb r12, r12, #0 @ r12 = -(triangle->winding)
+
+ vdup.u32 w_mask, r12 @ w_mask = { -w, -w, -w, -w }
+ sub r14, r14, #(62 - 12) @ r14 = shift - (62 - FIXED_BITS)
+
+ vshll.u16 uvrg_base, uvrg0, #16 @ uvrg_base = uvrg0 << 16
+ vdup.u32 r_shift, r14 @ r_shift = { shift, shift, shift, shift }
+
+ vorr.u32 uvrg_base, #0x8000
+ vabs.s32 ga_uvrg_x, ga_uvrg_x @ ga_uvrg_x = abs(ga_uvrg_x)
+
+ vmov area_r_s, s0 @ area_r_s = triangle_reciprocal
+ vabs.s32 ga_uvrg_y, ga_uvrg_y @ ga_uvrg_y = abs(ga_uvrg_y)
+
+ @ 64-bit products of |gradient| * reciprocal, then shifted into the
+ @ fixed-point position computed above.
+ vmull.u32 gw_rg_x, ga_rg_x, d0[0]
+ vmull.u32 gw_uv_x, ga_uv_x, d0[0]
+ vmull.u32 gw_rg_y, ga_rg_y, d0[0]
+ vmull.u32 gw_uv_y, ga_uv_y, d0[0]
+
+ vshl.u64 gw_rg_x, gw_rg_x, r_shift
+ vshl.u64 gw_uv_x, gw_uv_x, r_shift
+ vshl.u64 gw_rg_y, gw_rg_y, r_shift
+ vshl.u64 gw_uv_y, gw_uv_y, r_shift
+
+ @ Re-apply the sign (xor/sub two's-complement negate), combined with the
+ @ winding mask so a reversed triangle flips all gradient signs.
+ veor.u32 gs_uvrg_x, gs_uvrg_x, w_mask
+ vmovn.u64 g_uv_x, gw_uv_x
+
+ veor.u32 gs_uvrg_y, gs_uvrg_y, w_mask
+ vmovn.u64 g_rg_x, gw_rg_x
+
+ veor.u32 g_uvrg_x, g_uvrg_x, gs_uvrg_x
+ vmovn.u64 g_uv_y, gw_uv_y
+
+ vsub.u32 g_uvrg_x, g_uvrg_x, gs_uvrg_x
+ vmovn.u64 g_rg_y, gw_rg_y
+
+ veor.u32 g_uvrg_y, g_uvrg_y, gs_uvrg_y
+ mov ga_bx, ga_bx, lsl #13
+
+ vsub.u32 g_uvrg_y, g_uvrg_y, gs_uvrg_y
+ mov ga_by, ga_by, lsl #13
+
+ vdup.u32 x0_y0, x0
+ umull gw_bx_l, gw_bx_h, ga_bx, area_r_s
+
+ vshl.u32 g_uvrg_x, g_uvrg_x, #4
+ vshl.u32 g_uvrg_y, g_uvrg_y, #4
+
+ umull gw_by_l, gw_by_h, ga_by, area_r_s
+ vmls.s32 uvrg_base, ga_uvrg_x, x0_y0[0]
+
+ eor gs_bx, gs_bx, r12
+ vadd.u32 uvrg_dx2, uvrg_dx1, uvrg_dx1
+
+ veor.u32 uvrg_dx0, uvrg_dx0, uvrg_dx0
+ eor gs_by, gs_by, r12
+
+ rsb r11, r14, #0 @ r11 = negative shift for scalar lsr
+ add store_a, psx_gpu, #psx_gpu_uvrg_offset
+
+ sub r11, r11, #(32 - 13)
+
+ add store_b, store_a, #16
+ mov store_inc, #32
+
+ @ Store uvrg base, the 0x/1x/2x/3x per-x step table, and the per-y step,
+ @ interleaved with the final scalar b gradient computation.
+ vadd.u32 uvrg_dx3, uvrg_dx2, uvrg_dx1
+ vst1.u32 { uvrg_base }, [ store_a, : 128 ], store_inc
+
+ vst1.u32 { uvrg_dx1 }, [ store_b, : 128 ], store_inc
+ mov g_bx, gw_bx_h, lsr r11
+
+ vst1.u32 { g_uvrg_y }, [ store_a, : 128 ], store_inc
+ mov g_by, gw_by_h, lsr r11
+
+ vst4.u32 { uvrg_dx0l, uvrg_dx1l, uvrg_dx2l, uvrg_dx3l }, \
+ [ store_b, : 128 ], store_inc
+ eor g_bx, g_bx, gs_bx
+
+ vst4.u32 { uvrg_dx0h, uvrg_dx1h, uvrg_dx2h, uvrg_dx3h }, \
+ [ store_b, : 128 ], store_inc
+ sub g_bx, g_bx, gs_bx
+
+ lsl g_bx, g_bx, #4
+ eor g_by, g_by, gs_by
+
+ mls b_base, g_bx, x0, b_base
+ sub g_by, g_by, gs_by
+
+ lsl g_by, g_by, #4
+ mov g_bx0, #0
+
+ add g_bx2, g_bx, g_bx
+ add g_bx3, g_bx, g_bx2
+
+ @ Stores { b_dx*0, b_dx*1, b_dx*2, b_dx*3, b, b_dy } contiguously,
+ @ matching the psx_gpu_b_dx/b/b_dy offsets above.
+ stmia store_b, { g_bx0, g_bx, g_bx2, g_bx3, b_base, g_by }
+
+ ldmia sp!, { r4 - r11, pc }
+
+
+@ ---------------------------------------------------------------------------
+@ Register aliases for the setup_spans_* family. Note y_a/y_b/y_c reuse the
+@ vertex pointer registers r1-r3: the pointers appear to be dead once the
+@ coordinates are loaded in setup_spans_prologue() (verify before reordering).
+@ Several alias groups (edge_alt_*, span_*) also reuse registers across
+@ phases of the routines.
+@ ---------------------------------------------------------------------------
+#define psx_gpu r0
+#define v_a r1
+#define v_b r2
+#define v_c r3
+
+#define temp r14
+
+#define x_a r4
+#define x_b r5
+#define x_c r6
+#define y_a r1
+#define y_b r2
+#define y_c r3
+
+#define height_minor_a r7
+#define height_minor_b r8
+#define height_major r9
+#define height r9
+
+#define reciprocal_table_ptr r10
+
+#define edge_alt_low r4
+#define edge_alt_high r5
+#define edge_dx_dy_alt r6
+#define edge_shift_alt r10
+
+#define edge_dx_dy_alt_low r4
+#define edge_dx_dy_alt_high r5
+
+#define span_edge_data r4
+#define span_uvrg_offset r5
+#define span_b_offset r6
+
+#define clip r14
+
+#define b r11
+#define b_dy r12
+
+
+@ NEON aliases: edges are tracked as 64-bit fixed-point x positions (left,
+@ right, and the "alternate" minor edge) advanced two rows per q register.
+#define alternate_x q0
+#define alternate_dx_dy q1
+#define alternate_x_32 q2
+
+#define alternate_x_low d0
+#define alternate_x_high d1
+#define alternate_dx_dy_low d2
+#define alternate_dx_dy_high d3
+#define alternate_x_32_low d4
+#define alternate_x_32_high d5
+
+#define left_x q3
+#define right_x q4
+#define left_dx_dy q5
+#define right_dx_dy q6
+#define left_edge q7
+#define right_edge q8
+
+#define left_x_low d6
+#define left_x_high d7
+#define right_x_low d8
+#define right_x_high d9
+#define left_dx_dy_low d10
+#define left_dx_dy_high d11
+#define right_dx_dy_low d12
+#define right_dx_dy_high d13
+#define left_edge_low d14
+#define left_edge_high d15
+#define right_edge_low d16
+#define right_edge_high d17
+
+#define y_mid_point d18
+#define c_0x0004 d19
+
+#define left_right_x_16 q11
+#define span_shifts_y q12
+#define c_0x0001 q13
+
+#define span_shifts d24
+#define y_x4 d25
+#define c_0xFFFE d26
+#define c_0x0007 d27
+
+#define left_right_x_16_low d22
+#define left_right_x_16_high d23
+
+#define uvrg q14
+#define uvrg_dy q15
+
+#define alternate_x_16 d4
+
+#define v_clip q3
+#define v_clip_low d6
+
+#define right_x_32 q10
+#define left_x_32 q11
+#define alternate_select d24
+
+#define right_x_32_low d20
+#define right_x_32_high d21
+#define left_x_32_low d22
+#define left_x_32_high d23
+
+#define edges_xy q0
+#define edges_dx_dy d2
+#define edge_shifts d3
+#define edge_shifts_64 q2
+
+#define edges_xy_left d0
+#define edges_xy_right d1
+
+#define height_reciprocals d6
+#define heights d7
+
+#define widths d8
+#define c_0x01 d9
+#define x_starts d10
+#define x_ends d11
+
+#define heights_b d12
+#define edges_dx_dy_64 q10
+
+#define edges_dx_dy_64_left d20
+#define edges_dx_dy_64_right d21
+@ NOTE: comments must stay OUTSIDE the macro bodies below -- an @ comment on
+@ a backslash-continued line would comment out the remainder of the line
+@ after cpp joins the continuations.
+
+@ setup_spans_prologue(): save registers, load the three vertex x/y
+@ coordinates (sign-extended s16), the current uvrg/uvrg_dy interpolants,
+@ and the address of the 1/height reciprocal_table.
+#define setup_spans_prologue() \
+ stmdb sp!, { r4 - r11, lr }; \
+ \
+ ldrsh x_a, [ v_a, #8 ]; \
+ ldrsh x_b, [ v_b, #8 ]; \
+ ldrsh x_c, [ v_c, #8 ]; \
+ ldrsh y_a, [ v_a, #10 ]; \
+ ldrsh y_b, [ v_b, #10 ]; \
+ ldrsh y_c, [ v_c, #10 ]; \
+ \
+ add temp, psx_gpu, #psx_gpu_uvrg_offset; \
+ vld1.32 { uvrg }, [ temp ]; \
+ add temp, psx_gpu, #psx_gpu_uvrg_dy_offset; \
+ vld1.32 { uvrg_dy }, [ temp ]; \
+ movw reciprocal_table_ptr, :lower16:reciprocal_table; \
+ movt reciprocal_table_ptr, :upper16:reciprocal_table; \
+ \
+ vmov.u32 c_0x01, #0x01 \
+
+@ Load the b interpolant and its per-y step from psx_gpu.
+#define setup_spans_load_b() \
+ ldr b, [ psx_gpu, #psx_gpu_b_offset ]; \
+ ldr b_dy, [ psx_gpu, #psx_gpu_b_dy_offset ] \
+
+@ setup_spans_prologue_b(): point at the per-span output arrays and load the
+@ viewport x clamp bounds plus the small constants used per 4-span batch.
+#define setup_spans_prologue_b() \
+ add span_uvrg_offset, psx_gpu, #psx_gpu_span_uvrg_offset_offset; \
+ add temp, psx_gpu, #psx_gpu_viewport_start_x_offset; \
+ \
+ add span_edge_data, psx_gpu, #psx_gpu_span_edge_data_offset; \
+ vmov.u16 c_0x0004, #0x0004; \
+ \
+ add span_b_offset, psx_gpu, #psx_gpu_span_b_offset_offset; \
+ vmov.u16 c_0x0001, #0x0001; \
+ \
+ vld1.u16 { left_edge_low[], left_edge_high[] }, [ temp ]; \
+ add temp, psx_gpu, #psx_gpu_viewport_end_x_offset; \
+ \
+ vld1.u16 { right_edge_low[], right_edge_high[] }, [ temp ]; \
+ vadd.u16 right_edge, right_edge, c_0x0001; \
+ \
+ vmov.u16 c_0x0007, #0x0007; \
+ vmvn.u16 c_0xFFFE, #0x0001 \
+
+
+@ compute_edge_delta_x2(): both major edges share one height; convert the
+@ table entry into a reciprocal (high 20 bits) and a shift (low bits, masked
+@ with vbic), then form the 64-bit edge x start and per-row dx/dy.
+#define compute_edge_delta_x2() \
+ ldr temp, [ reciprocal_table_ptr, height, lsl #2 ]; \
+ \
+ vdup.u32 heights, height; \
+ vsub.u32 widths, x_ends, x_starts; \
+ \
+ vdup.u32 edge_shifts, temp; \
+ vsub.u32 heights_b, heights, c_0x01; \
+ vshr.u32 height_reciprocals, edge_shifts, #12; \
+ \
+ vmla.s32 heights_b, x_starts, heights; \
+ vbic.u16 edge_shifts, #0xE0; \
+ vmul.s32 edges_dx_dy, widths, height_reciprocals; \
+ vmull.s32 edges_xy, heights_b, height_reciprocals \
+
+#define width_alt r6
+#define height_reciprocal_alt r11
+#define height_b_alt r12
+
+@ compute_edge_delta_x3(): as x2 but for three distinct edge heights; the
+@ third ("alternate") edge is computed on the ARM side in parallel with the
+@ two NEON edges.
+#define compute_edge_delta_x3(start_c, height_a, height_b) \
+ vmov.u32 heights, height_a, height_b; \
+ ldr temp, [ reciprocal_table_ptr, height_a, lsl #2 ]; \
+ vmov.u32 edge_shifts[0], temp; \
+ ldr temp, [ reciprocal_table_ptr, height_b, lsl #2 ]; \
+ vmov.u32 edge_shifts[1], temp; \
+ ldr edge_shift_alt, [ reciprocal_table_ptr, height_minor_b, lsl #2 ]; \
+ \
+ vsub.u32 widths, x_ends, x_starts; \
+ sub width_alt, x_c, start_c; \
+ \
+ vsub.u32 heights_b, heights, c_0x01; \
+ sub height_b_alt, height_minor_b, #1; \
+ \
+ vshr.u32 height_reciprocals, edge_shifts, #12; \
+ lsr height_reciprocal_alt, edge_shift_alt, #12; \
+ \
+ vmla.s32 heights_b, x_starts, heights; \
+ mla height_b_alt, height_minor_b, start_c, height_b_alt; \
+ \
+ vbic.u16 edge_shifts, #0xE0; \
+ and edge_shift_alt, edge_shift_alt, #0x1F; \
+ \
+ vmul.s32 edges_dx_dy, widths, height_reciprocals; \
+ mul edge_dx_dy_alt, width_alt, height_reciprocal_alt; \
+ \
+ vmull.s32 edges_xy, heights_b, height_reciprocals; \
+ smull edge_alt_low, edge_alt_high, height_b_alt, height_reciprocal_alt \
+
+
+@ Step the packed { y, y } x4 row counter up or down by one 4-row batch.
+#define setup_spans_adjust_y_up() \
+ vsub.u32 y_x4, y_x4, c_0x0004 \
+
+#define setup_spans_adjust_y_down() \
+ vadd.u32 y_x4, y_x4, c_0x0004 \
+
+@ Step the uvrg (NEON) and b (ARM) interpolants by one row.
+#define setup_spans_adjust_interpolants_up() \
+ vsub.u32 uvrg, uvrg, uvrg_dy; \
+ sub b, b, b_dy \
+
+#define setup_spans_adjust_interpolants_down() \
+ vadd.u32 uvrg, uvrg, uvrg_dy; \
+ add b, b, b_dy \
+
+
+@ Advance the interpolants by `clip` rows at once when the triangle is
+@ clipped against the viewport top/bottom.
+#define setup_spans_clip_interpolants_increment() \
+ mla b, b_dy, clip, b; \
+ vmla.s32 uvrg, uvrg_dy, v_clip \
+
+#define setup_spans_clip_interpolants_decrement() \
+ mls b, b_dy, clip, b; \
+ vmls.s32 uvrg, uvrg_dy, v_clip \
+
+#define setup_spans_clip_alternate_yes() \
+ smlal edge_alt_low, edge_alt_high, edge_dx_dy_alt, clip \
+
+#define setup_spans_clip_alternate_no() \
+
+@ Clip the edge walkers and interpolants by `clip` rows in the given
+@ direction; alternate_active selects whether the third edge participates.
+#define setup_spans_clip(direction, alternate_active) \
+ vdup.u32 v_clip, clip; \
+ setup_spans_clip_alternate_##direction(); \
+ setup_spans_clip_interpolants_##direction(); \
+ vmlal.s32 edges_xy, edges_dx_dy, v_clip_low \
+
+
+@ Expand the two edge walkers to 64-bit, apply the per-edge shifts, and set
+@ up left/right x plus 2-row dx/dy steps (high lane = next row).
+#define setup_spans_adjust_edges_alternate_no(left_index, right_index) \
+ vmovl.s32 edge_shifts_64, edge_shifts; \
+ vmovl.s32 edges_dx_dy_64, edges_dx_dy; \
+ \
+ vshl.s64 edges_xy, edges_xy, edge_shifts_64; \
+ vshl.s64 edges_dx_dy_64, edges_dx_dy_64, edge_shifts_64; \
+ \
+ vmov left_x_low, edges_xy_##left_index; \
+ vmov right_x_low, edges_xy_##right_index; \
+ \
+ vmov left_dx_dy_low, edges_dx_dy_64_##left_index; \
+ vmov left_dx_dy_high, edges_dx_dy_64_##left_index; \
+ vmov right_dx_dy_low, edges_dx_dy_64_##right_index; \
+ vmov right_dx_dy_high, edges_dx_dy_64_##right_index; \
+ \
+ vadd.u64 left_x_high, left_x_low, left_dx_dy_low; \
+ vadd.u64 right_x_high, right_x_low, right_dx_dy_low; \
+ \
+ vadd.u64 left_dx_dy, left_dx_dy, left_dx_dy; \
+ vadd.u64 right_dx_dy, right_dx_dy, right_dx_dy \
+
+
+@ As above, plus the alternate (minor) edge: its 64-bit x and dx/dy are
+@ assembled from the ARM registers with a manual shift/or, and y_mid_point
+@ records where the span switches from one minor edge to the other.
+#define setup_spans_adjust_edges_alternate_yes(left_index, right_index) \
+ setup_spans_adjust_edges_alternate_no(left_index, right_index); \
+ \
+ vdup.u16 y_mid_point, y_b; \
+ rsb temp, edge_shift_alt, #32; \
+ \
+ lsl edge_alt_high, edge_alt_high, edge_shift_alt; \
+ orr edge_alt_high, edge_alt_high, edge_alt_low, lsr temp; \
+ lsl edge_alt_low, edge_alt_low, edge_shift_alt; \
+ vmov alternate_x_low, edge_alt_low, edge_alt_high; \
+ \
+ asr edge_dx_dy_alt_high, edge_dx_dy_alt, temp; \
+ lsl edge_dx_dy_alt_low, edge_dx_dy_alt, edge_shift_alt; \
+ vmov alternate_dx_dy_low, edge_dx_dy_alt_low, edge_dx_dy_alt_high; \
+ vmov alternate_dx_dy_high, alternate_dx_dy_low; \
+ \
+ vadd.u64 alternate_x_high, alternate_x_low, alternate_dx_dy_low; \
+ vadd.u64 alternate_dx_dy, alternate_dx_dy, alternate_dx_dy \
+
+
+@ Per-row selector: which of the 4 rows in this batch lie on the alternate
+@ edge's side of y_mid_point.
+#define setup_spans_y_select_up() \
+ vclt.s16 alternate_select, y_x4, y_mid_point \
+
+#define setup_spans_y_select_down() \
+ vcgt.s16 alternate_select, y_x4, y_mid_point \
+
+
+@ Substitute the alternate edge x into the left or right lane where selected.
+#define setup_spans_alternate_select_left() \
+ vbit.u16 left_right_x_16_low, alternate_x_16, alternate_select \
+
+#define setup_spans_alternate_select_right() \
+ vbit.u16 left_right_x_16_high, alternate_x_16, alternate_select \
+
+
+@ Emit 4 spans: take the integer part (>>32) of the edge walkers for 4 rows,
+@ clamp to the viewport, derive per-span block count and right-edge mask
+@ shift, and store uvrg/b per row plus the packed edge_data entries.
+#define setup_spans_set_x4_alternate_yes(alternate, direction) \
+ vshrn.s64 alternate_x_32_low, alternate_x, #32; \
+ vshrn.s64 left_x_32_low, left_x, #32; \
+ vshrn.s64 right_x_32_low, right_x, #32; \
+ \
+ vadd.u64 alternate_x, alternate_x, alternate_dx_dy; \
+ vadd.u64 left_x, left_x, left_dx_dy; \
+ vadd.u64 right_x, right_x, right_dx_dy; \
+ \
+ vshrn.s64 alternate_x_32_high, alternate_x, #32; \
+ vshrn.s64 left_x_32_high, left_x, #32; \
+ vshrn.s64 right_x_32_high, right_x, #32; \
+ \
+ vadd.u64 alternate_x, alternate_x, alternate_dx_dy; \
+ vadd.u64 left_x, left_x, left_dx_dy; \
+ vadd.u64 right_x, right_x, right_dx_dy; \
+ \
+ vmovn.u32 alternate_x_16, alternate_x_32; \
+ setup_spans_y_select_##direction(); \
+ vmovn.u32 left_right_x_16_low, left_x_32; \
+ \
+ vmovn.u32 left_right_x_16_high, right_x_32; \
+ setup_spans_alternate_select_##alternate(); \
+ \
+ vst1.u32 { uvrg }, [ span_uvrg_offset, :128 ]!; \
+ str b, [ span_b_offset ], #4; \
+ setup_spans_adjust_interpolants_##direction(); \
+ \
+ vmax.s16 left_right_x_16, left_right_x_16, left_edge; \
+ \
+ vst1.u32 { uvrg }, [ span_uvrg_offset, :128 ]!; \
+ str b, [ span_b_offset ], #4; \
+ setup_spans_adjust_interpolants_##direction(); \
+ \
+ vmin.s16 left_right_x_16, left_right_x_16, right_edge; \
+ \
+ vst1.u32 { uvrg }, [ span_uvrg_offset, :128 ]!; \
+ str b, [ span_b_offset ], #4; \
+ setup_spans_adjust_interpolants_##direction(); \
+ \
+ vsub.u16 left_right_x_16_high, left_right_x_16_high, left_right_x_16_low; \
+ vadd.u16 left_right_x_16_high, left_right_x_16_high, c_0x0007; \
+ vand.u16 span_shifts, left_right_x_16_high, c_0x0007; \
+ \
+ vst1.u32 { uvrg }, [ span_uvrg_offset, :128 ]!; \
+ str b, [ span_b_offset ], #4; \
+ setup_spans_adjust_interpolants_##direction(); \
+ \
+ vshr.u16 left_right_x_16_high, left_right_x_16_high, #3; \
+ vshl.u16 span_shifts, c_0xFFFE, span_shifts; \
+ \
+ vst4.u16 { left_right_x_16, span_shifts_y }, [ span_edge_data ]!; \
+ \
+ setup_spans_adjust_y_##direction() \
+
+
+@ Same as the _yes variant but with no alternate edge to merge in.
+#define setup_spans_set_x4_alternate_no(alternate, direction) \
+ vshrn.s64 left_x_32_low, left_x, #32; \
+ vshrn.s64 right_x_32_low, right_x, #32; \
+ \
+ vadd.u64 left_x, left_x, left_dx_dy; \
+ vadd.u64 right_x, right_x, right_dx_dy; \
+ \
+ vshrn.s64 left_x_32_high, left_x, #32; \
+ vshrn.s64 right_x_32_high, right_x, #32; \
+ \
+ vadd.u64 left_x, left_x, left_dx_dy; \
+ vadd.u64 right_x, right_x, right_dx_dy; \
+ \
+ vmovn.u32 left_right_x_16_low, left_x_32; \
+ vmovn.u32 left_right_x_16_high, right_x_32; \
+ \
+ vst1.u32 { uvrg }, [ span_uvrg_offset, :128 ]!; \
+ str b, [ span_b_offset ], #4; \
+ setup_spans_adjust_interpolants_##direction(); \
+ \
+ vmax.s16 left_right_x_16, left_right_x_16, left_edge; \
+ \
+ vst1.u32 { uvrg }, [ span_uvrg_offset, :128 ]!; \
+ str b, [ span_b_offset ], #4; \
+ setup_spans_adjust_interpolants_##direction(); \
+ \
+ vmin.s16 left_right_x_16, left_right_x_16, right_edge; \
+ \
+ vst1.u32 { uvrg }, [ span_uvrg_offset, :128 ]!; \
+ str b, [ span_b_offset ], #4; \
+ setup_spans_adjust_interpolants_##direction(); \
+ \
+ vsub.u16 left_right_x_16_high, left_right_x_16_high, left_right_x_16_low; \
+ vadd.u16 left_right_x_16_high, left_right_x_16_high, c_0x0007; \
+ vand.u16 span_shifts, left_right_x_16_high, c_0x0007; \
+ \
+ vst1.u32 { uvrg }, [ span_uvrg_offset, :128 ]!; \
+ str b, [ span_b_offset ], #4; \
+ setup_spans_adjust_interpolants_##direction(); \
+ \
+ vshl.u16 span_shifts, c_0xFFFE, span_shifts; \
+ vshr.u16 left_right_x_16_high, left_right_x_16_high, #3; \
+ \
+ vst4.u16 { left_right_x_16, span_shifts_y }, [ span_edge_data ]!; \
+ \
+ setup_spans_adjust_y_##direction() \
+
+
+#define edge_adjust_low r11
+#define edge_adjust_high r12
+
+@ Back the alternate edge up by height_minor_a rows (64-bit subtract) so it
+@ starts from the correct end when walking the opposite direction.
+#define setup_spans_alternate_adjust_yes() \
+ smull edge_adjust_low, edge_adjust_high, edge_dx_dy_alt, height_minor_a; \
+ subs edge_alt_low, edge_alt_low, edge_adjust_low; \
+ sbc edge_alt_high, edge_alt_high, edge_adjust_high \
+
+#define setup_spans_alternate_adjust_no() \
+
+@ setup_spans_down(): walk spans from y_a downward. Clips against the
+@ viewport bottom (shrinking height) and top (advancing the walkers by
+@ `clip` rows), then emits spans 4 rows per iteration of local label 2.
+@ Local labels 0/1/2 are numeric so each macro expansion is self-contained.
+#define setup_spans_down(left_index, right_index, alternate, alternate_active) \
+ setup_spans_alternate_adjust_##alternate_active(); \
+ setup_spans_load_b(); \
+ \
+ ldrsh temp, [ psx_gpu, #psx_gpu_viewport_end_y_offset ]; \
+ subs y_c, y_c, temp; \
+ subgt height, height, y_c; \
+ addgt height, height, #1; \
+ \
+ ldrsh temp, [ psx_gpu, #psx_gpu_viewport_start_y_offset ]; \
+ subs clip, temp, y_a; \
+ ble 0f; \
+ \
+ sub height, height, clip; \
+ add y_a, y_a, clip; \
+ setup_spans_clip(increment, alternate_active); \
+ \
+ 0: \
+ cmp height, #0; \
+ ble 1f; \
+ \
+ orr temp, y_a, y_a, lsl #16; \
+ add temp, temp, #(1 << 16); \
+ add y_a, temp, #2; \
+ add y_a, y_a, #(2 << 16); \
+ vmov.u32 y_x4, temp, y_a; \
+ \
+ setup_spans_adjust_edges_alternate_##alternate_active(left_index, \
+ right_index); \
+ setup_spans_prologue_b(); \
+ \
+ strh height, [ psx_gpu, #psx_gpu_num_spans_offset ]; \
+ \
+ 2: \
+ setup_spans_set_x4_alternate_##alternate_active(alternate, down); \
+ subs height, height, #4; \
+ bhi 2b; \
+ \
+ 1: \
+
+
+@ Step the alternate edge one row before walking upward.
+#define setup_spans_alternate_pre_increment_yes() \
+ adds edge_alt_low, edge_alt_low, edge_dx_dy_alt; \
+ adc edge_alt_high, edge_alt_high, edge_dx_dy_alt, asr #31 \
+
+#define setup_spans_alternate_pre_increment_no() \
+
+
+#define setup_spans_up_decrement_yes() \
+ suble height, height, #1 \
+
+#define setup_spans_up_decrement_no() \
+
+
+@ setup_spans_up(): mirror of setup_spans_down, walking from y_a - 1 upward
+@ and clipping against viewport top (shrink) / bottom (advance by clip).
+#define setup_spans_up(left_index, right_index, alternate, alternate_active) \
+ setup_spans_alternate_adjust_##alternate_active(); \
+ setup_spans_load_b(); \
+ sub y_a, y_a, #1; \
+ \
+ ldrh temp, [ psx_gpu, #psx_gpu_viewport_start_y_offset ]; \
+ subs temp, temp, y_c; \
+ subgt height, height, temp; \
+ setup_spans_up_decrement_##alternate_active(); \
+ \
+ ldrh temp, [ psx_gpu, #psx_gpu_viewport_end_y_offset ]; \
+ subs clip, y_a, temp; \
+ ble 0f; \
+ \
+ sub height, height, clip; \
+ sub y_a, y_a, clip; \
+ setup_spans_clip(decrement, alternate_active); \
+ \
+ 0: \
+ cmp height, #0; \
+ ble 1f; \
+ \
+ orr temp, y_a, y_a, lsl #16; \
+ sub temp, temp, #(1 << 16); \
+ sub y_a, temp, #2; \
+ sub y_a, y_a, #(2 << 16); \
+ vmov.u32 y_x4, temp, y_a; \
+ \
+ vaddw.s32 edges_xy, edges_xy, edges_dx_dy; \
+ \
+ setup_spans_alternate_pre_increment_##alternate_active(); \
+ setup_spans_adjust_edges_alternate_##alternate_active(left_index, \
+ right_index); \
+ setup_spans_adjust_interpolants_up(); \
+ setup_spans_prologue_b(); \
+ \
+ strh height, [ psx_gpu, #psx_gpu_num_spans_offset ]; \
+ \
+ 2: \
+ setup_spans_set_x4_alternate_##alternate_active(alternate, up); \
+ subs height, height, #4; \
+ bhi 2b; \
+ \
+ 1: \
+
+
+@ Restore callee-saved registers and return.
+#define setup_spans_epilogue() \
+ ldmia sp!, { r4 - r11, pc } \
+
+
+@ Triangle with both minor edges on the same side, apex at the bottom:
+@ spans walk upward from v_a; the alternate (minor) edge machinery is active.
+#define setup_spans_up_up(minor, major) \
+ setup_spans_prologue(); \
+ sub height_minor_a, y_a, y_b; \
+ sub height_minor_b, y_b, y_c; \
+ sub height, y_a, y_c; \
+ \
+ vdup.u32 x_starts, x_a; \
+ vmov.u32 x_ends, x_c, x_b; \
+ \
+ compute_edge_delta_x3(x_b, height_major, height_minor_a); \
+ setup_spans_up(major, minor, minor, yes); \
+ setup_spans_epilogue() \
+
+@ Entry points: minor edge on the left vs. right side respectively.
+function(setup_spans_up_left)
+ setup_spans_up_up(left, right)
+
+function(setup_spans_up_right)
+ setup_spans_up_up(right, left)
+
+
+@ Apex at the top: spans walk downward from v_a; alternate edge active.
+#define setup_spans_down_down(minor, major) \
+ setup_spans_prologue(); \
+ sub height_minor_a, y_b, y_a; \
+ sub height_minor_b, y_c, y_b; \
+ sub height, y_c, y_a; \
+ \
+ vdup.u32 x_starts, x_a; \
+ vmov.u32 x_ends, x_c, x_b; \
+ \
+ compute_edge_delta_x3(x_b, height_major, height_minor_a); \
+ setup_spans_down(major, minor, minor, yes); \
+ setup_spans_epilogue() \
+
+function(setup_spans_down_left)
+ setup_spans_down_down(left, right)
+
+function(setup_spans_down_right)
+ setup_spans_down_down(right, left)
+
+
+@ Flat-edge triangles: only two edges of a single shared height, so the
+@ alternate edge machinery is disabled (alternate_active = no).
+#define setup_spans_up_flat() \
+ sub height, y_a, y_c; \
+ \
+ compute_edge_delta_x2(); \
+ setup_spans_up(left, right, none, no); \
+ setup_spans_epilogue() \
+
+@ Flat edge at the start (a) vs. end (b) of the walk.
+function(setup_spans_up_a)
+ setup_spans_prologue()
+
+ vmov.u32 x_starts, x_a, x_b
+ vdup.u32 x_ends, x_c
+
+ setup_spans_up_flat()
+
+function(setup_spans_up_b)
+ setup_spans_prologue()
+
+ vdup.u32 x_starts, x_a
+ vmov.u32 x_ends, x_b, x_c
+
+ setup_spans_up_flat()
+
+#define setup_spans_down_flat() \
+ sub height, y_c, y_a; \
+ \
+ compute_edge_delta_x2(); \
+ setup_spans_down(left, right, none, no); \
+ setup_spans_epilogue() \
+
+function(setup_spans_down_a)
+ setup_spans_prologue()
+
+ vmov.u32 x_starts, x_a, x_b
+ vdup.u32 x_ends, x_c
+
+ setup_spans_down_flat()
+
+function(setup_spans_down_b)
+ setup_spans_prologue()
+
+ vdup.u32 x_starts, x_a
+ vmov.u32 x_ends, x_b, x_c
+
+ setup_spans_down_flat()
+
+
+#define middle_y r9
+
+@ Second edge set for setup_spans_up_down (one edge pair per direction).
+#define edges_xy_b q11
+#define edges_dx_dy_b d26
+#define edge_shifts_b d27
+#define edges_dx_dy_and_shifts_b q13
+#define height_increment d20
+
+#define edges_dx_dy_and_shifts q1
+
+#define edges_xy_b_left d22
+#define edges_xy_b_right d23
+
+@ Swap the saved "b" edge set into the active edge registers for the
+@ downward half of the walk.
+#define setup_spans_up_down_load_edge_set_b() \
+ vmov edges_xy, edges_xy_b; \
+ vmov edges_dx_dy_and_shifts, edges_dx_dy_and_shifts_b \
+
+
+@ setup_spans_up_down: the middle vertex (v_a) lies between v_b and v_c in
+@ y, so spans are emitted in two passes: upward from v_a to v_b using edge
+@ set A, then (label 4) downward from v_a to v_c using edge set B. If the
+@ upward half is fully clipped away (label 3), control jumps straight to
+@ the downward setup.
+function(setup_spans_up_down)
+ setup_spans_prologue()
+
+ // s32 middle_y = y_a;
+ sub height_minor_a, y_a, y_b
+ sub height_minor_b, y_c, y_a
+ sub height_major, y_c, y_b
+
+ vmov.u32 x_starts, x_a, x_c
+ vdup.u32 x_ends, x_b
+
+ compute_edge_delta_x3(x_a, height_minor_a, height_major)
+
+ @ Advance the major edge by height_minor_b rows so edge set B starts at
+ @ the middle vertex for the downward pass.
+ mov temp, #0
+ vmov.u32 height_increment, temp, height_minor_b
+ vmlal.s32 edges_xy, edges_dx_dy, height_increment
+
+ vmov edges_xy_b_left, edge_alt_low, edge_alt_high
+ vmov edges_xy_b_right, edges_xy_right
+
+ vmov edge_shifts_b, edge_shifts
+ vmov.u32 edge_shifts_b[0], edge_shift_alt
+
+ vneg.s32 edges_dx_dy_b, edges_dx_dy
+ vmov.u32 edges_dx_dy_b[0], edge_dx_dy_alt
+
+ mov middle_y, y_a
+
+ @ --- upward half: y_a - 1 up to v_b, clipped to the viewport ---
+ setup_spans_load_b()
+ sub y_a, y_a, #1
+
+ ldrh temp, [ psx_gpu, #psx_gpu_viewport_start_y_offset ]
+ subs temp, temp, y_b
+ subgt height_minor_a, height_minor_a, temp
+
+ ldrh temp, [ psx_gpu, #psx_gpu_viewport_end_y_offset ]
+ subs clip, y_a, temp
+ ble 0f
+
+ sub height_minor_a, height_minor_a, clip
+ sub y_a, y_a, clip
+ setup_spans_clip(decrement, no)
+
+ 0:
+ cmp height_minor_a, #0
+ ble 3f
+
+ orr temp, y_a, y_a, lsl #16
+ sub temp, temp, #(1 << 16)
+ sub y_a, temp, #2
+ sub y_a, y_a, #(2 << 16)
+ vmov.u32 y_x4, temp, y_a
+
+ vaddw.s32 edges_xy, edges_xy, edges_dx_dy
+
+ strh height_minor_a, [ psx_gpu, #psx_gpu_num_spans_offset ]
+
+ setup_spans_adjust_edges_alternate_no(left, right);
+ setup_spans_adjust_interpolants_up()
+ setup_spans_up_down_load_edge_set_b()
+
+ setup_spans_prologue_b()
+
+
+ 2:
+ setup_spans_set_x4_alternate_no(none, up)
+ subs height_minor_a, height_minor_a, #4
+ bhi 2b
+
+ @ The 4-row batches may overshoot; height_minor_a is <= 0 here, so these
+ @ adds rewind the output pointers past the overshoot rows.
+ add span_edge_data, span_edge_data, height_minor_a, lsl #3
+ add span_uvrg_offset, span_uvrg_offset, height_minor_a, lsl #4
+ add span_b_offset, span_b_offset, height_minor_a, lsl #2
+
+ @ --- downward half: reload interpolants at middle_y, walk to v_c ---
+ 4:
+ add temp, psx_gpu, #psx_gpu_uvrg_offset
+ vld1.32 { uvrg }, [ temp ]
+ mov y_a, middle_y
+
+ setup_spans_load_b()
+
+ ldrh temp, [ psx_gpu, #psx_gpu_viewport_end_y_offset ]
+ subs y_c, y_c, temp
+ subgt height_minor_b, height_minor_b, y_c
+ addgt height_minor_b, height_minor_b, #1
+
+ ldrh temp, [ psx_gpu, #psx_gpu_viewport_start_y_offset ]
+ subs clip, temp, y_a
+ ble 0f
+
+ sub height_minor_b, height_minor_b, clip
+ add y_a, y_a, clip
+ setup_spans_clip(increment, no)
+
+ 0:
+ cmp height_minor_b, #0
+ ble 1f
+
+ orr temp, y_a, y_a, lsl #16
+ add temp, temp, #(1 << 16)
+ add y_a, temp, #2
+ add y_a, y_a, #(2 << 16)
+ vmov.u32 y_x4, temp, y_a
+
+ setup_spans_adjust_edges_alternate_no(left, right)
+
+ @ Downward spans append to those already emitted by the upward half.
+ ldrh temp, [ psx_gpu, #psx_gpu_num_spans_offset ]
+ add temp, temp, height_minor_b
+ strh temp, [ psx_gpu, #psx_gpu_num_spans_offset ]
+
+ 2:
+ setup_spans_set_x4_alternate_no(none, down)
+ subs height_minor_b, height_minor_b, #4
+ bhi 2b
+
+ 1:
+ setup_spans_epilogue()
+
+ @ Upward half fully clipped: switch to edge set B and join the downward
+ @ path above.
+ 3:
+ setup_spans_up_down_load_edge_set_b()
+ setup_spans_prologue_b()
+ bal 4b
+
+
+#undef span_uvrg_offset
+#undef span_edge_data
+#undef span_b_offset
+#undef left_x
+#undef b
+
+#define psx_gpu r0
+#define num_spans r1
+#define span_uvrg_offset r2
+#define span_edge_data r3
+#define span_b_offset r4
+#define b_dx r5
+#define span_num_blocks r6
+#define y r7
+#define left_x r8
+#define b r9
+#define dither_offset_ptr r10
+#define block_ptr_a r11
+#define fb_ptr r12
+#define num_blocks r14
+
+#define uvrg_dx_ptr r2
+#define texture_mask_ptr r3
+#define dither_shift r8
+#define dither_row r10
+
+#define c_32 r7
+#define b_dx4 r8
+#define b_dx8 r9
+#define block_ptr_b r10
+
+#define block_span_ptr r10
+#define right_mask r8
+
+#define color r2
+#define color_r r3
+#define color_g r4
+#define color_b r5
+
+#undef uvrg
+
+#define u_block q0
+#define v_block q1
+#define r_block q2
+#define g_block q3
+#define b_block q4
+
+#define uv_dx4 d10
+#define rg_dx4 d11
+#define uv_dx8 d12
+#define rg_dx8 d13
+#define b_whole_8 d14
+#define fb_mask_ptrs d15
+
+#define uvrg_dx4 q5
+#define uvrg_dx8 q6
+#define uv_dx8 d12
+#define rg_dx8 d13
+
+#define u_whole q8
+#define v_whole q9
+#define r_whole q10
+#define g_whole q11
+#define b_whole q12
+
+#define u_whole_low d16
+#define u_whole_high d17
+#define v_whole_low d18
+#define v_whole_high d19
+#define r_whole_low d20
+#define r_whole_high d21
+#define g_whole_low d22
+#define g_whole_high d23
+#define b_whole_low d24
+#define b_whole_high d25
+
+#define dx4 q13
+#define dx8 q13
+
+#define u_whole_8 d26
+#define v_whole_8 d27
+#define u_whole_8b d24
+#define r_whole_8 d24
+#define g_whole_8 d25
+
+#define uv_whole_8 q13
+#define uv_whole_8b q14
+
+#define dither_offsets q14
+#define texture_mask q15
+#define texture_mask_u d30
+#define texture_mask_v d31
+
+#define dither_offsets_short d28
+
+#define v_left_x q8
+#define uvrg q9
+#define block_span q10
+
+#define uv d18
+#define rg d19
+
+#define draw_mask q1
+#define draw_mask_edge q13
+#define test_mask q0
+
+#define uvrg_dx q3
+
+#define colors q2
+
+#define setup_blocks_texture_swizzled() /* repack 8-bit U/V into the 4bpp swizzled texel addressing layout */ \
+ vand.u8 u_whole_8b, u_whole_8, texture_mask_u; /* keep a masked copy of U before it is overwritten */ \
+ vsli.u8 u_whole_8, v_whole_8, #4; /* u = (v << 4) | (u & 0xF): V nibble into high half */ \
+ vsri.u8 v_whole_8, u_whole_8b, #4 /* v = (v & 0xF0) | (masked u >> 4): U nibble into low half */ \
+
+#define setup_blocks_texture_unswizzled() /* no-op: linear (unswizzled) textures use the U/V bytes as-is */ \
+
+
+#define setup_blocks_shaded_textured_builder(swizzling) /* emits one routine per swizzle mode */ \
+.align 3; \
+ \
+function(setup_blocks_shaded_textured_dithered_##swizzling##_indirect) /* Gouraud-shaded textured spans -> 64-byte block buffer */ \
+ ldrh num_spans, [ psx_gpu, #psx_gpu_num_spans_offset ]; \
+ add uvrg_dx_ptr, psx_gpu, #psx_gpu_uvrg_dx_offset; \
+ \
+ vld1.u32 { uvrg_dx }, [ uvrg_dx_ptr, :128 ]; \
+ add texture_mask_ptr, psx_gpu, #psx_gpu_texture_mask_width_offset; \
+ \
+ cmp num_spans, #0; \
+ bxeq lr; /* no spans: nothing to do */ \
+ \
+ stmdb sp!, { r4 - r11, r14 }; \
+ vshl.u32 uvrg_dx4, uvrg_dx, #2; /* interpolant step per 4 pixels */ \
+ \
+ ldr b_dx, [ psx_gpu, #psx_gpu_b_dx_offset ]; \
+ vshl.u32 uvrg_dx8, uvrg_dx, #3; /* interpolant step per 8 pixels */ \
+ \
+ vld2.u8 { texture_mask_u[], texture_mask_v[] }, [ texture_mask_ptr, :16 ]; \
+ add span_uvrg_offset, psx_gpu, #psx_gpu_span_uvrg_offset_offset; \
+ \
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ add span_edge_data, psx_gpu, #psx_gpu_span_edge_data_offset; \
+ \
+ add span_b_offset, psx_gpu, #psx_gpu_span_b_offset_offset; \
+ add block_ptr_a, psx_gpu, #psx_gpu_blocks_offset; \
+ \
+ add block_ptr_a, block_ptr_a, num_blocks, lsl #6; /* each block entry is 64 bytes */ \
+ \
+ 0: /* span loop */ \
+ vmov.u8 fb_mask_ptrs, #0; \
+ \
+ ldrh span_num_blocks, [ span_edge_data, #edge_data_num_blocks_offset ]; \
+ add dither_offset_ptr, psx_gpu, #psx_gpu_dither_table_offset; \
+ \
+ ldrh y, [ span_edge_data, #edge_data_y_offset ]; \
+ ldr fb_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]; \
+ \
+ cmp span_num_blocks, #0; \
+ beq 1f; /* empty span */ \
+ \
+ ldrh left_x, [ span_edge_data, #edge_data_left_x_offset ]; \
+ add num_blocks, span_num_blocks, num_blocks; \
+ \
+ cmp num_blocks, #MAX_BLOCKS; \
+ bgt 2f; /* block buffer would overflow: flush, then retry */ \
+ \
+ 3: /* per-span setup: seed interpolants at left_x */ \
+ ldr b, [ span_b_offset ]; \
+ add fb_ptr, fb_ptr, y, lsl #11; /* 2048 bytes per VRAM scanline */ \
+ \
+ vdup.u32 v_left_x, left_x; \
+ and y, y, #0x3; /* dither table row */ \
+ \
+ ldr dither_row, [ dither_offset_ptr, y, lsl #2 ]; \
+ add fb_ptr, fb_ptr, left_x, lsl #1; \
+ \
+ mla b, b_dx, left_x, b; \
+ and dither_shift, left_x, #0x03; \
+ \
+ vld1.u32 { uvrg }, [ span_uvrg_offset, :128 ]; \
+ vshr.u32 uvrg_dx, uvrg_dx4, #2; /* recover per-pixel step from dx4 */ \
+ \
+ mov dither_shift, dither_shift, lsl #3; \
+ vmla.u32 uvrg, uvrg_dx, v_left_x; \
+ \
+ mov c_32, #32; \
+ subs span_num_blocks, span_num_blocks, #1; \
+ \
+ mov dither_row, dither_row, ror dither_shift; /* align dither row to left_x & 3 */ \
+ mov b_dx4, b_dx, lsl #2; \
+ \
+ vdup.u32 dither_offsets_short, dither_row; \
+ add block_span_ptr, psx_gpu, #psx_gpu_u_block_span_offset; \
+ \
+ vdup.u32 b_block, b; \
+ vshll.s8 dither_offsets, dither_offsets_short, #4; \
+ \
+ vdup.u32 u_block, uv[0]; \
+ mov b_dx8, b_dx, lsl #3; \
+ \
+ vdup.u32 v_block, uv[1]; \
+ vdup.u32 r_block, rg[0]; \
+ vdup.u32 g_block, rg[1]; \
+ \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 u_block, u_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 v_block, v_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 r_block, r_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 g_block, g_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]; \
+ \
+ vadd.u32 b_block, b_block, block_span; \
+ add block_ptr_b, block_ptr_a, #16; \
+ \
+ vshrn.u32 u_whole_low, u_block, #16; /* take integer part of 16.16 fixed point */ \
+ vshrn.u32 v_whole_low, v_block, #16; \
+ vshrn.u32 r_whole_low, r_block, #16; \
+ vshrn.u32 g_whole_low, g_block, #16; \
+ \
+ vdup.u32 dx4, uv_dx4[0]; \
+ vshrn.u32 b_whole_low, b_block, #16; \
+ \
+ vaddhn.u32 u_whole_high, u_block, dx4; /* high 4 pixels = low + 4*dx, narrowed */ \
+ vdup.u32 dx4, uv_dx4[1]; \
+ \
+ vaddhn.u32 v_whole_high, v_block, dx4; \
+ vdup.u32 dx4, rg_dx4[0]; \
+ \
+ vaddhn.u32 r_whole_high, r_block, dx4; \
+ vdup.u32 dx4, rg_dx4[1]; \
+ \
+ vaddhn.u32 g_whole_high, g_block, dx4; \
+ vdup.u32 dx4, b_dx4; \
+ \
+ vaddhn.u32 b_whole_high, b_block, dx4; \
+ vdup.u32 dx8, uv_dx8[0]; \
+ \
+ vadd.u32 u_block, u_block, dx8; /* advance accumulators by 8 pixels */ \
+ vdup.u32 dx8, uv_dx8[1]; \
+ \
+ vadd.u32 v_block, v_block, dx8; \
+ vdup.u32 dx8, rg_dx8[0]; \
+ \
+ vadd.u32 r_block, r_block, dx8; \
+ vdup.u32 dx8, rg_dx8[1]; \
+ \
+ vadd.u32 g_block, g_block, dx8; \
+ vdup.u32 dx8, b_dx8; \
+ \
+ vadd.u32 b_block, b_block, dx8; \
+ vmovn.u16 u_whole_8, u_whole; \
+ \
+ vmovn.u16 v_whole_8, v_whole; \
+ \
+ vmovn.u16 b_whole_8, b_whole; \
+ pld [ fb_ptr ]; \
+ vmov.u32 fb_mask_ptrs[1], fb_ptr; \
+ \
+ vand.u8 uv_whole_8, uv_whole_8, texture_mask; \
+ setup_blocks_texture_##swizzling(); \
+ \
+ vmovn.u16 r_whole_8, r_whole; \
+ beq 5f; /* single-block span: go straight to edge handling */ \
+ \
+ 4: /* block loop: store previous block while computing the next */ \
+ vmovn.u16 g_whole_8, g_whole; \
+ vshrn.u32 u_whole_low, u_block, #16; \
+ \
+ vst2.u8 { u_whole_8, v_whole_8 }, [ block_ptr_a, :128 ], c_32; \
+ vshrn.u32 v_whole_low, v_block, #16; \
+ \
+ vst1.u32 { r_whole_8, g_whole_8 }, [ block_ptr_b, :128 ], c_32; \
+ vshrn.u32 r_whole_low, r_block, #16; \
+ \
+ vst1.u32 { b_whole_8, fb_mask_ptrs }, [ block_ptr_a, :128 ], c_32; \
+ vshrn.u32 g_whole_low, g_block, #16; \
+ \
+ vdup.u32 dx4, uv_dx4[0]; \
+ vshrn.u32 b_whole_low, b_block, #16; \
+ \
+ vaddhn.u32 u_whole_high, u_block, dx4; \
+ vdup.u32 dx4, uv_dx4[1]; \
+ \
+ vaddhn.u32 v_whole_high, v_block, dx4; \
+ vdup.u32 dx4, rg_dx4[0]; \
+ \
+ vaddhn.u32 r_whole_high, r_block, dx4; \
+ vdup.u32 dx4, rg_dx4[1]; \
+ \
+ vaddhn.u32 g_whole_high, g_block, dx4; \
+ vdup.u32 dx4, b_dx4; \
+ \
+ vaddhn.u32 b_whole_high, b_block, dx4; \
+ vdup.u32 dx8, uv_dx8[0]; \
+ \
+ vadd.u32 u_block, u_block, dx8; \
+ vdup.u32 dx8, uv_dx8[1]; \
+ \
+ vadd.u32 v_block, v_block, dx8; \
+ vdup.u32 dx8, rg_dx8[0]; \
+ \
+ vadd.u32 r_block, r_block, dx8; \
+ vdup.u32 dx8, rg_dx8[1]; \
+ \
+ vadd.u32 g_block, g_block, dx8; \
+ vdup.u32 dx8, b_dx8; \
+ \
+ vadd.u32 b_block, b_block, dx8; \
+ vmovn.u16 u_whole_8, u_whole; \
+ \
+ add fb_ptr, fb_ptr, #16; /* 8 pixels * 2 bytes */ \
+ vmovn.u16 v_whole_8, v_whole; \
+ \
+ vst1.u32 { dither_offsets }, [ block_ptr_b, :128 ], c_32; \
+ vmovn.u16 b_whole_8, b_whole; \
+ \
+ pld [ fb_ptr ]; \
+ \
+ vmov.u32 fb_mask_ptrs[1], fb_ptr; \
+ subs span_num_blocks, span_num_blocks, #1; \
+ \
+ vand.u8 uv_whole_8, uv_whole_8, texture_mask; \
+ setup_blocks_texture_##swizzling(); \
+ \
+ vmovn.u16 r_whole_8, r_whole; \
+ bne 4b; \
+ \
+ 5: /* last block of span: apply right-edge draw mask */ \
+ vmovn.u16 g_whole_8, g_whole; \
+ ldrh right_mask, [ span_edge_data, #edge_data_right_mask_offset ]; \
+ \
+ vld1.u32 { test_mask }, [ psx_gpu, :128 ]; /* test_mask at struct offset 0 -- presumably; confirm layout */ \
+ vdup.u8 draw_mask, right_mask; \
+ \
+ vmov.u32 fb_mask_ptrs[0], right_mask; \
+ vtst.u16 draw_mask, draw_mask, test_mask; \
+ vzip.u8 u_whole_8, v_whole_8; \
+ \
+ vbic.u16 uv_whole_8, uv_whole_8, draw_mask; /* zero U/V of masked-out pixels */ \
+ vst1.u32 { r_whole_8, g_whole_8 }, [ block_ptr_b, :128 ], c_32; \
+ vst1.u32 { uv_whole_8 }, [ block_ptr_a, :128 ], c_32; \
+ vst1.u32 { dither_offsets }, [ block_ptr_b, :128 ], c_32; \
+ vst1.u32 { b_whole_8, fb_mask_ptrs }, [ block_ptr_a, :128 ], c_32; \
+ \
+ 1: /* advance to next span */ \
+ add span_uvrg_offset, span_uvrg_offset, #16; \
+ add span_b_offset, span_b_offset, #4; \
+ \
+ add span_edge_data, span_edge_data, #8; \
+ subs num_spans, num_spans, #1; \
+ \
+ strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ bne 0b; \
+ \
+ ldmia sp!, { r4 - r11, pc }; \
+ \
+ 2: /* overflow path: flush block buffer, restore state, retry span */ \
+ /* TODO: Load from psx_gpu instead of saving/restoring these */\
+ vpush { texture_mask }; \
+ vpush { uvrg_dx4 }; \
+ \
+ stmdb sp!, { r0 - r3, r12, r14 }; \
+ bl flush_render_block_buffer; \
+ ldmia sp!, { r0 - r3, r12, r14 }; \
+ \
+ vpop { uvrg_dx4 }; \
+ vpop { texture_mask }; \
+ \
+ vadd.u32 uvrg_dx8, uvrg_dx4, uvrg_dx4; /* rebuild dx8 = 2*dx4 */ \
+ vmov.u8 fb_mask_ptrs, #0; \
+ \
+ mov num_blocks, span_num_blocks; \
+ add block_ptr_a, psx_gpu, #psx_gpu_blocks_offset; \
+ bal 3b \
+
+
+setup_blocks_shaded_textured_builder(swizzled)
+setup_blocks_shaded_textured_builder(unswizzled)
+
+
+#define setup_blocks_unshaded_textured_builder(swizzling) /* like the shaded builder but only U/V are interpolated */ \
+.align 3; \
+ \
+function(setup_blocks_unshaded_textured_dithered_##swizzling##_indirect) /* flat-shaded textured spans -> block buffer */ \
+ ldrh num_spans, [ psx_gpu, #psx_gpu_num_spans_offset ]; \
+ add uvrg_dx_ptr, psx_gpu, #psx_gpu_uvrg_dx_offset; \
+ \
+ vld1.u32 { uvrg_dx }, [ uvrg_dx_ptr, :128 ]; \
+ add texture_mask_ptr, psx_gpu, #psx_gpu_texture_mask_width_offset; \
+ \
+ cmp num_spans, #0; \
+ bxeq lr; /* no spans: nothing to do */ \
+ \
+ stmdb sp!, { r4 - r11, r14 }; \
+ vshl.u32 uvrg_dx4, uvrg_dx, #2; /* step per 4 pixels */ \
+ \
+ vshl.u32 uvrg_dx8, uvrg_dx, #3; /* step per 8 pixels */ \
+ \
+ vld2.u8 { texture_mask_u[], texture_mask_v[] }, [ texture_mask_ptr, :16 ]; \
+ add span_uvrg_offset, psx_gpu, #psx_gpu_span_uvrg_offset_offset; \
+ \
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ add span_edge_data, psx_gpu, #psx_gpu_span_edge_data_offset; \
+ \
+ add block_ptr_a, psx_gpu, #psx_gpu_blocks_offset; \
+ \
+ add block_ptr_a, block_ptr_a, num_blocks, lsl #6; /* 64 bytes per block */ \
+ \
+ 0: /* span loop */ \
+ vmov.u8 fb_mask_ptrs, #0; \
+ \
+ ldrh span_num_blocks, [ span_edge_data, #edge_data_num_blocks_offset ]; \
+ add dither_offset_ptr, psx_gpu, #psx_gpu_dither_table_offset; \
+ \
+ ldrh y, [ span_edge_data, #edge_data_y_offset ]; \
+ ldr fb_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]; \
+ \
+ cmp span_num_blocks, #0; \
+ beq 1f; /* empty span */ \
+ \
+ ldrh left_x, [ span_edge_data, #edge_data_left_x_offset ]; \
+ add num_blocks, span_num_blocks, num_blocks; \
+ \
+ cmp num_blocks, #MAX_BLOCKS; \
+ bgt 2f; /* block buffer would overflow: flush, then retry */ \
+ \
+ 3: /* per-span setup */ \
+ add fb_ptr, fb_ptr, y, lsl #11; /* 2048 bytes per VRAM scanline */ \
+ \
+ vdup.u32 v_left_x, left_x; \
+ and y, y, #0x3; /* dither table row */ \
+ \
+ ldr dither_row, [ dither_offset_ptr, y, lsl #2 ]; \
+ add fb_ptr, fb_ptr, left_x, lsl #1; \
+ \
+ and dither_shift, left_x, #0x03; \
+ \
+ vld1.u32 { uvrg }, [ span_uvrg_offset, :128 ]; \
+ vshr.u32 uvrg_dx, uvrg_dx4, #2; /* recover per-pixel step */ \
+ \
+ mov dither_shift, dither_shift, lsl #3; \
+ vmla.u32 uvrg, uvrg_dx, v_left_x; \
+ \
+ mov c_32, #32; \
+ subs span_num_blocks, span_num_blocks, #1; \
+ \
+ mov dither_row, dither_row, ror dither_shift; \
+ \
+ vdup.u32 dither_offsets_short, dither_row; \
+ add block_span_ptr, psx_gpu, #psx_gpu_u_block_span_offset; \
+ \
+ vshll.s8 dither_offsets, dither_offsets_short, #4; \
+ \
+ vdup.u32 u_block, uv[0]; \
+ \
+ vdup.u32 v_block, uv[1]; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 u_block, u_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 v_block, v_block, block_span; \
+ add block_ptr_b, block_ptr_a, #16; \
+ \
+ vshrn.u32 u_whole_low, u_block, #16; /* integer part of 16.16 */ \
+ vshrn.u32 v_whole_low, v_block, #16; \
+ \
+ vdup.u32 dx4, uv_dx4[0]; \
+ \
+ vaddhn.u32 u_whole_high, u_block, dx4; \
+ vdup.u32 dx4, uv_dx4[1]; \
+ \
+ vaddhn.u32 v_whole_high, v_block, dx4; \
+ vdup.u32 dx8, uv_dx8[0]; \
+ \
+ vadd.u32 u_block, u_block, dx8; /* advance by 8 pixels */ \
+ vdup.u32 dx8, uv_dx8[1]; \
+ \
+ vadd.u32 v_block, v_block, dx8; \
+ vmovn.u16 u_whole_8, u_whole; \
+ \
+ vmovn.u16 v_whole_8, v_whole; \
+ \
+ pld [ fb_ptr ]; \
+ vmov.u32 fb_mask_ptrs[1], fb_ptr; \
+ \
+ vand.u8 uv_whole_8, uv_whole_8, texture_mask; \
+ setup_blocks_texture_##swizzling(); \
+ \
+ beq 5f; /* single-block span */ \
+ \
+ 4: /* block loop */ \
+ vshrn.u32 u_whole_low, u_block, #16; \
+ \
+ vst2.u8 { u_whole_8, v_whole_8 }, [ block_ptr_a, :128 ], c_32; \
+ vshrn.u32 v_whole_low, v_block, #16; \
+ \
+ add block_ptr_b, block_ptr_b, #32; \
+ vst1.u32 { b_whole_8, fb_mask_ptrs }, [ block_ptr_a, :128 ], c_32; \
+ \
+ vdup.u32 dx4, uv_dx4[0]; \
+ vaddhn.u32 u_whole_high, u_block, dx4; \
+ vdup.u32 dx4, uv_dx4[1]; \
+ \
+ vaddhn.u32 v_whole_high, v_block, dx4; \
+ vdup.u32 dx8, uv_dx8[0]; \
+ \
+ vadd.u32 u_block, u_block, dx8; \
+ vdup.u32 dx8, uv_dx8[1]; \
+ \
+ vadd.u32 v_block, v_block, dx8; \
+ vmovn.u16 u_whole_8, u_whole; \
+ \
+ add fb_ptr, fb_ptr, #16; /* 8 pixels * 2 bytes */ \
+ vmovn.u16 v_whole_8, v_whole; \
+ \
+ vst1.u32 { dither_offsets }, [ block_ptr_b, :128 ], c_32; \
+ pld [ fb_ptr ]; \
+ \
+ vmov.u32 fb_mask_ptrs[1], fb_ptr; \
+ subs span_num_blocks, span_num_blocks, #1; \
+ \
+ vand.u8 uv_whole_8, uv_whole_8, texture_mask; \
+ setup_blocks_texture_##swizzling(); \
+ \
+ bne 4b; \
+ \
+ 5: /* last block: apply right-edge draw mask */ \
+ ldrh right_mask, [ span_edge_data, #edge_data_right_mask_offset ]; \
+ \
+ vld1.u32 { test_mask }, [ psx_gpu, :128 ]; /* test_mask at struct offset 0 -- presumably; confirm layout */ \
+ vdup.u8 draw_mask, right_mask; \
+ \
+ vmov.u32 fb_mask_ptrs[0], right_mask; \
+ vtst.u16 draw_mask, draw_mask, test_mask; \
+ vzip.u8 u_whole_8, v_whole_8; \
+ \
+ vbic.u16 uv_whole_8, uv_whole_8, draw_mask; \
+ add block_ptr_b, block_ptr_b, #32; \
+ vst1.u32 { uv_whole_8 }, [ block_ptr_a, :128 ], c_32; \
+ vst1.u32 { dither_offsets }, [ block_ptr_b, :128 ], c_32; \
+ vst1.u32 { b_whole_8, fb_mask_ptrs }, [ block_ptr_a, :128 ], c_32; \
+ \
+ 1: /* advance to next span */ \
+ add span_uvrg_offset, span_uvrg_offset, #16; \
+ add span_edge_data, span_edge_data, #8; \
+ subs num_spans, num_spans, #1; \
+ \
+ strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ bne 0b; \
+ \
+ ldmia sp!, { r4 - r11, pc }; \
+ \
+ 2: /* overflow path: flush block buffer, restore, retry */ \
+ /* TODO: Load from psx_gpu instead of saving/restoring these */\
+ vpush { texture_mask }; \
+ vpush { uvrg_dx4 }; \
+ \
+ stmdb sp!, { r0 - r3, r12, r14 }; \
+ bl flush_render_block_buffer; \
+ ldmia sp!, { r0 - r3, r12, r14 }; \
+ \
+ vpop { uvrg_dx4 }; \
+ vpop { texture_mask }; \
+ \
+ vadd.u32 uvrg_dx8, uvrg_dx4, uvrg_dx4; /* rebuild dx8 = 2*dx4 */ \
+ vmov.u8 fb_mask_ptrs, #0; \
+ \
+ mov num_blocks, span_num_blocks; \
+ add block_ptr_a, psx_gpu, #psx_gpu_blocks_offset; \
+ bal 3b \
+
+
+setup_blocks_unshaded_textured_builder(swizzled)
+setup_blocks_unshaded_textured_builder(unswizzled)
+
+
+.align 3
+
+function(setup_blocks_unshaded_untextured_undithered_unswizzled_indirect) /* flat single-color spans -> block buffer */
+ ldrh num_spans, [ psx_gpu, #psx_gpu_num_spans_offset ]
+ veor.u32 draw_mask, draw_mask, draw_mask /* draw_mask = 0: all pixels drawn except at the right edge */
+
+ cmp num_spans, #0
+ bxeq lr /* no spans: nothing to do */
+
+ stmdb sp!, { r4 - r11, r14 }
+ vld1.u32 { test_mask }, [ psx_gpu, :128 ] /* test_mask at struct offset 0 -- presumably; confirm layout */
+
+ ldr color, [ psx_gpu, #psx_gpu_triangle_color_offset ]
+
+ ubfx color_r, color, #3, #5 /* extract 5-bit components from 8:8:8 color */
+ ubfx color_g, color, #11, #5
+ ubfx color_b, color, #19, #5
+
+ orr color, color_r, color_b, lsl #10 /* repack as 5:5:5 BGR */
+ orr color, color, color_g, lsl #5
+
+ vdup.u16 colors, color
+
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+ add span_edge_data, psx_gpu, #psx_gpu_span_edge_data_offset
+
+ add block_ptr_a, psx_gpu, #psx_gpu_blocks_offset
+ add block_ptr_a, block_ptr_a, num_blocks, lsl #6 /* 64 bytes per block */
+
+ 0: /* span loop */
+ ldrh span_num_blocks, [ span_edge_data, #edge_data_num_blocks_offset ]
+ ldrh y, [ span_edge_data, #edge_data_y_offset ]
+
+ ldr fb_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]
+
+ cmp span_num_blocks, #0
+ beq 1f /* empty span */
+
+ ldrh left_x, [ span_edge_data, #edge_data_left_x_offset ]
+ add num_blocks, span_num_blocks, num_blocks
+
+ cmp num_blocks, #MAX_BLOCKS
+ bgt 2f /* block buffer would overflow: flush first */
+
+ 3: /* per-span setup */
+ add fb_ptr, fb_ptr, y, lsl #11 /* 2048 bytes per VRAM scanline */
+ and y, y, #0x3
+
+ add fb_ptr, fb_ptr, left_x, lsl #1
+ mov c_32, #32
+
+ subs span_num_blocks, span_num_blocks, #1
+
+ add block_ptr_b, block_ptr_a, #16
+ pld [ fb_ptr ]
+
+ vmov.u32 fb_mask_ptrs[1], fb_ptr
+ beq 5f /* single-block span */
+
+ 4: /* block loop: emit zero mask, flat color, fb pointer */
+ vst1.u32 { draw_mask }, [ block_ptr_a, :128 ], c_32
+ vst1.u32 { colors }, [ block_ptr_b, :128 ], c_32
+ vst1.u32 { b_whole_8, fb_mask_ptrs }, [ block_ptr_a, :128 ], c_32 /* b_whole_8 is filler here; only fb_mask_ptrs matters -- presumably; confirm block layout */
+
+ add fb_ptr, fb_ptr, #16
+ add block_ptr_b, block_ptr_b, #32
+
+ pld [ fb_ptr ]
+
+ vmov.u32 fb_mask_ptrs[1], fb_ptr
+ subs span_num_blocks, span_num_blocks, #1
+
+ bne 4b
+
+ 5: /* last block: apply right-edge draw mask */
+ ldrh right_mask, [ span_edge_data, #edge_data_right_mask_offset ]
+
+ vdup.u8 draw_mask_edge, right_mask
+ vtst.u16 draw_mask_edge, draw_mask_edge, test_mask
+
+ vst1.u32 { colors }, [ block_ptr_b, :128 ], c_32
+ vst1.u32 { draw_mask_edge }, [ block_ptr_a, :128 ], c_32
+ add block_ptr_b, block_ptr_b, #32
+ vst1.u32 { b_whole_8, fb_mask_ptrs }, [ block_ptr_a, :128 ], c_32
+
+ 1: /* advance to next span */
+ add span_edge_data, span_edge_data, #8
+ subs num_spans, num_spans, #1
+
+ strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+ bne 0b
+
+ ldmia sp!, { r4 - r11, pc }
+
+ 2: /* overflow path: flush block buffer, restore, retry */
+ vpush { colors }
+
+ stmdb sp!, { r0 - r3, r12, r14 }
+ bl flush_render_block_buffer
+ ldmia sp!, { r0 - r3, r12, r14 }
+
+ vpop { colors }
+
+ vld1.u32 { test_mask }, [ psx_gpu, :128 ]
+ veor.u32 draw_mask, draw_mask, draw_mask
+
+ mov num_blocks, span_num_blocks
+ add block_ptr_a, psx_gpu, #psx_gpu_blocks_offset
+ bal 3b
+
+
+#define mask_msb_scalar r14
+
+#define msb_mask q15
+
+#define pixels_low d16
+
+#define msb_mask_low d30
+#define msb_mask_high d31
+
+
+.align 3
+
+function(setup_blocks_unshaded_untextured_undithered_unswizzled_direct) /* flat single-color spans written straight to VRAM */
+ ldrh num_spans, [ psx_gpu, #psx_gpu_num_spans_offset ]
+
+ cmp num_spans, #0
+ bxeq lr /* no spans: nothing to do */
+
+ stmdb sp!, { r4 - r11, r14 }
+
+ ldr color, [ psx_gpu, #psx_gpu_triangle_color_offset ]
+
+ ubfx color_r, color, #3, #5 /* extract 5-bit components from 8:8:8 color */
+ ubfx color_g, color, #11, #5
+
+ ldrh mask_msb_scalar, [ psx_gpu, #psx_gpu_mask_msb_offset ]
+ ubfx color_b, color, #19, #5
+
+ orr color, color_r, color_b, lsl #10 /* repack as 5:5:5 BGR */
+ orr color, color, color_g, lsl #5
+ orr color, color, mask_msb_scalar /* set mask bit if enabled */
+
+ vdup.u16 colors, color
+
+ add span_edge_data, psx_gpu, #psx_gpu_span_edge_data_offset
+
+ 0: /* span loop */
+ ldrh span_num_blocks, [ span_edge_data, #edge_data_num_blocks_offset ]
+ ldrh y, [ span_edge_data, #edge_data_y_offset ]
+
+ ldr fb_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]
+
+ cmp span_num_blocks, #0
+ beq 1f /* empty span */
+
+ ldrh left_x, [ span_edge_data, #edge_data_left_x_offset ]
+
+ add fb_ptr, fb_ptr, y, lsl #11 /* 2048 bytes per VRAM scanline */
+ subs span_num_blocks, span_num_blocks, #1
+
+ add fb_ptr, fb_ptr, left_x, lsl #1
+ beq 3f /* only the edge block remains */
+
+ 2: /* store whole 8-pixel blocks of the flat color */
+ vst1.u32 { colors }, [ fb_ptr ]!
+ subs span_num_blocks, span_num_blocks, #1
+
+ bne 2b
+
+ 3: /* right-edge block: write per-pixel from the inverted mask */
+ ldrb right_mask, [ span_edge_data, #edge_data_right_mask_offset ]
+ eor right_mask, right_mask, #0xFF /* invert: set bits now mean "draw" */
+
+ 4:
+ strh color, [ fb_ptr ], #2
+ movs right_mask, right_mask, lsr #1
+ bne 4b
+
+ 1: /* advance to next span */
+ add span_edge_data, span_edge_data, #8
+ subs num_spans, num_spans, #1
+
+ strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ] /* NOTE(review): num_blocks (r14) aliases mask_msb_scalar here, so this stores the msb mask value, not a count; looks stale/harmless on the direct path -- confirm */
+ bne 0b
+
+ ldmia sp!, { r4 - r11, pc }
+
+
+
+#undef c_64
+
+#define c_64 r7
+#define rg_dx_ptr r2
+
+
+#undef r_block
+#undef g_block
+#undef b_block
+#undef r_whole
+#undef g_whole
+#undef b_whole
+#undef r_whole_low
+#undef r_whole_high
+#undef g_whole_low
+#undef g_whole_high
+#undef b_whole_low
+#undef b_whole_high
+#undef r_whole_8
+#undef g_whole_8
+#undef b_whole_8
+#undef dither_offsets
+#undef rg_dx4
+#undef rg_dx8
+#undef dx4
+#undef dx8
+#undef v_left_x
+#undef uvrg
+#undef block_span
+#undef rg
+#undef draw_mask
+#undef test_mask
+
+#define r_block q0
+#define g_block q1
+#define b_block q2
+
+#define r_whole q3
+#define g_whole q4
+#define b_whole q5
+
+#define r_whole_low d6
+#define r_whole_high d7
+#define g_whole_low d8
+#define g_whole_high d9
+#define b_whole_low d10
+#define b_whole_high d11
+
+#define gb_whole_8 q6
+
+#define g_whole_8 d12
+#define b_whole_8 d13
+
+#define r_whole_8 d14
+
+#define pixels q8
+
+#define rg_dx4 d18
+#define rg_dx8 d19
+
+#define dx4 q10
+#define dx8 q10
+
+#define v_left_x d6
+#define uvrg q4
+#define block_span q5
+
+#define rg d9
+
+#define d64_1 d22
+#define d64_128 d23
+
+#define d128_4 q12
+#define d128_0x7 q13
+
+#define d64_4 d24
+
+#define dither_offsets q14
+#define draw_mask q15
+
+#define dither_offsets_low d28
+
+#define rg_dx d0
+#define test_mask q10
+
+
+#define setup_blocks_shaded_untextured_dither_a_dithered() /* saturating add of dither offsets to r/g/b bytes */ \
+ vqadd.u8 r_whole_8, r_whole_8, dither_offsets_low; \
+ vqadd.u8 gb_whole_8, gb_whole_8, dither_offsets; \
+
+#define setup_blocks_shaded_untextured_dither_b_dithered() /* saturating subtract of the dither bias */ \
+ vqsub.u8 r_whole_8, r_whole_8, d64_4; \
+ vqsub.u8 gb_whole_8, gb_whole_8, d128_4 \
+
+#define setup_blocks_shaded_untextured_dither_a_undithered() /* no-op in undithered build */ \
+
+#define setup_blocks_shaded_untextured_dither_b_undithered() /* no-op in undithered build */ \
+
+
+#define setup_blocks_shaded_untextured_indirect_builder(dithering) /* emits one routine per dither mode */ \
+.align 3; \
+ \
+function(setup_blocks_shaded_untextured_##dithering##_unswizzled_indirect) /* Gouraud untextured spans -> block buffer (128-byte stride) */ \
+ ldrh num_spans, [ psx_gpu, #psx_gpu_num_spans_offset ]; \
+ add rg_dx_ptr, psx_gpu, #(psx_gpu_uvrg_dx_offset + 8); /* skip u/v, point at r/g deltas */ \
+ \
+ vld1.u32 { rg_dx }, [ rg_dx_ptr, :64 ]; \
+ \
+ cmp num_spans, #0; \
+ bxeq lr; /* no spans: nothing to do */ \
+ \
+ stmdb sp!, { r4 - r11, r14 }; \
+ vshl.u32 rg_dx4, rg_dx, #2; /* step per 4 pixels */ \
+ \
+ ldr b_dx, [ psx_gpu, #psx_gpu_b_dx_offset ]; \
+ vshl.u32 rg_dx8, rg_dx, #3; /* step per 8 pixels */ \
+ \
+ add span_uvrg_offset, psx_gpu, #psx_gpu_span_uvrg_offset_offset; \
+ \
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ add span_edge_data, psx_gpu, #psx_gpu_span_edge_data_offset; \
+ \
+ add span_b_offset, psx_gpu, #psx_gpu_span_b_offset_offset; \
+ add block_ptr_a, psx_gpu, #psx_gpu_blocks_offset; \
+ \
+ add block_ptr_a, block_ptr_a, num_blocks, lsl #6; /* 64 bytes per block */ \
+ vmov.u8 d64_1, #1; /* 5:5:5 pack multipliers: r*1, g*4(<<2), b*128(<<7 via widening) */ \
+ \
+ vmov.u8 d128_4, #4; \
+ vmov.u8 d64_128, #128; \
+ \
+ vmov.u8 d128_0x7, #0x7; /* low-3-bit clear mask for g/b */ \
+ \
+ 0: /* span loop */ \
+ ldrh span_num_blocks, [ span_edge_data, #edge_data_num_blocks_offset ]; \
+ add dither_offset_ptr, psx_gpu, #psx_gpu_dither_table_offset; \
+ \
+ ldrh y, [ span_edge_data, #edge_data_y_offset ]; \
+ ldr fb_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]; \
+ \
+ cmp span_num_blocks, #0; \
+ beq 1f; /* empty span */ \
+ \
+ ldrh left_x, [ span_edge_data, #edge_data_left_x_offset ]; \
+ add num_blocks, span_num_blocks, num_blocks; \
+ \
+ cmp num_blocks, #MAX_BLOCKS; \
+ bgt 2f; /* block buffer would overflow: flush first */ \
+ \
+ 3: /* per-span setup: seed r/g/b interpolants at left_x */ \
+ ldr b, [ span_b_offset ]; \
+ add fb_ptr, fb_ptr, y, lsl #11; /* 2048 bytes per VRAM scanline */ \
+ \
+ vdup.u32 v_left_x, left_x; \
+ and y, y, #0x3; /* dither table row */ \
+ \
+ ldr dither_row, [ dither_offset_ptr, y, lsl #2 ]; \
+ add fb_ptr, fb_ptr, left_x, lsl #1; \
+ \
+ mla b, b_dx, left_x, b; \
+ and dither_shift, left_x, #0x03; \
+ \
+ vld1.u32 { uvrg }, [ span_uvrg_offset, :128 ]; \
+ vshr.u32 rg_dx, rg_dx4, #2; /* recover per-pixel step */ \
+ \
+ mov dither_shift, dither_shift, lsl #3; \
+ vmla.u32 rg, rg_dx, v_left_x; \
+ \
+ mov c_64, #64; /* block stride is 64 here (mask + pixels interleaved) */ \
+ subs span_num_blocks, span_num_blocks, #1; \
+ \
+ mov dither_row, dither_row, ror dither_shift; \
+ mov b_dx4, b_dx, lsl #2; \
+ \
+ vdup.u32 dither_offsets, dither_row; \
+ add block_span_ptr, psx_gpu, #psx_gpu_r_block_span_offset; \
+ \
+ vdup.u32 b_block, b; \
+ vadd.u8 dither_offsets, dither_offsets, d128_4; /* re-bias dither values */ \
+ \
+ mov b_dx8, b_dx, lsl #3; \
+ vdup.u32 r_block, rg[0]; \
+ vdup.u32 g_block, rg[1]; \
+ \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 r_block, r_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 g_block, g_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]; \
+ \
+ vadd.u32 b_block, b_block, block_span; \
+ add block_ptr_b, block_ptr_a, #16; \
+ \
+ vshrn.u32 r_whole_low, r_block, #16; /* integer part of 16.16 */ \
+ vshrn.u32 g_whole_low, g_block, #16; \
+ vshrn.u32 b_whole_low, b_block, #16; \
+ vdup.u32 dx4, rg_dx4[0]; \
+ \
+ vaddhn.u32 r_whole_high, r_block, dx4; \
+ vdup.u32 dx4, rg_dx4[1]; \
+ \
+ vaddhn.u32 g_whole_high, g_block, dx4; \
+ vdup.u32 dx4, b_dx4; \
+ \
+ vaddhn.u32 b_whole_high, b_block, dx4; \
+ vdup.u32 dx8, rg_dx8[0]; \
+ \
+ vadd.u32 r_block, r_block, dx8; /* advance by 8 pixels */ \
+ vdup.u32 dx8, rg_dx8[1]; \
+ \
+ vadd.u32 g_block, g_block, dx8; \
+ vdup.u32 dx8, b_dx8; \
+ \
+ vadd.u32 b_block, b_block, dx8; \
+ \
+ vmovn.u16 r_whole_8, r_whole; \
+ vmovn.u16 g_whole_8, g_whole; \
+ vmovn.u16 b_whole_8, b_whole; \
+ \
+ beq 5f; /* single-block span */ \
+ veor.u32 draw_mask, draw_mask, draw_mask; /* interior blocks: all pixels drawn */ \
+ \
+ 4: /* block loop: dither, pack 5:5:5, store mask + pixels */ \
+ setup_blocks_shaded_untextured_dither_a_##dithering(); \
+ vshrn.u32 r_whole_low, r_block, #16; \
+ \
+ setup_blocks_shaded_untextured_dither_b_##dithering(); \
+ vshrn.u32 g_whole_low, g_block, #16; \
+ \
+ vshrn.u32 b_whole_low, b_block, #16; \
+ str fb_ptr, [ block_ptr_a, #44 ]; /* fb pointer slot in the block record */ \
+ \
+ vdup.u32 dx4, rg_dx4[0]; \
+ vshr.u8 r_whole_8, r_whole_8, #3; /* 8-bit -> 5-bit red */ \
+ vbic.u8 gb_whole_8, gb_whole_8, d128_0x7; /* clear low 3 bits of g/b */ \
+ \
+ vaddhn.u32 r_whole_high, r_block, dx4; \
+ vdup.u32 dx4, rg_dx4[1]; \
+ \
+ vaddhn.u32 g_whole_high, g_block, dx4; \
+ vdup.u32 dx4, b_dx4; \
+ \
+ vaddhn.u32 b_whole_high, b_block, dx4; \
+ vdup.u32 dx8, rg_dx8[0]; \
+ \
+ vmull.u8 pixels, r_whole_8, d64_1; /* pack: r + (g>>3)<<5 + (b>>3)<<10 via widening MACs */ \
+ vmlal.u8 pixels, g_whole_8, d64_4; \
+ vmlal.u8 pixels, b_whole_8, d64_128; \
+ \
+ vadd.u32 r_block, r_block, dx8; \
+ vdup.u32 dx8, rg_dx8[1]; \
+ \
+ vadd.u32 g_block, g_block, dx8; \
+ vdup.u32 dx8, b_dx8; \
+ \
+ vadd.u32 b_block, b_block, dx8; \
+ add fb_ptr, fb_ptr, #16; /* 8 pixels * 2 bytes */ \
+ \
+ vmovn.u16 r_whole_8, r_whole; \
+ vmovn.u16 g_whole_8, g_whole; \
+ vmovn.u16 b_whole_8, b_whole; \
+ \
+ vst1.u32 { draw_mask }, [ block_ptr_a, :128 ], c_64; \
+ vst1.u32 { pixels }, [ block_ptr_b, :128 ], c_64; \
+ \
+ pld [ fb_ptr ]; \
+ \
+ subs span_num_blocks, span_num_blocks, #1; \
+ bne 4b; \
+ \
+ 5: /* last block: apply right-edge draw mask */ \
+ str fb_ptr, [ block_ptr_a, #44 ]; \
+ setup_blocks_shaded_untextured_dither_a_##dithering(); \
+ \
+ ldrh right_mask, [ span_edge_data, #edge_data_right_mask_offset ]; \
+ setup_blocks_shaded_untextured_dither_b_##dithering(); \
+ \
+ vshr.u8 r_whole_8, r_whole_8, #3; \
+ vdup.u8 draw_mask, right_mask; \
+ \
+ vbic.u8 gb_whole_8, gb_whole_8, d128_0x7; \
+ vld1.u32 { test_mask }, [ psx_gpu, :128 ]; /* test_mask at struct offset 0 -- presumably; confirm layout */ \
+ \
+ vtst.u16 draw_mask, draw_mask, test_mask; \
+ \
+ vmull.u8 pixels, r_whole_8, d64_1; \
+ vmlal.u8 pixels, g_whole_8, d64_4; \
+ vmlal.u8 pixels, b_whole_8, d64_128; \
+ \
+ vst1.u32 { draw_mask }, [ block_ptr_a, :128 ], c_64; \
+ vst1.u32 { pixels }, [ block_ptr_b, :128 ], c_64; \
+ \
+ 1: /* advance to next span */ \
+ add span_uvrg_offset, span_uvrg_offset, #16; \
+ add span_b_offset, span_b_offset, #4; \
+ \
+ add span_edge_data, span_edge_data, #8; \
+ subs num_spans, num_spans, #1; \
+ \
+ strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ bne 0b; \
+ \
+ ldmia sp!, { r4 - r11, pc }; \
+ \
+ 2: /* overflow path: flush block buffer, rebuild constants, retry */ \
+ /* TODO: Load from psx_gpu instead of saving/restoring these */\
+ vpush { rg_dx4 }; \
+ \
+ stmdb sp!, { r0 - r3, r12, r14 }; \
+ bl flush_render_block_buffer; \
+ ldmia sp!, { r0 - r3, r12, r14 }; \
+ \
+ vpop { rg_dx4 }; \
+ \
+ vmov.u8 d64_1, #1; \
+ vmov.u8 d128_4, #4; \
+ vmov.u8 d64_128, #128; \
+ vmov.u8 d128_0x7, #0x7; \
+ \
+ vadd.u32 rg_dx8, rg_dx4, rg_dx4; /* rebuild dx8 = 2*dx4 */ \
+ \
+ mov num_blocks, span_num_blocks; \
+ add block_ptr_a, psx_gpu, #psx_gpu_blocks_offset; \
+ bal 3b \
+
+
+setup_blocks_shaded_untextured_indirect_builder(undithered)
+setup_blocks_shaded_untextured_indirect_builder(dithered)
+
+
+#undef draw_mask
+
+#define mask_msb_ptr r14
+
+#define draw_mask q0
+#define pixels_low d16
+
+
+
+#define setup_blocks_shaded_untextured_direct_builder(dithering) /* emits one routine per dither mode */ \
+.align 3; \
+ \
+function(setup_blocks_shaded_untextured_##dithering##_unswizzled_direct) /* Gouraud untextured spans written straight to VRAM */ \
+ ldrh num_spans, [ psx_gpu, #psx_gpu_num_spans_offset ]; \
+ add rg_dx_ptr, psx_gpu, #(psx_gpu_uvrg_dx_offset + 8); /* skip u/v, point at r/g deltas */ \
+ \
+ vld1.u32 { rg_dx }, [ rg_dx_ptr, :64 ]; \
+ \
+ cmp num_spans, #0; \
+ bxeq lr; /* no spans: nothing to do */ \
+ \
+ stmdb sp!, { r4 - r11, r14 }; \
+ vshl.u32 rg_dx4, rg_dx, #2; /* step per 4 pixels */ \
+ \
+ ldr b_dx, [ psx_gpu, #psx_gpu_b_dx_offset ]; \
+ vshl.u32 rg_dx8, rg_dx, #3; /* step per 8 pixels */ \
+ \
+ add span_uvrg_offset, psx_gpu, #psx_gpu_span_uvrg_offset_offset; \
+ add span_edge_data, psx_gpu, #psx_gpu_span_edge_data_offset; \
+ \
+ add span_b_offset, psx_gpu, #psx_gpu_span_b_offset_offset; \
+ vmov.u8 d64_1, #1; /* 5:5:5 pack multipliers */ \
+ \
+ vmov.u8 d128_4, #4; \
+ vmov.u8 d64_128, #128; \
+ \
+ vmov.u8 d128_0x7, #0x7; /* low-3-bit clear mask for g/b */ \
+ add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset; \
+ vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]; \
+ \
+ 0: /* span loop */ \
+ ldrh span_num_blocks, [ span_edge_data, #edge_data_num_blocks_offset ]; \
+ add dither_offset_ptr, psx_gpu, #psx_gpu_dither_table_offset; \
+ \
+ ldrh y, [ span_edge_data, #edge_data_y_offset ]; \
+ ldr fb_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]; \
+ \
+ cmp span_num_blocks, #0; \
+ beq 1f; /* empty span */ \
+ \
+ ldrh left_x, [ span_edge_data, #edge_data_left_x_offset ]; \
+ add fb_ptr, fb_ptr, y, lsl #11; /* 2048 bytes per VRAM scanline */ \
+ \
+ ldr b, [ span_b_offset ]; \
+ vdup.u32 v_left_x, left_x; \
+ and y, y, #0x3; /* dither table row */ \
+ \
+ ldr dither_row, [ dither_offset_ptr, y, lsl #2 ]; \
+ add fb_ptr, fb_ptr, left_x, lsl #1; \
+ \
+ mla b, b_dx, left_x, b; \
+ and dither_shift, left_x, #0x03; \
+ \
+ vld1.u32 { uvrg }, [ span_uvrg_offset, :128 ]; \
+ vshr.u32 rg_dx, rg_dx4, #2; /* recover per-pixel step */ \
+ \
+ mov dither_shift, dither_shift, lsl #3; \
+ vmla.u32 rg, rg_dx, v_left_x; \
+ \
+ subs span_num_blocks, span_num_blocks, #1; \
+ \
+ mov dither_row, dither_row, ror dither_shift; \
+ mov b_dx4, b_dx, lsl #2; \
+ \
+ vdup.u32 dither_offsets, dither_row; \
+ add block_span_ptr, psx_gpu, #psx_gpu_r_block_span_offset; \
+ \
+ vdup.u32 b_block, b; \
+ vadd.u8 dither_offsets, dither_offsets, d128_4; /* re-bias dither values */ \
+ \
+ mov b_dx8, b_dx, lsl #3; \
+ vdup.u32 r_block, rg[0]; \
+ vdup.u32 g_block, rg[1]; \
+ \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 r_block, r_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]!; \
+ \
+ vadd.u32 g_block, g_block, block_span; \
+ vld1.u32 { block_span }, [ block_span_ptr, :128 ]; \
+ \
+ vadd.u32 b_block, b_block, block_span; \
+ add block_ptr_b, block_ptr_a, #16; \
+ \
+ vshrn.u32 r_whole_low, r_block, #16; /* integer part of 16.16 */ \
+ vshrn.u32 g_whole_low, g_block, #16; \
+ vshrn.u32 b_whole_low, b_block, #16; \
+ vdup.u32 dx4, rg_dx4[0]; \
+ \
+ vaddhn.u32 r_whole_high, r_block, dx4; \
+ vdup.u32 dx4, rg_dx4[1]; \
+ \
+ vaddhn.u32 g_whole_high, g_block, dx4; \
+ vdup.u32 dx4, b_dx4; \
+ \
+ vaddhn.u32 b_whole_high, b_block, dx4; \
+ vdup.u32 dx8, rg_dx8[0]; \
+ \
+ vadd.u32 r_block, r_block, dx8; /* advance by 8 pixels */ \
+ vdup.u32 dx8, rg_dx8[1]; \
+ \
+ vadd.u32 g_block, g_block, dx8; \
+ vdup.u32 dx8, b_dx8; \
+ \
+ vadd.u32 b_block, b_block, dx8; \
+ \
+ vmovn.u16 r_whole_8, r_whole; \
+ vmovn.u16 g_whole_8, g_whole; \
+ vmovn.u16 b_whole_8, b_whole; \
+ \
+ beq 3f; /* only the edge block remains */ \
+ \
+ 2: /* block loop: dither, pack 5:5:5 + msb, store 8 pixels */ \
+ setup_blocks_shaded_untextured_dither_a_##dithering(); \
+ vshrn.u32 r_whole_low, r_block, #16; \
+ \
+ setup_blocks_shaded_untextured_dither_b_##dithering(); \
+ vshrn.u32 g_whole_low, g_block, #16; \
+ \
+ vshrn.u32 b_whole_low, b_block, #16; \
+ \
+ vdup.u32 dx4, rg_dx4[0]; \
+ vshr.u8 r_whole_8, r_whole_8, #3; /* 8-bit -> 5-bit red */ \
+ vbic.u8 gb_whole_8, gb_whole_8, d128_0x7; /* clear low 3 bits of g/b */ \
+ \
+ vaddhn.u32 r_whole_high, r_block, dx4; \
+ vdup.u32 dx4, rg_dx4[1]; \
+ \
+ vmov pixels, msb_mask; /* start from mask msb, then accumulate components */ \
+ vaddhn.u32 g_whole_high, g_block, dx4; \
+ vdup.u32 dx4, b_dx4; \
+ \
+ vaddhn.u32 b_whole_high, b_block, dx4; \
+ vdup.u32 dx8, rg_dx8[0]; \
+ \
+ vmlal.u8 pixels, r_whole_8, d64_1; \
+ vmlal.u8 pixels, g_whole_8, d64_4; \
+ vmlal.u8 pixels, b_whole_8, d64_128; \
+ \
+ vadd.u32 r_block, r_block, dx8; \
+ vdup.u32 dx8, rg_dx8[1]; \
+ \
+ vadd.u32 g_block, g_block, dx8; \
+ vdup.u32 dx8, b_dx8; \
+ \
+ vadd.u32 b_block, b_block, dx8; \
+ \
+ vmovn.u16 r_whole_8, r_whole; \
+ vmovn.u16 g_whole_8, g_whole; \
+ vmovn.u16 b_whole_8, b_whole; \
+ \
+ vst1.u32 { pixels }, [ fb_ptr ]!; /* write 8 pixels directly to VRAM */ \
+ subs span_num_blocks, span_num_blocks, #1; \
+ bne 2b; \
+ \
+ 3: /* right-edge block: write per-pixel from the inverted mask */ \
+ setup_blocks_shaded_untextured_dither_a_##dithering(); \
+ \
+ ldrb right_mask, [ span_edge_data, #edge_data_right_mask_offset ]; \
+ setup_blocks_shaded_untextured_dither_b_##dithering(); \
+ \
+ vshr.u8 r_whole_8, r_whole_8, #3; \
+ vmov pixels, msb_mask; \
+ vbic.u8 gb_whole_8, gb_whole_8, d128_0x7; \
+ eor right_mask, right_mask, #0xFF; /* invert: set bits now mean "draw" */ \
+ \
+ vmlal.u8 pixels, r_whole_8, d64_1; \
+ vmlal.u8 pixels, g_whole_8, d64_4; \
+ vmlal.u8 pixels, b_whole_8, d64_128; \
+ \
+ 4: \
+ vst1.u16 { pixels_low[0] }, [ fb_ptr ]!; \
+ vext.16 pixels, pixels, #1; /* rotate next pixel into lane 0 */ \
+ movs right_mask, right_mask, lsr #1; \
+ bne 4b; \
+ \
+ 1: /* advance to next span */ \
+ add span_uvrg_offset, span_uvrg_offset, #16; \
+ add span_b_offset, span_b_offset, #4; \
+ \
+ add span_edge_data, span_edge_data, #8; \
+ subs num_spans, num_spans, #1; \
+ \
+ bne 0b; \
+ \
+ ldmia sp!, { r4 - r11, pc } \
+
+setup_blocks_shaded_untextured_direct_builder(undithered)
+setup_blocks_shaded_untextured_direct_builder(dithered)
+
+
+/* Register aliases for the texture_blocks_* fetch routines below.  Note the
+   deliberate reuse: uv_0..uv_7 and pixel_0..pixel_7 share the same core
+   registers, so each uv address register is overwritten by the texel fetched
+   through it once the address is no longer needed. */
+#undef psx_gpu
+#undef num_blocks
+#undef triangle
+#undef c_64
+
+#define psx_gpu r0
+#define block_ptr r1
+#define num_blocks r2
+#define uv_01 r3
+#define uv_23 r4
+#define uv_45 r5
+#define uv_67 r6
+#define uv_0 r7
+#define uv_1 r3
+#define uv_2 r8
+#define uv_3 r4
+#define uv_4 r9
+#define uv_5 r5
+#define uv_6 r10
+#define uv_7 r6
+#define texture_ptr r11
+
+/* pixel_N aliases the same registers as uv_N (addresses are consumed before
+   the fetched texels land in them). */
+#define pixel_0 r7
+#define pixel_1 r3
+#define pixel_2 r8
+#define pixel_3 r4
+#define pixel_4 r9
+#define pixel_5 r5
+#define pixel_6 r10
+#define pixel_7 r6
+
+#define pixels_a r7
+#define pixels_b r9
+#define pixels_c r8
+#define pixels_d r10
+
+/* Constant 64 = byte stride between successive blocks. */
+#define c_64 r0
+
+#define clut_ptr r12
+#define current_texture_mask r5
+#define dirty_textures_mask r6
+
+#define texels d0
+
+/* The 4bpp CLUT is de-interleaved (vuzp) into low bytes (clut_low_*) and
+   high bytes (clut_high_*) so vtbl can look up each half separately. */
+#define clut_low_a d2
+#define clut_low_b d3
+#define clut_high_a d4
+#define clut_high_b d5
+
+#define clut_a q1
+#define clut_b q2
+
+#define texels_low d6
+#define texels_high d7
+
+.align 3
+
+/* Untextured blocks need no texel fetch; this entry in the texturing
+   dispatch is a no-op. */
+function(texture_blocks_untextured)
+ bx lr
+
+
+.align 3
+
+/*
+ * Texel fetch for 4bpp-paletted blocks.
+ *
+ * For each of psx_gpu->num_blocks 64-byte blocks: the first 16 bytes hold
+ * eight packed 16-bit offsets into the (unpacked) 4bpp texture-page cache.
+ * Each uxtah adds one zero-extended halfword offset to texture_ptr, a byte
+ * is fetched per pixel, the eight indices are packed into two words and
+ * moved into a NEON d-register, and the CLUT lookup is done with two vtbl
+ * passes (low result bytes and high result bytes separately, since the CLUT
+ * was de-interleaved with vuzp at entry).  vst2 re-interleaves the two
+ * halves so eight 16bpp texels land back at the start of the block.
+ *
+ * If the current texture page is flagged dirty, the 4bpp cache is rebuilt
+ * first (path 1:) and the loop is entered afterwards.
+ */
+function(texture_blocks_4bpp)
+ stmdb sp!, { r3 - r11, r14 }
+ add block_ptr, psx_gpu, #psx_gpu_blocks_offset
+
+ ldr texture_ptr, [ psx_gpu, #psx_gpu_texture_page_ptr_offset ]
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+
+ ldr clut_ptr, [ psx_gpu, #psx_gpu_clut_ptr_offset ]
+ vld1.u32 { clut_a, clut_b }, [ clut_ptr, :128 ]
+
+ ldr current_texture_mask, [ psx_gpu, #psx_gpu_current_texture_mask_offset ]
+ vuzp.u8 clut_a, clut_b
+
+ ldr dirty_textures_mask, [ psx_gpu, #psx_gpu_dirty_textures_4bpp_mask_offset ]
+ tst dirty_textures_mask, current_texture_mask
+
+ bne 1f
+ mov c_64, #64
+
+0:
+ ldm block_ptr, { uv_01, uv_23, uv_45, uv_67 }
+
+ uxtah uv_0, texture_ptr, uv_01
+ uxtah uv_1, texture_ptr, uv_01, ror #16
+
+ uxtah uv_2, texture_ptr, uv_23
+ uxtah uv_3, texture_ptr, uv_23, ror #16
+
+ uxtah uv_4, texture_ptr, uv_45
+ ldrb pixel_0, [ uv_0 ]
+
+ uxtah uv_5, texture_ptr, uv_45, ror #16
+ ldrb pixel_1, [ uv_1 ]
+
+ uxtah uv_6, texture_ptr, uv_67
+ ldrb pixel_2, [ uv_2 ]
+
+ uxtah uv_7, texture_ptr, uv_67, ror #16
+ ldrb pixel_3, [ uv_3 ]
+
+ ldrb pixel_4, [ uv_4 ]
+ subs num_blocks, num_blocks, #1
+
+ /* Pack the eight CLUT indices into two words for the vmov into NEON. */
+ ldrb pixel_5, [ uv_5 ]
+ orr pixels_a, pixel_0, pixel_1, lsl #8
+
+ ldrb pixel_6, [ uv_6 ]
+ orr pixels_b, pixel_4, pixel_5, lsl #8
+
+ ldrb pixel_7, [ uv_7 ]
+ orr pixels_a, pixels_a, pixel_2, lsl #16
+
+ orr pixels_b, pixels_b, pixel_6, lsl #16
+ orr pixels_a, pixels_a, pixel_3, lsl #24
+
+ orr pixels_b, pixels_b, pixel_7, lsl #24
+ vmov.u32 texels, pixels_a, pixels_b
+
+ /* Table lookup: low and high bytes of the 16bpp palette entries. */
+ vtbl.8 texels_low, { clut_low_a, clut_low_b }, texels
+ vtbl.8 texels_high, { clut_high_a, clut_high_b }, texels
+
+ vst2.u8 { texels_low, texels_high }, [ block_ptr, :128 ], c_64
+ bne 0b
+
+ ldmia sp!, { r3 - r11, pc }
+
+1:
+ /* Dirty texture page: rebuild the 4bpp cache, then fall back into the
+    main loop.  r1/r2 (block_ptr/num_blocks) are call-clobbered, so save
+    them around the call. */
+ stmdb sp!, { r1 - r2 }
+ bl update_texture_4bpp_cache
+
+ mov c_64, #64
+ ldmia sp!, { r1 - r2 }
+ bal 0b
+
+
+.align 3
+
+/*
+ * Texel fetch for 8bpp-paletted blocks.
+ *
+ * Same structure as texture_blocks_4bpp, but the CLUT has 256 entries so
+ * the lookup is done with scalar ldrh instead of vtbl: each fetched 8-bit
+ * index is doubled (add pixel, pixel, pixel) to form a halfword offset
+ * into the CLUT.  The eight 16bpp results are packed into four words and
+ * stored back over the first 16 bytes of the block with stm.
+ * Note the stm order { pixels_a, pixels_c, pixels_b, pixels_d }: pixels_c
+ * (r8) holds texels 2/3 and pixels_b (r9) texels 4/5, so ascending
+ * register order writes the texels in pixel order.
+ */
+function(texture_blocks_8bpp)
+ stmdb sp!, { r3 - r11, r14 }
+ add block_ptr, psx_gpu, #psx_gpu_blocks_offset
+
+ ldr texture_ptr, [ psx_gpu, #psx_gpu_texture_page_ptr_offset ]
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+
+ ldr clut_ptr, [ psx_gpu, #psx_gpu_clut_ptr_offset ]
+ ldr current_texture_mask, [ psx_gpu, #psx_gpu_current_texture_mask_offset ]
+
+ ldr dirty_textures_mask, [ psx_gpu, #psx_gpu_dirty_textures_8bpp_mask_offset ]
+ tst dirty_textures_mask, current_texture_mask
+
+ bne 1f
+ nop
+
+0:
+ ldm block_ptr, { uv_01, uv_23, uv_45, uv_67 }
+
+ uxtah uv_0, texture_ptr, uv_01
+ uxtah uv_1, texture_ptr, uv_01, ror #16
+
+ uxtah uv_2, texture_ptr, uv_23
+ uxtah uv_3, texture_ptr, uv_23, ror #16
+
+ uxtah uv_4, texture_ptr, uv_45
+ ldrb pixel_0, [ uv_0 ]
+
+ uxtah uv_5, texture_ptr, uv_45, ror #16
+ ldrb pixel_1, [ uv_1 ]
+
+ uxtah uv_6, texture_ptr, uv_67
+ ldrb pixel_2, [ uv_2 ]
+
+ uxtah uv_7, texture_ptr, uv_67, ror #16
+ ldrb pixel_3, [ uv_3 ]
+
+ /* Double each index: CLUT entries are halfwords. */
+ ldrb pixel_4, [ uv_4 ]
+ add pixel_0, pixel_0, pixel_0
+
+ ldrb pixel_5, [ uv_5 ]
+ add pixel_1, pixel_1, pixel_1
+
+ ldrb pixel_6, [ uv_6 ]
+ add pixel_2, pixel_2, pixel_2
+
+ ldrb pixel_7, [ uv_7 ]
+ add pixel_3, pixel_3, pixel_3
+
+ ldrh pixel_0, [ clut_ptr, pixel_0 ]
+ add pixel_4, pixel_4, pixel_4
+
+ ldrh pixel_1, [ clut_ptr, pixel_1 ]
+ add pixel_5, pixel_5, pixel_5
+
+ ldrh pixel_2, [ clut_ptr, pixel_2 ]
+ add pixel_6, pixel_6, pixel_6
+
+ ldrh pixel_3, [ clut_ptr, pixel_3 ]
+ add pixel_7, pixel_7, pixel_7
+
+ ldrh pixel_4, [ clut_ptr, pixel_4 ]
+ orr pixels_a, pixel_0, pixel_1, lsl #16
+
+ ldrh pixel_5, [ clut_ptr, pixel_5 ]
+ orr pixels_c, pixel_2, pixel_3, lsl #16
+
+ ldrh pixel_6, [ clut_ptr, pixel_6 ]
+ subs num_blocks, num_blocks, #1
+
+ ldrh pixel_7, [ clut_ptr, pixel_7 ]
+ orr pixels_b, pixel_4, pixel_5, lsl #16
+
+ orr pixels_d, pixel_6, pixel_7, lsl #16
+ stm block_ptr, { pixels_a, pixels_c, pixels_b, pixels_d }
+
+ add block_ptr, block_ptr, #64
+ bne 0b
+
+ ldmia sp!, { r3 - r11, pc }
+
+1:
+ /* Dirty texture page: rebuild the 8bpp cache (clobbers r1/r2/r12 =
+    block_ptr/num_blocks/clut_ptr, hence the save/restore), then resume. */
+ stmdb sp!, { r1 - r2, r12 }
+
+ bl update_texture_8bpp_cache
+
+ ldmia sp!, { r1 - r2, r12 }
+ bal 0b
+
+
+/* Register aliases for texture_blocks_16bpp.  The u/v fields are unpacked
+   into separate registers per pixel pair, then uv_N is recomputed as the
+   texture offset; pixel_N again reuses the uv_N registers once the
+   addresses have been consumed. */
+#undef uv_0
+#undef uv_1
+#undef uv_2
+#undef uv_3
+#undef uv_4
+#undef uv_5
+#undef uv_6
+#undef uv_7
+
+#undef pixel_0
+#undef pixel_1
+#undef pixel_2
+#undef pixel_3
+#undef pixel_4
+#undef pixel_5
+#undef pixel_6
+#undef pixel_7
+
+#undef texture_ptr
+
+#undef pixels_a
+#undef pixels_b
+#undef pixels_c
+#undef pixels_d
+
+#define psx_gpu r0
+#define block_ptr r1
+#define num_blocks r2
+
+#define uv_0 r3
+#define uv_1 r4
+#define u_0 r3
+#define u_1 r4
+#define v_0 r5
+#define v_1 r6
+
+#define uv_2 r5
+#define uv_3 r6
+#define u_2 r5
+#define u_3 r6
+#define v_2 r7
+#define v_3 r8
+
+#define uv_4 r7
+#define uv_5 r8
+#define u_4 r7
+#define u_5 r8
+#define v_4 r9
+#define v_5 r10
+
+#define uv_6 r9
+#define uv_7 r10
+#define u_6 r9
+#define u_7 r10
+#define v_6 r11
+/* NOTE(review): v_7 aliases r0 (psx_gpu) — psx_gpu is not read again after
+   num_blocks/texture_ptr are loaded, so the reuse is safe within
+   texture_blocks_16bpp. */
+#define v_7 r0
+
+#define pixel_0 r3
+#define pixel_1 r4
+#define pixel_2 r5
+#define pixel_3 r6
+#define pixel_4 r7
+#define pixel_5 r8
+#define pixel_6 r9
+#define pixel_7 r10
+
+#define pixels_a r3
+#define pixels_b r5
+#define pixels_c r7
+#define pixels_d r9
+
+#define texture_ptr r12
+
+
+.align 3
+
+/*
+ * Texel fetch for 16bpp (direct color) blocks.
+ *
+ * Each 16-bit block entry packs u in the low byte and v in the high byte.
+ * The texture page is 1024 halfwords wide, so the byte offset is
+ * (u + v * 1024) * 2, computed as:  v_field = uv & 0xFF00 (= v << 8);
+ * offset = (u + (v_field << 2));   then doubled for halfword addressing.
+ * Eight texels are fetched with scalar ldrh and packed back into the
+ * block with stm.  No dirty-cache path: 16bpp reads VRAM directly.
+ */
+function(texture_blocks_16bpp)
+ stmdb sp!, { r3 - r11, r14 }
+ add block_ptr, psx_gpu, #psx_gpu_blocks_offset
+
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+ ldr texture_ptr, [ psx_gpu, #psx_gpu_texture_page_ptr_offset ]
+
+0:
+ ldrh uv_0, [ block_ptr ]
+ subs num_blocks, num_blocks, #1
+
+ ldrh uv_1, [ block_ptr, #2 ]
+
+ and v_0, uv_0, #0xFF00
+ and v_1, uv_1, #0xFF00
+
+ and u_0, uv_0, #0xFF
+ and u_1, uv_1, #0xFF
+
+ add uv_0, u_0, v_0, lsl #2
+ ldrh uv_2, [ block_ptr, #4 ]
+
+ add uv_1, u_1, v_1, lsl #2
+ ldrh uv_3, [ block_ptr, #6 ]
+
+ add uv_0, uv_0, uv_0
+ add uv_1, uv_1, uv_1
+
+ and v_2, uv_2, #0xFF00
+ and v_3, uv_3, #0xFF00
+
+ and u_2, uv_2, #0xFF
+ and u_3, uv_3, #0xFF
+
+ add uv_2, u_2, v_2, lsl #2
+ ldrh uv_4, [ block_ptr, #8 ]
+
+ add uv_3, u_3, v_3, lsl #2
+ ldrh uv_5, [ block_ptr, #10 ]
+
+ add uv_2, uv_2, uv_2
+ add uv_3, uv_3, uv_3
+
+ and v_4, uv_4, #0xFF00
+ and v_5, uv_5, #0xFF00
+
+ and u_4, uv_4, #0xFF
+ and u_5, uv_5, #0xFF
+
+ add uv_4, u_4, v_4, lsl #2
+ ldrh uv_6, [ block_ptr, #12 ]
+
+ add uv_5, u_5, v_5, lsl #2
+ ldrh uv_7, [ block_ptr, #14 ]
+
+ add uv_4, uv_4, uv_4
+ ldrh pixel_0, [ texture_ptr, uv_0 ]
+
+ add uv_5, uv_5, uv_5
+ ldrh pixel_1, [ texture_ptr, uv_1 ]
+
+ and v_6, uv_6, #0xFF00
+ ldrh pixel_2, [ texture_ptr, uv_2 ]
+
+ and v_7, uv_7, #0xFF00
+ ldrh pixel_3, [ texture_ptr, uv_3 ]
+
+ and u_6, uv_6, #0xFF
+ ldrh pixel_4, [ texture_ptr, uv_4 ]
+
+ and u_7, uv_7, #0xFF
+ ldrh pixel_5, [ texture_ptr, uv_5 ]
+
+ add uv_6, u_6, v_6, lsl #2
+ add uv_7, u_7, v_7, lsl #2
+
+ add uv_6, uv_6, uv_6
+ add uv_7, uv_7, uv_7
+
+ orr pixels_a, pixel_0, pixel_1, lsl #16
+ orr pixels_b, pixel_2, pixel_3, lsl #16
+
+ ldrh pixel_6, [ texture_ptr, uv_6 ]
+ orr pixels_c, pixel_4, pixel_5, lsl #16
+
+ ldrh pixel_7, [ texture_ptr, uv_7 ]
+ orr pixels_d, pixel_6, pixel_7, lsl #16
+
+ stm block_ptr, { pixels_a, pixels_b, pixels_c, pixels_d }
+ add block_ptr, block_ptr, #64
+
+ bne 0b
+
+ ldmia sp!, { r3 - r11, pc }
+
+
+/* Register/NEON aliases and prologue macros for the modulated textured
+   shaders.  The builder below is stamped out for every combination of
+   {shaded,unshaded} x {dithered,undithered} x {direct,indirect}; these
+   per-variant prologue macros supply the variant-specific setup. */
+#undef num_blocks
+
+#undef test_mask
+#undef texels
+#undef pixels_b
+#undef pixels
+#undef d64_1
+#undef d64_4
+#undef d64_128
+#undef draw_mask
+#undef msb_mask
+#undef msb_mask_low
+#undef msb_mask_high
+#undef fb_pixels
+
+#undef c_32
+#undef fb_ptr
+#undef mask_msb_ptr
+
+#define psx_gpu r0
+#define num_blocks r1
+#define color_ptr r2
+#define mask_msb_ptr r2
+
+#define block_ptr_load_a r0
+#define block_ptr_store r3
+#define block_ptr_load_b r12
+#define c_32 r2
+
+#define c_48 r4
+#define fb_ptr r14
+#define draw_mask_bits_scalar r5
+
+#define d128_0x07 q0
+#define d128_0x1F q1
+#define d128_0x8000 q2
+#define test_mask q3
+#define texels q4
+#define colors_rg q5
+#define colors_b_dm_bits q6
+#define texels_rg q7
+#define pixels_r q8
+#define pixels_g q9
+#define pixels_b q10
+#define pixels q11
+#define zero_mask q4
+#define draw_mask q12
+#define msb_mask q13
+
+#define fb_pixels q8
+
+#define pixels_gb_low q9
+
+#define colors_r d10
+#define colors_g d11
+#define colors_b d12
+#define draw_mask_bits d13
+#define texels_r d14
+#define texels_g d15
+#define pixels_r_low d16
+#define pixels_g_low d18
+#define pixels_b_low d19
+#define msb_mask_low d26
+#define msb_mask_high d27
+
+#define d64_1 d28
+#define d64_4 d29
+#define d64_128 d30
+#define texels_b d31
+
+/* indirect: results are written back into the block list, 48-byte stride
+   after the 16-byte pixel slot. */
+#define shade_blocks_textured_modulated_prologue_indirect() \
+ mov c_48, #48; \
+ add block_ptr_store, psx_gpu, #psx_gpu_blocks_offset \
+
+/* direct: results go straight to the framebuffer; preload the mask-set
+   msb bit to OR into every written pixel. */
+#define shade_blocks_textured_modulated_prologue_direct() \
+ add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset; \
+ vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ] \
+
+/* shaded: per-pixel colors come from the block data, nothing to preload. */
+#define shade_blocks_textured_modulated_prologue_shaded() \
+
+/* unshaded: splat the flat triangle color into the three channel
+   modulation registers once, up front. */
+#define shade_blocks_textured_modulated_prologue_unshaded() \
+ add color_ptr, psx_gpu, #psx_gpu_triangle_color_offset; \
+ vld1.u32 { colors_r[] }, [ color_ptr, :32 ]; \
+ vdup.u8 colors_g, colors_r[1]; \
+ vdup.u8 colors_b, colors_r[2]; \
+ vdup.u8 colors_r, colors_r[0] \
+
+
+/* dithered: the block holds precomputed per-pixel dither offsets which are
+   loaded into the pixels_* accumulators so modulation can vmlal on top;
+   undithered: accumulators start empty and modulation uses vmull instead.
+   The _last_ variant also advances block_ptr_load_b to the next block. */
+#define shade_blocks_textured_modulated_load_dithered(target) \
+ vld1.u32 { target }, [ block_ptr_load_b, :128 ] \
+
+#define shade_blocks_textured_modulated_load_last_dithered(target) \
+ vld1.u32 { target }, [ block_ptr_load_b, :128 ], c_32 \
+
+#define shade_blocks_textured_modulated_load_undithered(target) \
+
+#define shade_blocks_textured_modulated_load_last_undithered(target) \
+ add block_ptr_load_b, block_ptr_load_b, #32 \
+
+#define shade_blocks_textured_modulate_dithered(channel) \
+ vmlal.u8 pixels_##channel, texels_##channel, colors_##channel \
+
+#define shade_blocks_textured_modulate_undithered(channel) \
+ vmull.u8 pixels_##channel, texels_##channel, colors_##channel \
+
+
+/* indirect: the draw mask is stored into the block for a later pass;
+   direct: the framebuffer destination is read and masked-in immediately
+   (offset locates the fb pointer stored within the block). */
+#define shade_blocks_textured_modulated_store_draw_mask_indirect(offset) \
+ vst1.u32 { draw_mask }, [ block_ptr_store, :128 ]! \
+
+#define shade_blocks_textured_modulated_store_draw_mask_direct(offset) \
+ ldr fb_ptr, [ block_ptr_load_b, #(offset - 64) ]; \
+ vld1.u32 { fb_pixels }, [ fb_ptr ]; \
+ vbit.u16 pixels, fb_pixels, draw_mask \
+
+#define shade_blocks_textured_modulated_store_pixels_indirect() \
+ vst1.u32 { pixels }, [ block_ptr_store, :128 ], c_48 \
+
+#define shade_blocks_textured_modulated_store_pixels_direct() \
+ vst1.u32 { pixels }, [ fb_ptr ] \
+
+
+/* shaded: per-pixel r/g colors and b/draw-mask bits are loaded from the
+   block; unshaded: colors were splatted in the prologue, only the scalar
+   draw-mask bits are fetched and the pointers are advanced. */
+#define shade_blocks_textured_modulated_load_rg_shaded() \
+ vld1.u32 { colors_r, colors_g }, [ block_ptr_load_b, :128 ], c_32 \
+
+#define shade_blocks_textured_modulated_load_rg_unshaded() \
+ add block_ptr_load_b, block_ptr_load_b, #32 \
+
+#define shade_blocks_textured_modulated_load_bdm_shaded() \
+ vld1.u32 { colors_b, draw_mask_bits }, [ block_ptr_load_a, :128 ], c_32 \
+
+#define shade_blocks_textured_modulated_load_bdm_unshaded() \
+ ldr draw_mask_bits_scalar, [ block_ptr_load_a, #8 ]; \
+ add block_ptr_load_a, block_ptr_load_a, #32 \
+
+#define shade_blocks_textured_modulated_expand_draw_mask_shaded() \
+ vdup.u16 draw_mask, draw_mask_bits[0] \
+
+#define shade_blocks_textured_modulated_expand_draw_mask_unshaded() \
+ vdup.u16 draw_mask, draw_mask_bits_scalar \
+
+
+/* direct stores must set the mask msb on written pixels; indirect leaves
+   that to the blend/untexture pass. */
+#define shade_blocks_textured_modulated_apply_msb_mask_indirect() \
+
+#define shade_blocks_textured_modulated_apply_msb_mask_direct() \
+ vorr.u16 pixels, pixels, msb_mask \
+
+/*
+ * Builder for the eight modulated textured shaders.
+ *
+ * Per 8-pixel block: split the 16bpp texels into 5-bit r/g/b channel bytes
+ * (vmovn / vshrn #5 / vshrn #7, masked with 0x1F and >>3), multiply each
+ * channel by the modulation color (vmull, or vmlal on top of preloaded
+ * dither offsets), then narrow back with saturation (vqshrun #4) and
+ * recombine into 15bpp with the 1/4/128 multiplier trick (vmlal.u8 by
+ * d64_1/d64_4/d64_128 accumulates r, g<<2 and b<<7 into halfwords on top
+ * of the preserved texel msb).  The draw mask is the per-block mask bits
+ * expanded against test_mask, OR'd with a mask of fully-zero (transparent)
+ * texels.  The loop is software-pipelined: the prologue processes block 0,
+ * the loop body both finishes block N (combine + store) and starts block
+ * N+1, and tail 1: finishes the final block.
+ */
+#define shade_blocks_textured_modulated_builder(shading, dithering, target) \
+.align 3; \
+ \
+function(shade_blocks_##shading##_textured_modulated_##dithering##_##target) \
+ stmdb sp!, { r4 - r5, lr }; \
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ \
+ vld1.u32 { test_mask }, [ psx_gpu, :128 ]; \
+ \
+ shade_blocks_textured_modulated_prologue_##target(); \
+ shade_blocks_textured_modulated_prologue_##shading(); \
+ \
+ add block_ptr_load_a, psx_gpu, #psx_gpu_blocks_offset; \
+ mov c_32, #32; \
+ \
+ add block_ptr_load_b, block_ptr_load_a, #16; \
+ vmov.u8 d64_1, #1; \
+ vmov.u8 d64_4, #4; \
+ vmov.u8 d64_128, #128; \
+ \
+ vld1.u32 { texels }, [ block_ptr_load_a, :128 ], c_32; \
+ vmov.u8 d128_0x07, #0x07; \
+ \
+ shade_blocks_textured_modulated_load_rg_##shading(); \
+ vmov.u8 d128_0x1F, #0x1F; \
+ \
+ shade_blocks_textured_modulated_load_bdm_##shading(); \
+ vmov.u16 d128_0x8000, #0x8000; \
+ \
+ vmovn.u16 texels_r, texels; \
+ vshrn.u16 texels_g, texels, #5; \
+ \
+ vshrn.u16 texels_b, texels, #7; \
+ shade_blocks_textured_modulated_expand_draw_mask_##shading(); \
+ \
+ shade_blocks_textured_modulated_load_##dithering(pixels_r); \
+ vtst.u16 draw_mask, draw_mask, test_mask; \
+ \
+ shade_blocks_textured_modulated_load_##dithering(pixels_g); \
+ vand.u8 texels_rg, texels_rg, d128_0x1F; \
+ \
+ shade_blocks_textured_modulated_load_last_##dithering(pixels_b); \
+ vshr.u8 texels_b, texels_b, #3; \
+ \
+ shade_blocks_textured_modulate_##dithering(r); \
+ shade_blocks_textured_modulate_##dithering(g); \
+ shade_blocks_textured_modulate_##dithering(b); \
+ \
+ vand.u16 pixels, texels, d128_0x8000; \
+ vceq.u16 zero_mask, texels, #0; \
+ \
+ vqshrun.s16 pixels_r_low, pixels_r, #4; \
+ vqshrun.s16 pixels_g_low, pixels_g, #4; \
+ vqshrun.s16 pixels_b_low, pixels_b, #4; \
+ \
+ shade_blocks_textured_modulated_apply_msb_mask_##target(); \
+ vorr.u16 draw_mask, draw_mask, zero_mask; \
+ vshr.u8 pixels_r_low, pixels_r_low, #3; \
+ vbic.u8 pixels_gb_low, pixels_gb_low, d128_0x07; \
+ \
+ subs num_blocks, num_blocks, #1; \
+ beq 1f; \
+ \
+ .align 3; \
+ \
+ 0: \
+ vld1.u32 { texels }, [ block_ptr_load_a, :128 ], c_32; \
+ shade_blocks_textured_modulated_load_rg_##shading(); \
+ vshrn.u16 texels_g, texels, #5; \
+ \
+ shade_blocks_textured_modulated_load_bdm_##shading(); \
+ vshrn.u16 texels_b, texels, #7; \
+ \
+ vmovn.u16 texels_r, texels; \
+ vmlal.u8 pixels, pixels_r_low, d64_1; \
+ \
+ vmlal.u8 pixels, pixels_g_low, d64_4; \
+ vmlal.u8 pixels, pixels_b_low, d64_128; \
+ shade_blocks_textured_modulated_store_draw_mask_##target(-4); \
+ \
+ shade_blocks_textured_modulated_load_##dithering(pixels_r); \
+ shade_blocks_textured_modulated_expand_draw_mask_##shading(); \
+ \
+ shade_blocks_textured_modulated_load_##dithering(pixels_g); \
+ vand.u8 texels_rg, texels_rg, d128_0x1F; \
+ \
+ shade_blocks_textured_modulated_load_last_##dithering(pixels_b); \
+ vtst.u16 draw_mask, draw_mask, test_mask; \
+ \
+ shade_blocks_textured_modulated_store_pixels_##target(); \
+ vshr.u8 texels_b, texels_b, #3; \
+ \
+ shade_blocks_textured_modulate_##dithering(r); \
+ shade_blocks_textured_modulate_##dithering(g); \
+ shade_blocks_textured_modulate_##dithering(b); \
+ \
+ vand.u16 pixels, texels, d128_0x8000; \
+ vceq.u16 zero_mask, texels, #0; \
+ \
+ subs num_blocks, num_blocks, #1; \
+ \
+ vqshrun.s16 pixels_r_low, pixels_r, #4; \
+ vqshrun.s16 pixels_g_low, pixels_g, #4; \
+ vqshrun.s16 pixels_b_low, pixels_b, #4; \
+ \
+ shade_blocks_textured_modulated_apply_msb_mask_##target(); \
+ vorr.u16 draw_mask, draw_mask, zero_mask; \
+ vshr.u8 pixels_r_low, pixels_r_low, #3; \
+ vbic.u8 pixels_gb_low, pixels_gb_low, d128_0x07; \
+ \
+ bne 0b; \
+ \
+ 1: \
+ vmlal.u8 pixels, pixels_r_low, d64_1; \
+ vmlal.u8 pixels, pixels_g_low, d64_4; \
+ vmlal.u8 pixels, pixels_b_low, d64_128; \
+ \
+ shade_blocks_textured_modulated_store_draw_mask_##target(28); \
+ shade_blocks_textured_modulated_store_pixels_##target(); \
+ \
+ ldmia sp!, { r4 - r5, pc } \
+
+
+/* Instantiate all eight {shading} x {dithering} x {target} variants. */
+shade_blocks_textured_modulated_builder(shaded, dithered, direct);
+shade_blocks_textured_modulated_builder(shaded, undithered, direct);
+shade_blocks_textured_modulated_builder(unshaded, dithered, direct);
+shade_blocks_textured_modulated_builder(unshaded, undithered, direct);
+
+shade_blocks_textured_modulated_builder(shaded, dithered, indirect);
+shade_blocks_textured_modulated_builder(shaded, undithered, indirect);
+shade_blocks_textured_modulated_builder(unshaded, dithered, indirect);
+shade_blocks_textured_modulated_builder(unshaded, undithered, indirect);
+
+
+/* Register/NEON aliases for the unmodulated (raw texture / flat color)
+   shaders below. */
+#undef c_64
+#undef fb_ptr
+#undef color_ptr
+
+#undef color_r
+#undef color_g
+#undef color_b
+
+#undef test_mask
+#undef pixels
+#undef draw_mask
+#undef zero_mask
+#undef fb_pixels
+#undef msb_mask
+#undef msb_mask_low
+#undef msb_mask_high
+
+#define psx_gpu r0
+#define num_blocks r1
+#define mask_msb_ptr r2
+#define color_ptr r3
+
+#define block_ptr_load r0
+#define draw_mask_store_ptr r3
+#define draw_mask_bits_ptr r12
+#define draw_mask_ptr r12
+#define pixel_store_ptr r14
+
+#define fb_ptr_cmp r4
+
+#define fb_ptr r3
+#define fb_ptr_next r14
+
+#define c_64 r2
+
+#define test_mask q0
+#define pixels q1
+#define draw_mask q2
+#define zero_mask q3
+#define draw_mask_combined q4
+#define fb_pixels q5
+#define fb_pixels_next q6
+#define msb_mask q7
+
+#define draw_mask_low d4
+#define draw_mask_high d5
+#define msb_mask_low d14
+#define msb_mask_high d15
+
+.align 3
+/*
+ * Unmodulated textured shading, indirect target: texels pass through
+ * unchanged.  Per block, the 16-bit draw-mask bits (at block offset 40)
+ * are broadcast and expanded against test_mask, OR'd with a mask of
+ * zero (transparent) texels, and stored at the block base; the texels
+ * themselves are copied into the block's pixel slot (offset 16).
+ * Software-pipelined: the final block's combined mask is stored at 1:.
+ */
+function(shade_blocks_textured_unmodulated_indirect)
+ str r14, [ sp, #-4 ]
+ add draw_mask_bits_ptr, psx_gpu, #(psx_gpu_blocks_offset + 40)
+
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+ add pixel_store_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16)
+
+ vld1.u32 { test_mask }, [ psx_gpu, :128 ]
+ add draw_mask_store_ptr, psx_gpu, #psx_gpu_blocks_offset
+
+ mov c_64, #64
+ add block_ptr_load, psx_gpu, #psx_gpu_blocks_offset
+
+ vld1.u32 { pixels }, [ block_ptr_load, :128 ], c_64
+ vld1.u16 { draw_mask_low[], draw_mask_high[] }, \
+ [ draw_mask_bits_ptr, :16 ], c_64
+ vceq.u16 zero_mask, pixels, #0
+
+ vtst.u16 draw_mask, draw_mask, test_mask
+ vst1.u32 { pixels }, [ pixel_store_ptr, :128 ], c_64
+
+ subs num_blocks, num_blocks, #1
+ beq 1f
+
+ 0:
+ vld1.u32 { pixels }, [ block_ptr_load, :128 ], c_64
+ vorr.u16 draw_mask_combined, draw_mask, zero_mask
+
+ vld1.u16 { draw_mask_low[], draw_mask_high[] }, \
+ [ draw_mask_bits_ptr, :16 ], c_64
+ vceq.u16 zero_mask, pixels, #0
+
+ vtst.u16 draw_mask, draw_mask, test_mask
+ vst1.u32 { pixels }, [ pixel_store_ptr, :128 ], c_64
+
+ vst1.u32 { draw_mask_combined }, [ draw_mask_store_ptr, :128 ], c_64
+ subs num_blocks, num_blocks, #1
+
+ bne 0b
+
+ 1:
+ vorr.u16 draw_mask_combined, draw_mask, zero_mask
+ vst1.u32 { draw_mask_combined }, [ draw_mask_store_ptr, :128 ], c_64
+
+ ldr pc, [ sp, #-4 ]
+
+
+.align 3
+
+/*
+ * Unmodulated textured shading, direct-to-framebuffer target.  Same mask
+ * computation as the indirect variant, but each block's texels are merged
+ * into the framebuffer row (fb pointer stored at block offset 44) with
+ * vbif under the combined draw mask, after OR-ing in the mask-set msb.
+ * The loop pipelines the next block's fb load against the current store;
+ * when the next fb pointer is within 28 bytes of the current one
+ * (fb_ptr_cmp check), the 4f path stores first and loads after, so the
+ * next load observes this block's write — NOTE(review): presumably for
+ * overlapping/adjacent spans; confirm against the C reference renderer.
+ */
+function(shade_blocks_textured_unmodulated_direct)
+ stmdb sp!, { r4, r14 }
+ add draw_mask_bits_ptr, psx_gpu, #(psx_gpu_blocks_offset + 40)
+
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+ add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset
+
+ vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]
+ mov c_64, #64
+
+ vld1.u32 { test_mask }, [ psx_gpu, :128 ]
+ add block_ptr_load, psx_gpu, #psx_gpu_blocks_offset
+
+ vld1.u16 { draw_mask_low[], draw_mask_high[] }, \
+ [ draw_mask_bits_ptr, :16 ], c_64
+ ldr fb_ptr_next, [ block_ptr_load, #44 ]
+
+ vld1.u32 { pixels }, [ block_ptr_load, :128 ], c_64
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]
+ vceq.u16 zero_mask, pixels, #0
+ vtst.u16 draw_mask, draw_mask, test_mask
+
+ subs num_blocks, num_blocks, #1
+ beq 1f
+
+ 0:
+ mov fb_ptr, fb_ptr_next
+ ldr fb_ptr_next, [ block_ptr_load, #44 ]
+
+ vorr.u16 pixels, pixels, msb_mask
+
+ vorr.u16 draw_mask_combined, draw_mask, zero_mask
+ vmov fb_pixels, fb_pixels_next
+
+ vld1.u16 { draw_mask_low[], draw_mask_high[] }, \
+ [ draw_mask_bits_ptr, :16 ], c_64
+ vbif.u16 fb_pixels, pixels, draw_mask_combined
+
+ vld1.u32 { pixels }, [ block_ptr_load, :128 ], c_64
+
+ /* Overlap check: take the 4f ordering when the blocks' fb rows touch. */
+ sub fb_ptr_cmp, fb_ptr_next, fb_ptr
+ add fb_ptr_cmp, fb_ptr_cmp, #14
+ cmp fb_ptr_cmp, #28
+ bls 4f
+
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]
+ vceq.u16 zero_mask, pixels, #0
+
+ vst1.u16 { fb_pixels }, [ fb_ptr ]
+ vtst.u16 draw_mask, draw_mask, test_mask
+
+ 3:
+ subs num_blocks, num_blocks, #1
+ bne 0b
+
+ 1:
+ vorr.u16 draw_mask_combined, draw_mask, zero_mask
+ vbif.u16 fb_pixels_next, pixels, draw_mask_combined
+
+ vst1.u16 { fb_pixels_next }, [ fb_ptr_next ]
+
+ ldmia sp!, { r4, pc }
+
+ 4:
+ /* Overlapping fb rows: store current pixels before loading the next
+    block's fb contents. */
+ vst1.u16 { fb_pixels }, [ fb_ptr ]
+ vceq.u16 zero_mask, pixels, #0
+
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]
+ vtst.u16 draw_mask, draw_mask, test_mask
+
+ bal 3b
+
+
+/* Flat-color untextured, indirect target: blocks already hold everything
+   the later pass needs, so this is a no-op. */
+function(shade_blocks_unshaded_untextured_indirect)
+ bx lr
+
+.align 3
+
+/*
+ * Flat-color untextured shading, direct target: one constant 8-pixel color
+ * vector (read once from the first block's pixel slot, msb mask OR'd in)
+ * is merged into every block's framebuffer row under that block's draw
+ * mask.  Same pipelined fb load/store and overlapping-row (4f) ordering
+ * as shade_blocks_textured_unmodulated_direct.
+ */
+function(shade_blocks_unshaded_untextured_direct)
+ stmdb sp!, { r4, r14 }
+ add draw_mask_ptr, psx_gpu, #psx_gpu_blocks_offset
+
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+ add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset
+
+ vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]
+ add color_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16)
+
+ add block_ptr_load, psx_gpu, #(psx_gpu_blocks_offset + 44)
+ vld1.u16 { pixels }, [ color_ptr, :128 ]
+
+ mov c_64, #64
+ vld1.u16 { draw_mask }, [ draw_mask_ptr, :128 ], c_64
+
+ vorr.u16 pixels, pixels, msb_mask
+ subs num_blocks, num_blocks, #1
+
+ ldr fb_ptr_next, [ block_ptr_load ], #64
+
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]
+ beq 1f
+
+ 0:
+ vmov fb_pixels, fb_pixels_next
+ mov fb_ptr, fb_ptr_next
+ ldr fb_ptr_next, [ block_ptr_load ], #64
+
+ vbif.u16 fb_pixels, pixels, draw_mask
+ vld1.u16 { draw_mask }, [ draw_mask_ptr, :128 ], c_64
+
+ /* Overlap check, as in the textured direct variant. */
+ sub fb_ptr_cmp, fb_ptr_next, fb_ptr
+ add fb_ptr_cmp, fb_ptr_cmp, #14
+ cmp fb_ptr_cmp, #28
+ bls 4f
+
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]
+ vst1.u16 { fb_pixels }, [ fb_ptr ]
+
+ 3:
+ subs num_blocks, num_blocks, #1
+ bne 0b
+
+ 1:
+ vbif.u16 fb_pixels_next, pixels, draw_mask
+ vst1.u16 { fb_pixels_next }, [ fb_ptr_next ]
+
+ ldmia sp!, { r4, pc }
+
+ 4:
+ vst1.u16 { fb_pixels }, [ fb_ptr ]
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]
+ bal 3b
+
+
+/* Register/NEON aliases for the blend_blocks_* routines.  The q-register
+   aliases overlap deliberately: the average blenders use the first group
+   (pixels_no_msb/blend_mask/...) while the add blenders reuse the same
+   registers under channel-oriented names (pixels_rb/pixels_g/...). */
+#undef draw_mask_ptr
+#undef c_64
+#undef fb_ptr
+#undef fb_ptr_next
+#undef fb_ptr_cmp
+
+#define psx_gpu r0
+#define num_blocks r1
+#define msb_mask_ptr r2
+#define pixel_ptr r3
+#define draw_mask_ptr r0
+#define c_64 r2
+#define fb_ptr r12
+#define fb_ptr_next r14
+#define fb_ptr_cmp r4
+
+#undef msb_mask
+#undef draw_mask
+#undef pixels
+#undef fb_pixels
+#undef d128_0x8000
+#undef msb_mask_low
+#undef msb_mask_high
+#undef draw_mask_next
+#undef pixels_g
+#undef blend_pixels
+#undef fb_pixels_next
+
+#define msb_mask q0
+#define draw_mask q1
+#define pixels q2
+#define fb_pixels q3
+#define blend_pixels q4
+#define pixels_no_msb q5
+#define blend_mask q6
+#define fb_pixels_no_msb q7
+#define d128_0x8000 q8
+#define d128_0x0421 q9
+#define fb_pixels_next q10
+#define blend_pixels_next q11
+#define pixels_next q12
+#define draw_mask_next q13
+#define write_mask q14
+
+#define pixels_rb q5
+#define pixels_mg q7
+#define pixels_g q7
+#define d128_0x7C1F q8
+#define d128_0x03E0 q9
+#define fb_pixels_rb q10
+#define fb_pixels_g q11
+#define fb_pixels_masked q11
+#define d128_0x83E0 q15
+#define pixels_fourth q7
+#define d128_0x1C07 q12
+#define d128_0x00E0 q13
+#define d128_0x80E0 q13
+
+#define msb_mask_low d0
+#define msb_mask_high d1
+
+/* textured: only texels with the semi-transparency (sign/stp) bit set are
+   blended — blend_mask selects them, vbif keeps opaque source pixels, and
+   the stp bit is forced on blended output; untextured: every pixel blends,
+   so these hooks are empty. */
+#define blend_blocks_average_set_blend_mask_textured(source) \
+ vclt.s16 blend_mask, source, #0 \
+
+#define blend_blocks_average_set_stp_bit_textured() \
+ vorr.u16 blend_pixels, #0x8000 \
+
+#define blend_blocks_average_combine_textured(source) \
+ vbif.u16 blend_pixels, source, blend_mask \
+
+#define blend_blocks_average_set_blend_mask_untextured(source) \
+
+#define blend_blocks_average_set_stp_bit_untextured() \
+
+#define blend_blocks_average_combine_untextured(source) \
+
+/* mask_evaluate on: framebuffer pixels whose msb (mask bit) is set are
+   write-protected — fold that into the draw mask; off: plain copy. */
+#define blend_blocks_average_mask_set_on() \
+ vclt.s16 write_mask, fb_pixels_next, #0 \
+
+#define blend_blocks_average_mask_copy_on() \
+ vorr.u16 draw_mask, draw_mask_next, write_mask \
+
+#define blend_blocks_average_mask_copy_b_on() \
+ vorr.u16 draw_mask_next, draw_mask_next, write_mask \
+
+#define blend_blocks_average_mask_set_off() \
+
+#define blend_blocks_average_mask_copy_off() \
+ vmov draw_mask, draw_mask_next \
+
+#define blend_blocks_average_mask_copy_b_off() \
+
+/*
+ * Builder for the 50% average blend (B/2 + F/2) over whole 15bpp pixels.
+ *
+ * Per-field averaging without cross-field carries: d128_0x0421 is the
+ * lsb of each 5-bit color field; (pixels ^ fb) & 0x0421 isolates the lsb
+ * borrow of every field, it is subtracted from the msb-stripped source,
+ * and vhadd with the msb-stripped fb then halves each field correctly.
+ * Pipelined like the shade routines, with the same near-pointer (2f)
+ * reordering of the fb store/load pair.
+ */
+#define blend_blocks_average_builder(texturing, mask_evaluate) \
+.align 3; \
+ \
+function(blend_blocks_##texturing##_average_##mask_evaluate) \
+ stmdb sp!, { r4, r14 }; \
+ add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset; \
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ \
+ add pixel_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16); \
+ vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]; \
+ \
+ add draw_mask_ptr, psx_gpu, #psx_gpu_blocks_offset; \
+ mov c_64, #64; \
+ \
+ vmov.u16 d128_0x8000, #0x8000; \
+ vld1.u32 { draw_mask_next }, [ draw_mask_ptr, :128 ], c_64; \
+ ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
+ \
+ vmov.u16 d128_0x0421, #0x0400; \
+ vld1.u32 { pixels_next }, [ pixel_ptr, :128 ], c_64; \
+ \
+ vorr.u16 d128_0x0421, #0x0021; \
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]; \
+ \
+ veor.u16 blend_pixels_next, pixels_next, fb_pixels_next; \
+ vbic.u16 pixels_no_msb, pixels_next, d128_0x8000; \
+ vand.u16 blend_pixels_next, blend_pixels_next, d128_0x0421; \
+ vsub.u16 blend_pixels_next, pixels_no_msb, blend_pixels_next; \
+ blend_blocks_average_mask_set_##mask_evaluate(); \
+ vbic.u16 fb_pixels_no_msb, fb_pixels_next, d128_0x8000; \
+ \
+ subs num_blocks, num_blocks, #1; \
+ beq 1f; \
+ \
+ 0: \
+ mov fb_ptr, fb_ptr_next; \
+ ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
+ \
+ vmov pixels, pixels_next; \
+ vld1.u32 { pixels_next }, [ pixel_ptr, :128 ], c_64; \
+ \
+ vhadd.u16 blend_pixels, fb_pixels_no_msb, blend_pixels_next; \
+ \
+ blend_blocks_average_mask_copy_##mask_evaluate(); \
+ vld1.u32 { draw_mask_next }, [ draw_mask_ptr, :128 ], c_64; \
+ \
+ blend_blocks_average_set_blend_mask_##texturing(pixels); \
+ blend_blocks_average_set_stp_bit_##texturing(); \
+ vmov fb_pixels, fb_pixels_next; \
+ blend_blocks_average_combine_##texturing(pixels); \
+ \
+ sub fb_ptr_cmp, fb_ptr_next, fb_ptr; \
+ add fb_ptr_cmp, fb_ptr_cmp, #14; \
+ cmp fb_ptr_cmp, #28; \
+ bls 2f; \
+ \
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]; \
+ veor.u16 blend_pixels_next, pixels_next, fb_pixels_next; \
+ \
+ vorr.u16 blend_pixels, blend_pixels, msb_mask; \
+ vbic.u16 pixels_no_msb, pixels_next, d128_0x8000; \
+ \
+ vand.u16 blend_pixels_next, blend_pixels_next, d128_0x0421; \
+ vbif.u16 fb_pixels, blend_pixels, draw_mask; \
+ \
+ vbic.u16 fb_pixels_no_msb, fb_pixels_next, d128_0x8000; \
+ vsub.u16 blend_pixels_next, pixels_no_msb, blend_pixels_next; \
+ blend_blocks_average_mask_set_##mask_evaluate(); \
+ vst1.u16 { fb_pixels }, [ fb_ptr ]; \
+ \
+ 3: \
+ subs num_blocks, num_blocks, #1; \
+ bne 0b; \
+ \
+ 1: \
+ blend_blocks_average_mask_copy_b_##mask_evaluate(); \
+ vhadd.u16 blend_pixels, fb_pixels_no_msb, blend_pixels_next; \
+ \
+ blend_blocks_average_set_blend_mask_##texturing(pixels_next); \
+ blend_blocks_average_set_stp_bit_##texturing(); \
+ blend_blocks_average_combine_##texturing(pixels_next); \
+ \
+ vorr.u16 blend_pixels, blend_pixels, msb_mask; \
+ vbif.u16 fb_pixels_next, blend_pixels, draw_mask_next; \
+ vst1.u16 { fb_pixels_next }, [ fb_ptr_next ]; \
+ \
+ ldmia sp!, { r4, pc }; \
+ \
+ 2: \
+ vorr.u16 blend_pixels, blend_pixels, msb_mask; \
+ vbif.u16 fb_pixels, blend_pixels, draw_mask; \
+ vst1.u16 { fb_pixels }, [ fb_ptr ]; \
+ \
+ vld1.u16 { fb_pixels_next }, [ fb_ptr_next ]; \
+ veor.u16 blend_pixels_next, pixels_next, d128_0x8000; \
+ vbic.u16 pixels_no_msb, pixels_next, d128_0x8000; \
+ vand.u16 blend_pixels_next, blend_pixels_next, d128_0x0421; \
+ vsub.u16 blend_pixels_next, pixels_no_msb, blend_pixels_next; \
+ vbic.u16 fb_pixels_no_msb, fb_pixels_next, d128_0x8000; \
+ \
+ bal 3b \
+
+blend_blocks_average_builder(textured, off)
+blend_blocks_average_builder(untextured, off)
+blend_blocks_average_builder(textured, on)
+blend_blocks_average_builder(untextured, on)
+
+
+/* Mask-bit evaluation hooks for the additive blenders: on = protect fb
+   pixels whose msb is set by folding them into the draw mask; off = no-op. */
+#define blend_blocks_add_mask_set_on() \
+ vclt.s16 write_mask, fb_pixels, #0 \
+
+#define blend_blocks_add_mask_copy_on() \
+ vorr.u16 draw_mask, draw_mask, write_mask \
+
+#define blend_blocks_add_mask_set_off() \
+
+#define blend_blocks_add_mask_copy_off() \
+
+
+/*
+ * Builder for additive blend (B + F), textured variant.
+ *
+ * Channels are added per field with independent saturation: r+b are
+ * handled together under mask 0x7C1F and clamped per byte lane with
+ * vmin.u8 against 0x7C1F (each field stays within one byte), while
+ * g (+ the stp/msb bit, mask 0x83E0) is clamped with vmin.u16 against
+ * 0x83E0.  blend_mask (texel stp bit) gates blending: fb contribution is
+ * zeroed for non-stp texels (vand fb_pixels_masked), so those pixels come
+ * out as plain source.  Pipelined with the same near-fb-pointer (2f)
+ * store/load reordering as the other direct-target routines.
+ */
+#define blend_blocks_add_textured_builder(mask_evaluate) \
+.align 3; \
+ \
+function(blend_blocks_textured_add_##mask_evaluate) \
+ stmdb sp!, { r4, r14 }; \
+ add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset; \
+ ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+ \
+ add pixel_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16); \
+ vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]; \
+ \
+ add draw_mask_ptr, psx_gpu, #psx_gpu_blocks_offset; \
+ mov c_64, #64; \
+ \
+ vmov.u16 d128_0x7C1F, #0x7C00; \
+ vmov.u16 d128_0x03E0, #0x0300; \
+ vmov.u16 d128_0x83E0, #0x8000; \
+ vorr.u16 d128_0x03E0, #0x00E0; \
+ vorr.u16 d128_0x7C1F, #0x001F; \
+ vorr.u16 d128_0x83E0, d128_0x83E0, d128_0x03E0; \
+ \
+ vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64; \
+ ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
+ vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64; \
+ vclt.s16 blend_mask, pixels, #0; \
+ vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
+ blend_blocks_add_mask_set_##mask_evaluate(); \
+ vand.u16 pixels_rb, pixels, d128_0x7C1F; \
+ \
+ blend_blocks_add_mask_copy_##mask_evaluate(); \
+ vorr.u16 pixels, pixels, msb_mask; \
+ vand.u16 fb_pixels_masked, fb_pixels, blend_mask; \
+ vand.u16 pixels_mg, pixels, d128_0x83E0; \
+ vand.u16 fb_pixels_rb, fb_pixels_masked, d128_0x7C1F; \
+ vand.u16 fb_pixels_g, fb_pixels_masked, d128_0x03E0; \
+ vadd.u16 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
+ vadd.u16 fb_pixels_g, fb_pixels_g, pixels_mg; \
+ vmin.u8 fb_pixels_rb, fb_pixels_rb, d128_0x7C1F; \
+ vmin.u16 fb_pixels_g, fb_pixels_g, d128_0x83E0; \
+ \
+ subs num_blocks, num_blocks, #1; \
+ beq 1f; \
+ \
+ 0: \
+ mov fb_ptr, fb_ptr_next; \
+ \
+ ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
+ \
+ vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64; \
+ vclt.s16 blend_mask, pixels, #0; \
+ \
+ vorr.u16 pixels, pixels, msb_mask; \
+ vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
+ vand.u16 pixels_mg, pixels, d128_0x83E0; \
+ \
+ vbit.u16 blend_pixels, fb_pixels, draw_mask; \
+ vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64; \
+ \
+ sub fb_ptr_cmp, fb_ptr_next, fb_ptr; \
+ add fb_ptr_cmp, fb_ptr_cmp, #14; \
+ cmp fb_ptr_cmp, #28; \
+ bls 2f; \
+ \
+ vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
+ blend_blocks_add_mask_set_##mask_evaluate(); \
+ vand.u16 fb_pixels_masked, fb_pixels, blend_mask; \
+ blend_blocks_add_mask_copy_##mask_evaluate(); \
+ vand.u16 pixels_rb, pixels, d128_0x7C1F; \
+ vand.u16 fb_pixels_rb, fb_pixels_masked, d128_0x7C1F; \
+ vst1.u16 { blend_pixels }, [ fb_ptr ]; \
+ \
+ 3: \
+ vand.u16 fb_pixels_g, fb_pixels_masked, d128_0x03E0; \
+ vadd.u16 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
+ vadd.u16 fb_pixels_g, fb_pixels_g, pixels_mg; \
+ vmin.u8 fb_pixels_rb, fb_pixels_rb, d128_0x7C1F; \
+ vmin.u16 fb_pixels_g, fb_pixels_g, d128_0x83E0; \
+ \
+ subs num_blocks, num_blocks, #1; \
+ bne 0b; \
+ \
+ 1: \
+ vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
+ vbit.u16 blend_pixels, fb_pixels, draw_mask; \
+ vst1.u16 { blend_pixels }, [ fb_ptr_next ]; \
+ \
+ ldmia sp!, { r4, pc }; \
+ \
+ 2: \
+ vst1.u16 { blend_pixels }, [ fb_ptr ]; \
+ vand.u16 pixels_rb, pixels, d128_0x7C1F; \
+ \
+ vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
+ blend_blocks_add_mask_set_##mask_evaluate(); \
+ vand.u16 fb_pixels_masked, fb_pixels, blend_mask; \
+ blend_blocks_add_mask_copy_##mask_evaluate(); \
+ vand.u16 fb_pixels_rb, fb_pixels_masked, d128_0x7C1F; \
+ bal 3b \
+
+
// Blend a list of untextured render blocks into VRAM with full additive
// blending (dest = dest + src per RGB555 field, saturating at each field's
// maximum).  Each 64-byte block holds: draw mask (+0), pixels (+16) and the
// destination framebuffer pointer (+44).  msb_mask is OR-ed into every
// written pixel; mask_evaluate == on additionally skips framebuffer pixels
// whose mask (msb) bit is already set.
#define blend_blocks_add_untextured_builder(mask_evaluate) \
.align 3; \
 \
function(blend_blocks_untextured_add_##mask_evaluate) \
 stmdb sp!, { r4, r14 }; \
 add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset; \
 ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
 \
 add pixel_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16); \
 vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]; \
 \
 add draw_mask_ptr, psx_gpu, #psx_gpu_blocks_offset; \
 mov c_64, #64; \
 \
 /* Field masks: 0x7C1F = red|blue, 0x03E0 = green; built in two steps */\
 /* because a vmov.u16 immediate can only encode one nonzero byte. */\
 vmov.u16 d128_0x7C1F, #0x7C00; \
 vmov.u16 d128_0x03E0, #0x0300; \
 vorr.u16 d128_0x7C1F, #0x001F; \
 vorr.u16 d128_0x03E0, #0x00E0; \
 \
 /* Prime the software pipeline with the first block. */\
 vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64; \
 ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
 vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64; \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 vand.u16 pixels_rb, pixels, d128_0x7C1F; \
 \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vand.u16 pixels_g, pixels, d128_0x03E0; \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 vand.u16 fb_pixels_g, fb_pixels, d128_0x03E0; \
 vadd.u16 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vadd.u16 fb_pixels_g, fb_pixels_g, pixels_g; \
 /* Bytewise min clamps red (high byte <= 0x7C) and blue (low <= 0x1F). */\
 vmin.u8 fb_pixels_rb, fb_pixels_rb, d128_0x7C1F; \
 vmin.u16 fb_pixels_g, fb_pixels_g, d128_0x03E0; \
 \
 subs num_blocks, num_blocks, #1; \
 beq 1f; \
 \
 /* Main loop: store the previous block's result while blending the next. */\
 0: \
 mov fb_ptr, fb_ptr_next; \
 \
 ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
 \
 vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64; \
 \
 vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
 vorr.u16 blend_pixels, blend_pixels, msb_mask; \
 vand.u16 pixels_g, pixels, d128_0x03E0; \
 \
 /* Keep the original fb value where the draw mask is set. */\
 vbit.u16 blend_pixels, fb_pixels, draw_mask; \
 vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64; \
 \
 /* Store-to-load hazard check: if the next fb pointer overlaps the 16 */\
 /* bytes about to be stored, take the path at 2 that stores first. */\
 sub fb_ptr_cmp, fb_ptr_next, fb_ptr; \
 add fb_ptr_cmp, fb_ptr_cmp, #14; \
 cmp fb_ptr_cmp, #28; \
 bls 2f; \
 \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vand.u16 pixels_rb, pixels, d128_0x7C1F; \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 vst1.u16 { blend_pixels }, [ fb_ptr ]; \
 \
 3: \
 vand.u16 fb_pixels_g, fb_pixels, d128_0x03E0; \
 vadd.u16 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vadd.u16 fb_pixels_g, fb_pixels_g, pixels_g; \
 vmin.u8 fb_pixels_rb, fb_pixels_rb, d128_0x7C1F; \
 vmin.u16 fb_pixels_g, fb_pixels_g, d128_0x03E0; \
 \
 subs num_blocks, num_blocks, #1; \
 bne 0b; \
 \
 /* Epilogue: flush the final blended block. */\
 1: \
 vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
 vorr.u16 blend_pixels, blend_pixels, msb_mask; \
 vbit.u16 blend_pixels, fb_pixels, draw_mask; \
 vst1.u16 { blend_pixels }, [ fb_ptr_next ]; \
 \
 ldmia sp!, { r4, pc }; \
 \
 /* Overlap path: store the result before reloading fb pixels, rejoin 3. */\
 2: \
 vst1.u16 { blend_pixels }, [ fb_ptr ]; \
 vand.u16 pixels_rb, pixels, d128_0x7C1F; \
 \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 bal 3b \
+
+
// Instantiate the four additive blenders:
// {textured, untextured} x {mask evaluation off, on}.
blend_blocks_add_textured_builder(off)
blend_blocks_add_textured_builder(on)
blend_blocks_add_untextured_builder(off)
blend_blocks_add_untextured_builder(on)
+
// Helper macros for the subtractive blender, specialized on texturing.
// Textured: blend_mask flags lanes whose texel has the msb set (vclt < 0);
// the final vbif keeps the raw texel in lanes where blend_mask is clear.
// Untextured: every lane is blended, so these expand to nothing and the
// msb is OR-ed in directly instead.
#define blend_blocks_subtract_set_blend_mask_textured() \
 vclt.s16 blend_mask, pixels_next, #0 \

#define blend_blocks_subtract_combine_textured() \
 vbif.u16 blend_pixels, pixels, blend_mask \

#define blend_blocks_subtract_set_stb_textured() \
 vorr.u16 blend_pixels, #0x8000 \

#define blend_blocks_subtract_msb_mask_textured() \
 vorr.u16 pixels, pixels_next, msb_mask \

#define blend_blocks_subtract_set_blend_mask_untextured() \

#define blend_blocks_subtract_combine_untextured() \

#define blend_blocks_subtract_set_stb_untextured() \
 vorr.u16 blend_pixels, blend_pixels, msb_mask \

#define blend_blocks_subtract_msb_mask_untextured() \


// Mask-evaluation helpers: with "on", write_mask flags framebuffer pixels
// whose mask (msb) bit is already set and those are folded into draw_mask
// so they are not overwritten; with "off" draw_mask is just copied.
#define blend_blocks_subtract_mask_set_on() \
 vclt.s16 write_mask, fb_pixels, #0 \

#define blend_blocks_subtract_mask_copy_on() \
 vorr.u16 draw_mask, draw_mask_next, write_mask \

#define blend_blocks_subtract_mask_set_off() \

#define blend_blocks_subtract_mask_copy_off() \
 vmov draw_mask, draw_mask_next \
+
+
// Blend render blocks into VRAM with subtractive blending
// (dest = dest - src per RGB555 field, saturating at zero via vqsub).
// Block layout is the same as the additive blenders: draw mask (+0),
// pixels (+16), fb pointer (+44), 64 bytes per block.
#define blend_blocks_subtract_builder(texturing, mask_evaluate) \
.align 3; \
 \
function(blend_blocks_##texturing##_subtract_##mask_evaluate) \
 stmdb sp!, { r4, r14 }; \
 add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset; \
 ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
 \
 add pixel_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16); \
 vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]; \
 \
 add draw_mask_ptr, psx_gpu, #psx_gpu_blocks_offset; \
 mov c_64, #64; \
 \
 /* Field masks: 0x7C1F = red|blue, 0x03E0 = green. */\
 vmov.u16 d128_0x7C1F, #0x7C00; \
 vmov.u16 d128_0x03E0, #0x0300; \
 vorr.u16 d128_0x7C1F, #0x001F; \
 vorr.u16 d128_0x03E0, #0x00E0; \
 \
 /* Prime the pipeline: first block's mask, pixels and fb contents. */\
 vld1.u32 { draw_mask_next }, [ draw_mask_ptr, :128 ], c_64; \
 ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
 vld1.u32 { pixels_next }, [ pixel_ptr, :128 ], c_64; \
 blend_blocks_subtract_set_blend_mask_##texturing(); \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_subtract_mask_set_##mask_evaluate(); \
 vand.u16 pixels_rb, pixels_next, d128_0x7C1F; \
 \
 /* Saturating subtract per field: vqsub.u8 handles red and blue bytes, */\
 /* vqsub.u16 handles the green field; both clamp at zero. */\
 vand.u16 pixels_g, pixels_next, d128_0x03E0; \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 vand.u16 fb_pixels_g, fb_pixels, d128_0x03E0; \
 vqsub.u8 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vqsub.u16 fb_pixels_g, fb_pixels_g, pixels_g; \
 \
 subs num_blocks, num_blocks, #1; \
 beq 1f; \
 \
 /* Main loop: combine/store the previous block while starting the next. */\
 0: \
 blend_blocks_subtract_mask_copy_##mask_evaluate(); \
 mov fb_ptr, fb_ptr_next; \
 ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
 \
 vld1.u32 { draw_mask_next }, [ draw_mask_ptr, :128 ], c_64; \
 blend_blocks_subtract_msb_mask_##texturing(); \
 \
 vld1.u32 { pixels_next }, [ pixel_ptr, :128 ], c_64; \
 vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
 vand.u16 pixels_rb, pixels_next, d128_0x7C1F; \
 blend_blocks_subtract_set_stb_##texturing(); \
 vand.u16 pixels_g, pixels_next, d128_0x03E0; \
 blend_blocks_subtract_combine_##texturing(); \
 blend_blocks_subtract_set_blend_mask_##texturing(); \
 vbit.u16 blend_pixels, fb_pixels, draw_mask; \
 \
 /* Store-to-load hazard: if the next fb pointer overlaps the pending */\
 /* 16-byte store, branch to 2 which stores before reloading. */\
 sub fb_ptr_cmp, fb_ptr_next, fb_ptr; \
 add fb_ptr_cmp, fb_ptr_cmp, #14; \
 cmp fb_ptr_cmp, #28; \
 bls 2f; \
 \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_subtract_mask_set_##mask_evaluate(); \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 vand.u16 fb_pixels_g, fb_pixels, d128_0x03E0; \
 vqsub.u8 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vst1.u16 { blend_pixels }, [ fb_ptr ]; \
 vqsub.u16 fb_pixels_g, fb_pixels_g, pixels_g; \
 \
 3: \
 subs num_blocks, num_blocks, #1; \
 bne 0b; \
 \
 /* Epilogue: combine and store the final block. */\
 1: \
 blend_blocks_subtract_mask_copy_##mask_evaluate(); \
 \
 blend_blocks_subtract_msb_mask_##texturing(); \
 vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
 blend_blocks_subtract_set_stb_##texturing(); \
 blend_blocks_subtract_combine_##texturing(); \
 vbit.u16 blend_pixels, fb_pixels, draw_mask; \
 vst1.u16 { blend_pixels }, [ fb_ptr_next ]; \
 \
 ldmia sp!, { r4, pc }; \
 \
 /* Overlap path: store first, then reload fb pixels and rejoin at 3. */\
 2: \
 vst1.u16 { blend_pixels }, [ fb_ptr ]; \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_subtract_mask_set_##mask_evaluate(); \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 vand.u16 fb_pixels_g, fb_pixels, d128_0x03E0; \
 vqsub.u8 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vqsub.u16 fb_pixels_g, fb_pixels_g, pixels_g; \
 bal 3b \
+
+
// Instantiate the four subtractive blenders:
// {textured, untextured} x {mask evaluation off, on}.
blend_blocks_subtract_builder(textured, off)
blend_blocks_subtract_builder(textured, on)
blend_blocks_subtract_builder(untextured, off)
blend_blocks_subtract_builder(untextured, on)
+
+
// Quarter-intensity additive blend of textured blocks (dest + src / 4).
// pixels_fourth = pixels >> 2 (arithmetic, so a set texel msb smears into
// the shifted value); 0x1C07 masks the shifted red|blue fields and 0x80E0
// the msb plus the shifted green field.  blend_mask flags lanes whose
// texel msb is set; fb_pixels is AND-ed with it so only those lanes pick
// up a framebuffer contribution.
// NOTE(review): lanes with the texel msb clear get no fb term here --
// presumably such texels are handled by another pass; confirm against the
// render path that dispatches to these functions.
#define blend_blocks_add_fourth_textured_builder(mask_evaluate) \
.align 3; \
 \
function(blend_blocks_textured_add_fourth_##mask_evaluate) \
 stmdb sp!, { r4, r14 }; \
 add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset; \
 ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
 \
 add pixel_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16); \
 vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]; \
 \
 add draw_mask_ptr, psx_gpu, #psx_gpu_blocks_offset; \
 mov c_64, #64; \
 \
 /* Constant registers are named after their final 16-bit value; each is */\
 /* built from two single-byte immediates (vmov then vorr). */\
 vmov.u16 d128_0x7C1F, #0x7C00; \
 vmov.u16 d128_0x03E0, #0x0300; \
 vmov.u16 d128_0x83E0, #0x8300; \
 vmov.u16 d128_0x1C07, #0x1C00; \
 vmov.u16 d128_0x80E0, #0x8000; \
 vorr.u16 d128_0x7C1F, #0x001F; \
 vorr.u16 d128_0x03E0, #0x00E0; \
 vorr.u16 d128_0x83E0, #0x00E0; \
 vorr.u16 d128_0x1C07, #0x0007; \
 vorr.u16 d128_0x80E0, #0x00E0; \
 \
 /* Prime the pipeline with the first block. */\
 vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64; \
 ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
 vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64; \
 vclt.s16 blend_mask, pixels, #0; \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 vshr.s16 pixels_fourth, pixels, #2; \
 \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vorr.u16 pixels, pixels, msb_mask; \
 vand.u16 pixels_rb, pixels_fourth, d128_0x1C07; \
 vand.u16 fb_pixels_masked, fb_pixels, blend_mask; \
 vand.u16 pixels_mg, pixels_fourth, d128_0x80E0; \
 vand.u16 fb_pixels_rb, fb_pixels_masked, d128_0x7C1F; \
 vand.u16 fb_pixels_g, fb_pixels_masked, d128_0x03E0; \
 vadd.u16 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vadd.u16 fb_pixels_g, fb_pixels_g, pixels_mg; \
 /* Clamp red/blue bytewise and msb|green as u16. */\
 vmin.u8 fb_pixels_rb, fb_pixels_rb, d128_0x7C1F; \
 vmin.u16 fb_pixels_g, fb_pixels_g, d128_0x83E0; \
 \
 subs num_blocks, num_blocks, #1; \
 beq 1f; \
 \
 /* Main loop: store the previous block's result while blending the next. */\
 0: \
 mov fb_ptr, fb_ptr_next; \
 \
 ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
 \
 vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64; \
 vclt.s16 blend_mask, pixels, #0; \
 \
 vshr.s16 pixels_fourth, pixels, #2; \
 vorr.u16 pixels, pixels, msb_mask; \
 vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
 vand.u16 pixels_rb, pixels_fourth, d128_0x1C07; \
 \
 vbit.u16 blend_pixels, fb_pixels, draw_mask; \
 vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64; \
 \
 /* Store-to-load hazard check; overlapping fb pointers go through 2. */\
 sub fb_ptr_cmp, fb_ptr_next, fb_ptr; \
 add fb_ptr_cmp, fb_ptr_cmp, #14; \
 cmp fb_ptr_cmp, #28; \
 bls 2f; \
 \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 vand.u16 fb_pixels_masked, fb_pixels, blend_mask; \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vand.u16 pixels_mg, pixels_fourth, d128_0x80E0; \
 vand.u16 fb_pixels_rb, fb_pixels_masked, d128_0x7C1F; \
 vst1.u16 { blend_pixels }, [ fb_ptr ]; \
 \
 3: \
 vand.u16 fb_pixels_g, fb_pixels_masked, d128_0x03E0; \
 vadd.u16 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vadd.u16 fb_pixels_g, fb_pixels_g, pixels_mg; \
 vmin.u8 fb_pixels_rb, fb_pixels_rb, d128_0x7C1F; \
 vmin.u16 fb_pixels_g, fb_pixels_g, d128_0x83E0; \
 \
 subs num_blocks, num_blocks, #1; \
 bne 0b; \
 \
 /* Epilogue: flush the final block. */\
 1: \
 vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
 vbit.u16 blend_pixels, fb_pixels, draw_mask; \
 vst1.u16 { blend_pixels }, [ fb_ptr_next ]; \
 \
 ldmia sp!, { r4, pc }; \
 \
 /* Overlap path: store before reloading fb pixels, then rejoin at 3. */\
 2: \
 vst1.u16 { blend_pixels }, [ fb_ptr ]; \
 vand.u16 pixels_mg, pixels_fourth, d128_0x80E0; \
 \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 vand.u16 fb_pixels_masked, fb_pixels, blend_mask; \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vand.u16 fb_pixels_rb, fb_pixels_masked, d128_0x7C1F; \
 bal 3b \
+
+
// Quarter-intensity additive blend of untextured blocks (dest + src / 4).
// Like the textured variant but with no per-texel blend mask: every lane
// is blended and msb_mask is OR-ed into the result directly.  The shifted
// source uses 0x1C07 (red|blue >> 2) and 0x00E0 (green >> 2) field masks.
#define blend_blocks_add_fourth_untextured_builder(mask_evaluate) \
.align 3; \
 \
function(blend_blocks_untextured_add_fourth_##mask_evaluate) \
 stmdb sp!, { r4, r14 }; \
 add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset; \
 ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
 \
 add pixel_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16); \
 vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]; \
 \
 add draw_mask_ptr, psx_gpu, #psx_gpu_blocks_offset; \
 mov c_64, #64; \
 \
 /* Constant registers named after their final 16-bit value. */\
 vmov.u16 d128_0x7C1F, #0x7C00; \
 vmov.u16 d128_0x03E0, #0x0300; \
 vmov.u16 d128_0x83E0, #0x8300; \
 vmov.u16 d128_0x1C07, #0x1C00; \
 vmov.u16 d128_0x00E0, #0x00E0; \
 vorr.u16 d128_0x7C1F, #0x001F; \
 vorr.u16 d128_0x03E0, #0x00E0; \
 vorr.u16 d128_0x83E0, #0x00E0; \
 vorr.u16 d128_0x1C07, #0x0007; \
 \
 /* Prime the pipeline with the first block. */\
 vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64; \
 ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
 vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64; \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 vshr.s16 pixels_fourth, pixels, #2; \
 vand.u16 pixels_rb, pixels_fourth, d128_0x1C07; \
 \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vand.u16 pixels_g, pixels_fourth, d128_0x00E0; \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 vand.u16 fb_pixels_g, fb_pixels, d128_0x03E0; \
 vadd.u16 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vadd.u16 fb_pixels_g, fb_pixels_g, pixels_g; \
 /* Clamp red/blue bytewise and green as u16. */\
 vmin.u8 fb_pixels_rb, fb_pixels_rb, d128_0x7C1F; \
 vmin.u16 fb_pixels_g, fb_pixels_g, d128_0x03E0; \
 \
 subs num_blocks, num_blocks, #1; \
 beq 1f; \
 \
 /* Main loop: store the previous block's result while blending the next. */\
 0: \
 mov fb_ptr, fb_ptr_next; \
 \
 ldr fb_ptr_next, [ pixel_ptr, #28 ]; \
 \
 vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64; \
 \
 vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
 vshr.s16 pixels_fourth, pixels, #2; \
 vorr.u16 blend_pixels, blend_pixels, msb_mask; \
 vand.u16 pixels_rb, pixels_fourth, d128_0x1C07; \
 \
 vbit.u16 blend_pixels, fb_pixels, draw_mask; \
 vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64; \
 \
 /* Store-to-load hazard check; overlapping fb pointers go through 2. */\
 sub fb_ptr_cmp, fb_ptr_next, fb_ptr; \
 add fb_ptr_cmp, fb_ptr_cmp, #14; \
 cmp fb_ptr_cmp, #28; \
 bls 2f; \
 \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vand.u16 pixels_g, pixels_fourth, d128_0x00E0; \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 vst1.u16 { blend_pixels }, [ fb_ptr ]; \
 \
 3: \
 vand.u16 fb_pixels_g, fb_pixels, d128_0x03E0; \
 vadd.u16 fb_pixels_rb, fb_pixels_rb, pixels_rb; \
 vadd.u16 fb_pixels_g, fb_pixels_g, pixels_g; \
 vmin.u8 fb_pixels_rb, fb_pixels_rb, d128_0x7C1F; \
 vmin.u16 fb_pixels_g, fb_pixels_g, d128_0x03E0; \
 \
 subs num_blocks, num_blocks, #1; \
 bne 0b; \
 \
 /* Epilogue: flush the final block. */\
 1: \
 vorr.u16 blend_pixels, fb_pixels_rb, fb_pixels_g; \
 vorr.u16 blend_pixels, blend_pixels, msb_mask; \
 vbit.u16 blend_pixels, fb_pixels, draw_mask; \
 vst1.u16 { blend_pixels }, [ fb_ptr_next ]; \
 \
 ldmia sp!, { r4, pc }; \
 \
 /* Overlap path: store before reloading fb pixels, then rejoin at 3. */\
 2: \
 vst1.u16 { blend_pixels }, [ fb_ptr ]; \
 vand.u16 pixels_g, pixels_fourth, d128_0x00E0; \
 \
 vld1.u16 { fb_pixels }, [ fb_ptr_next ]; \
 blend_blocks_add_mask_set_##mask_evaluate(); \
 blend_blocks_add_mask_copy_##mask_evaluate(); \
 vand.u16 fb_pixels_rb, fb_pixels, d128_0x7C1F; \
 bal 3b \
+
+
// Instantiate the four quarter-intensity additive blenders:
// {textured, untextured} x {mask evaluation off, on}.
blend_blocks_add_fourth_textured_builder(off)
blend_blocks_add_fourth_textured_builder(on)
blend_blocks_add_fourth_untextured_builder(off)
blend_blocks_add_fourth_untextured_builder(on)
+
// TODO: Optimize this more. Need a scene that actually uses it for
// confirmation..

.align 3

// Copy textured pixels straight to the framebuffer (no blending) with
// mask evaluation on: a pixel is written only if its draw-mask lane is
// clear AND the existing fb pixel does not have its mask (msb) bit set.
// Block layout matches the blenders: draw mask (+0), pixels (+16),
// fb pointer (+44), 64 bytes per block.
function(blend_blocks_textured_unblended_on)
 stmdb sp!, { r4, r14 }
 add mask_msb_ptr, psx_gpu, #psx_gpu_mask_msb_offset
 ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]

 add pixel_ptr, psx_gpu, #(psx_gpu_blocks_offset + 16)
 vld1.u16 { msb_mask_low[], msb_mask_high[] }, [ mask_msb_ptr, :16 ]

 add draw_mask_ptr, psx_gpu, #psx_gpu_blocks_offset
 mov c_64, #64

 // Prime the pipeline: load the first block and the fb pixels it covers;
 // write_mask flags fb lanes whose msb (mask bit) is already set.
 ldr fb_ptr, [ pixel_ptr, #28 ]
 vld1.u16 { fb_pixels }, [ fb_ptr ]
 vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64
 vclt.s16 write_mask, fb_pixels, #0
 vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64

 subs num_blocks, num_blocks, #1
 beq 1f

 0:
 // Insert pixels into fb_pixels where both masks are clear, store, and
 // fetch the next block.
 vorr.u16 draw_mask, draw_mask, write_mask
 vbif.u16 fb_pixels, pixels, draw_mask
 vst1.u16 { fb_pixels }, [ fb_ptr ]

 ldr fb_ptr, [ pixel_ptr, #28 ]
 vld1.u16 { fb_pixels }, [ fb_ptr ]
 vld1.u32 { draw_mask }, [ draw_mask_ptr, :128 ], c_64
 vclt.s16 write_mask, fb_pixels, #0
 vld1.u32 { pixels }, [ pixel_ptr, :128 ], c_64

 subs num_blocks, num_blocks, #1
 bne 0b

 1:
 // Flush the final block.
 vorr.u16 draw_mask, draw_mask, write_mask
 vbif.u16 fb_pixels, pixels, draw_mask
 vst1.u16 { fb_pixels }, [ fb_ptr ]

 ldmia sp!, { r4, pc }
+
+
// Intentional no-op: with mask evaluation off there is nothing for this
// pass to do.  NOTE(review): appears to exist only so the blend-function
// dispatch table has a uniform set of entries -- confirm against the
// caller that selects these routines.
function(blend_blocks_textured_unblended_off)
 bx lr
+
+
// Cache warm-up: touch r0 64-byte lines starting at r1 by loading them
// into (clobbered) NEON registers.  r0 == 0 returns immediately.
function(warmup)
 mov r3, #64
 cmp r0, #0
 bxeq lr

 0:
 // Load one 64-byte line purely for its cache-fill side effect.
 vld1.u32 { u_whole_8, v_whole_8 }, [ r1, :128 ], r3

 subs r0, r0, #1
 bne 0b

 bx lr
+
// Register aliases for render_block_fill_body.  Several aliases share a
// physical register (e.g. color_r/left_unaligned = r14); each is only
// live in a distinct phase of the routine.
#undef color
#undef y
#undef height

// Entry arguments (AAPCS r0-r3; width/height arrive on the stack).
#define psx_gpu r0
#define color r1
#define x r2
#define y r3

// Reused after the entry arguments are consumed.
#define vram_ptr r0
#define width r3
#define height r12

// Stack offsets of the spilled parameters.
#define parameter_width_offset 0
#define parameter_height_offset 4

// Scratch registers for the 888 -> 555 color conversion.
#define color_r r14
#define color_g r4
#define color_b r5

// Loop bookkeeping for the edge/aligned fill phases.
#define left_unaligned r14
#define right_unaligned r4
#define pitch r5
#define num_unaligned r2
#define num_width r6

#undef colors

// q0 holds the fill color replicated across eight 16-bit lanes.
#define colors q0
+
.align 3

// Fill a width x height rectangle at (x, y) in VRAM with a solid color.
// color arrives as 8-bit-per-component BGR888 and is converted to BGR555;
// each scanline is filled as: unaligned left edge (strh per pixel),
// aligned middle (16-byte vst1, 8 pixels at a time), unaligned right edge.
// VRAM pitch is 1024 pixels = 2048 bytes.
function(render_block_fill_body)
 ldr vram_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]
 ldr height, [ sp, #parameter_height_offset ]

 // vram_ptr += y * 2048 + x * 2
 add vram_ptr, vram_ptr, y, lsl #11
 ldr width, [ sp, #parameter_width_offset ]

 add vram_ptr, vram_ptr, x, lsl #1
 stmdb sp!, { r4 - r6, r14 }

 // Take the top 5 bits of each 8-bit component and repack as BGR555.
 ubfx color_r, color, #3, #5
 ubfx color_g, color, #11, #5

 ubfx color_b, color, #19, #5
 orr color, color_r, color_g, lsl #5

 // left_unaligned = pixels before the first 8-pixel-aligned x position.
 orr color, color, color_b, lsl #10
 add left_unaligned, x, #0x7

 bic left_unaligned, left_unaligned, #0x7
 vdup.u16 colors, color

 sub left_unaligned, left_unaligned, x
 mov pitch, #2048

 // pitch becomes the byte step from the end of one filled row to the
 // start of the next; split the remaining width into aligned groups of
 // eight plus a right-edge remainder.
 sub pitch, pitch, width, lsl #1
 sub width, width, left_unaligned

 and right_unaligned, width, #0x7
 bic width, width, #0x7

 0:
 mov num_width, width, lsr #3

 // Left edge: halfword stores until 8-pixel alignment.
 movs num_unaligned, left_unaligned
 beq 2f

 1:
 strh color, [ vram_ptr ], #2

 subs num_unaligned, num_unaligned, #1
 bne 1b

 // Aligned middle: 8 pixels (16 bytes) per store.
 2:
 vst1.u32 { colors }, [ vram_ptr, :128 ]!
 subs num_width, num_width, #1
 bne 2b

 // Right edge: halfword stores for the remainder.
 movs num_unaligned, right_unaligned
 beq 4f

 3:
 strh color, [ vram_ptr ], #2

 subs num_unaligned, num_unaligned, #1
 bne 3b

 // Advance to the next scanline.
 4:
 add vram_ptr, vram_ptr, pitch
 subs height, height, #1
 bne 0b

 ldmia sp!, { r4 - r6, pc }
+
+
// Register and NEON aliases for the sprite setup routines.  As above,
// aliases sharing a physical register are live in disjoint phases.
#undef x
#undef y
#undef width
#undef height
#undef fb_ptr
#undef texture_mask
#undef num_blocks
#undef temp
#undef dirty_textures_mask
#undef clut_ptr
#undef current_texture_mask

// Entry arguments and derived extents (see the argument comment ahead of
// setup_sprite_tiled_builder).
#define psx_gpu r0
#define x r1
#define y r2
#define u r3
#define v r4
#define width r5
#define height r6
#define offset_u r8
#define offset_v r9
#define offset_u_right r10
#define width_rounded r11
#define height_rounded r12

// Per-tile rendering state.
#define texture_offset_base r1
#define tile_width r2
#define tile_height r3
#define num_blocks r4
#define block r5
#define sub_tile_height r6
#define fb_ptr r7
#define texture_mask r8
#define column_data r9
#define texture_offset r10
#define tiles_remaining r11
#define fb_ptr_advance_column r12
#define texture_block_ptr r14

// Block-mask computation scratch.
#define texture_page_ptr r3
#define left_block_mask r4
#define right_block_mask r5
#define texture_mask_rev r10
#define control_mask r11

// Texture-cache bookkeeping.
#define dirty_textures_mask r4
#define clut_ptr r5
#define current_texture_mask r6


#undef texels
#undef clut_low_a
#undef clut_low_b
#undef clut_high_a
#undef clut_high_b
#undef clut_a
#undef clut_b
#undef texels_low
#undef texels_high

// NEON aliases: raw texel indices, per-block draw masks paired with their
// fb pointers, the deinterleaved CLUT halves and the looked-up texels.
#define texels d0
#define draw_masks_fb_ptrs q1

#define draw_mask_fb_ptr_left d2
#define draw_mask_fb_ptr_right d3

#define clut_low_a d4
#define clut_low_b d5
#define clut_high_a d6
#define clut_high_b d7

#define block_masks d8
#define block_masks_shifted d9

#define clut_a q2
#define clut_b q3

#define texels_low d10
#define texels_high d11
+
+
// Flush the render block buffer, preserving all caller-visible registers
// (q1-q4 and r0-r3/r12/lr around the C call), then reset the block write
// pointer and pre-count one block per remaining row of this sub-tile.
setup_sprite_flush_blocks_single:
 vpush { q1 - q4 }

 stmdb sp!, { r0 - r3, r12, r14 }
 bl flush_render_block_buffer
 ldmia sp!, { r0 - r3, r12, r14 }

 vpop { q1 - q4 }

 add block, psx_gpu, #psx_gpu_blocks_offset

 mov num_blocks, sub_tile_height
 bx lr
+
+
// Same as setup_sprite_flush_blocks_single, but pre-counts two blocks per
// remaining row (used by the full-width, two-blocks-per-row tile paths).
setup_sprite_flush_blocks_double:
 vpush { q1 - q4 }

 stmdb sp!, { r0 - r3, r12, r14 }
 bl flush_render_block_buffer
 ldmia sp!, { r0 - r3, r12, r14 }

 vpop { q1 - q4 }

 add block, psx_gpu, #psx_gpu_blocks_offset

 mov num_blocks, sub_tile_height, lsl #1
 bx lr
+
+
// Register-preserving trampoline into the C 4bpp texture-cache updater.
setup_sprite_update_texture_4bpp_cache:
 stmdb sp!, { r0 - r3, r14 }
 bl update_texture_4bpp_cache
 ldmia sp!, { r0 - r3, pc }
+
+
// Register-preserving trampoline into the C 8bpp texture-cache updater.
setup_sprite_update_texture_8bpp_cache:
 stmdb sp!, { r0 - r3, r14 }
 bl update_texture_8bpp_cache
 ldmia sp!, { r0 - r3, pc }
+
+
// Per-format sprite initialization.  4bpp: load the 16-entry CLUT and
// split it into low/high byte planes with vuzp so texels can be looked up
// with two vtbl passes; both formats refresh the texture cache if the
// current texture page is flagged dirty.
#define setup_sprite_tiled_initialize_4bpp() \
 ldr dirty_textures_mask, \
 [ psx_gpu, #psx_gpu_dirty_textures_4bpp_mask_offset ]; \
 ldr clut_ptr, [ psx_gpu, #psx_gpu_clut_ptr_offset ]; \
 \
 ldr current_texture_mask, [ psx_gpu, #psx_gpu_current_texture_mask_offset ]; \
 vld1.u32 { clut_a, clut_b }, [ clut_ptr, :128 ]; \
 \
 tst current_texture_mask, dirty_textures_mask; \
 vuzp.u8 clut_a, clut_b; \
 \
 blne setup_sprite_update_texture_4bpp_cache \

#define setup_sprite_tiled_initialize_8bpp() \
 ldr dirty_textures_mask, \
 [ psx_gpu, #psx_gpu_dirty_textures_8bpp_mask_offset ]; \
 ldr current_texture_mask, [ psx_gpu, #psx_gpu_current_texture_mask_offset ]; \
 \
 tst current_texture_mask, dirty_textures_mask; \
 blne setup_sprite_update_texture_8bpp_cache \
+
+
// Intentionally empty variant (no per-block setup needed on this path).
#define setup_sprite_tile_setup_block_no(side, offset, texture_mode) \

// Blocks consumed by one tile column: one (half-width) or two (full
// width) per row of the sub-tile.
#define setup_sprite_block_count_single() \
 sub_tile_height \

#define setup_sprite_block_count_double() \
 sub_tile_height, lsl #1 \

// Reserve the blocks this tile will emit; if the buffer would exceed
// MAX_BLOCKS, flush it first (the flush routine resets block and
// recomputes num_blocks).
#define setup_sprite_tile_add_blocks(type) \
 add num_blocks, num_blocks, setup_sprite_block_count_##type(); \
 cmp num_blocks, #MAX_BLOCKS; \
 \
 blgt setup_sprite_flush_blocks_##type \
+
+
// Emit one full-width (two blocks per row) 4bpp tile column.  Each row:
// fetch 8 texel indices, translate through the split CLUT with two vtbl
// lookups, interleave the low/high result bytes into the block's pixel
// area with vst2, then append the row's draw mask + fb pointer; repeat
// for the right half.  Block layout: 40 bytes to the mask area, 24 more
// to the next block (64 total).
#define setup_sprite_tile_full_4bpp(edge) \
 setup_sprite_tile_add_blocks(double); \
 \
 4: \
 and texture_block_ptr, texture_offset, texture_mask; \
 vmov.u32 draw_mask_fb_ptr_left[1], fb_ptr; \
 \
 pld [ fb_ptr ]; \
 add texture_block_ptr, texture_page_ptr, texture_block_ptr; \
 vld1.u32 { texels }, [ texture_block_ptr, :64 ]; \
 \
 vtbl.8 texels_low, { clut_low_a, clut_low_b }, texels; \
 vtbl.8 texels_high, { clut_high_a, clut_high_b }, texels; \
 \
 vst2.u8 { texels_low, texels_high }, [ block, :128 ]; \
 add texture_block_ptr, texture_offset, #8; \
 \
 and texture_block_ptr, texture_block_ptr, texture_mask; \
 add block, block, #40; \
 \
 add texture_block_ptr, texture_page_ptr, texture_block_ptr; \
 add fb_ptr, fb_ptr, #16; \
 \
 vst1.u32 { draw_mask_fb_ptr_left }, [ block, :64 ]; \
 add block, block, #24; \
 \
 /* Right half of the row: next 8 texels, next 16 fb bytes. */\
 vld1.u32 { texels }, [ texture_block_ptr, :64 ]; \
 vtbl.8 texels_low, { clut_low_a, clut_low_b }, texels; \
 \
 pld [ fb_ptr ]; \
 vmov.u32 draw_mask_fb_ptr_right[1], fb_ptr; \
 vtbl.8 texels_high, { clut_high_a, clut_high_b }, texels; \
 \
 vst2.u8 { texels_low, texels_high }, [ block, :128 ]; \
 add block, block, #40; \
 \
 /* Next texture row (+0x10) and next fb scanline (pitch 2048, minus */\
 /* the 16 bytes already advanced). */\
 add texture_offset, texture_offset, #0x10; \
 add fb_ptr, fb_ptr, #(2048 - 16); \
 \
 vst1.u32 { draw_mask_fb_ptr_right }, [ block, :64 ]; \
 add block, block, #24; \
 \
 subs sub_tile_height, sub_tile_height, #1; \
 bne 4b; \
 \
 /* Step texture_offset to the next 16-texel tile row. */\
 add texture_offset, texture_offset, #0xF00; \
 strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ] \
+
+
// Emit one half-width (single block per row) 4bpp tile column; edge picks
// which draw-mask half (left or right) is attached to each block.
#define setup_sprite_tile_half_4bpp(edge) \
 setup_sprite_tile_add_blocks(single); \
 \
 4: \
 and texture_block_ptr, texture_offset, texture_mask; \
 vmov.u32 draw_mask_fb_ptr_##edge[1], fb_ptr; \
 \
 pld [ fb_ptr ]; \
 add texture_block_ptr, texture_page_ptr, texture_block_ptr; \
 vld1.u32 { texels }, [ texture_block_ptr, :64 ]; \
 \
 /* CLUT lookup via the deinterleaved low/high byte planes. */\
 vtbl.8 texels_low, { clut_low_a, clut_low_b }, texels; \
 vtbl.8 texels_high, { clut_high_a, clut_high_b }, texels; \
 \
 vst2.u8 { texels_low, texels_high }, [ block, :128 ]; \
 add block, block, #40; \
 \
 add texture_block_ptr, texture_page_ptr, texture_block_ptr; \
 vst1.u32 { draw_mask_fb_ptr_##edge }, [ block, :64 ]; \
 \
 add block, block, #24; \
 add texture_offset, texture_offset, #0x10; \
 \
 /* Next fb scanline (pitch 2048 bytes). */\
 add fb_ptr, fb_ptr, #2048; \
 subs sub_tile_height, sub_tile_height, #1; \
 \
 bne 4b; \
 \
 /* Step texture_offset to the next 16-texel tile row. */\
 add texture_offset, texture_offset, #0xF00; \
 strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ] \
+
+
// Emit one full-width (two blocks per row) 8bpp tile column.  8bpp texels
// are stored raw (CLUT lookup happens later), so only 8 bytes land in
// each block's texel area; block is pre-biased by +16 to address it, and
// un-biased again after the loop.
#define setup_sprite_tile_full_8bpp(edge) \
 setup_sprite_tile_add_blocks(double); \
 add block, block, #16; \
 \
 4: \
 and texture_block_ptr, texture_offset, texture_mask; \
 vmov.u32 draw_mask_fb_ptr_left[1], fb_ptr; \
 \
 pld [ fb_ptr ]; \
 add texture_block_ptr, texture_page_ptr, texture_block_ptr; \
 vld1.u32 { texels }, [ texture_block_ptr, :64 ]; \
 \
 add texture_block_ptr, texture_offset, #8; \
 vst1.u32 { texels }, [ block, :64 ]; \
 \
 and texture_block_ptr, texture_block_ptr, texture_mask; \
 add block, block, #24; \
 \
 add texture_block_ptr, texture_page_ptr, texture_block_ptr; \
 \
 add fb_ptr, fb_ptr, #16; \
 vst1.u32 { draw_mask_fb_ptr_left }, [ block, :64 ]; \
 \
 /* Right half of the row. */\
 add block, block, #40; \
 vld1.u32 { texels }, [ texture_block_ptr, :64 ]; \
 pld [ fb_ptr ]; \
 \
 vmov.u32 draw_mask_fb_ptr_right[1], fb_ptr; \
 vst1.u32 { texels }, [ block, :64 ]; \
 add block, block, #24; \
 \
 /* Next texture row and next fb scanline. */\
 add texture_offset, texture_offset, #0x10; \
 add fb_ptr, fb_ptr, #(2048 - 16); \
 \
 vst1.u32 { draw_mask_fb_ptr_right }, [ block, :64 ]; \
 add block, block, #40; \
 \
 subs sub_tile_height, sub_tile_height, #1; \
 bne 4b; \
 \
 /* Undo the +16 bias and step to the next 16-texel tile row. */\
 sub block, block, #16; \
 add texture_offset, texture_offset, #0xF00; \
 strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ] \
+
+
// Emit one half-width (single block per row) 8bpp tile column; raw texels
// only, with the same +16 block bias as the full-width 8bpp variant.
#define setup_sprite_tile_half_8bpp(edge) \
 setup_sprite_tile_add_blocks(single); \
 add block, block, #16; \
 \
 4: \
 and texture_block_ptr, texture_offset, texture_mask; \
 vmov.u32 draw_mask_fb_ptr_##edge[1], fb_ptr; \
 pld [ fb_ptr ]; \
 \
 add texture_block_ptr, texture_page_ptr, texture_block_ptr; \
 vld1.u32 { texels }, [ texture_block_ptr, :64 ]; \
 \
 vst1.u32 { texels }, [ block, :64 ]; \
 add block, block, #24; \
 \
 vst1.u32 { draw_mask_fb_ptr_##edge }, [ block, :64 ]; \
 add block, block, #40; \
 \
 /* Next texture row and next fb scanline. */\
 add texture_offset, texture_offset, #0x10; \
 add fb_ptr, fb_ptr, #2048; \
 \
 subs sub_tile_height, sub_tile_height, #1; \
 bne 4b; \
 \
 /* Undo the +16 bias and step to the next 16-texel tile row. */\
 sub block, block, #16; \
 add texture_offset, texture_offset, #0xF00; \
 strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ] \
+
+
// Pre/post adjustments around a tile column.  A half column on the right
// edge starts at the second 8-texel group (+8 into the tile, +16 bytes
// into the fb row) and restores the fb pointer afterwards; half-left and
// full columns start at the tile base and need no post adjustment.
#define setup_sprite_tile_column_edge_pre_adjust_half_right() \
 add texture_offset, texture_offset_base, #8; \
 add fb_ptr, fb_ptr, #16 \

#define setup_sprite_tile_column_edge_pre_adjust_half_left() \
 mov texture_offset, texture_offset_base \

#define setup_sprite_tile_column_edge_pre_adjust_half(edge) \
 setup_sprite_tile_column_edge_pre_adjust_half_##edge() \

#define setup_sprite_tile_column_edge_pre_adjust_full(edge) \
 mov texture_offset, texture_offset_base \

#define setup_sprite_tile_column_edge_post_adjust_half_right() \
 sub fb_ptr, fb_ptr, #16 \

#define setup_sprite_tile_column_edge_post_adjust_half_left() \

#define setup_sprite_tile_column_edge_post_adjust_half(edge) \
 setup_sprite_tile_column_edge_post_adjust_half_##edge() \

#define setup_sprite_tile_column_edge_post_adjust_full(edge) \
+
+
// Walk one column of tiles vertically.  single: the sprite fits in one
// 16-texel tile row, column_data is just the row count.  multi: unpack
// column_data (bits 0-7 = rows in the first tile, bits 8-15 = rows in the
// last tile, bits 16+ = count of middle tiles + last) and emit first /
// full middle / last tile segments.
#define setup_sprite_tile_column_height_single(edge_mode, edge, texture_mode) \
 mov sub_tile_height, column_data; \
 setup_sprite_tile_column_edge_pre_adjust_##edge_mode(edge); \
 setup_sprite_tile_##edge_mode##_##texture_mode(edge); \
 setup_sprite_tile_column_edge_post_adjust_##edge_mode(edge) \

#define setup_sprite_tile_column_height_multi(edge_mode, edge, texture_mode) \
 and sub_tile_height, column_data, #0xFF; \
 mov tiles_remaining, column_data, lsr #16; \
 setup_sprite_tile_column_edge_pre_adjust_##edge_mode(edge); \
 setup_sprite_tile_##edge_mode##_##texture_mode(edge); \
 \
 subs tiles_remaining, tiles_remaining, #1; \
 beq 2f; \
 \
 /* Full-height (16 row) middle tiles. */\
 3: \
 mov sub_tile_height, #16; \
 setup_sprite_tile_##edge_mode##_##texture_mode(edge); \
 subs tiles_remaining, tiles_remaining, #1; \
 bne 3b; \
 \
 /* Last tile: row count from bits 8-15 of column_data. */\
 2: \
 uxtb sub_tile_height, column_data, ror #8; \
 setup_sprite_tile_##edge_mode##_##texture_mode(edge); \
 setup_sprite_tile_column_edge_post_adjust_##edge_mode(edge) \
+
+
// Build column_data for the height walkers above.  single: the plain row
// count.  multi: pack rows-in-first-tile (16 - offset_v) into bits 0-7,
// rows-in-last-tile into bits 8-15 and the remaining tile count into
// bits 16+.  Both also fetch the texture page pointer.
#define setup_sprite_column_data_single() \
 mov column_data, height; \
 ldr texture_page_ptr, [ psx_gpu, #psx_gpu_texture_page_ptr_offset ] \

#define setup_sprite_column_data_multi() \
 and height_rounded, height_rounded, #0xF; \
 rsb column_data, offset_v, #16; \
 \
 add height_rounded, height_rounded, #1; \
 sub tile_height, tile_height, #1; \
 \
 orr column_data, column_data, tile_height, lsl #16; \
 ldr texture_page_ptr, [ psx_gpu, #psx_gpu_texture_page_ptr_offset ]; \
 \
 orr column_data, column_data, height_rounded, lsl #8 \
+
// Entry point for sprites one tile column wide: merge the left and right
// block masks (the single column is both edges), broadcast them into the
// per-block draw-mask registers and render the column.
#define setup_sprite_tile_column_width_single(texture_mode, multi_height, \
 edge_mode, edge) \
 setup_sprite_##texture_mode##_single_##multi_height##_##edge_mode##_##edge: \
 setup_sprite_column_data_##multi_height(); \
 vext.32 block_masks_shifted, block_masks, block_masks, #1; \
 vorr.u32 block_masks, block_masks, block_masks_shifted; \
 vdup.u8 draw_mask_fb_ptr_left, block_masks[0]; \
 vdup.u8 draw_mask_fb_ptr_right, block_masks[1]; \
 \
 setup_sprite_tile_column_height_##multi_height(edge_mode, edge, \
 texture_mode); \
 ldmia sp!, { r4 - r11, pc } \

// Advance texture_offset_base one tile to the right (+0x100), wrapping
// back by a tile row (0x1000 total) when the U coordinate overflows.
#define setup_sprite_tiled_advance_column() \
 add texture_offset_base, texture_offset_base, #0x100; \
 tst texture_offset_base, #0xF00; \
 subeq texture_offset_base, texture_offset_base, #(0x100 + 0xF00) \
+
// Entry point for sprites spanning several tile columns: render the left
// edge column with its mask, unmasked full columns in the middle (masks
// cleared to zero), then the right edge column with its mask.
// fb_ptr_advance_column = 32 - height * 2048 steps the fb pointer from
// the bottom of one column to the top of the next.
#define setup_sprite_tile_column_width_multi(tm, multi_height, left_mode, \
 right_mode) \
 setup_sprite_##tm##_multi_##multi_height##_##left_mode##_##right_mode: \
 setup_sprite_column_data_##multi_height(); \
 mov fb_ptr_advance_column, #32; \
 \
 sub fb_ptr_advance_column, height, lsl #11; \
 vdup.u8 draw_mask_fb_ptr_left, block_masks[0]; \
 \
 vdup.u8 draw_mask_fb_ptr_right, block_masks[1]; \
 setup_sprite_tile_column_height_##multi_height(left_mode, right, tm); \
 \
 subs tile_width, tile_width, #2; \
 add fb_ptr, fb_ptr, fb_ptr_advance_column; \
 \
 /* Middle columns draw every pixel: clear both draw masks. */\
 vmov.u8 draw_masks_fb_ptrs, #0; \
 beq 1f; \
 \
 0: \
 setup_sprite_tiled_advance_column(); \
 setup_sprite_tile_column_height_##multi_height(full, none, tm); \
 add fb_ptr, fb_ptr, fb_ptr_advance_column; \
 subs tile_width, tile_width, #1; \
 bne 0b; \
 \
 /* Right edge column: masks from the upper half of block_masks. */\
 1: \
 vdup.u8 draw_mask_fb_ptr_left, block_masks[4]; \
 vdup.u8 draw_mask_fb_ptr_right, block_masks[5]; \
 \
 setup_sprite_tiled_advance_column(); \
 setup_sprite_tile_column_height_##multi_height(right_mode, left, tm); \
 ldmia sp!, { r4 - r11, pc } \
+
+
+// Tiled sprite renderer builder for the paletted (4bpp/8bpp) texture modes.
+// The macro first instantiates the fourteen tile-column layout variants,
+// then emits a dispatcher, function(setup_sprite_<texture_mode>), which
+// classifies the sprite by tile-column/row count and edge coverage and
+// branches to the matching variant through the inline jump table at the end.
+//
+// Dispatcher arguments:
+// r0: psx_gpu
+// r1: x
+// r2: y
+// r3: u
+// [ sp ]: v
+// [ sp + 4 ]: width
+// [ sp + 8 ]: height
+// [ sp + 12 ]: color (unused)
+
+#define setup_sprite_tiled_builder(texture_mode) \
+  \
+setup_sprite_tile_column_width_multi(texture_mode, multi, full, full); \
+setup_sprite_tile_column_width_single(texture_mode, multi, full, none); \
+setup_sprite_tile_column_width_multi(texture_mode, single, full, full); \
+setup_sprite_tile_column_width_single(texture_mode, single, full, none); \
+setup_sprite_tile_column_width_multi(texture_mode, multi, half, full); \
+setup_sprite_tile_column_width_single(texture_mode, multi, half, right); \
+setup_sprite_tile_column_width_multi(texture_mode, single, half, full); \
+setup_sprite_tile_column_width_single(texture_mode, single, half, right); \
+setup_sprite_tile_column_width_multi(texture_mode, multi, full, half); \
+setup_sprite_tile_column_width_single(texture_mode, multi, half, left); \
+setup_sprite_tile_column_width_multi(texture_mode, single, full, half); \
+setup_sprite_tile_column_width_single(texture_mode, single, half, left); \
+setup_sprite_tile_column_width_multi(texture_mode, multi, half, half); \
+setup_sprite_tile_column_width_multi(texture_mode, single, half, half); \
+  \
+.align 4; \
+  \
+function(setup_sprite_##texture_mode) \
+  stmdb sp!, { r4 - r11, r14 }; \
+  setup_sprite_tiled_initialize_##texture_mode(); \
+  \
+  ldr v, [ sp, #36 ]; \
+  and offset_u, u, #0xF; \
+  \
+  ldr width, [ sp, #40 ]; \
+  ldr fb_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]; \
+  \
+  ldr height, [ sp, #44 ]; \
+  add fb_ptr, fb_ptr, y, lsl #11; \
+  \
+  add fb_ptr, fb_ptr, x, lsl #1; \
+  and offset_v, v, #0xF; \
+  \
+  sub fb_ptr, fb_ptr, offset_u, lsl #1; \
+  add width_rounded, offset_u, width; \
+  \
+  add height_rounded, offset_v, height; \
+  add width_rounded, width_rounded, #15; \
+  \
+  add height_rounded, height_rounded, #15; \
+  mov tile_width, width_rounded, lsr #4; \
+  \
+  /* texture_offset_base = VH-VL-00-00 */\
+  mov texture_offset_base, v, lsl #8; \
+  and offset_u_right, width_rounded, #0xF; \
+  \
+  /* texture_offset_base = VH-UH-UL-00 */\
+  bfi texture_offset_base, u, #4, #8; \
+  movw right_block_mask, #0xFFFE; \
+  \
+  /* texture_offset_base = VH-UH-VL-00 */\
+  bfi texture_offset_base, v, #4, #4; \
+  movw left_block_mask, #0xFFFF; \
+  \
+  mov tile_height, height_rounded, lsr #4; \
+  mvn left_block_mask, left_block_mask, lsl offset_u; \
+  \
+  /* texture_mask = HH-HL-WH-WL */\
+  ldrh texture_mask, [ psx_gpu, #psx_gpu_texture_mask_width_offset ]; \
+  mov right_block_mask, right_block_mask, lsl offset_u_right; \
+  \
+  /* texture_mask_rev = WH-WL-HH-HL */\
+  rev16 texture_mask_rev, texture_mask; \
+  vmov block_masks, left_block_mask, right_block_mask; \
+  \
+  /* texture_mask = HH-HL-HL-WL */\
+  bfi texture_mask, texture_mask_rev, #4, #4; \
+  /* texture_mask_rev = 00-00-00-WH */\
+  mov texture_mask_rev, texture_mask_rev, lsr #12; \
+  \
+  /* texture_mask = HH-WH-HL-WL */\
+  bfi texture_mask, texture_mask_rev, #8, #4; \
+  and left_block_mask, left_block_mask, #0xFF; \
+  \
+  /* control_mask bits: 0x1 = single tile column, 0x2 = single tile row, */\
+  /* 0x4 = left edge block fully covered, 0x8 = right edge block fully */\
+  /* covered (edge mask byte == 0xFF). */\
+  mov control_mask, #0; \
+  cmp left_block_mask, #0xFF; \
+  \
+  uxtb right_block_mask, right_block_mask, ror #8; \
+  orreq control_mask, control_mask, #0x4; \
+  \
+  ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]; \
+  cmp right_block_mask, #0xFF; \
+  \
+  orreq control_mask, control_mask, #0x8; \
+  cmp tile_width, #1; \
+  \
+  add block, psx_gpu, #psx_gpu_blocks_offset; \
+  orreq control_mask, control_mask, #0x1; \
+  \
+  cmp tile_height, #1; \
+  add block, block, num_blocks, lsl #6; \
+  \
+  orreq control_mask, control_mask, #0x2; \
+  ldr pc, [ pc, control_mask, lsl #2 ]; \
+  nop; \
+  \
+  /* Jump table indexed by control_mask; the zero entry is a mask */\
+  /* combination that is presumably unreachable. */\
+  .word setup_sprite_##texture_mode##_multi_multi_full_full; \
+  .word setup_sprite_##texture_mode##_single_multi_full_none; \
+  .word setup_sprite_##texture_mode##_multi_single_full_full; \
+  .word setup_sprite_##texture_mode##_single_single_full_none; \
+  .word setup_sprite_##texture_mode##_multi_multi_half_full; \
+  .word setup_sprite_##texture_mode##_single_multi_half_right; \
+  .word setup_sprite_##texture_mode##_multi_single_half_full; \
+  .word setup_sprite_##texture_mode##_single_single_half_right; \
+  .word setup_sprite_##texture_mode##_multi_multi_full_half; \
+  .word setup_sprite_##texture_mode##_single_multi_half_left; \
+  .word setup_sprite_##texture_mode##_multi_single_full_half; \
+  .word setup_sprite_##texture_mode##_single_single_half_left; \
+  .word setup_sprite_##texture_mode##_multi_multi_half_half; \
+  .word 0x00000000; \
+  .word setup_sprite_##texture_mode##_multi_single_half_half \
+
+
+// Instantiate the tiled sprite setup functions for both paletted modes.
+setup_sprite_tiled_builder(4bpp);
+setup_sprite_tiled_builder(8bpp);
+
+
+// Register aliases for texture_sprite_blocks_8bpp.  texel_N registers are
+// reused as the packed texels_NM pair registers once the CLUT lookups are
+// combined.
+#undef block_ptr
+#undef num_blocks
+#undef clut_ptr
+
+#define psx_gpu r0
+#define block_ptr r0
+#define num_blocks r1
+#define clut_ptr r2
+#define texel_shift_mask r3
+#define block_pixels_a r4
+#define block_pixels_b r5
+#define texel_0 r6
+#define texel_2 r7
+#define texel_4 r8
+#define texel_6 r9
+#define texel_1 r10
+#define texel_3 r11
+#define texel_5 r12
+#define texel_7 r14
+#define texels_01 r6
+#define texels_23 r7
+#define texels_45 r8
+#define texels_67 r9
+
+// Convert the 8bpp texel indexes staged in each render block (at byte
+// offset 16) into 16bpp pixels through the CLUT, storing the result into
+// the block's first 16 bytes.  Processes psx_gpu->num_blocks blocks of
+// 64 bytes each.
+// r0: psx_gpu
+function(texture_sprite_blocks_8bpp)
+  stmdb sp!, { r4 - r11, r14 }
+  // CLUT entries are halfwords, so the 8-bit index is pre-scaled by 2.
+  movw texel_shift_mask, #(0xFF << 1)
+
+  ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+  ldr clut_ptr, [ psx_gpu, #psx_gpu_clut_ptr_offset ]
+
+  add block_ptr, psx_gpu, #psx_gpu_blocks_offset
+  ldr block_pixels_a, [ block_ptr, #16 ]
+
+ 0:
+  // Extract the four indexes packed in block_pixels_a, interleaving the
+  // CLUT loads with the shifts to hide load latency.
+  and texel_0, texel_shift_mask, block_pixels_a, lsl #1
+  ldr block_pixels_b, [ block_ptr, #20 ]
+
+  and texel_1, texel_shift_mask, block_pixels_a, lsr #7
+  ldrh texel_0, [ clut_ptr, texel_0 ]
+
+  and texel_2, texel_shift_mask, block_pixels_a, lsr #15
+  ldrh texel_1, [ clut_ptr, texel_1 ]
+
+  // Preload the next block's first word of indexes before texel_0..3's
+  // source register is recycled.
+  and texel_3, texel_shift_mask, block_pixels_a, lsr #23
+  ldr block_pixels_a, [ block_ptr, #(64 + 16) ]
+
+  ldrh texel_2, [ clut_ptr, texel_2 ]
+  and texel_4, texel_shift_mask, block_pixels_b, lsl #1
+
+  ldrh texel_3, [ clut_ptr, texel_3 ]
+  and texel_5, texel_shift_mask, block_pixels_b, lsr #7
+
+  ldrh texel_4, [ clut_ptr, texel_4 ]
+  and texel_6, texel_shift_mask, block_pixels_b, lsr #15
+
+  ldrh texel_5, [ clut_ptr, texel_5 ]
+  and texel_7, texel_shift_mask, block_pixels_b, lsr #23
+
+  // Pack pairs of 16bpp texels into words and store them at the start of
+  // the block.
+  ldrh texel_6, [ clut_ptr, texel_6 ]
+  orr texels_01, texel_0, texel_1, lsl #16
+
+  ldrh texel_7, [ clut_ptr, texel_7 ]
+  orr texels_23, texel_2, texel_3, lsl #16
+
+  orr texels_45, texel_4, texel_5, lsl #16
+  str texels_01, [ block_ptr, #0 ]
+
+  orr texels_67, texel_6, texel_7, lsl #16
+  str texels_23, [ block_ptr, #4 ]
+
+  subs num_blocks, num_blocks, #1
+  str texels_45, [ block_ptr, #8 ]
+
+  str texels_67, [ block_ptr, #12 ]
+  add block_ptr, block_ptr, #64
+
+  bne 0b
+
+  ldmia sp!, { r4 - r11, pc }
+
+
+// Register aliases for setup_sprite_16bpp and its flush helpers.  The first
+// group is live while decoding the arguments; the second group takes over
+// for the render loop (several names deliberately reuse the same register
+// once the earlier value is dead).
+#undef width_rounded
+#undef texture_mask
+#undef num_blocks
+#undef texture_offset
+
+#define psx_gpu r0
+#define x r1
+#define y r2
+#define u r3
+#define v r4
+#define width r5
+#define height r6
+#define left_offset r8
+#define width_rounded r9
+#define right_width r10
+#define block_width r11
+
+#define texture_offset_base r1
+#define texture_mask r2
+#define texture_page_ptr r3
+#define num_blocks r4
+#define block r5
+#define fb_ptr r7
+#define texture_offset r8
+#define blocks_remaining r9
+#define fb_ptr_pitch r12
+#define texture_block_ptr r14
+
+#define texture_mask_width r2
+#define texture_mask_height r3
+#define left_mask_bits r4
+#define right_mask_bits r5
+
+
+// NEON aliases used by the 16bpp sprite path.
+#undef block_masks
+#undef block_masks_shifted
+#undef texels
+
+#define block_masks d0
+#define block_masks_shifted d1
+#define draw_mask_fb_ptr d2
+#define texels q2
+
+
+// Flush helpers for setup_sprite_16bpp: preserve the live NEON and core
+// registers around flush_render_block_buffer, then point block back at the
+// start of the block array.  The two variants differ only in how many
+// blocks the resumed row will add: one for single-block rows, block_width
+// for multi-block rows.
+setup_sprites_16bpp_flush_single:
+  vpush { d0 - d2 }
+
+  stmdb sp!, { r0 - r3, r12, r14 }
+  bl flush_render_block_buffer
+  ldmia sp!, { r0 - r3, r12, r14 }
+
+  vpop { d0 - d2 }
+
+  add block, psx_gpu, #psx_gpu_blocks_offset
+  mov num_blocks, #1
+
+  bx lr
+
+setup_sprites_16bpp_flush_row:
+  vpush { d0 - d2 }
+
+  stmdb sp!, { r0 - r3, r12, r14 }
+  bl flush_render_block_buffer
+  ldmia sp!, { r0 - r3, r12, r14 }
+
+  vpop { d0 - d2 }
+
+  add block, psx_gpu, #psx_gpu_blocks_offset
+  mov num_blocks, block_width
+
+  bx lr
+
+// 16bpp sprite setup: texels are copied straight from the texture page into
+// render blocks, eight pixels (one block) at a time — no CLUT pass needed.
+// Edge coverage is handled with per-block draw masks taken from block_masks.
+// Arguments follow the same convention as the paletted setup functions:
+// r0: psx_gpu, r1: x, r2: y, r3: u, stack: v, width, height.
+function(setup_sprite_16bpp)
+  stmdb sp!, { r4 - r11, r14 }
+  ldr fb_ptr, [ psx_gpu, #psx_gpu_vram_ptr_offset ]
+
+  ldr v, [ sp, #36 ]
+  add fb_ptr, fb_ptr, y, lsl #11
+
+  ldr width, [ sp, #40 ]
+  add fb_ptr, fb_ptr, x, lsl #1
+
+  ldr height, [ sp, #44 ]
+  and left_offset, u, #0x7
+
+  add texture_offset_base, u, u
+  add width_rounded, width, #7
+
+  add texture_offset_base, v, lsl #11
+  mov left_mask_bits, #0xFF
+
+  ldrb texture_mask_width, [ psx_gpu, #psx_gpu_texture_mask_width_offset ]
+  add width_rounded, width_rounded, left_offset
+
+  ldrb texture_mask_height, [ psx_gpu, #psx_gpu_texture_mask_height_offset ]
+  sub fb_ptr, fb_ptr, left_offset, lsl #1
+
+  add texture_mask, texture_mask_width, texture_mask_width
+  mov right_mask_bits, #0xFE
+
+  and right_width, width_rounded, #0x7
+  mvn left_mask_bits, left_mask_bits, lsl left_offset
+
+  add texture_mask, texture_mask_height, lsl #11
+  mov block_width, width_rounded, lsr #3
+
+  mov right_mask_bits, right_mask_bits, lsl right_width
+  movw fb_ptr_pitch, #(2048 + 16)
+
+  sub fb_ptr_pitch, fb_ptr_pitch, block_width, lsl #4
+  vmov block_masks, left_mask_bits, right_mask_bits
+
+  ldrh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+  add block, psx_gpu, #psx_gpu_blocks_offset
+
+  bic texture_offset_base, texture_offset_base, #0x7
+  cmp block_width, #1
+
+  ldr texture_page_ptr, [ psx_gpu, #psx_gpu_texture_page_ptr_offset ]
+  add block, block, num_blocks, lsl #6
+
+  bne 0f
+
+  // Single block per row: fold the left and right edge masks together and
+  // use the combined mask for every row.
+  vext.32 block_masks_shifted, block_masks, block_masks, #1
+  vorr.u32 block_masks, block_masks, block_masks_shifted
+  vdup.u8 draw_mask_fb_ptr, block_masks[0]
+
+ 1:
+  add num_blocks, num_blocks, #1
+  cmp num_blocks, #MAX_BLOCKS
+  blgt setup_sprites_16bpp_flush_single
+
+  and texture_block_ptr, texture_offset_base, texture_mask
+  subs height, height, #1
+
+  add texture_block_ptr, texture_page_ptr, texture_block_ptr
+  vld1.u32 { texels }, [ texture_block_ptr, :128 ]
+
+  vst1.u32 { texels }, [ block, :128 ]
+  add block, block, #40
+
+  vmov.u32 draw_mask_fb_ptr[1], fb_ptr
+  pld [ fb_ptr ]
+
+  vst1.u32 { draw_mask_fb_ptr }, [ block, :64 ]
+
+  add block, block, #24
+  add texture_offset_base, texture_offset_base, #2048
+  add fb_ptr, fb_ptr, #2048
+  strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+  bne 1b
+
+  ldmia sp!, { r4 - r11, pc }
+
+  // Multi-block rows: leftmost block uses the left edge mask, middle blocks
+  // a full mask, and the final block the right edge mask.
+ 0:
+  add num_blocks, num_blocks, block_width
+  mov texture_offset, texture_offset_base
+
+  cmp num_blocks, #MAX_BLOCKS
+  blgt setup_sprites_16bpp_flush_row
+
+  add texture_offset_base, texture_offset_base, #2048
+  and texture_block_ptr, texture_offset, texture_mask
+
+  add texture_block_ptr, texture_page_ptr, texture_block_ptr
+  vld1.u32 { texels }, [ texture_block_ptr, :128 ]
+
+  vst1.u32 { texels }, [ block, :128 ]
+  add block, block, #40
+
+  vdup.u8 draw_mask_fb_ptr, block_masks[0]
+  vmov.u32 draw_mask_fb_ptr[1], fb_ptr
+  pld [ fb_ptr ]
+
+  vst1.u32 { draw_mask_fb_ptr }, [ block, :64 ]
+  subs blocks_remaining, block_width, #2
+
+  add texture_offset, texture_offset, #16
+  add fb_ptr, fb_ptr, #16
+
+  // Middle blocks draw with an all-zero (fully drawn) mask.
+  vmov.u8 draw_mask_fb_ptr, #0
+
+  add block, block, #24
+  beq 2f
+
+ 1:
+  and texture_block_ptr, texture_offset, texture_mask
+  subs blocks_remaining, blocks_remaining, #1
+
+  add texture_block_ptr, texture_page_ptr, texture_block_ptr
+  vld1.u32 { texels }, [ texture_block_ptr, :128 ]
+
+  vst1.u32 { texels }, [ block, :128 ]
+  add block, block, #40
+
+  vmov.u32 draw_mask_fb_ptr[1], fb_ptr
+  pld [ fb_ptr ]
+
+  vst1.u32 { draw_mask_fb_ptr }, [ block, :64 ]
+
+  add texture_offset, texture_offset, #16
+  add fb_ptr, fb_ptr, #16
+
+  add block, block, #24
+  bne 1b
+
+  // Last block of the row: right edge mask, then step fb_ptr to the next
+  // framebuffer line via fb_ptr_pitch.
+ 2:
+  and texture_block_ptr, texture_offset, texture_mask
+  add texture_block_ptr, texture_page_ptr, texture_block_ptr
+
+  vld1.u32 { texels }, [ texture_block_ptr, :128 ]
+  vdup.u8 draw_mask_fb_ptr, block_masks[4]
+
+  vst1.u32 { texels }, [ block, :128 ]
+  add block, block, #40
+
+  vmov.u32 draw_mask_fb_ptr[1], fb_ptr
+  vst1.u32 { draw_mask_fb_ptr }, [ block, :64 ]
+
+  add block, block, #24
+  subs height, height, #1
+
+  add fb_ptr, fb_ptr, fb_ptr_pitch
+  strh num_blocks, [ psx_gpu, #psx_gpu_num_blocks_offset ]
+
+  bne 0b
+
+  ldmia sp!, { r4 - r11, pc }
+
+
+// Register and NEON aliases for update_texture_4bpp_cache.
+#undef texture_page_ptr
+#undef vram_ptr
+#undef dirty_textures_mask
+#undef current_texture_mask
+
+#define psx_gpu r0
+#define current_texture_page r1
+#define texture_page_ptr r2
+#define vram_ptr_a r3
+#define current_texture_page_x r12
+#define current_texture_page_y r4
+#define dirty_textures_mask r5
+#define tile_y r6
+#define tile_x r7
+#define sub_y r8
+#define current_texture_mask r9
+#define c_4096 r10
+#define vram_ptr_b r11
+
+#define texel_block_a d0
+#define texel_block_b d1
+#define texel_block_expanded_a q1
+#define texel_block_expanded_b q2
+#define texel_block_expanded_ab q2
+#define texel_block_expanded_c q3
+#define texel_block_expanded_d q4
+#define texel_block_expanded_cd q3
+
+// Rebuild the 4bpp texture cache for the current texture page and clear the
+// page's dirty bit.  Each source byte holds two 4bpp texels; the NEON
+// widen/shift/mask/or sequence splits the two nibbles of every byte into
+// the two bytes of a halfword, giving one texel per cache byte.  The page
+// is walked as 16x16 tiles, two VRAM lines (vram_ptr_a/vram_ptr_b) per
+// inner iteration.
+// r0: psx_gpu
+function(update_texture_4bpp_cache)
+  stmdb sp!, { r4 - r11, r14 }
+  vpush { q0 - q3 }
+
+  ldrb current_texture_page, [ psx_gpu, #psx_gpu_current_texture_page_offset ]
+
+  ldr texture_page_ptr, [ psx_gpu, #psx_gpu_texture_page_ptr_offset ]
+  ldr vram_ptr_a, [ psx_gpu, #psx_gpu_vram_ptr_offset ]
+
+  and current_texture_page_x, current_texture_page, #0xF
+  ldr current_texture_mask, [ psx_gpu, #psx_gpu_current_texture_mask_offset ]
+
+  mov current_texture_page_y, current_texture_page, lsr #4
+  ldr dirty_textures_mask, [ psx_gpu, #psx_gpu_dirty_textures_4bpp_mask_offset ]
+
+  add vram_ptr_a, vram_ptr_a, current_texture_page_y, lsl #19
+  mov tile_y, #16
+
+  // Clear this page's bit from the 4bpp dirty mask.
+  add vram_ptr_a, vram_ptr_a, current_texture_page_x, lsl #7
+  bic dirty_textures_mask, current_texture_mask
+
+  mov tile_x, #16
+  str dirty_textures_mask, [ psx_gpu, #psx_gpu_dirty_textures_4bpp_mask_offset ]
+
+  mov sub_y, #8
+  movw c_4096, #4096
+
+  // vram_ptr_b walks the odd VRAM lines, one line (2048 bytes) below a.
+  add vram_ptr_b, vram_ptr_a, #2048
+
+ 0:
+  vld1.u32 { texel_block_a }, [ vram_ptr_a, :64 ], c_4096
+  vld1.u32 { texel_block_b }, [ vram_ptr_b, :64 ], c_4096
+
+  // Low nibbles stay in bits 0-3; high nibbles land in bits 8-11 after the
+  // shift, with bits 4-7 cleared from both before merging.
+  vmovl.u8 texel_block_expanded_a, texel_block_a
+  vshll.u8 texel_block_expanded_b, texel_block_a, #4
+  vmovl.u8 texel_block_expanded_c, texel_block_b
+  vshll.u8 texel_block_expanded_d, texel_block_b, #4
+
+  vbic.u16 texel_block_expanded_a, #0x00F0
+  vbic.u16 texel_block_expanded_b, #0x00F0
+  vbic.u16 texel_block_expanded_c, #0x00F0
+  vbic.u16 texel_block_expanded_d, #0x00F0
+
+  vorr.u16 texel_block_expanded_ab, texel_block_expanded_a, \
+   texel_block_expanded_b
+  vorr.u16 texel_block_expanded_cd, texel_block_expanded_c, \
+   texel_block_expanded_d
+
+  vst1.u32 { texel_block_expanded_ab, texel_block_expanded_cd }, \
+   [ texture_page_ptr, :256 ]!
+
+  subs sub_y, sub_y, #1
+  bne 0b
+
+  // Next tile column: step 8 bytes right, rewind the 16 lines just walked.
+  mov sub_y, #8
+  add vram_ptr_a, vram_ptr_a, #8
+  add vram_ptr_b, vram_ptr_b, #8
+
+  sub vram_ptr_a, vram_ptr_a, #(16 * 2048)
+  sub vram_ptr_b, vram_ptr_b, #(16 * 2048)
+
+  subs tile_x, tile_x, #1
+  bne 0b
+
+  // Next tile row: step down 16 lines, rewind the 16 tile columns.
+  mov tile_x, #16
+  add vram_ptr_a, vram_ptr_a, #(16 * 2048)
+  add vram_ptr_b, vram_ptr_b, #(16 * 2048)
+
+  sub vram_ptr_a, vram_ptr_a, #(8 * 16)
+  sub vram_ptr_b, vram_ptr_b, #(8 * 16)
+
+  subs tile_y, tile_y, #1
+  bne 0b
+
+  vpop { q0 - q3 }
+  ldmia sp!, { r4 - r11, pc }
+
+
+// Register and NEON aliases for update_texture_8bpp_cache_slice.
+#undef current_texture_page
+
+#define psx_gpu r0
+#define texture_page r1
+#define texture_page_ptr r2
+#define vram_ptr_a r3
+#define texture_page_x r12
+#define texture_page_y r4
+#define current_texture_page r5
+#define tile_y r6
+#define tile_x r7
+#define sub_y r8
+#define c_4096 r10
+#define vram_ptr_b r11
+
+
+#undef texels_a
+#undef texels_b
+
+#define texels_a q0
+#define texels_b q1
+#define texels_c q2
+#define texels_d q3
+
+
+// Copy one 8bpp texture page slice from VRAM into the texture cache.
+// Texels are copied unmodified, gathered as 8x16 tiles with four VRAM
+// lines read per inner iteration.  When the requested page's parity
+// differs from the current page, the destination starts half a slice in —
+// presumably so odd/even adjacent pages interleave in the cache (verify
+// against the cache layout).
+// r0: psx_gpu
+// r1: texture_page
+function(update_texture_8bpp_cache_slice)
+  stmdb sp!, { r4 - r11, r14 }
+  vpush { q0 - q3 }
+
+  ldrb current_texture_page, [ psx_gpu, #psx_gpu_current_texture_page_offset ]
+  ldr vram_ptr_a, [ psx_gpu, #psx_gpu_vram_ptr_offset ]
+
+  ldr texture_page_ptr, [ psx_gpu, #psx_gpu_texture_page_ptr_offset ]
+  mov tile_y, #16
+
+  and texture_page_x, texture_page, #0xF
+  mov texture_page_y, texture_page, lsr #4
+
+  add vram_ptr_a, vram_ptr_a, texture_page_x, lsl #7
+  mov tile_x, #8
+
+  add vram_ptr_a, vram_ptr_a, texture_page_y, lsl #19
+  eor current_texture_page, current_texture_page, texture_page
+
+  // Parity check: offset the destination by half a slice if it differs.
+  ands current_texture_page, current_texture_page, #0x1
+  mov sub_y, #4
+
+  addne texture_page_ptr, texture_page_ptr, #(8 * 16 * 16)
+  movw c_4096, #4096
+
+  add vram_ptr_b, vram_ptr_a, #2048
+
+ 0:
+  vld1.u32 { texels_a }, [ vram_ptr_a, :128 ], c_4096
+  vld1.u32 { texels_b }, [ vram_ptr_b, :128 ], c_4096
+  vld1.u32 { texels_c }, [ vram_ptr_a, :128 ], c_4096
+  vld1.u32 { texels_d }, [ vram_ptr_b, :128 ], c_4096
+
+  vst1.u32 { texels_a, texels_b }, [ texture_page_ptr, :256 ]!
+  vst1.u32 { texels_c, texels_d }, [ texture_page_ptr, :256 ]!
+
+  subs sub_y, sub_y, #1
+  bne 0b
+
+  mov sub_y, #4
+
+  // Next tile column: 16 bytes right, rewind the 16 lines just covered.
+  add vram_ptr_a, vram_ptr_a, #16
+  add vram_ptr_b, vram_ptr_b, #16
+
+  sub vram_ptr_a, vram_ptr_a, #(16 * 2048)
+  sub vram_ptr_b, vram_ptr_b, #(16 * 2048)
+
+  subs tile_x, tile_x, #1
+  bne 0b
+
+  mov tile_x, #8
+
+  // Next tile row: down 16 lines, rewind the 8 tile columns, and skip the
+  // other parity's half-slice in the destination.
+  add vram_ptr_a, vram_ptr_a, #(16 * 2048)
+  add vram_ptr_b, vram_ptr_b, #(16 * 2048)
+
+  sub vram_ptr_a, vram_ptr_a, #(8 * 16)
+  sub vram_ptr_b, vram_ptr_b, #(8 * 16)
+
+  subs tile_y, tile_y, #1
+  add texture_page_ptr, texture_page_ptr, #(8 * 16 * 16)
+
+  bne 0b
+
+  vpop { q0 - q3 }
+  ldmia sp!, { r4 - r11, pc }
+
--- /dev/null
+/*
+ * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "SDL.h"
+#include "common.h"
+
+extern u32 span_pixels;
+extern u32 span_pixel_blocks;
+extern u32 span_pixel_blocks_unaligned;
+extern u32 spans;
+extern u32 triangles;
+extern u32 sprites;
+extern u32 sprites_4bpp;
+extern u32 sprites_8bpp;
+extern u32 sprites_16bpp;
+extern u32 sprites_untextured;
+extern u32 sprite_blocks;
+extern u32 lines;
+extern u32 texels_4bpp;
+extern u32 texels_8bpp;
+extern u32 texels_16bpp;
+extern u32 texel_blocks_4bpp;
+extern u32 texel_blocks_8bpp;
+extern u32 texel_blocks_16bpp;
+extern u32 texel_blocks_untextured;
+extern u32 blend_blocks;
+extern u32 untextured_pixels;
+extern u32 blend_pixels;
+extern u32 transparent_pixels;
+extern u32 render_buffer_flushes;
+extern u32 state_changes;
+extern u32 trivial_rejects;
+extern u32 left_split_triangles;
+extern u32 flat_triangles;
+extern u32 clipped_triangles;
+extern u32 zero_block_spans;
+extern u32 texture_cache_loads;
+extern u32 false_modulated_triangles;
+extern u32 false_modulated_sprites;
+
+// Count of VRAM pixels that differ from the reference dump after parsing.
+static u32 mismatches;
+
+// Layout of a GPU state dump file: the full 1024x512 16bpp VRAM image
+// followed by the GPU registers and status word.
+typedef struct
+{
+  u16 vram[1024 * 512];
+  u32 gpu_register[15];
+  u32 status;
+} gpu_dump_struct;
+
+// Reference state loaded from the dump file given on the command line.
+static gpu_dump_struct state;
+
+// The GPU instance under test; over-aligned to 256 bytes, presumably for
+// the NEON render paths.
+psx_gpu_struct __attribute__((aligned(256))) _psx_gpu;
+
+// numerator as a percentage of denominator, in double precision.  Note a
+// zero denominator produces inf/nan rather than trapping.
+#define percent_of(numerator, denominator)                                     \
+  ((((double)(numerator)) / (denominator)) * 100.0)                            \
+
+// Zero every profiling counter ahead of a measured parse pass.
+void clear_stats(void)
+{
+  // Primitive counts.
+  triangles = false_modulated_triangles = trivial_rejects = 0;
+  left_split_triangles = flat_triangles = clipped_triangles = 0;
+  sprites = sprites_4bpp = sprites_8bpp = sprites_16bpp = 0;
+  sprites_untextured = false_modulated_sprites = lines = 0;
+
+  // Span and block counts.
+  spans = zero_block_spans = 0;
+  span_pixels = span_pixel_blocks = span_pixel_blocks_unaligned = 0;
+  sprite_blocks = blend_blocks = 0;
+  texel_blocks_4bpp = texel_blocks_8bpp = texel_blocks_16bpp = 0;
+  texel_blocks_untextured = 0;
+
+  // Pixel counts.
+  texels_4bpp = texels_8bpp = texels_16bpp = 0;
+  untextured_pixels = blend_pixels = transparent_pixels = 0;
+
+  // Pipeline events.
+  render_buffer_flushes = state_changes = texture_cache_loads = 0;
+}
+
+// Convert the 15bpp VRAM image to the 32bpp screen surface and present it.
+void update_screen(psx_gpu_struct *psx_gpu, SDL_Surface *screen)
+{
+  u32 *dest = (u32 *)screen->pixels;
+  u32 i;
+
+  // VRAM and the surface share the 1024-pixel pitch, so one flat pass
+  // over all 1024x512 pixels suffices.
+  for(i = 0; i < 1024 * 512; i++)
+  {
+    u32 pixel = psx_gpu->vram_ptr[i];
+    dest[i] = ((pixel & 0x1F) << (16 + 3)) |
+     (((pixel >> 5) & 0x1F) << (8 + 3)) |
+     (((pixel >> 10) & 0x1F) << 3);
+  }
+
+  SDL_Flip(screen);
+}
+
+#ifdef PANDORA_BUILD
+
+#include <fcntl.h>
+#include <linux/fb.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+
+#endif
+
+// Test driver: load a GPU state dump and a command list, replay the list
+// through the renderer, compare the resulting VRAM against the dump, and
+// (unless -n is given) display the result.  Returns nonzero on mismatch
+// or on any file error.
+int main(int argc, char *argv[])
+{
+  psx_gpu_struct *psx_gpu = &_psx_gpu;
+  SDL_Surface *screen = NULL;
+  SDL_Event event;
+
+  u32 *list;
+  int size;
+  FILE *state_file;
+  FILE *list_file;
+  u32 no_display = 0;
+
+  if((argc != 3) && (argc != 4))
+  {
+    printf("usage:\n%s <state> <list>\n", argv[0]);
+    return 1;
+  }
+
+  if((argc == 4) && !strcmp(argv[3], "-n"))
+    no_display = 1;
+
+  // Load the reference state dump; fail cleanly instead of dereferencing a
+  // NULL FILE * on a bad path.
+  state_file = fopen(argv[1], "rb");
+  if(state_file == NULL)
+  {
+    fprintf(stderr, "could not open state file %s\n", argv[1]);
+    return 1;
+  }
+
+  if(fread(&state, 1, sizeof(gpu_dump_struct), state_file) !=
+   sizeof(gpu_dump_struct))
+  {
+    fprintf(stderr, "could not read state file %s\n", argv[1]);
+    fclose(state_file);
+    return 1;
+  }
+  fclose(state_file);
+
+  // Load the entire command list into memory.
+  list_file = fopen(argv[2], "rb");
+  if(list_file == NULL)
+  {
+    fprintf(stderr, "could not open list file %s\n", argv[2]);
+    return 1;
+  }
+
+  fseek(list_file, 0, SEEK_END);
+  size = ftell(list_file);
+  fseek(list_file, 0, SEEK_SET);
+
+  if(size < 0)
+  {
+    fprintf(stderr, "could not get size of list file %s\n", argv[2]);
+    fclose(list_file);
+    return 1;
+  }
+
+  list = malloc(size);
+  if(list == NULL)
+  {
+    fprintf(stderr, "out of memory loading list file %s\n", argv[2]);
+    fclose(list_file);
+    return 1;
+  }
+
+  if(fread(list, 1, size, list_file) != (size_t)size)
+  {
+    fprintf(stderr, "could not read list file %s\n", argv[2]);
+    fclose(list_file);
+    free(list);
+    return 1;
+  }
+  fclose(list_file);
+
+  if(no_display == 0)
+  {
+    SDL_Init(SDL_INIT_EVERYTHING);
+    screen = SDL_SetVideoMode(1024, 512, 32, 0);
+  }
+
+  initialize_psx_gpu(psx_gpu);
+
+#ifdef PANDORA_BUILD
+  system("ofbset -fb /dev/fb1 -mem 6291456 -en 0");
+  u32 fbdev_handle = open("/dev/fb1", O_RDWR);
+  psx_gpu->vram_ptr = (mmap((void *)0x50000000, 1024 * 1024 * 2, PROT_READ | PROT_WRITE,
+   MAP_SHARED | 0xA0000000, fbdev_handle, 0));
+  psx_gpu->vram_ptr += 64;
+#endif
+
+
+
+#ifdef PANDORA_BUILD
+  //triangle_benchmark(psx_gpu);
+  //return 0;
+#endif
+
+#ifdef FULL_COMPARE_MODE
+  // First pass counts pixels, second pass compares them against the dump.
+  psx_gpu->pixel_count_mode = 1;
+  psx_gpu->pixel_compare_mode = 0;
+  memcpy(psx_gpu->vram_ptr, state.vram, 1024 * 512 * 2);
+  //render_block_fill(psx_gpu, 0, 0, 0, 1024, 512);
+  gpu_parse(psx_gpu, list, size);
+
+  psx_gpu->pixel_count_mode = 0;
+  psx_gpu->pixel_compare_mode = 1;
+  memcpy(psx_gpu->compare_vram, state.vram, 1024 * 512 * 2);
+  memcpy(psx_gpu->vram_ptr, state.vram, 1024 * 512 * 2);
+  //render_block_fill(psx_gpu, 0, 0, 0, 1024, 512);
+  clear_stats();
+  gpu_parse(psx_gpu, list, size);
+  flush_render_block_buffer(psx_gpu);
+#else
+  memcpy(psx_gpu->vram_ptr, state.vram, 1024 * 512 * 2);
+
+  psx_gpu->pixel_count_mode = 0;
+  psx_gpu->pixel_compare_mode = 0;
+
+  clear_stats();
+
+#ifdef PANDORA_BUILD
+  init_counter();
+#endif
+
+  // Warm-up pass, then a timed pass (timing is Pandora-only).
+  gpu_parse(psx_gpu, list, size);
+  flush_render_block_buffer(psx_gpu);
+
+  clear_stats();
+
+#ifdef PANDORA_BUILD
+  u32 cycles = get_counter();
+#endif
+
+  gpu_parse(psx_gpu, list, size);
+  flush_render_block_buffer(psx_gpu);
+
+  printf("%s: ", argv[1]);
+#ifdef PANDORA_BUILD
+  u32 cycles_elapsed = get_counter() - cycles;
+
+  printf("%d\n", cycles_elapsed);
+#endif
+
+#if 1
+  // Compare against the reference, ignoring the mask bit; matching pixels
+  // are dimmed so mismatches stand out on screen.
+  u32 i;
+
+  for(i = 0; i < 1024 * 512; i++)
+  {
+    if((psx_gpu->vram_ptr[i] & 0x7FFF) != (state.vram[i] & 0x7FFF))
+    {
+      printf("(%d %d %d) vs (%d %d %d) at (%d %d)\n",
+       psx_gpu->vram_ptr[i] & 0x1F,
+       (psx_gpu->vram_ptr[i] >> 5) & 0x1F,
+       (psx_gpu->vram_ptr[i] >> 10) & 0x1F,
+       state.vram[i] & 0x1F,
+       (state.vram[i] >> 5) & 0x1F,
+       (state.vram[i] >> 10) & 0x1F, i % 1024, i / 1024);
+
+      mismatches++;
+    }
+    else
+    {
+      psx_gpu->vram_ptr[i] =
+       ((psx_gpu->vram_ptr[i] & 0x1F) / 4) |
+       ((((psx_gpu->vram_ptr[i] >> 5) & 0x1F) / 4) << 5) |
+       ((((psx_gpu->vram_ptr[i] >> 10) & 0x1F) / 4) << 10);
+    }
+  }
+#endif
+#endif
+
+#if 0
+  printf("\n");
+  printf(" %d pixels, %d pixel blocks (%d unaligned), %d spans\n"
+   " (%lf pixels per block (%lf unaligned, r %lf), %lf pixels per span),\n"
+   " %lf blocks per span (%lf per non-zero span), %lf overdraw)\n\n",
+   span_pixels, span_pixel_blocks, span_pixel_blocks_unaligned, spans,
+   (double)span_pixels / span_pixel_blocks,
+   (double)span_pixels / span_pixel_blocks_unaligned,
+   (double)span_pixel_blocks / span_pixel_blocks_unaligned,
+   (double)span_pixels / spans,
+   (double)span_pixel_blocks / spans,
+   (double)span_pixel_blocks / (spans - zero_block_spans),
+   (double)span_pixels /
+   ((psx_gpu->viewport_end_x - psx_gpu->viewport_start_x) *
+   (psx_gpu->viewport_end_y - psx_gpu->viewport_start_y)));
+
+  printf(" %d triangles (%d false modulated)\n"
+   " (%d trivial rejects, %lf%% flat, %lf%% left split, %lf%% clipped)\n"
+   " (%lf pixels per triangle, %lf rows per triangle)\n\n",
+   triangles, false_modulated_triangles, trivial_rejects,
+   percent_of(flat_triangles, triangles),
+   percent_of(left_split_triangles, triangles),
+   percent_of(clipped_triangles, triangles),
+   (double)span_pixels / triangles,
+   (double)spans / triangles);
+
+  printf(" Block data:\n");
+  printf(" %7d 4bpp texel blocks (%lf%%)\n", texel_blocks_4bpp,
+   percent_of(texel_blocks_4bpp, span_pixel_blocks));
+  printf(" %7d 8bpp texel blocks (%lf%%)\n", texel_blocks_8bpp,
+   percent_of(texel_blocks_8bpp, span_pixel_blocks));
+  printf(" %7d 16bpp texel blocks (%lf%%)\n", texel_blocks_16bpp,
+   percent_of(texel_blocks_16bpp, span_pixel_blocks));
+  printf(" %7d untextured blocks (%lf%%)\n", texel_blocks_untextured,
+   percent_of(texel_blocks_untextured, span_pixel_blocks));
+  printf(" %7d sprite blocks (%lf%%)\n", sprite_blocks,
+   percent_of(sprite_blocks, span_pixel_blocks));
+  printf(" %7d blended blocks (%lf%%)\n", blend_blocks,
+   percent_of(blend_blocks, span_pixel_blocks));
+  printf("\n");
+  printf(" %lf blocks per render buffer flush\n", (double)span_pixel_blocks /
+   render_buffer_flushes);
+  printf(" %d zero block spans\n", zero_block_spans);
+  printf(" %d state changes, %d texture cache loads\n", state_changes,
+   texture_cache_loads);
+  if(sprites)
+  {
+    printf(" %d sprites\n"
+     " 4bpp: %lf%%\n"
+     " 8bpp: %lf%%\n"
+     " 16bpp: %lf%%\n"
+     " untextured: %lf%%\n",
+     sprites, percent_of(sprites_4bpp, sprites),
+     percent_of(sprites_8bpp, sprites), percent_of(sprites_16bpp, sprites),
+     percent_of(sprites_untextured, sprites));
+  }
+  printf("\n");
+  printf(" %d mismatches\n\n\n", mismatches);
+#endif
+
+  free(list);
+
+  fflush(stdout);
+
+  if(no_display == 0)
+  {
+    while(1)
+    {
+      update_screen(psx_gpu, screen);
+
+      if(SDL_PollEvent(&event))
+      {
+        if((event.type == SDL_QUIT) ||
+         ((event.type == SDL_KEYDOWN) &&
+         (event.key.keysym.sym == SDLK_ESCAPE)))
+        {
+          break;
+        }
+      }
+
+      SDL_Delay(20);
+    }
+  }
+
+  return (mismatches != 0);
+}
--- /dev/null
+/*
+ * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <stdio.h>
+
+#include "common.h"
+
+// Number of extra 32-bit words following each GPU command word, indexed by
+// the command byte; gpu_parse advances by 1 + command_lengths[command].
+// Commands not handled by the parser are left at zero.
+const u8 command_lengths[256] =
+{
+  0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // 00
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // 10
+  3, 3, 3, 3, 6, 6, 6, 6, 4, 4, 4, 4, 8, 8, 8, 8,   // 20
+  5, 5, 5, 5, 8, 8, 8, 8, 7, 7, 7, 7, 11, 11, 11, 11, // 30
+  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,   // 40
+  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,   // 50
+  2, 2, 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,   // 60
+  1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2,   // 70
+  3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // 80
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // 90
+  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // a0
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // b0
+  2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // c0
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // d0
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   // e0
+  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0    // f0
+};
+
+// Point psx_gpu->texture_page_ptr at the backing store for the current
+// texture page and texture window, based on the texture mode held in bits
+// 8-9 of render_state_base.  With the texture caches enabled the pointer
+// goes into the relevant cache (pages laid out as 16x16 tiles); otherwise
+// it addresses VRAM directly, scaled by texels-per-byte for each mode.
+void update_texture_ptr(psx_gpu_struct *psx_gpu)
+{
+  u8 *texture_ptr;
+
+  switch((psx_gpu->render_state_base >> 8) & 0x3)
+  {
+    default:
+    case TEXTURE_MODE_4BPP:
+#ifdef TEXTURE_CACHE_4BPP
+      // Window offset decomposed into intra-tile (low nibble) and tile
+      // (high nibble) components of the 16x16-tiled cache layout.
+      texture_ptr = psx_gpu->texture_4bpp_cache[psx_gpu->current_texture_page];
+      texture_ptr += psx_gpu->texture_window_x & 0xF;
+      texture_ptr += (psx_gpu->texture_window_y & 0xF) << 4;
+      texture_ptr += (psx_gpu->texture_window_x >> 4) << 8;
+      texture_ptr += (psx_gpu->texture_window_y >> 4) << 12;
+#else
+      // Direct VRAM: pages are 128 bytes wide, 256 lines tall; two 4bpp
+      // texels per byte, hence window_x / 2.
+      texture_ptr = (u8 *)(psx_gpu->vram_ptr);
+      texture_ptr += (psx_gpu->current_texture_page & 0xF) * 128;
+      texture_ptr += ((psx_gpu->current_texture_page >> 4) * 256) * 2048;
+      texture_ptr += psx_gpu->texture_window_x / 2;
+      texture_ptr += (psx_gpu->texture_window_y) * 2048;
+#endif
+      break;
+
+    case TEXTURE_MODE_8BPP:
+#ifdef TEXTURE_CACHE_8BPP
+      // Odd/even pages live in separate cache arrays.  NOTE(review): unlike
+      // the 4bpp case there is no (window_x & 0xF) term here — presumably
+      // folded in elsewhere; confirm against the 8bpp cache layout.
+      if(psx_gpu->current_texture_page & 0x1)
+      {
+        texture_ptr =
+         psx_gpu->texture_8bpp_odd_cache[psx_gpu->current_texture_page >> 1];
+      }
+      else
+      {
+        texture_ptr =
+         psx_gpu->texture_8bpp_even_cache[psx_gpu->current_texture_page >> 1];
+      }
+
+      texture_ptr += (psx_gpu->texture_window_y & 0xF) << 4;
+      texture_ptr += (psx_gpu->texture_window_x >> 4) << 8;
+      texture_ptr += (psx_gpu->texture_window_y >> 4) << 12;
+#else
+      // Direct VRAM: one 8bpp texel per byte.
+      texture_ptr = (u8 *)(psx_gpu->vram_ptr);
+      texture_ptr += (psx_gpu->current_texture_page & 0xF) * 128;
+      texture_ptr += ((psx_gpu->current_texture_page >> 4) * 256) * 2048;
+      texture_ptr += psx_gpu->texture_window_x;
+      texture_ptr += (psx_gpu->texture_window_y) * 2048;
+#endif
+      break;
+
+    case TEXTURE_MODE_16BPP:
+      // 16bpp always reads VRAM directly: two bytes per texel.
+      texture_ptr = (u8 *)(psx_gpu->vram_ptr);
+      texture_ptr += (psx_gpu->current_texture_page & 0xF) * 128;
+      texture_ptr += ((psx_gpu->current_texture_page >> 4) * 256) * 2048;
+      texture_ptr += psx_gpu->texture_window_x * 2;
+      texture_ptr += (psx_gpu->texture_window_y) * 2048;
+      break;
+  }
+
+  psx_gpu->texture_page_ptr = texture_ptr;
+}
+
+// Latch new texture settings from a primitive's texpage word, updating
+// render_state_base, the texture-page mask used for dirty tracking, and
+// the cached texture pointer.  No-op when the settings are unchanged.
+void set_texture(psx_gpu_struct *psx_gpu, u32 texture_settings)
+{
+  if(psx_gpu->texture_settings != texture_settings)
+  {
+    u32 new_texture_page = texture_settings & 0x1F;
+    u32 texture_mode = (texture_settings >> 7) & 0x3;
+    u32 render_state_base = psx_gpu->render_state_base;
+
+    // Pending blocks still reference the old page's texel data.
+    if(psx_gpu->current_texture_page != new_texture_page)
+      flush_render_block_buffer(psx_gpu);
+
+    // Copy settings bits 5-8 into render_state_base bits 6-9.
+    render_state_base &= ~(0xF << 6);
+    render_state_base |= ((texture_settings >> 5) & 0xF) << 6;
+
+    psx_gpu->render_state_base = render_state_base;
+
+    psx_gpu->current_texture_mask = 0x1 << new_texture_page;
+
+    if(texture_mode == TEXTURE_MODE_8BPP)
+    {
+      // In 8bpp mode 256x256 takes up two pages. If it's on the right edge it
+      // wraps back around to the left edge.
+      u32 adjacent_texture_page = ((texture_settings + 1) & 0xF) | (texture_settings & 0x10);
+      psx_gpu->current_texture_mask |= 0x1 << adjacent_texture_page;
+
+      // Separate dirty masks are kept for the two 8bpp page parities; swap
+      // them when the active parity changes.
+      if((psx_gpu->last_8bpp_texture_page ^ new_texture_page) & 0x1)
+      {
+        u32 dirty_textures_8bpp_alternate_mask =
+         psx_gpu->dirty_textures_8bpp_alternate_mask;
+        psx_gpu->dirty_textures_8bpp_alternate_mask =
+         psx_gpu->dirty_textures_8bpp_mask;
+        psx_gpu->dirty_textures_8bpp_mask = dirty_textures_8bpp_alternate_mask;
+      }
+
+      psx_gpu->last_8bpp_texture_page = new_texture_page;
+    }
+
+    psx_gpu->current_texture_page = new_texture_page;
+    psx_gpu->texture_settings = texture_settings;
+
+    update_texture_ptr(psx_gpu);
+  }
+}
+
+// Latch new CLUT settings; a change flushes pending blocks (they were
+// built against the old clut_ptr) and recomputes the VRAM CLUT pointer.
+void set_clut(psx_gpu_struct *psx_gpu, u32 clut_settings)
+{
+  if(psx_gpu->clut_settings == clut_settings)
+    return;
+
+  flush_render_block_buffer(psx_gpu);
+  psx_gpu->clut_settings = clut_settings;
+  psx_gpu->clut_ptr = psx_gpu->vram_ptr + ((clut_settings & 0x7FFF) * 16);
+}
+
+// Latch a new flat triangle color, flushing pending blocks on change.
+void set_triangle_color(psx_gpu_struct *psx_gpu, u32 triangle_color)
+{
+  if(psx_gpu->triangle_color == triangle_color)
+    return;
+
+  flush_render_block_buffer(psx_gpu);
+  psx_gpu->triangle_color = triangle_color;
+}
+
+// Sign-extend a 12-bit packed coordinate field to a full s32.
+#define sign_extend_12bit(value) \
+  (((s32)((value) << 20)) >> 20) \
+
+// Read an x/y pair (16-bit fields at offset16 halfwords into the command)
+// and apply the drawing offset.
+#define get_vertex_data_xy(vertex_number, offset16) \
+  vertexes[vertex_number].x = \
+   sign_extend_12bit(list_s16[offset16]) + psx_gpu->offset_x; \
+  vertexes[vertex_number].y = \
+   sign_extend_12bit(list_s16[(offset16) + 1]) + psx_gpu->offset_y; \
+
+// Read a u/v texel coordinate pair packed in one halfword.
+#define get_vertex_data_uv(vertex_number, offset16) \
+  vertexes[vertex_number].u = list_s16[offset16] & 0xFF; \
+  vertexes[vertex_number].v = (list_s16[offset16] >> 8) & 0xFF \
+
+// Read an r/g/b triple from a 32-bit command word (offset32 in words).
+#define get_vertex_data_rgb(vertex_number, offset32) \
+  vertexes[vertex_number].r = list[offset32] & 0xFF; \
+  vertexes[vertex_number].g = (list[offset32] >> 8) & 0xFF; \
+  vertexes[vertex_number].b = (list[offset32] >> 16) & 0xFF \
+
+// Composite readers for the textured / shaded vertex layouts.
+#define get_vertex_data_xy_uv(vertex_number, offset16) \
+  get_vertex_data_xy(vertex_number, offset16); \
+  get_vertex_data_uv(vertex_number, (offset16) + 2) \
+
+#define get_vertex_data_xy_rgb(vertex_number, offset16) \
+  get_vertex_data_rgb(vertex_number, (offset16) / 2); \
+  get_vertex_data_xy(vertex_number, (offset16) + 2); \
+
+#define get_vertex_data_xy_uv_rgb(vertex_number, offset16) \
+  get_vertex_data_rgb(vertex_number, (offset16) / 2); \
+  get_vertex_data_xy(vertex_number, (offset16) + 2); \
+  get_vertex_data_uv(vertex_number, (offset16) + 4); \
+
+// Fill a vertex's color from a single packed BGR word.
+#define set_vertex_color_constant(vertex_number, color) \
+  vertexes[vertex_number].r = color & 0xFF; \
+  vertexes[vertex_number].g = (color >> 8) & 0xFF; \
+  vertexes[vertex_number].b = (color >> 16) & 0xFF \
+
+#define get_vertex_data_xy_rgb_constant(vertex_number, offset16, color) \
+  get_vertex_data_xy(vertex_number, offset16); \
+  set_vertex_color_constant(vertex_number, color) \
+
+vertex_struct vertexes[4] __attribute__((aligned(32)));
+
// Walk a GP0 command list and dispatch each packet to the rasterizer.
// list points at packet words; size is the list length in bytes.
// command_lengths[] gives the number of parameter words that follow each
// command word; variable-length packets (poly-lines, image loads) extend
// command_length on the fly so the loop steps over their payload.
void gpu_parse(psx_gpu_struct *psx_gpu, u32 *list, u32 size)
{
  u32 current_command, command_length;

  u32 *list_end = list + (size / 4);

  for(; list < list_end; list += 1 + command_length)
  {
    // Halfword view of the packet for the 16-bit x/y/size/clut fields.
    s16 *list_s16 = (void *)list;
    current_command = *list >> 24;
    command_length = command_lengths[current_command];

    switch(current_command)
    {
      case 0x00: // NOP
        break;

      case 0x02: // fill rectangle: color, x, y, width, height
        render_block_fill(psx_gpu, list[0] & 0xFFFFFF, list_s16[2], list_s16[3],
         list_s16[4] & 0x3FF, list_s16[5] & 0x3FF);
        break;

      case 0x20 ... 0x23: // monochrome triangle
      {
        set_triangle_color(psx_gpu, list[0] & 0xFFFFFF);

        get_vertex_data_xy(0, 2);
        get_vertex_data_xy(1, 4);
        get_vertex_data_xy(2, 6);

        render_triangle(psx_gpu, vertexes, current_command);
        break;
      }

      case 0x24 ... 0x27: // textured triangle (clut word 5, texpage word 9)
      {
        set_clut(psx_gpu, list_s16[5]);
        set_texture(psx_gpu, list_s16[9]);
        set_triangle_color(psx_gpu, list[0] & 0xFFFFFF);

        get_vertex_data_xy_uv(0, 2);
        get_vertex_data_xy_uv(1, 6);
        get_vertex_data_xy_uv(2, 10);

        render_triangle(psx_gpu, vertexes, current_command);
        break;
      }

      case 0x28 ... 0x2B: // monochrome quad: split into two triangles
      {
        set_triangle_color(psx_gpu, list[0] & 0xFFFFFF);

        get_vertex_data_xy(0, 2);
        get_vertex_data_xy(1, 4);
        get_vertex_data_xy(2, 6);
        get_vertex_data_xy(3, 8);

        // Quad 0-1-2-3 is rendered as triangles 0-1-2 and 1-2-3.
        render_triangle(psx_gpu, vertexes, current_command);
        render_triangle(psx_gpu, &(vertexes[1]), current_command);
        break;
      }

      case 0x2C ... 0x2F: // textured quad
      {
        set_clut(psx_gpu, list_s16[5]);
        set_texture(psx_gpu, list_s16[9]);
        set_triangle_color(psx_gpu, list[0] & 0xFFFFFF);

        get_vertex_data_xy_uv(0, 2);
        get_vertex_data_xy_uv(1, 6);
        get_vertex_data_xy_uv(2, 10);
        get_vertex_data_xy_uv(3, 14);

        render_triangle(psx_gpu, vertexes, current_command);
        render_triangle(psx_gpu, &(vertexes[1]), current_command);
        break;
      }

      case 0x30 ... 0x33: // shaded (Gouraud) triangle
      {
        get_vertex_data_xy_rgb(0, 0);
        get_vertex_data_xy_rgb(1, 4);
        get_vertex_data_xy_rgb(2, 8);

        render_triangle(psx_gpu, vertexes, current_command);
        break;
      }

      case 0x34: // shaded textured triangle
      case 0x35:
      case 0x36:
      case 0x37:
      {
        set_clut(psx_gpu, list_s16[5]);
        set_texture(psx_gpu, list_s16[11]);

        get_vertex_data_xy_uv_rgb(0, 0);
        get_vertex_data_xy_uv_rgb(1, 6);
        get_vertex_data_xy_uv_rgb(2, 12);

        render_triangle(psx_gpu, vertexes, current_command);
        break;
      }

      case 0x38: // shaded quad
      case 0x39:
      case 0x3A:
      case 0x3B:
      {
        get_vertex_data_xy_rgb(0, 0);
        get_vertex_data_xy_rgb(1, 4);
        get_vertex_data_xy_rgb(2, 8);
        get_vertex_data_xy_rgb(3, 12);

        render_triangle(psx_gpu, vertexes, current_command);
        render_triangle(psx_gpu, &(vertexes[1]), current_command);
        break;
      }

      case 0x3C: // shaded textured quad
      case 0x3D:
      case 0x3E:
      case 0x3F:
      {
        set_clut(psx_gpu, list_s16[5]);
        set_texture(psx_gpu, list_s16[11]);

        get_vertex_data_xy_uv_rgb(0, 0);
        get_vertex_data_xy_uv_rgb(1, 6);
        get_vertex_data_xy_uv_rgb(2, 12);
        get_vertex_data_xy_uv_rgb(3, 18);

        render_triangle(psx_gpu, vertexes, current_command);
        render_triangle(psx_gpu, &(vertexes[1]), current_command);
        break;
      }

      case 0x40 ... 0x47: // monochrome line
      {
        vertexes[0].x = list_s16[2] + psx_gpu->offset_x;
        vertexes[0].y = list_s16[3] + psx_gpu->offset_y;
        vertexes[1].x = list_s16[4] + psx_gpu->offset_x;
        vertexes[1].y = list_s16[5] + psx_gpu->offset_y;

        render_line(psx_gpu, vertexes, current_command, list[0]);
        break;
      }

      case 0x48 ... 0x4F: // monochrome poly-line, 0x55555555-terminated
      {
        u32 num_vertexes = 1;
        u32 *list_position = &(list[2]);
        u32 xy = list[1];

        vertexes[1].x = (xy & 0xFFFF) + psx_gpu->offset_x;
        vertexes[1].y = (xy >> 16) + psx_gpu->offset_y;

        while(1)
        {
          xy = *list_position;
          if(xy == 0x55555555)
            break;

          // Previous endpoint becomes the new start point.
          vertexes[0] = vertexes[1];

          vertexes[1].x = (xy & 0xFFFF) + psx_gpu->offset_x;
          vertexes[1].y = (xy >> 16) + psx_gpu->offset_y;

          list_position++;
          num_vertexes++;

          render_line(psx_gpu, vertexes, current_command, list[0]);
        }

        // Extend the fixed command length by the extra vertex words consumed.
        if(num_vertexes > 2)
          command_length += (num_vertexes - 2);

        break;
      }

      case 0x50 ... 0x57: // shaded line (color word per endpoint)
      {
        vertexes[0].r = list[0] & 0xFF;
        vertexes[0].g = (list[0] >> 8) & 0xFF;
        vertexes[0].b = (list[0] >> 16) & 0xFF;
        vertexes[0].x = list_s16[2] + psx_gpu->offset_x;
        vertexes[0].y = list_s16[3] + psx_gpu->offset_y;

        vertexes[1].r = list[2] & 0xFF;
        vertexes[1].g = (list[2] >> 8) & 0xFF;
        vertexes[1].b = (list[2] >> 16) & 0xFF;
        vertexes[1].x = list_s16[6] + psx_gpu->offset_x;
        vertexes[1].y = list_s16[7] + psx_gpu->offset_y;

        render_line(psx_gpu, vertexes, current_command, 0);
        break;
      }

      case 0x58 ... 0x5F: // shaded poly-line: (color, xy) word pairs
      {
        u32 num_vertexes = 1;
        u32 *list_position = &(list[2]);
        u32 color = list[0];
        u32 xy = list[1];

        vertexes[1].r = color & 0xFF;
        vertexes[1].g = (color >> 8) & 0xFF;
        vertexes[1].b = (color >> 16) & 0xFF;
        vertexes[1].x = (xy & 0xFFFF) + psx_gpu->offset_x;
        vertexes[1].y = (xy >> 16) + psx_gpu->offset_y;

        while(1)
        {
          // Terminator appears in the color-word slot.
          color = list_position[0];
          if(color == 0x55555555)
            break;

          xy = list_position[1];

          vertexes[0] = vertexes[1];

          vertexes[1].r = color & 0xFF;
          vertexes[1].g = (color >> 8) & 0xFF;
          vertexes[1].b = (color >> 16) & 0xFF;
          vertexes[1].x = (xy & 0xFFFF) + psx_gpu->offset_x;
          vertexes[1].y = (xy >> 16) + psx_gpu->offset_y;

          list_position += 2;
          num_vertexes++;

          render_line(psx_gpu, vertexes, current_command, 0);
        }

        // Two words consumed per extra vertex.
        if(num_vertexes > 2)
          command_length += ((num_vertexes * 2) - 2);

        break;
      }

      case 0x60 ... 0x63: // monochrome rectangle, explicit size
      {
        u32 x = list_s16[2] + psx_gpu->offset_x;
        u32 y = list_s16[3] + psx_gpu->offset_y;
        u32 width = list_s16[4] & 0x3FF;
        u32 height = list_s16[5] & 0x1FF;

        psx_gpu->primitive_color = list[0] & 0xFFFFFF;

        render_sprite(psx_gpu, x, y, 0, 0, width, height, current_command, list[0]);
        break;
      }

      case 0x64 ... 0x67: // textured rectangle (sprite), explicit size
      {
        u32 x = list_s16[2] + psx_gpu->offset_x;
        u32 y = list_s16[3] + psx_gpu->offset_y;
        u32 uv = list_s16[4];
        u32 width = list_s16[6] & 0x3FF;
        u32 height = list_s16[7] & 0x1FF;

        psx_gpu->primitive_color = list[0] & 0xFFFFFF;
        set_clut(psx_gpu, list_s16[5]);

        render_sprite(psx_gpu, x, y, uv & 0xFF, (uv >> 8) & 0xFF, width, height,
         current_command, list[0]);
        break;
      }

      case 0x68: // 1x1 monochrome rectangle (dot)
      case 0x69:
      case 0x6A:
      case 0x6B:
      {
        s32 x = list_s16[2] + psx_gpu->offset_x;
        s32 y = list_s16[3] + psx_gpu->offset_y;

        psx_gpu->primitive_color = list[0] & 0xFFFFFF;

        render_sprite(psx_gpu, x, y, 0, 0, 1, 1, current_command, list[0]);
        break;
      }

      case 0x70: // 8x8 monochrome rectangle
      case 0x71:
      case 0x72:
      case 0x73:
      {
        s32 x = list_s16[2] + psx_gpu->offset_x;
        s32 y = list_s16[3] + psx_gpu->offset_y;

        psx_gpu->primitive_color = list[0] & 0xFFFFFF;

        render_sprite(psx_gpu, x, y, 0, 0, 8, 8, current_command, list[0]);
        break;
      }

      case 0x74: // 8x8 textured rectangle
      case 0x75:
      case 0x76:
      case 0x77:
      {
        s32 x = list_s16[2] + psx_gpu->offset_x;
        s32 y = list_s16[3] + psx_gpu->offset_y;
        u32 uv = list_s16[4];

        psx_gpu->primitive_color = list[0] & 0xFFFFFF;
        set_clut(psx_gpu, list_s16[5]);

        render_sprite(psx_gpu, x, y, uv & 0xFF, (uv >> 8) & 0xFF, 8, 8,
         current_command, list[0]);
        break;
      }

      case 0x78: // 16x16 monochrome rectangle
      case 0x79:
      case 0x7A:
      case 0x7B:
      {
        s32 x = list_s16[2] + psx_gpu->offset_x;
        s32 y = list_s16[3] + psx_gpu->offset_y;

        psx_gpu->primitive_color = list[0] & 0xFFFFFF;
        render_sprite(psx_gpu, x, y, 0, 0, 16, 16, current_command, list[0]);
        break;
      }

      case 0x7C: // 16x16 textured rectangle
      case 0x7D:
      case 0x7E:
      case 0x7F:
      {
        s32 x = list_s16[2] + psx_gpu->offset_x;
        s32 y = list_s16[3] + psx_gpu->offset_y;
        u32 uv = list_s16[4];

        psx_gpu->primitive_color = list[0] & 0xFFFFFF;
        set_clut(psx_gpu, list_s16[5]);

        render_sprite(psx_gpu, x, y, uv & 0xFF, (uv >> 8) & 0xFF, 16, 16,
         current_command, list[0]);
        break;
      }

      case 0x80: // vid -> vid
        render_block_move(psx_gpu, list_s16[2] & 0x3FF, list_s16[3] & 0x1FF,
         list_s16[4] & 0x3FF, list_s16[5] & 0x1FF, list_s16[6], list_s16[7]);
        break;

      case 0xA0: // sys -> vid
      {
        u32 load_x = list_s16[2];
        u32 load_y = list_s16[3];
        u32 load_width = list_s16[4];
        u32 load_height = list_s16[5];
        u32 load_size = load_width * load_height;

        // Image data follows inline: two 16bpp pixels per list word.
        // NOTE(review): load_size / 2 floors for odd pixel counts — confirm
        // the caller pads odd-sized transfers to a whole word.
        command_length += load_size / 2;

        render_block_copy(psx_gpu, (u16 *)&(list_s16[6]), load_x, load_y,
         load_width, load_height, load_width);
        break;
      }

      case 0xC0: // vid -> sys
        break;

      case 0xE1: // draw mode: texture page, dither enable, draw-to-display
        set_texture(psx_gpu, list[0] & 0x1FF);
        if(list[0] & (1 << 9))
          psx_gpu->render_state_base |= RENDER_STATE_DITHER;
        else
          psx_gpu->render_state_base &= ~RENDER_STATE_DITHER;

        psx_gpu->display_area_draw_enable = (list[0] >> 10) & 0x1;
        break;

      case 0xE2: // texture window
      {
        // TODO: Clean
        u32 texture_window_settings = list[0];
        u32 tmp, x, y, w, h;

        if(texture_window_settings != psx_gpu->texture_window_settings)
        {
          // Decode the 5-bit mask fields into power-of-two window sizes
          // (8..256): scan for the lowest set bit of (mask | 0x20).
          tmp = (texture_window_settings & 0x1F) | 0x20;
          for(w = 8; (tmp & 1) == 0; tmp >>= 1, w <<= 1);

          tmp = ((texture_window_settings >> 5) & 0x1f) | 0x20;
          for (h = 8; (tmp & 1) == 0; tmp >>= 1, h <<= 1);

          // Window offsets are in 8-pixel units, masked to the window size.
          tmp = 32 - (w >> 3);
          x = ((texture_window_settings >> 10) & tmp) << 3;

          tmp = 32 - (h >> 3);
          y = ((texture_window_settings >> 15) & tmp) << 3;

          flush_render_block_buffer(psx_gpu);

          psx_gpu->texture_window_x = x;
          psx_gpu->texture_window_y = y;
          psx_gpu->texture_mask_width = w - 1;
          psx_gpu->texture_mask_height = h - 1;

          update_texture_ptr(psx_gpu);
        }
        break;
      }

      case 0xE3: // drawing area top-left
        psx_gpu->viewport_start_x = list[0] & 0x3FF;
        psx_gpu->viewport_start_y = (list[0] >> 10) & 0x1FF;

#ifdef TEXTURE_CACHE_4BPP
        psx_gpu->viewport_mask =
         texture_region_mask(psx_gpu->viewport_start_x,
         psx_gpu->viewport_start_y, psx_gpu->viewport_end_x,
         psx_gpu->viewport_end_y);
#endif
        break;

      case 0xE4: // drawing area bottom-right
        psx_gpu->viewport_end_x = list[0] & 0x3FF;
        psx_gpu->viewport_end_y = (list[0] >> 10) & 0x1FF;

#ifdef TEXTURE_CACHE_4BPP
        psx_gpu->viewport_mask =
         texture_region_mask(psx_gpu->viewport_start_x,
         psx_gpu->viewport_start_y, psx_gpu->viewport_end_x,
         psx_gpu->viewport_end_y);
#endif
        break;

      case 0xE5: // drawing offset: two signed 11-bit fields
      {
        // Shift left then arithmetic shift right to sign-extend.
        s32 offset_x = list[0] << 21;
        s32 offset_y = list[0] << 10;
        psx_gpu->offset_x = offset_x >> 21;
        psx_gpu->offset_y = offset_y >> 21;

        break;
      }

      case 0xE6: // mask bit settings
      {
        u32 mask_settings = list[0];
        // Bit 0 -> force-set msb on written pixels (stored as 0x8000).
        u16 mask_msb = mask_settings << 15;

        // Bit 1 -> skip writes to pixels whose msb is already set.
        if(list[0] & 0x2)
          psx_gpu->render_state_base |= RENDER_STATE_MASK_EVALUATE;
        else
          psx_gpu->render_state_base &= ~RENDER_STATE_MASK_EVALUATE;

        if(mask_msb != psx_gpu->mask_msb)
        {
          flush_render_block_buffer(psx_gpu);
          psx_gpu->mask_msb = mask_msb;
        }

        break;
      }

      default:
        break;
    }
  }
}
+
--- /dev/null
+/*
+ * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <malloc.h>
+#include <math.h>
+
+#include "common.h"
+
// Interpolants use 20.12 signed fixed point.
typedef s32 fixed_type;

// Edge walkers (left_x/right_x) run in 32.32 fixed point.
#define EDGE_STEP_BITS 32
#define FIXED_BITS 12

// Convert an integer to fixed point, biased to the pixel center.
#define fixed_center(value) \
  ((((fixed_type)value) << FIXED_BITS) + (1 << (FIXED_BITS - 1))) \

#define int_to_fixed(value) \
  (((fixed_type)value) << FIXED_BITS) \

#define fixed_to_int(value) \
  ((value) >> FIXED_BITS) \

// 64-bit intermediate to avoid overflow in the product.
#define fixed_mul(_a, _b) \
  (((s64)(_a) * (_b)) >> FIXED_BITS) \

#define fixed_to_double(value) \
  ((value) / (double)(1 << FIXED_BITS)) \

#define double_to_fixed(value) \
  (fixed_type)(((value) * (double)(1 << FIXED_BITS))) \

// One interpolated attribute (u, v, r, g or b) across a triangle.
typedef struct
{
  fixed_type current_value;   // value at the current span's base x
  fixed_type step_dx;         // change per +1 pixel in x
  fixed_type step_dy;         // change per +1 scanline in y
  fixed_type gradient_area_x; // signed-area numerator for d/dx
  fixed_type gradient_area_y; // signed-area numerator for d/dy
} interpolant_struct;

// Per-triangle span-walking state: two edge DDAs plus the interpolants.
typedef struct
{
  s32 base_x;         // x the interpolant current_values are anchored at

  s64 left_x;         // left edge position, 32.32 fixed point
  s64 left_dx_dy;     // left edge slope per scanline

  s64 right_x;        // right edge position, 32.32 fixed point
  s64 right_dx_dy;    // right edge slope per scanline

  u32 triangle_area;     // |2 * area|, divisor for the gradients
  u32 triangle_winding;  // 0 = positive, 1 = negative winding

  interpolant_struct u;
  interpolant_struct v;
  interpolant_struct r;
  interpolant_struct g;
  interpolant_struct b;
} _span_struct;
+
+
// Rasterizer statistics counters (diagnostics only; not used for rendering).
// NOTE(review): these names duplicate counters defined in the command-list
// parser source — confirm the two files are never linked into one binary.
u32 span_pixels = 0;
u32 span_pixel_blocks = 0;
u32 spans = 0;
u32 triangles = 0;

u32 texels_4bpp = 0;
u32 texels_8bpp = 0;
u32 texels_16bpp = 0;
u32 untextured_pixels = 0;
u32 blend_pixels = 0;
u32 transparent_pixels = 0;

u32 state_changes = 0;
u32 render_buffer_flushes = 0;
u32 trivial_rejects = 0;
+
// No-op in this reference rasterizer: pixels are written immediately rather
// than batched into render blocks, so there is never anything to flush.
// Exists so callers shared with the block-based renderer still link.
void flush_render_block_buffer(psx_gpu_struct *psx_gpu)
{

}
+
+
+u32 fixed_reciprocal(u32 denominator, u32 *_shift)
+{
+ u32 shift = __builtin_clz(denominator);
+ u32 denominator_normalized = denominator << shift;
+
+ // Implement with a DP divide
+ u32 reciprocal =
+ (double)((1ULL << 62) + (denominator_normalized - 1)) /
+ (double)denominator_normalized;
+
+ *_shift = 62 - shift;
+ return reciprocal;
+}
+
+fixed_type fixed_reciprocal_multiply(s32 numerator, u32 reciprocal,
+ u32 reciprocal_sign, u32 shift)
+{
+ u32 numerator_sign = (u32)numerator >> 31;
+ u32 flip_sign = numerator_sign ^ reciprocal_sign;
+ u32 flip_sign_mask = ~(flip_sign - 1);
+ fixed_type value;
+
+ numerator = abs(numerator);
+
+ value = ((u64)numerator * reciprocal) >> shift;
+
+ value ^= flip_sign_mask;
+ value -= flip_sign_mask;
+
+ return value;
+}
+
+s32 triangle_signed_area_x2(s32 x0, s32 y0, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+ return ((x1 - x0) * (y2 - y1)) - ((x2 - x1) * (y1 - y0));
+}
+
+u32 fetch_texel_4bpp(psx_gpu_struct *psx_gpu, u32 u, u32 v)
+{
+ u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr;
+ u32 texel = texture_ptr_8bpp[(v * 2048) + (u / 2)];
+
+ if(u & 1)
+ texel >>= 4;
+ else
+ texel &= 0xF;
+
+ texels_4bpp++;
+
+ return psx_gpu->clut_ptr[texel];
+}
+
+u32 fetch_texel_8bpp(psx_gpu_struct *psx_gpu, u32 u, u32 v)
+{
+ u8 *texture_ptr_8bpp = psx_gpu->texture_page_ptr;
+ u32 texel = texture_ptr_8bpp[(v * 2048) + u];
+
+ texels_8bpp++;
+
+ return psx_gpu->clut_ptr[texel];
+}
+
+u32 fetch_texel_16bpp(psx_gpu_struct *psx_gpu, u32 u, u32 v)
+{
+ u16 *texture_ptr_16bpp = psx_gpu->texture_page_ptr;
+
+ texels_16bpp++;
+
+ return texture_ptr_16bpp[(v * 1024) + u];
+}
+
+u32 fetch_texel(psx_gpu_struct *psx_gpu, u32 u, u32 v)
+{
+ u &= psx_gpu->texture_mask_width;
+ v &= psx_gpu->texture_mask_height;
+
+ switch(psx_gpu->texture_mode)
+ {
+ case TEXTURE_MODE_4BPP:
+ return fetch_texel_4bpp(psx_gpu, u, v);
+
+ case TEXTURE_MODE_8BPP:
+ return fetch_texel_8bpp(psx_gpu, u, v);
+
+ case TEXTURE_MODE_16BPP:
+ return fetch_texel_16bpp(psx_gpu, u, v);
+ }
+
+ return 0;
+}
+
// Write one pixel at (x, y) to VRAM, optionally blending with the existing
// framebuffer value. r/g/b are 5-bit-range components (they may exceed 31
// after modulation/dithering and are clamped here); texel supplies the
// source mask bit (0x8000) for textured primitives.
// NOTE(review): only the upper bound is clamped on entry; callers clamp
// negative values after dithering before calling in.
void draw_pixel(psx_gpu_struct *psx_gpu, s32 r, s32 g, s32 b, u32 texel,
 u32 x, u32 y, u32 flags)
{
  u32 pixel;

  if(r > 31)
    r = 31;

  if(g > 31)
    g = 31;

  if(b > 31)
    b = 31;

  if(flags & RENDER_FLAGS_BLEND)
  {
    // Untextured primitives always blend; textured ones blend only when
    // the texel's semi-transparency bit (0x8000) is set.
    if(((flags & RENDER_FLAGS_TEXTURE_MAP) == 0) || (texel & 0x8000))
    {
      s32 fb_pixel = psx_gpu->vram[(y * 1024) + x];
      s32 fb_r = fb_pixel & 0x1F;
      s32 fb_g = (fb_pixel >> 5) & 0x1F;
      s32 fb_b = (fb_pixel >> 10) & 0x1F;

      blend_pixels++;

      switch(psx_gpu->blend_mode)
      {
        case BLEND_MODE_AVERAGE: // (back + front) / 2, cannot overflow
          r = (r + fb_r) / 2;
          g = (g + fb_g) / 2;
          b = (b + fb_b) / 2;
          break;

        case BLEND_MODE_ADD: // back + front, saturated to 31
          r += fb_r;
          g += fb_g;
          b += fb_b;

          if(r > 31)
            r = 31;

          if(g > 31)
            g = 31;

          if(b > 31)
            b = 31;

          break;

        case BLEND_MODE_SUBTRACT: // back - front, clamped at 0
          r = fb_r - r;
          g = fb_g - g;
          b = fb_b - b;

          if(r < 0)
            r = 0;

          if(g < 0)
            g = 0;

          if(b < 0)
            b = 0;

          break;

        case BLEND_MODE_ADD_FOURTH: // back + front / 4, saturated to 31
          r = fb_r + (r / 4);
          g = fb_g + (g / 4);
          b = fb_b + (b / 4);

          if(r > 31)
            r = 31;

          if(g > 31)
            g = 31;

          if(b > 31)
            b = 31;

          break;
      }
    }
  }

  // Pack to 15-bit BGR (5 bits per component).
  pixel = r | (g << 5) | (b << 10);

  // Set the mask bit when forced globally or carried by the texel.
  if(psx_gpu->mask_apply || (texel & 0x8000))
    pixel |= 0x8000;

  psx_gpu->vram[(y * 1024) + x] = pixel;
}
+
// 4x4 ordered-dither offsets (range -4..3), indexed [y % 4][x % 4] and
// added to each 8-bit color component before truncation to 5 bits.
s32 dither_table[4][4] =
{
  { -4, 0, -3, 1 },
  { 2, -2, 3, -1 },
  { -3, 1, -4, 0 },
  { 3, -1, 2, -2 },
};
+
// Rasterize one horizontal span at scanline y between the span's left and
// right edge walkers, stepping the u/v/r/g/b interpolants per pixel and
// drawing through draw_pixel(). Clips against the drawing area.
void render_span(psx_gpu_struct *psx_gpu, _span_struct *span, s32 y,
 u32 flags)
{
  // Integer pixel bounds from the 32.32 edge walkers; right_x is exclusive.
  s32 left_x = span->left_x >> EDGE_STEP_BITS;
  s32 right_x = span->right_x >> EDGE_STEP_BITS;
  s32 current_x = left_x;
  s32 delta_x;

  fixed_type current_u = span->u.current_value;
  fixed_type current_v = span->v.current_value;
  fixed_type current_r = span->r.current_value;
  fixed_type current_g = span->g.current_value;
  fixed_type current_b = span->b.current_value;

  // Reject spans entirely outside the drawing area.
  if(y < psx_gpu->viewport_start_y)
    return;

  if(y > psx_gpu->viewport_end_y)
    return;

  if(right_x < psx_gpu->viewport_start_x)
    return;

  if(current_x > psx_gpu->viewport_end_x)
    return;

  spans++;

  // Clip horizontally to the drawing area.
  if(current_x < psx_gpu->viewport_start_x)
    current_x = psx_gpu->viewport_start_x;

  if(right_x > psx_gpu->viewport_end_x + 1)
    right_x = psx_gpu->viewport_end_x + 1;

  // Re-anchor the interpolants at the (possibly clipped) left edge.
  delta_x = current_x - span->base_x;

  current_u += delta_x * span->u.step_dx;
  current_v += delta_x * span->v.step_dx;
  current_r += delta_x * span->r.step_dx;
  current_g += delta_x * span->g.step_dx;
  current_b += delta_x * span->b.step_dx;

  span_pixels += right_x - current_x;
  span_pixel_blocks += ((right_x / 8) - (current_x / 8)) + 1;

  while(current_x < right_x)
  {
    s32 color_r, color_g, color_b;
    u32 texel = 0;

    // Mask evaluation: skip pixels whose framebuffer msb is already set.
    if(psx_gpu->mask_evaluate &&
     (psx_gpu->vram[(y * 1024) + current_x] & 0x8000))
    {
      goto skip_pixel;
    }

    if(flags & RENDER_FLAGS_SHADE)
    {
      // Gouraud shading: take the interpolated 8-bit components.
      color_r = fixed_to_int(current_r);
      color_g = fixed_to_int(current_g);
      color_b = fixed_to_int(current_b);
    }
    else
    {
      // Flat shading: constant primitive color.
      color_r = psx_gpu->primitive_color & 0xFF;
      color_g = (psx_gpu->primitive_color >> 8) & 0xFF;
      color_b = (psx_gpu->primitive_color >> 16) & 0xFF;
    }

    if(flags & RENDER_FLAGS_TEXTURE_MAP)
    {
      u32 texel_r, texel_g, texel_b;
      u32 u = fixed_to_int(current_u);
      u32 v = fixed_to_int(current_v);

      texel = fetch_texel(psx_gpu, u, v);

      // Texel value 0 is fully transparent: skip the write.
      if(texel == 0)
      {
        transparent_pixels++;
        goto skip_pixel;
      }

      texel_r = texel & 0x1F;
      texel_g = (texel >> 5) & 0x1F;
      texel_b = (texel >> 10) & 0x1F;

      if((flags & RENDER_FLAGS_MODULATE_TEXELS) == 0)
      {
        // Modulate: (color * texel) >> 4 here plus >> 3 below = / 128,
        // so color 128 is identity.
        color_r *= texel_r;
        color_g *= texel_g;
        color_b *= texel_b;
      }
      else
      {
        // Raw texels: << 7 here cancels the >> 4 and >> 3 below.
        color_r = texel_r << 7;
        color_g = texel_g << 7;
        color_b = texel_b << 7;
      }

      color_r >>= 4;
      color_g >>= 4;
      color_b >>= 4;
    }
    else
    {
      untextured_pixels++;
    }

    // Dither shaded or modulated-texture pixels while still 8-bit.
    if(psx_gpu->dither_mode && ((flags & RENDER_FLAGS_SHADE) ||
     ((flags & RENDER_FLAGS_TEXTURE_MAP) &&
     ((flags & RENDER_FLAGS_MODULATE_TEXELS) == 0))))
    {
      s32 dither_offset = dither_table[y % 4][current_x % 4];
      color_r += dither_offset;
      color_g += dither_offset;
      color_b += dither_offset;

      if(color_r < 0)
        color_r = 0;

      if(color_g < 0)
        color_g = 0;

      if(color_b < 0)
        color_b = 0;
    }

    // Truncate 8-bit components to the 5-bit framebuffer range.
    color_r >>= 3;
    color_g >>= 3;
    color_b >>= 3;

    draw_pixel(psx_gpu, color_r, color_g, color_b, texel, current_x, y, flags);

  skip_pixel:

    // Advance all interpolants even for skipped pixels.
    current_u += span->u.step_dx;
    current_v += span->v.step_dx;
    current_r += span->r.step_dx;
    current_g += span->g.step_dx;
    current_b += span->b.step_dx;

    current_x++;
  }
}
+
+void increment_span(_span_struct *span)
+{
+ span->left_x += span->left_dx_dy;
+ span->right_x += span->right_dx_dy;
+
+ span->u.current_value += span->u.step_dy;
+ span->v.current_value += span->v.step_dy;
+ span->r.current_value += span->r.step_dy;
+ span->g.current_value += span->g.step_dy;
+ span->b.current_value += span->b.step_dy;
+}
+
+void decrement_span(_span_struct *span)
+{
+ span->left_x += span->left_dx_dy;
+ span->right_x += span->right_dx_dy;
+
+ span->u.current_value -= span->u.step_dy;
+ span->v.current_value -= span->v.step_dy;
+ span->r.current_value -= span->r.step_dy;
+ span->g.current_value -= span->g.step_dy;
+ span->b.current_value -= span->b.step_dy;
+}
+
+
// Gradient-area numerators: the d/dx (resp. d/dy) of an interpolant equals
// the signed area of the triangle formed by substituting the interpolant
// for x (resp. y), divided by the triangle's own area.
#define compute_gradient_area_x(interpolant) \
{ \
  span.interpolant.gradient_area_x = \
   triangle_signed_area_x2(a->interpolant, a->y, b->interpolant, b->y, \
   c->interpolant, c->y); \
} \

#define compute_gradient_area_y(interpolant) \
{ \
  span.interpolant.gradient_area_y = \
   triangle_signed_area_x2(a->x, a->interpolant, b->x, b->interpolant, \
   c->x, c->interpolant); \
} \

#define compute_all_gradient_areas() \
  compute_gradient_area_x(u); \
  compute_gradient_area_x(v); \
  compute_gradient_area_x(r); \
  compute_gradient_area_x(g); \
  compute_gradient_area_x(b); \
  compute_gradient_area_y(u); \
  compute_gradient_area_y(v); \
  compute_gradient_area_y(r); \
  compute_gradient_area_y(g); \
  compute_gradient_area_y(b) \

// Divide each gradient-area numerator by the triangle area (via the shared
// fixed reciprocal, sign-corrected by the winding) to get per-pixel steps,
// and anchor current_value at the base vertex's pixel center.
#define set_interpolant_base(interpolant, base_vertex) \
  span->interpolant.step_dx = \
   fixed_reciprocal_multiply(span->interpolant.gradient_area_x, reciprocal, \
   span->triangle_winding, shift); \
  span->interpolant.step_dy = \
   fixed_reciprocal_multiply(span->interpolant.gradient_area_y, reciprocal, \
   span->triangle_winding, shift); \
  span->interpolant.current_value = fixed_center(base_vertex->interpolant) \

#define set_interpolant_bases(base_vertex) \
{ \
  u32 shift; \
  u32 reciprocal = fixed_reciprocal(span->triangle_area, &shift); \
  shift -= FIXED_BITS; \
  set_interpolant_base(u, base_vertex); \
  set_interpolant_base(v, base_vertex); \
  set_interpolant_base(r, base_vertex); \
  set_interpolant_base(g, base_vertex); \
  set_interpolant_base(b, base_vertex); \
  span->base_x = span->left_x >> EDGE_STEP_BITS; \
} \

// Set up one edge DDA (left or right) from start to end over height
// scanlines: a 32.32 starting x (biased up by (height - 1) / height) and a
// per-scanline slope, both produced with a reciprocal divide by height.
#define compute_edge_delta(edge, start, end, height) \
{ \
  s32 x_start = start->x; \
  s32 x_end = end->x; \
  s32 width = x_end - x_start; \
  \
  s32 shift = __builtin_clz(height); \
  u32 height_normalized = height << shift; \
  u32 height_reciprocal = ((1ULL << 50) + (height_normalized - 1)) / \
   height_normalized; \
  \
  shift -= (50 - EDGE_STEP_BITS); \
  \
  span->edge##_x = \
   ((((s64)x_start * height) + (height - 1)) * height_reciprocal) << shift; \
  span->edge##_dx_dy = ((s64)width * height_reciprocal) << shift; \
} \
+
+
// Walk height scanlines upward: step the span state first (decrement_span),
// then draw — the starting y is one below the base scanline.
#define render_spans_up(height) \
  do \
  { \
    decrement_span(span); \
    render_span(psx_gpu, span, current_y, flags); \
    current_y--; \
    height--; \
  } while(height) \

// Walk height scanlines downward: draw, then step the span state.
#define render_spans_down(height) \
  do \
  { \
    render_span(psx_gpu, span, current_y, flags); \
    increment_span(span); \
    current_y++; \
    height--; \
  } while(height) \

// General upward case: the major edge spans bottom->top, the minor edge is
// walked in two segments (bottom->middle, then middle->top).
#define render_spans_up_up(minor, major) \
  s32 current_y = bottom->y - 1; \
  s32 height_minor_a = bottom->y - middle->y; \
  s32 height_minor_b = middle->y - top->y; \
  s32 height_major = height_minor_a + height_minor_b; \
  \
  compute_edge_delta(major, bottom, top, height_major); \
  compute_edge_delta(minor, bottom, middle, height_minor_a); \
  set_interpolant_bases(bottom); \
  \
  render_spans_up(height_minor_a); \
  \
  compute_edge_delta(minor, middle, top, height_minor_b); \
  render_spans_up(height_minor_b) \

// Upward walk with the split (minor) edge on the left.
void render_spans_up_left(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *bottom, vertex_struct *middle, vertex_struct *top, u32 flags)
{
  render_spans_up_up(left, right);
}

// Upward walk with the split (minor) edge on the right.
void render_spans_up_right(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *bottom, vertex_struct *middle, vertex_struct *top, u32 flags)
{
  render_spans_up_up(right, left);
}

// General downward case: mirror of render_spans_up_up, walking top->bottom.
#define render_spans_down_down(minor, major) \
  s32 current_y = top->y; \
  s32 height_minor_a = middle->y - top->y; \
  s32 height_minor_b = bottom->y - middle->y; \
  s32 height_major = height_minor_a + height_minor_b; \
  \
  compute_edge_delta(minor, top, middle, height_minor_a); \
  compute_edge_delta(major, top, bottom, height_major); \
  set_interpolant_bases(top); \
  \
  render_spans_down(height_minor_a); \
  \
  compute_edge_delta(minor, middle, bottom, height_minor_b); \
  render_spans_down(height_minor_b) \

// Downward walk with the split (minor) edge on the left.
void render_spans_down_left(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *top, vertex_struct *middle, vertex_struct *bottom, u32 flags)
{
  render_spans_down_down(left, right);
}

// Downward walk with the split (minor) edge on the right.
void render_spans_down_right(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *top, vertex_struct *middle, vertex_struct *bottom, u32 flags)
{
  render_spans_down_down(right, left);
}
+
// Triangles with a horizontal (flat) edge need only one segment per side.
// Upward variant: walk from the flat bottom edge to the apex (or from the
// apex up to a flat top edge, with both bottom vertexes coincident).
#define render_spans_up_flat(bottom_left, bottom_right, top_left, top_right) \
  s32 current_y = bottom_left->y - 1; \
  s32 height = bottom_left->y - top_left->y; \
  \
  compute_edge_delta(left, bottom_left, top_left, height); \
  compute_edge_delta(right, bottom_right, top_right, height); \
  set_interpolant_bases(bottom_left); \
  render_spans_up(height) \

// Flat bottom edge, single apex on top.
void render_spans_up_a(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *bottom_left, vertex_struct *bottom_right, vertex_struct *top,
 u32 flags)
{
  render_spans_up_flat(bottom_left, bottom_right, top, top);
}

// Single bottom vertex, flat top edge.
void render_spans_up_b(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *bottom, vertex_struct *top_left, vertex_struct *top_right,
 u32 flags)
{
  render_spans_up_flat(bottom, bottom, top_left, top_right);
}

// Downward variant: walk from the flat top edge toward the bottom.
#define render_spans_down_flat(top_left, top_right, bottom_left, bottom_right) \
  s32 current_y = top_left->y; \
  s32 height = bottom_left->y - top_left->y; \
  \
  compute_edge_delta(left, top_left, bottom_left, height); \
  compute_edge_delta(right, top_right, bottom_right, height); \
  set_interpolant_bases(top_left); \
  render_spans_down(height) \

// Flat top edge, single bottom vertex.
void render_spans_down_a(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *top_left, vertex_struct *top_right, vertex_struct *bottom,
 u32 flags)
{
  render_spans_down_flat(top_left, top_right, bottom, bottom);
}

// Single top vertex, flat bottom edge.
void render_spans_down_b(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *top, vertex_struct *bottom_left, vertex_struct *bottom_right,
 u32 flags)
{
  render_spans_down_flat(top, top, bottom_left, bottom_right);
}
+
// Mixed case: the middle vertex sits between top and bottom in y but both
// remaining edges fall on the same side, so the triangle is walked in two
// passes from the middle vertex — first upward to the top, then downward to
// the bottom — re-deriving the shared right edge from its midpoint value.
void render_spans_up_down(psx_gpu_struct *psx_gpu, _span_struct *span,
 vertex_struct *middle, vertex_struct *top, vertex_struct *bottom, u32 flags)
{
  s32 middle_y = middle->y;
  s32 current_y = middle_y - 1;
  s32 height_minor_a = middle->y - top->y;
  s32 height_minor_b = bottom->y - middle->y;
  s32 height_major = height_minor_a + height_minor_b;

  u64 right_x_mid;

  compute_edge_delta(left, middle, top, height_minor_a);
  compute_edge_delta(right, bottom, top, height_major);
  set_interpolant_bases(middle);

  // The right edge was set up from the bottom vertex; advance it to the
  // middle scanline before the upward pass and remember that position.
  right_x_mid = span->right_x + (span->right_dx_dy * height_minor_b);
  span->right_x = right_x_mid;

  render_spans_up(height_minor_a);

  compute_edge_delta(left, middle, bottom, height_minor_b);
  set_interpolant_bases(middle);

  // Downward pass reuses the same right edge walked in the other direction.
  span->right_dx_dy *= -1;
  span->right_x = right_x_mid;
  current_y = middle_y;

  render_spans_down(height_minor_b);
}
+
// Swap two vertex pointers; every swap flips the triangle's winding parity.
#define vertex_swap(_a, _b) \
{ \
  vertex_struct *temp_vertex = _a; \
  _a = _b; \
  _b = temp_vertex; \
  triangle_winding ^= 1; \
} \

// Per-edge y-direction codes used to build the dispatch key below.
#define triangle_y_direction_up 1
#define triangle_y_direction_flat 2
#define triangle_y_direction_down 0

#define triangle_winding_positive 0
#define triangle_winding_negative 1

// Classify a y delta: sign bit gives up (negative) vs down, 0 means flat.
#define triangle_set_direction(direction_variable, value) \
  u32 direction_variable = (u32)(value) >> 31; \
  if(value == 0) \
    direction_variable = 2 \

// Compose one case label of the (direction_a, direction_b, direction_c,
// winding) dispatch key: 2 bits per edge direction plus the winding bit.
#define triangle_case(direction_a, direction_b, direction_c, winding) \
  case (triangle_y_direction_##direction_a | \
   (triangle_y_direction_##direction_b << 2) | \
   (triangle_y_direction_##direction_c << 4) | \
   (triangle_winding_##winding << 6)) \
+
+
// Rasterize one triangle from vertexes[0..2]. Sorts the vertexes, computes
// the interpolant gradients, classifies the edge directions plus winding,
// and dispatches to the matching span-walking routine.
void render_triangle(psx_gpu_struct *psx_gpu, vertex_struct *vertexes,
 u32 flags)
{
  s32 triangle_area;
  u32 triangle_winding = 0;
  _span_struct span;

  vertex_struct *a = &(vertexes[0]);
  vertex_struct *b = &(vertexes[1]);
  vertex_struct *c = &(vertexes[2]);

  triangle_area = triangle_signed_area_x2(a->x, a->y, b->x, b->y, c->x, c->y);

  triangles++;

  // Degenerate (zero-area) triangles draw nothing.
  if(triangle_area == 0)
    return;

  // Sort a, b, c by ascending y (each swap toggles the winding parity).
  if(b->y < a->y)
    vertex_swap(a, b);

  if(c->y < b->y)
  {
    vertex_swap(b, c);

    if(b->y < a->y)
      vertex_swap(a, b);
  }

  // Hardware limit: triangles taller than 511 lines are rejected.
  if((c->y - a->y) >= 512)
    return;

  // Normalize to positive area; vertex_swap's own winding toggle cancels
  // the explicit one, leaving the winding bit tracking the swap of a/c.
  if(triangle_area < 0)
  {
    triangle_area = -triangle_area;
    triangle_winding ^= 1;
    vertex_swap(a, c);
  }

  // Secondary sort by ascending x.
  if(b->x < a->x)
    vertex_swap(a, b);

  if(c->x < b->x)
  {
    vertex_swap(b, c);

    if(b->x < a->x)
      vertex_swap(a, b);
  }

  // Hardware limit: triangles wider than 1023 pixels are rejected.
  if((c->x - a->x) >= 1024)
    return;

  s32 y_delta_a = b->y - a->y;
  s32 y_delta_b = c->y - b->y;
  s32 y_delta_c = c->y - a->y;

  triangle_set_direction(y_direction_a, y_delta_a);
  triangle_set_direction(y_direction_b, y_delta_b);
  triangle_set_direction(y_direction_c, y_delta_c);

  compute_all_gradient_areas();
  span.triangle_area = triangle_area;
  span.triangle_winding = triangle_winding;

  // Dispatch on (edge a->b, b->c, a->c directions, winding); combinations
  // not listed are geometrically impossible after the sorting above.
  switch(y_direction_a | (y_direction_b << 2) | (y_direction_c << 4) |
   (triangle_winding << 6))
  {
    triangle_case(up, up, up, negative):
    triangle_case(up, up, flat, negative):
    triangle_case(up, up, down, negative):
      render_spans_up_right(psx_gpu, &span, a, b, c, flags);
      break;

    triangle_case(flat, up, up, negative):
    triangle_case(flat, up, flat, negative):
    triangle_case(flat, up, down, negative):
      render_spans_up_a(psx_gpu, &span, a, b, c, flags);
      break;

    triangle_case(down, up, up, negative):
      render_spans_up_down(psx_gpu, &span, a, c, b, flags);
      break;

    triangle_case(down, up, flat, negative):
      render_spans_down_a(psx_gpu, &span, a, c, b, flags);
      break;

    triangle_case(down, up, down, negative):
      render_spans_down_right(psx_gpu, &span, a, c, b, flags);
      break;

    triangle_case(down, flat, up, negative):
    triangle_case(down, flat, flat, negative):
    triangle_case(down, flat, down, negative):
      render_spans_down_b(psx_gpu, &span, a, b, c, flags);
      break;

    triangle_case(down, down, up, negative):
    triangle_case(down, down, flat, negative):
    triangle_case(down, down, down, negative):
      render_spans_down_left(psx_gpu, &span, a, b, c, flags);
      break;

    triangle_case(up, up, up, positive):
    triangle_case(up, up, flat, positive):
    triangle_case(up, up, down, positive):
      render_spans_up_left(psx_gpu, &span, a, b, c, flags);
      break;

    triangle_case(up, flat, up, positive):
    triangle_case(up, flat, flat, positive):
    triangle_case(up, flat, down, positive):
      render_spans_up_b(psx_gpu, &span, a, b, c, flags);
      break;

    triangle_case(up, down, up, positive):
      render_spans_up_right(psx_gpu, &span, a, c, b, flags);
      break;

    triangle_case(up, down, flat, positive):
      render_spans_up_a(psx_gpu, &span, a, c, b, flags);
      break;

    triangle_case(up, down, down, positive):
      render_spans_up_down(psx_gpu, &span, a, b, c, flags);
      break;

    triangle_case(flat, down, up, positive):
    triangle_case(flat, down, flat, positive):
    triangle_case(flat, down, down, positive):
      render_spans_down_a(psx_gpu, &span, a, b, c, flags);
      break;

    triangle_case(down, down, up, positive):
    triangle_case(down, down, flat, positive):
    triangle_case(down, down, down, positive):
      render_spans_down_right(psx_gpu, &span, a, b, c, flags);
      break;
  }

}
+
+
+void render_sprite(psx_gpu_struct *psx_gpu, s32 x, s32 y, u32 u, u32 v,
+ s32 width, s32 height, u32 flags)
+{
+ // TODO: Flip/mirror
+ s32 current_x, current_y;
+ u32 current_u, current_v;
+ u32 primitive_color = psx_gpu->primitive_color;
+ u32 sprite_r, sprite_g, sprite_b;
+ s32 color_r = 0;
+ s32 color_g = 0;
+ s32 color_b = 0;
+ u32 texel = 0;
+
+ sprite_r = primitive_color & 0xFF;
+ sprite_g = (primitive_color >> 8) & 0xFF;
+ sprite_b = (primitive_color >> 16) & 0xFF;
+
+ static u32 sprites = 0;
+
+ sprites++;
+
+ for(current_y = y, current_v = v;
+ current_y < y + height; current_y++, current_v++)
+ {
+ for(current_x = x, current_u = u;
+ current_x < x + width; current_x++, current_u++)
+ {
+ if((current_x >= psx_gpu->viewport_start_x) &&
+ (current_y >= psx_gpu->viewport_start_y) &&
+ (current_x <= psx_gpu->viewport_end_x) &&
+ (current_y <= psx_gpu->viewport_end_y))
+ {
+ if(psx_gpu->mask_evaluate &&
+ (psx_gpu->vram[(y * 1024) + current_x] & 0x8000))
+ {
+ continue;
+ }
+
+ if(flags & RENDER_FLAGS_TEXTURE_MAP)
+ {
+ texel = fetch_texel(psx_gpu, current_u, current_v);
+ if(texel == 0)
+ continue;
+
+ color_r = texel & 0x1F;
+ color_g = (texel >> 5) & 0x1F;
+ color_b = (texel >> 10) & 0x1F;
+
+ if((flags & RENDER_FLAGS_MODULATE_TEXELS) == 0)
+ {
+ color_r *= sprite_r;
+ color_g *= sprite_g;
+ color_b *= sprite_b;
+
+ color_r >>= 7;
+ color_g >>= 7;
+ color_b >>= 7;
+ }
+ }
+ else
+ {
+ color_r = sprite_r >> 3;
+ color_g = sprite_g >> 3;
+ color_b = sprite_b >> 3;
+ }
+
+ draw_pixel(psx_gpu, color_r, color_g, color_b, texel, current_x,
+ current_y, flags);
+ }
+ }
+ }
+}
+
+
/*
 * draw_pixel_line(_x, _y): plot one line pixel at (_x, _y), clipped to the
 * viewport.  Shaded lines (RENDER_FLAGS_SHADE) take and advance the
 * fixed-point RGB accumulators; flat lines take the primitive color.  When
 * dithering is on, a 4x4 ordered-dither offset is added before the 8->5 bit
 * reduction.
 *
 * Relies on locals in the expanding scope: psx_gpu, flags, primitive_color,
 * current_r/g/b, gradient_r/g/b, color_r/g/b.
 *
 * NOTE(review): the dither path clamps negative components to 0 but never
 * clamps above 255 before the >> 3 — confirm the dither offsets cannot push
 * a component past 255, or saturation is needed here.
 */
#define draw_pixel_line(_x, _y) \
  if((_x >= psx_gpu->viewport_start_x) && (_y >= psx_gpu->viewport_start_y) && \
   (_x <= psx_gpu->viewport_end_x) && (_y <= psx_gpu->viewport_end_y)) \
  { \
    if(flags & RENDER_FLAGS_SHADE) \
    { \
      color_r = fixed_to_int(current_r); \
      color_g = fixed_to_int(current_g); \
      color_b = fixed_to_int(current_b); \
      \
      current_r += gradient_r; \
      current_g += gradient_g; \
      current_b += gradient_b; \
    } \
    else \
    { \
      color_r = primitive_color & 0xFF; \
      color_g = (primitive_color >> 8) & 0xFF; \
      color_b = (primitive_color >> 16) & 0xFF; \
    } \
    \
    if(psx_gpu->dither_mode) \
    { \
      s32 dither_offset = dither_table[_y % 4][_x % 4]; \
      \
      color_r += dither_offset; \
      color_g += dither_offset; \
      color_b += dither_offset; \
      \
      if(color_r < 0) \
        color_r = 0; \
      \
      if(color_g < 0) \
        color_g = 0; \
      \
      if(color_b < 0) \
        color_b = 0; \
    } \
    color_r >>= 3; \
    color_g >>= 3; \
    color_b >>= 3; \
    \
    span_pixels++; \
    \
    draw_pixel(psx_gpu, color_r, color_g, color_b, 0, _x, _y, flags); \
  } \
+
/* Direction helpers for the line rasterizer: update_* steps a coordinate by
   one, compare_* is the matching loop-continuation test.  Selected via token
   pasting as update_##direction / compare_##direction. */
#define update_increment(value) \
  value++ \

#define update_decrement(value) \
  value-- \

#define compare_increment(a, b) \
  (a <= b) \

#define compare_decrement(a, b) \
  (a >= b) \
+
/*
 * set_line_gradients(minor): compute per-step fixed-point color gradients
 * over the line's major-axis length (delta_##minor) and seed the color
 * accumulators from vertex_a.  Expands against locals vertex_a, vertex_b,
 * delta_x/delta_y, gradient_r/g/b, current_r/g/b.
 *
 * NOTE(review): divides by delta_##minor — for a zero-length line
 * (delta_x == delta_y == 0) the vertical path would divide by zero; confirm
 * callers exclude that case.
 */
#define set_line_gradients(minor) \
{ \
  s32 gradient_divisor = delta_##minor; \
  gradient_r = int_to_fixed(vertex_b->r - vertex_a->r) / gradient_divisor; \
  gradient_g = int_to_fixed(vertex_b->g - vertex_a->g) / gradient_divisor; \
  gradient_b = int_to_fixed(vertex_b->b - vertex_a->b) / gradient_divisor; \
  current_r = fixed_center(vertex_a->r); \
  current_g = fixed_center(vertex_a->g); \
  current_b = fixed_center(vertex_a->b); \
}
+
/*
 * draw_line_span_horizontal(direction): integer error-accumulator
 * (Bresenham-style) walk for x-major lines (delta_x > delta_y).  x always
 * increments; y steps in `direction` each time the error accumulator wraps.
 */
#define draw_line_span_horizontal(direction) \
do \
{ \
  error_step = delta_y * 2; \
  error_wrap = delta_x * 2; \
  error = delta_x; \
  \
  current_y = y_a; \
  set_line_gradients(x); \
  \
  for(current_x = x_a; current_x <= x_b; current_x++) \
  { \
    draw_pixel_line(current_x, current_y); \
    error += error_step; \
    \
    if(error >= error_wrap) \
    { \
      update_##direction(current_y); \
      error -= error_wrap; \
    } \
  } \
} while(0) \
+
/*
 * draw_line_span_vertical(direction): error-accumulator walk for y-major
 * lines (delta_y >= delta_x).  y steps in `direction`; x increments when the
 * error accumulator wraps.
 *
 * NOTE(review): uses `error > error_wrap` where the horizontal variant uses
 * `>=` — confirm the asymmetry is intentional (it shifts where the diagonal
 * step lands by one pixel).
 */
#define draw_line_span_vertical(direction) \
do \
{ \
  error_step = delta_x * 2; \
  error_wrap = delta_y * 2; \
  error = delta_y; \
  \
  current_x = x_a; \
  set_line_gradients(y); \
  \
  for(current_y = y_a; compare_##direction(current_y, y_b); \
   update_##direction(current_y)) \
  { \
    draw_pixel_line(current_x, current_y); \
    error += error_step; \
    \
    if(error > error_wrap) \
    { \
      current_x++; \
      error -= error_wrap; \
    } \
  } \
} while(0) \
+
+void render_line(psx_gpu_struct *psx_gpu, vertex_struct *vertexes, u32 flags)
+{
+ u32 primitive_color = psx_gpu->primitive_color;
+ s32 color_r, color_g, color_b;
+
+ fixed_type gradient_r = 0;
+ fixed_type gradient_g = 0;
+ fixed_type gradient_b = 0;
+ fixed_type current_r = 0;
+ fixed_type current_g = 0;
+ fixed_type current_b = 0;
+
+ s32 y_a, y_b;
+ s32 x_a, x_b;
+
+ s32 delta_x, delta_y;
+ u32 triangle_winding = 0;
+
+ s32 current_x;
+ s32 current_y;
+
+ u32 error_step;
+ u32 error;
+ u32 error_wrap;
+
+ vertex_struct *vertex_a = &(vertexes[0]);
+ vertex_struct *vertex_b = &(vertexes[1]);
+
+ if(vertex_a->x >= vertex_b->x)
+ {
+ vertex_swap(vertex_a, vertex_b);
+ }
+
+ x_a = vertex_a->x;
+ x_b = vertex_b->x;
+
+ y_a = vertex_a->y;
+ y_b = vertex_b->y;
+
+ delta_x = x_b - x_a;
+ delta_y = y_b - y_a;
+
+ if(delta_x >= 1024)
+ return;
+
+ flags &= ~RENDER_FLAGS_TEXTURE_MAP;
+
+ if(delta_y < 0)
+ {
+ delta_y *= -1;
+
+ if(delta_y >= 512)
+ return;
+
+ if(delta_x > delta_y)
+ draw_line_span_horizontal(decrement);
+ else
+ draw_line_span_vertical(decrement);
+ }
+ else
+ {
+ if(delta_y >= 512)
+ return;
+
+ if(delta_x > delta_y)
+ draw_line_span_horizontal(increment);
+ else
+ draw_line_span_vertical(increment);
+ }
+}
+
+
+void render_block_fill(psx_gpu_struct *psx_gpu, u32 color, u32 x, u32 y,
+ u32 width, u32 height)
+{
+ u32 r = color & 0xFF;
+ u32 g = (color >> 8) & 0xFF;
+ u32 b = (color >> 16) & 0xFF;
+ u32 color_16bpp = (r >> 3) | ((g >> 3) << 5) | ((b >> 3) << 10);
+
+ u16 *vram_ptr = psx_gpu->vram + x + (y * 1024);
+ u32 draw_x, draw_y;
+
+ for(draw_y = 0; draw_y < height; draw_y++)
+ {
+ for(draw_x = 0; draw_x < width; draw_x++)
+ {
+ vram_ptr[draw_x] = color_16bpp;
+ }
+
+ vram_ptr += 1024;
+ }
+}
+
+void render_block_copy(psx_gpu_struct *psx_gpu, u16 *source, u32 x, u32 y,
+ u32 width, u32 height, u32 pitch)
+{
+ u16 *vram_ptr = psx_gpu->vram + x + (y * 1024);
+ u32 draw_x, draw_y;
+
+ for(draw_y = 0; draw_y < height; draw_y++)
+ {
+ for(draw_x = 0; draw_x < width; draw_x++)
+ {
+ vram_ptr[draw_x] = source[draw_x];
+ }
+
+ source += pitch;
+ vram_ptr += 1024;
+ }
+}
+
/*
 * Copy a width x height rectangle from one place in VRAM to another.
 *
 * NOTE(review): render_block_copy copies top-to-bottom, left-to-right, so
 * overlapping rectangles are only handled correctly when the destination
 * precedes the source in memory order — confirm callers never need the
 * reverse-overlap case.
 */
void render_block_move(psx_gpu_struct *psx_gpu, u32 source_x, u32 source_y,
 u32 dest_x, u32 dest_y, u32 width, u32 height)
{
  render_block_copy(psx_gpu, psx_gpu->vram + source_x + (source_y * 1024),
   dest_x, dest_y, width, height, 1024);
}
+
+void initialize_psx_gpu(psx_gpu_struct *psx_gpu)
+{
+ psx_gpu->pixel_count_mode = 0;
+ psx_gpu->pixel_compare_mode = 0;
+
+ psx_gpu->vram_pixel_counts_a = malloc(sizeof(u8) * 1024 * 512);
+ psx_gpu->vram_pixel_counts_b = malloc(sizeof(u8) * 1024 * 512);
+ memset(psx_gpu->vram_pixel_counts_a, 0, sizeof(u8) * 1024 * 512);
+ memset(psx_gpu->vram_pixel_counts_b, 0, sizeof(u8) * 1024 * 512);
+ psx_gpu->compare_vram = malloc(sizeof(u16) * 1024 * 512);
+}
--- /dev/null
+/*
+ * Copyright (C) 2011 Gilead Kutnick "Exophase" <exophase@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef VECTOR_OPS
+#define VECTOR_OPS
+
/*
 * build_vector_type_pair(sign, size, count, count_x2): declare a pair of
 * simulated SIMD vector types, e.g. vec_4x16u (64-bit) and vec_8x16u
 * (128-bit).  The wide type overlays a flat element array with anonymous
 * low/high halves so it can also be treated as two narrow vectors.
 * build_vector_types instantiates the full u/s families used by the
 * portable (non-NEON) renderer.
 */
#define build_vector_type_pair(sign, size, count, count_x2) \
typedef struct \
{ \
  sign##size e[count]; \
} vec_##count##x##size##sign; \
 \
typedef struct \
{ \
  union \
  { \
    sign##size e[count_x2]; \
    struct \
    { \
      vec_##count##x##size##sign low; \
      vec_##count##x##size##sign high; \
    }; \
  }; \
} vec_##count_x2##x##size##sign \

#define build_vector_types(sign) \
  build_vector_type_pair(sign, 8, 8, 16); \
  build_vector_type_pair(sign, 16, 4, 8); \
  build_vector_type_pair(sign, 32, 2, 4); \
  build_vector_type_pair(sign, 64, 1, 2) \

build_vector_types(u);
build_vector_types(s);
+
+
/*
 * foreach_element(iterations, operation): run `operation` once per lane,
 * with the lane index available as _i.  Expands to a brace block, so a
 * trailing semicolon at the use site becomes an empty statement (beware
 * un-braced if/else around these macros).
 */
#define foreach_element(iterations, operation) \
{ \
  u32 _i; \
  for(_i = 0; _i < iterations; _i++) \
  { \
    operation; \
  } \
} \
+
/*
 * Loads/stores between the simulated vector types and memory.  The
 * 64b/128b variants move raw bits through u64 accesses; the 8x16b variants
 * copy lane-wise through u16.
 *
 * load_128b/store_128b were previously bare two-statement macros, so only
 * the first statement landed under an un-braced if/else; they are now
 * wrapped in do { } while(0).  store_8x16b also gained parentheses around
 * `dest` so expression arguments bind correctly under the cast.
 */
#define load_64b(dest, source) \
  *((u64 *)(dest).e) = *((u64 *)(source)) \

#define load_128b(dest, source) \
do \
{ \
  *((u64 *)(dest).e) = *((u64 *)(source)); \
  *((u64 *)(dest).e + 1) = *(((u64 *)(source)) + 1); \
} while(0) \

#define load_8x16b(dest, source) \
  foreach_element(8, (dest).e[_i] = ((u16 *)(source))[_i]) \

#define store_64b(source, dest) \
  *((u64 *)(dest)) = *((u64 *)(source).e) \

#define store_128b(source, dest) \
do \
{ \
  *((u64 *)(dest)) = *((u64 *)(source).e); \
  *(((u64 *)(dest)) + 1) = *((u64 *)(source).e + 1); \
} while(0) \

#define store_8x16b(source, dest) \
  foreach_element(8, ((u16 *)(dest))[_i] = (source).e[_i]) \
+
+
/* split_8x16b: explode 8 16-bit lanes into 16 bytes (low byte then high
   byte of each lane).  merge_16x8b: the inverse, packing byte pairs back
   into 16-bit lanes. */
#define split_8x16b(dest, source) \
  foreach_element(8, \
  { \
    (dest).e[_i * 2] = (source).e[_i]; \
    (dest).e[(_i * 2) + 1] = (source).e[_i] >> 8; \
  }) \

#define merge_16x8b(dest, source) \
  foreach_element(8, \
   (dest).e[_i] = (source).e[_i * 2] | ((source).e[(_i * 2) + 1] << 8)) \
+
/*
 * Reinterpreting casts between vector types (no data movement).
 * vector_cast views `source` as vec_to; vector_cast_high views only the
 * upper half of `source`'s element storage.  The `source` argument is now
 * parenthesized in vector_cast_high so expression operands bind correctly
 * under the cast and inside sizeof (previously it was used bare).
 */
#define vector_cast(vec_to, source) \
  (*((volatile vec_to *)(&(source)))) \

#define vector_cast_high(vec_to, source) \
  (*((volatile vec_to *)((u8 *)(source).e + (sizeof((source).e) / 2)))) \
+
+
/* dup_NxSb(dest, value): broadcast a scalar into every lane (NEON vdup
   analogue). */
#define dup_8x8b(dest, value) \
  foreach_element(8, (dest).e[_i] = value) \

#define dup_16x8b(dest, value) \
  foreach_element(16, (dest).e[_i] = value) \

#define dup_4x16b(dest, value) \
  foreach_element(4, (dest).e[_i] = value) \

#define dup_8x16b(dest, value) \
  foreach_element(8, (dest).e[_i] = value) \

#define dup_2x32b(dest, value) \
  foreach_element(2, (dest).e[_i] = value) \

#define dup_4x32b(dest, value) \
  foreach_element(4, (dest).e[_i] = value) \
+
/*
 * Lane-wise shifts by an immediate count, mirroring NEON vshr/vshl and
 * friends:
 *  - shr_*/shl_*: plain per-lane shifts; the unsigned casts force logical
 *    (not arithmetic) right shifts regardless of the lane's signedness.
 *  - shr_narrow_*/shl_long_*: same arithmetic; the narrowing/widening is
 *    implied by the destination vector's lane width.
 *  - shl_variable_*: per-lane shift counts taken from a second vector
 *    (low byte of each lane).
 *  - shrq_narrow_signed_8x16b: arithmetic shift of signed lanes with
 *    unsigned saturation to 0..255.
 */
#define shr_narrow_8x16b(dest, source, shift) \
  foreach_element(8, (dest).e[_i] = (u16)(source).e[_i] >> (shift)) \

#define shr_narrow_2x64b(dest, source, shift) \
  foreach_element(2, (dest).e[_i] = (source).e[_i] >> (shift)) \

#define shr_8x8b(dest, source, shift) \
  foreach_element(8, (dest).e[_i] = (u8)(source).e[_i] >> (shift)) \

#define shl_8x8b(dest, source, shift) \
  foreach_element(8, (dest).e[_i] = (source).e[_i] << (shift)) \

#define shr_8x16b(dest, source, shift) \
  foreach_element(8, (dest).e[_i] = (u16)(source).e[_i] >> (shift)) \

#define shr_2x32b(dest, source, shift) \
  foreach_element(2, (dest).e[_i] = (u32)(source).e[_i] >> (shift)) \

#define shr_4x16b(dest, source, shift) \
  foreach_element(4, (dest).e[_i] = (source).e[_i] >> (shift)) \

#define shl_4x16b(dest, source, shift) \
  foreach_element(4, (dest).e[_i] = (u32)(source).e[_i] << (shift)) \

#define shr_4x32b(dest, source, shift) \
  foreach_element(4, (dest).e[_i] = (u32)(source).e[_i] >> (shift)) \

#define shr_narrow_4x32b(dest, source, shift) \
  foreach_element(4, (dest).e[_i] = (u32)(source).e[_i] >> (shift)) \

#define shl_8x16b(dest, source, shift) \
  foreach_element(8, (dest).e[_i] = (source).e[_i] << (shift)) \

#define shl_4x32b(dest, source, shift) \
  foreach_element(4, (dest).e[_i] = (source).e[_i] << (shift)) \

#define shl_2x32b(dest, source, shift) \
  foreach_element(2, (dest).e[_i] = (source).e[_i] << (shift)) \

#define shl_1x64b(dest, source, shift) \
  ((dest).e[0] = (source).e[0] << (shift)) \

#define shl_2x64b(dest, source, shift) \
  foreach_element(2, (dest).e[_i] = (source).e[_i] << (shift)) \

#define shl_variable_2x64b(dest, source_a, source_b) \
  foreach_element(2, \
   (dest).e[_i] = (source_a).e[_i] << ((source_b).e[_i] & 0xFF)) \

#define shl_variable_8x16b(dest, source_a, source_b) \
  foreach_element(8, \
   (dest).e[_i] = (source_a).e[_i] << ((source_b).e[_i] & 0xFF)) \

#define shl_variable_4x16b(dest, source_a, source_b) \
  foreach_element(4, \
   (dest).e[_i] = (source_a).e[_i] << ((source_b).e[_i] & 0xFF)) \

#define shr_1x64b(dest, source, shift) \
  ((dest).e[0] = (source).e[0] >> (shift)) \

#define shl_long_8x8b(dest, source, shift) \
  foreach_element(8, (dest).e[_i] = (source).e[_i] << (shift)) \

#define shl_long_4x16b(dest, source, shift) \
  foreach_element(4, (dest).e[_i] = (source).e[_i] << (shift)) \

#define shrq_narrow_signed_8x16b(dest, source, shift) \
  foreach_element(8, \
  { \
    s32 result = ((s16)(source).e[_i]) >> shift; \
    if(result < 0) \
      result = 0; \
    if(result > 0xFF) \
      result = 0xFF; \
    (dest).e[_i] = result; \
  }) \
+
/*
 * shl_reg_*: NEON vshl register-shift analogue — a negative per-lane count
 * shifts right by its magnitude, a non-negative count shifts left.
 * sri/sli: shift-right/left-and-insert — the bits of dest not covered by
 * the shifted source value are preserved.
 */
#define shl_reg_4x32b(dest, source_a, source_b) \
  foreach_element(4, \
  { \
    s8 shift = (source_b).e[_i]; \
    if(shift < 0) \
      dest.e[_i] = (source_a).e[_i] >> (-shift); \
    else \
      dest.e[_i] = (source_a).e[_i] << shift; \
  }) \

#define shl_reg_2x32b(dest, source_a, source_b) \
  foreach_element(2, \
  { \
    s8 shift = (source_b).e[_i]; \
    if(shift < 0) \
      dest.e[_i] = (source_a).e[_i] >> (-shift); \
    else \
      dest.e[_i] = (source_a).e[_i] << shift; \
  }) \

#define shl_reg_2x64b(dest, source_a, source_b) \
  foreach_element(2, \
  { \
    s8 shift = (source_b).e[_i]; \
    if(shift < 0) \
      dest.e[_i] = (source_a).e[_i] >> (-shift); \
    else \
      dest.e[_i] = (source_a).e[_i] << shift; \
  }) \


#define sri_8x8b(dest, source, shift) \
  foreach_element(8, (dest).e[_i] = ((dest).e[_i] & ~(0xFF >> (shift))) | \
   ((u8)(source).e[_i] >> (shift))) \

#define sli_8x8b(dest, source, shift) \
  foreach_element(8, (dest).e[_i] = ((dest).e[_i] & ~(0xFF << (shift))) | \
   ((source).e[_i] << (shift))) \
+
+
+
/* Lane-wise moves (narrowing/widening is implied by the destination lane
   type), bitwise NOT, and lane-wise adds of various widths. */
#define mov_narrow_8x16b(dest, source) \
  foreach_element(8, (dest).e[_i] = (source).e[_i]) \

#define mov_narrow_4x32b(dest, source) \
  foreach_element(4, (dest).e[_i] = (source).e[_i]) \

#define mov_narrow_2x64b(dest, source) \
  foreach_element(2, (dest).e[_i] = (source).e[_i]) \

#define mov_wide_8x8b(dest, source) \
  foreach_element(8, (dest).e[_i] = (source).e[_i]) \

#define mov_wide_2x32b(dest, source) \
  foreach_element(2, (dest).e[_i] = (source).e[_i]) \

#define mvn_4x16b(dest, source) \
  foreach_element(4, (dest).e[_i] = ~((source).e[_i])) \

#define add_4x16b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \

#define add_4x32b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \

#define add_2x32b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \

#define add_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \

#define add_16x8b(dest, source_a, source_b) \
  foreach_element(16, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \

#define add_8x8b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \

#define add_1x64b(dest, source_a, source_b) \
  (dest).e[0] = (source_a).e[0] + (source_b).e[0] \

#define add_2x64b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \
+
/*
 * add_high_narrow: add lane pairs and keep only the high half of each sum
 * (NEON vaddhn analogue).
 *
 * Fix: in the 2x64b variant the `>> 32` previously sat outside the
 * assignment, so the shift result was computed and discarded and dest got
 * the full unshifted sum; it now matches the (correct) 4x32b form.
 */
#define add_high_narrow_2x64b(dest, source_a, source_b) \
  foreach_element(2, \
   (dest).e[_i] = ((source_a).e[_i] + (source_b).e[_i]) >> 32) \

#define add_high_narrow_4x32b(dest, source_a, source_b) \
  foreach_element(4, \
   ((dest).e[_i] = ((source_a).e[_i] + (source_b).e[_i]) >> 16)) \
+
/*
 * Lane-wise subtracts and saturating add/sub.  The unsigned saturating
 * forms (addq/subq/subs_16x8b) rely on wraparound: any result outside
 * 0..0xFF indicates overflow/underflow and is clamped.
 *
 * NOTE(review): subs_long_8x8b forwards to subs_8x8b, which is not defined
 * anywhere in this header — confirm it is either unused or defined by the
 * including translation unit.
 */
#define sub_4x16b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] - (source_b).e[_i]) \

#define sub_4x32b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] - (source_b).e[_i]) \

#define sub_2x32b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] = (source_a).e[_i] - (source_b).e[_i]) \

#define sub_wide_8x8b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] - (source_b).e[_i]) \

#define add_wide_8x8b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \

#define add_wide_2x32b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] = (source_a).e[_i] + (source_b).e[_i]) \

#define addq_8x8b(dest, source_a, source_b) \
  foreach_element(8, \
  { \
    u32 result = (source_a).e[_i] + (source_b).e[_i]; \
    if(result > 0xFF) \
      result = 0xFF; \
    (dest).e[_i] = result; \
  }) \

#define subq_8x8b(dest, source_a, source_b) \
  foreach_element(8, \
  { \
    u32 result = (source_a).e[_i] - (source_b).e[_i]; \
    if(result > 0xFF) \
      result = 0; \
    (dest).e[_i] = result; \
  }) \

#define subs_long_8x8b(dest, source_a, source_b) \
  subs_8x8b(dest, source_a, source_b) \

#define subs_16x8b(dest, source_a, source_b) \
  foreach_element(16, \
  { \
    u32 result = (source_a).e[_i] - (source_b).e[_i]; \
    if(result > 0xFF) \
      result = 0; \
    (dest).e[_i] = result; \
  }) \

#define subs_8x16b(dest, source_a, source_b) \
  foreach_element(8, \
  { \
    s32 result = (source_a).e[_i] - (source_b).e[_i]; \
    if(result < 0) \
      result = 0; \
    \
    (dest).e[_i] = result; \
  }) \

#define sub_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] - (source_b).e[_i]) \

#define sub_16x8b(dest, source_a, source_b) \
  foreach_element(16, (dest).e[_i] = (source_a).e[_i] - (source_b).e[_i]) \
+
/* Lane-wise bitwise operations: orn = a | ~b, bic = a & ~b (bit clear),
   plus and/or/eor and immediate-mask variants. */
#define orn_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] | ~((source_b).e[_i])) \

#define and_4x16b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] & (source_b).e[_i]) \

#define and_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] & (source_b).e[_i]) \

#define and_4x32b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] & (source_b).e[_i]) \

#define and_16x8b(dest, source_a, source_b) \
  foreach_element(16, (dest).e[_i] = (source_a).e[_i] & (source_b).e[_i]) \

#define and_8x8b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] & (source_b).e[_i]) \

#define and_2x32b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] = (source_a).e[_i] & (source_b).e[_i]) \

#define bic_8x8b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] & ~((source_b).e[_i])) \

#define bic_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] & ~((source_b).e[_i])) \

#define bic_immediate_4x16b(dest, value) \
  foreach_element(4, (dest).e[_i] = (dest).e[_i] & ~(value)) \

#define bic_immediate_8x16b(dest, value) \
  foreach_element(8, (dest).e[_i] = (dest).e[_i] & ~(value)) \

#define or_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] | (source_b).e[_i]) \

#define or_immediate_8x16b(dest, source_a, value) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] | (value)) \

#define eor_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] ^ (source_b).e[_i]) \

#define eor_4x32b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] ^ (source_b).e[_i]) \

#define eor_2x32b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] = (source_a).e[_i] ^ (source_b).e[_i]) \
+
/*
 * Interleave/deinterleave and table lookup:
 *  - zip_*: combine two narrow lanes into one wider lane (a in the low
 *    half, b in the high half).
 *  - unzip_*: split each wide lane back into low/high narrow halves.
 *  - tbl_16: NEON vtbl analogue — indexes >= 16 yield 0.
 */
#define zip_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = \
   (u8)(source_a).e[_i] | ((u8)(source_b).e[_i] << 8)) \

#define zip_2x64b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] = \
   (u64)(source_a).e[_i] | ((u64)(source_b).e[_i] << 32)) \

#define unzip_8x8b(dest_a, dest_b, source) \
  foreach_element(8, \
  { \
    (dest_a).e[_i] = (source).e[_i]; \
    (dest_b).e[_i] = ((source).e[_i]) >> 8; \
  }) \

#define unzip_16x8b(dest_a, dest_b, source_a, source_b) \
  foreach_element(8, \
  { \
    (dest_a).e[_i] = (source_a).e[_i]; \
    (dest_b).e[_i] = (source_a).e[_i] >> 8; \
  }); \
  foreach_element(8, \
  { \
    (dest_a).e[_i + 8] = (source_b).e[_i]; \
    (dest_b).e[_i + 8] = (source_b).e[_i] >> 8; \
  }) \

#define tbl_16(dest, indexes, table) \
  foreach_element(8, \
  { \
    u32 index = indexes.e[_i]; \
    if(index < 16) \
      (dest).e[_i] = table.e[index]; \
    else \
      (dest).e[_i] = 0; \
  }) \
+
/*
 * Lane-wise comparisons producing all-ones/all-zero masks: the
 * `~(cond - 1)` trick maps a 0/1 comparison result to 0/~0.  cmpltz_* takes
 * the arithmetic-shift-broadcast of the sign bit instead.  Also: tst
 * (mask of lanes with any common bit), andi (and with immediate), and a
 * truncating lane average.
 */
#define cmpeqz_8x16b(dest, source) \
  foreach_element(8, (dest).e[_i] = ~(((source).e[_i] == 0) - 1)) \

#define cmpltz_8x16b(dest, source) \
  foreach_element(8, (dest).e[_i] = ((s16)(source).e[_i] >> 15)) \

#define cmpltz_4x32b(dest, source) \
  foreach_element(4, (dest).e[_i] = ((s32)(source).e[_i] >> 31)) \

#define cmpltz_2x32b(dest, source) \
  foreach_element(2, (dest).e[_i] = ((s32)(source).e[_i] >> 31)) \

#define cmplte_4x16b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = ~((source_a.e[_i] <= source_b.e[_i]) - 1)) \

#define cmplt_4x16b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = ~((source_a.e[_i] < source_b.e[_i]) - 1)) \

#define cmpgt_4x16b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = ~((source_a.e[_i] > source_b.e[_i]) - 1)) \

#define tst_8x16b(dest, source_a, source_b) \
  foreach_element(8, \
   (dest).e[_i] = ~(((source_a.e[_i] & source_b.e[_i]) != 0) - 1)) \

#define andi_8x8b(dest, source_a, value) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] & value) \

#define average_8x16b(dest, source_a, source_b) \
  foreach_element(8, \
   (dest).e[_i] = ((source_a).e[_i] + (source_b).e[_i]) >> 1) \
+
+
/*
 * Lane-wise multiply families: mul (plain), mul_long (widening via the
 * destination lane type; the 2x32b form forces 64-bit arithmetic with an
 * s64 cast), mul_scalar (vector * scalar), mla/mls (multiply-accumulate /
 * multiply-subtract into dest).
 */
#define mul_8x8b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] * (source_b).e[_i]) \

#define mul_8x16b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] * (source_b).e[_i]) \

#define mul_2x32b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] = (source_a).e[_i] * (source_b).e[_i]) \

#define mul_4x32b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] * (source_b).e[_i]) \

#define mul_long_8x8b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] = (source_a).e[_i] * (source_b).e[_i]) \

#define mul_long_4x16b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] = (source_a).e[_i] * (source_b).e[_i]) \

#define mul_long_2x32b(dest, source_a, source_b) \
  foreach_element(2, \
   (dest).e[_i] = (source_a).e[_i] * (s64)((source_b).e[_i])) \

#define mul_scalar_2x32b(dest, source, value) \
  foreach_element(2, (dest).e[_i] = (source).e[_i] * value) \

#define mul_scalar_long_8x16b(dest, source, value) \
  foreach_element(8, (dest).e[_i] = (source).e[_i] * value) \

#define mul_scalar_long_2x32b(dest, source, value) \
  foreach_element(2, (dest).e[_i] = (source).e[_i] * value) \

#define mla_2x32b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] += (source_a).e[_i] * (source_b).e[_i]) \

#define mla_4x32b(dest, source_a, source_b) \
  foreach_element(4, (dest).e[_i] += (source_a).e[_i] * (source_b).e[_i]) \

#define mla_scalar_long_2x32b(dest, source, value) \
  foreach_element(2, (dest).e[_i] += (source).e[_i] * value) \

#define mla_long_8x8b(dest, source_a, source_b) \
  foreach_element(8, (dest).e[_i] += (source_a).e[_i] * (source_b).e[_i]) \

#define mla_long_2x32b(dest, source_a, source_b) \
  foreach_element(2, (dest).e[_i] += (source_a).e[_i] * (s64)(source_b).e[_i]) \

#define mla_scalar_4x32b(dest, source, value) \
  foreach_element(4, (dest).e[_i] += (source).e[_i] * value) \

#define mla_scalar_2x32b(dest, source, value) \
  foreach_element(2, (dest).e[_i] += (source).e[_i] * value) \

#define mls_scalar_4x32b(dest, source, value) \
  foreach_element(4, (dest).e[_i] -= (source).e[_i] * value) \

#define mls_scalar_2x32b(dest, source, value) \
  foreach_element(2, (dest).e[_i] -= (source).e[_i] * value) \

#define mls_scalar_long_2x32b(dest, source, value) \
  foreach_element(2, (dest).e[_i] -= (source).e[_i] * value) \
+
/*
 * rev_2x32b: swap the two 32-bit lanes (safe when dest aliases source,
 * via the temporary).  abs/neg: lane-wise absolute value / negation.
 * shrq_narrow_8x16b: unsigned shift with saturation to 0..255 on the way
 * down to 8-bit lanes.
 */
#define rev_2x32b(dest, source) \
{ \
  u32 tmp = source.e[1]; \
  (dest).e[1] = source.e[0]; \
  (dest).e[0] = tmp; \
} \

#define abs_4x32b(dest, source) \
  foreach_element(4, (dest).e[_i] = abs(source.e[_i])) \

#define abs_2x32b(dest, source) \
  foreach_element(2, (dest).e[_i] = abs(source.e[_i])) \

#define neg_2x32b(dest, source) \
  foreach_element(2, (dest).e[_i] = -((source).e[_i])) \


#define shrq_narrow_8x16b(dest, source, shift) \
  foreach_element(8, \
  { \
    u32 result = ((source).e[_i]) >> shift; \
    if(result > 0xFF) \
      result = 0xFF; \
    (dest).e[_i] = result; \
  }) \
+
/*
 * Lane-wise min/max and bitwise selects (NEON vbsl/vbif/vbit analogues):
 *  - bsl: dest starts as the mask; bits set in the mask come from
 *    source_a, clear bits from source_b (the mask is overwritten).
 *  - bif: insert source bits where the mask is CLEAR.
 *  - bit: insert source bits where the mask is SET.
 */
#define min_8x16b(dest, source_a, source_b) \
  foreach_element(8, \
  { \
    s32 result = (source_a).e[_i]; \
    if((source_b).e[_i] < result) \
      result = (source_b).e[_i]; \
    (dest).e[_i] = result; \
  }) \

#define min_8x8b(dest, source_a, source_b) \
  foreach_element(8, \
  { \
    u32 result = (source_a).e[_i]; \
    if((source_b).e[_i] < result) \
      result = (source_b).e[_i]; \
    (dest).e[_i] = result; \
  }) \

#define min_16x8b(dest, source_a, source_b) \
  foreach_element(16, \
  { \
    u32 result = (source_a).e[_i]; \
    if((source_b).e[_i] < result) \
      result = (source_b).e[_i]; \
    (dest).e[_i] = result; \
  }) \

#define max_8x16b(dest, source_a, source_b) \
  foreach_element(8, \
  { \
    s32 result = (source_a).e[_i]; \
    if((source_b).e[_i] > result) \
      result = (source_b).e[_i]; \
    (dest).e[_i] = result; \
  }) \

#define bsl_8x16b(dest_mask, source_a, source_b) \
  foreach_element(8, dest_mask.e[_i] = ((source_a).e[_i] & dest_mask.e[_i]) | \
   ((source_b).e[_i] & ~(dest_mask.e[_i]))) \

#define bif_8x16b(dest, source, mask) \
  foreach_element(8, dest.e[_i] = ((source).e[_i] & ~(mask.e[_i])) | \
   ((dest).e[_i] & mask.e[_i])) \

#define bsl_4x32b(dest_mask, source_a, source_b) \
  foreach_element(4, dest_mask.e[_i] = ((source_a).e[_i] & dest_mask.e[_i]) | \
   ((source_b).e[_i] & ~(dest_mask.e[_i]))) \

#define bit_4x16b(dest, source, mask) \
  foreach_element(4, dest.e[_i] = ((source).e[_i] & mask.e[_i]) | \
   ((dest).e[_i] & ~(mask.e[_i]))) \
+
+#endif