// Single software-rasterizer GPU instance shared by this renderer.
// 256-byte alignment presumably keeps the hot state cache-line aligned for
// the NEON path -- TODO confirm against psx_gpu_struct's layout requirements.
static psx_gpu_struct egpu __attribute__((aligned(256)));
// Parses a GPU command list with the software rasterizer.
// NOTE(review): this region is a unified-diff hunk ('-' removed, '+' added).
// The change threads a new 'cycles' out-parameter through do_cmd_list into
// gpu_parse / gpu_parse_enhanced alongside the existing last_cmd output.
-int do_cmd_list(uint32_t *list, int count, int *last_cmd)
+int do_cmd_list(uint32_t *list, int count, int *cycles, int *last_cmd)
{
int ret;
// NOTE(review): stray '#endif' -- the matching '#if' is outside this excerpt.
#endif
// Enhanced (higher-res) parser when enhancement mode is on, plain otherwise.
// 'count * 4' presumably converts a word count to bytes -- TODO confirm
// against gpu_parse's size parameter.
if (gpu.state.enhancement_active)
- ret = gpu_parse_enhanced(&egpu, list, count * 4, (u32 *)last_cmd);
+ ret = gpu_parse_enhanced(&egpu, list, count * 4, cycles, (u32 *)last_cmd);
else
- ret = gpu_parse(&egpu, list, count * 4, (u32 *)last_cmd);
+ ret = gpu_parse(&egpu, list, count * 4, cycles, (u32 *)last_cmd);
#if defined(__arm__) && defined(NEON_BUILD) && !defined(SIMD_BUILD)
// Empty asm with q4-q7 in the clobber list: tells the compiler those NEON
// registers were trashed -- presumably the NEON rasterizer does not preserve
// these callee-saved registers; verify against the asm implementation.
__asm__ __volatile__("":::"q4","q5","q6","q7");
// NOTE(review): tail of a blit/upscale routine whose head is outside this
// excerpt. Clamps the copy rectangle (x1..x2, y1..y2) to the intersection of
// the damaged area and the scanout rect 's'.
x2 = min(right, s->x + s_w);
y1 = max(y, s->y);
y2 = min(bottom, s->y + s_h);
+ // 16-byte align for the asm version
+ x2 += x1 & 7;
+ x1 &= ~7;
// scale2x doubles each source pixel: dst rows are 1024*2 pixels wide and
// x is doubled; width is passed in 8-pixel tiles, rounded up.
scale2x_tiles8(dst + y1 * 1024*2 + x1 * 2,
src + y1 * 1024 + x1, (x2 - x1 + 7) / 8u, y2 - y1);
}
// Replays the 6 queued environment commands (ecmds[1..6]) through the
// software parser so egpu's internal state matches the core's.
// NOTE(review): diff hunk -- the '+' lines supply dummy out-parameters for
// gpu_parse's new cycles/last-cmd arguments; both results are discarded here.
void renderer_sync_ecmds(uint32_t *ecmds)
{
- gpu_parse(&egpu, ecmds + 1, 6 * 4, NULL);
+ s32 dummy0 = 0;
+ u32 dummy1 = 0;
+ gpu_parse(&egpu, ecmds + 1, 6 * 4, &dummy0, &dummy1);
}
// Refreshes renderer-side caches for a dirty VRAM rect; on a state change it
// instead resets the enhancement scanout tracking.
// NOTE(review): the function's opening '{' (and its closing brace) are not
// visible -- elided context lines of the surrounding diff.
void renderer_update_caches(int x, int y, int w, int h, int state_changed)
if (state_changed) {
memset(egpu.enhancement_scanouts, 0, sizeof(egpu.enhancement_scanouts));
egpu.enhancement_scanout_eselect = 0;
+ // Re-derive scanout rects from the current display mode after the reset.
+ update_enhancement_buf_scanouts(&egpu,
+ gpu.screen.src_x, gpu.screen.src_y, gpu.screen.hres, gpu.screen.vres);
return;
}
sync_enhancement_buffers(x, y, w, h);