X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=deps%2Flightrec%2Femitter.c;h=3875e58ed052fe86f4afc1d3f722c6f531ea9984;hb=e2fb1389dc12376acb84e4993ed3b08760257252;hp=0eff0ce3d893c4fd0b6b5a0d071b1880c57b9671;hpb=9259d7486618d69721fee743c3e4d0b5c83805fe;p=pcsx_rearmed.git diff --git a/deps/lightrec/emitter.c b/deps/lightrec/emitter.c index 0eff0ce3..3875e58e 100644 --- a/deps/lightrec/emitter.c +++ b/deps/lightrec/emitter.c @@ -14,6 +14,8 @@ #include #include +#define LIGHTNING_UNALIGNED_32BIT 4 + typedef void (*lightrec_rec_func_t)(struct lightrec_cstate *, const struct block *, u16); /* Forward declarations */ @@ -21,26 +23,32 @@ static void rec_SPECIAL(struct lightrec_cstate *state, const struct block *block static void rec_REGIMM(struct lightrec_cstate *state, const struct block *block, u16 offset); static void rec_CP0(struct lightrec_cstate *state, const struct block *block, u16 offset); static void rec_CP2(struct lightrec_cstate *state, const struct block *block, u16 offset); +static void rec_META(struct lightrec_cstate *state, const struct block *block, u16 offset); static void rec_cp2_do_mtc2(struct lightrec_cstate *state, const struct block *block, u16 offset, u8 reg, u8 in_reg); static void rec_cp2_do_mfc2(struct lightrec_cstate *state, const struct block *block, u16 offset, u8 reg, u8 out_reg); -static void unknown_opcode(struct lightrec_cstate *state, const struct block *block, u16 offset) +static void +lightrec_jump_to_fn(jit_state_t *_jit, void (*fn)(void)) { - pr_warn("Unknown opcode: 0x%08x at PC 0x%08x\n", - block->opcode_list[offset].c.opcode, - block->pc + (offset << 2)); + /* Prevent jit_jmpi() from using our cycles register as a temporary */ + jit_live(LIGHTREC_REG_CYCLE); + + jit_patch_abs(jit_jmpi(), fn); } static void lightrec_jump_to_eob(struct lightrec_cstate *state, jit_state_t *_jit) { - /* Prevent jit_jmpi() from using our cycles register as a temporary */ - jit_live(LIGHTREC_REG_CYCLE); + lightrec_jump_to_fn(_jit, state->state->eob_wrapper_func); +} - jit_patch_abs(jit_jmpi(), state->state->eob_wrapper_func); +static void +lightrec_jump_to_ds_check(struct lightrec_cstate *state, jit_state_t *_jit) +{ + lightrec_jump_to_fn(_jit, state->state->ds_check_func); } static void update_ra_register(struct regcache *reg_cache, jit_state_t *_jit, @@ -61,8 +69,9 @@ static void lightrec_emit_end_of_block(struct lightrec_cstate *state, struct regcache *reg_cache = state->reg_cache; jit_state_t *_jit = block->_jit; const struct opcode *op = &block->opcode_list[offset], - *next = &block->opcode_list[offset + 1]; - u32 cycles = state->cycles + lightrec_cycles_of_opcode(op->c); + *ds = get_delay_slot(block->opcode_list, offset); + u32 cycles = state->cycles + lightrec_cycles_of_opcode(state->state, op->c); + bool has_ds = has_delay_slot(op->c); jit_note(__FILE__, __LINE__); @@ -81,12 +90,11 @@ static void lightrec_emit_end_of_block(struct lightrec_cstate *state, update_ra_register(reg_cache, _jit, ra_reg, block->pc, link); } - if (has_delay_slot(op->c) && - !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) { - cycles += lightrec_cycles_of_opcode(next->c); + if (has_ds && !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) { + cycles += lightrec_cycles_of_opcode(state->state, ds->c); /* Recompile the delay slot */ - if (next->c.opcode) + if (ds->c.opcode) lightrec_rec_opcode(state, block, offset + 1); } @@ -98,19 +106,61 @@ static void lightrec_emit_end_of_block(struct lightrec_cstate *state, pr_debug("EOB: %u cycles\n", cycles); } - 
lightrec_jump_to_eob(state, _jit); + if (has_ds && op_flag_load_delay(ds->flags) + && opcode_has_load_delay(ds->c) && !state->no_load_delay) { + /* If the delay slot is a load opcode, its target register + * will be written after the first opcode of the target is + * executed. Handle this by jumping to a special section of + * the dispatcher. It expects the loaded value to be in + * REG_TEMP, and the target register number to be in JIT_V1.*/ + jit_movi(JIT_V1, ds->c.i.rt); + + lightrec_jump_to_ds_check(state, _jit); + } else { + lightrec_jump_to_eob(state, _jit); + } + + lightrec_regcache_reset(reg_cache); } -void lightrec_emit_eob(struct lightrec_cstate *state, - const struct block *block, u16 offset) +void lightrec_emit_jump_to_interpreter(struct lightrec_cstate *state, + const struct block *block, u16 offset) { struct regcache *reg_cache = state->reg_cache; jit_state_t *_jit = block->_jit; lightrec_clean_regs(reg_cache, _jit); + /* Call the interpreter with the block's address in JIT_V1 and the + * PC (which might have an offset) in JIT_V0. */ lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc, block->pc + (offset << 2)); + if (lightrec_store_next_pc()) { + jit_stxi_i(offsetof(struct lightrec_state, next_pc), + LIGHTREC_REG_STATE, JIT_V0); + } + + jit_movi(JIT_V1, (uintptr_t)block); + + jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles); + lightrec_jump_to_fn(_jit, state->state->interpreter_func); +} + +static void lightrec_emit_eob(struct lightrec_cstate *state, + const struct block *block, u16 offset) +{ + struct regcache *reg_cache = state->reg_cache; + jit_state_t *_jit = block->_jit; + + lightrec_clean_regs(reg_cache, _jit); + + lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc, + block->pc + (offset << 2)); + if (lightrec_store_next_pc()) { + jit_stxi_i(offsetof(struct lightrec_state, next_pc), + LIGHTREC_REG_STATE, JIT_V0); + } + jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles); lightrec_jump_to_eob(state, _jit); @@ -198,10 +248,10 @@ static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 jit_state_t *_jit = block->_jit; struct lightrec_branch *branch; const struct opcode *op = &block->opcode_list[offset], - *next = &block->opcode_list[offset + 1]; + *ds = get_delay_slot(block->opcode_list, offset); jit_node_t *addr; - bool is_forward = (s16)op->i.imm >= -1; - int op_cycles = lightrec_cycles_of_opcode(op->c); + bool is_forward = (s16)op->i.imm >= 0; + int op_cycles = lightrec_cycles_of_opcode(state->state, op->c); u32 target_offset, cycles = state->cycles + op_cycles; bool no_indirection = false; u32 next_pc; @@ -210,7 +260,7 @@ static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 jit_note(__FILE__, __LINE__); if (!op_flag_no_ds(op->flags)) - cycles += lightrec_cycles_of_opcode(next->c); + cycles += lightrec_cycles_of_opcode(state->state, ds->c); state->cycles = -op_cycles; @@ -224,7 +274,7 @@ static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 lightrec_do_early_unload(state, block, offset); if (op_flag_local_branch(op->flags) && - (op_flag_no_ds(op->flags) || !next->opcode) && + (op_flag_no_ds(op->flags) || !ds->opcode) && is_forward && !lightrec_has_dirty_regs(reg_cache)) no_indirection = true; @@ -246,8 +296,11 @@ static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 if (op_flag_local_branch(op->flags)) { /* Recompile the delay slot */ - if (!op_flag_no_ds(op->flags) && next->opcode) + if (!op_flag_no_ds(op->flags) && ds->opcode) { + /* 
Never handle load delays with local branches. */ + state->no_load_delay = true; lightrec_rec_opcode(state, block, offset + 1); + } if (link) update_ra_register(reg_cache, _jit, 31, block->pc, link); @@ -274,6 +327,7 @@ static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 if (!op_flag_local_branch(op->flags) || !is_forward) { next_pc = get_branch_pc(block, offset, 1 + (s16)op->i.imm); + state->no_load_delay = op_flag_local_branch(op->flags); lightrec_emit_end_of_block(state, block, offset, -1, next_pc, 31, link, false); } @@ -287,8 +341,10 @@ static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 if (bz && link) update_ra_register(reg_cache, _jit, 31, block->pc, link); - if (!op_flag_no_ds(op->flags) && next->opcode) + if (!op_flag_no_ds(op->flags) && ds->opcode) { + state->no_load_delay = true; lightrec_rec_opcode(state, block, offset + 1); + } } } @@ -368,6 +424,36 @@ static void rec_regimm_BGEZAL(struct lightrec_cstate *state, !op->i.rs, true); } +static void rec_alloc_rs_rd(struct regcache *reg_cache, + jit_state_t *_jit, + const struct opcode *op, + u8 rs, u8 rd, + u8 in_flags, u8 out_flags, + u8 *rs_out, u8 *rd_out) +{ + bool unload, discard; + u32 unload_flags; + + if (OPT_EARLY_UNLOAD) { + unload_flags = LIGHTREC_FLAGS_GET_RS(op->flags); + unload = unload_flags == LIGHTREC_REG_UNLOAD; + discard = unload_flags == LIGHTREC_REG_DISCARD; + } + + if (OPT_EARLY_UNLOAD && rs && rd != rs && (unload || discard)) { + rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags); + lightrec_remap_reg(reg_cache, _jit, rs, rd, discard); + lightrec_set_reg_out_flags(reg_cache, rs, out_flags); + rd = rs; + } else { + rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags); + rd = lightrec_alloc_reg_out(reg_cache, _jit, rd, out_flags); + } + + *rs_out = rs; + *rd_out = rd; +} + static void rec_alu_imm(struct lightrec_cstate *state, const struct block *block, u16 offset, jit_code_t code, bool slti) { @@ -380,8 +466,9 @@ static void rec_alu_imm(struct lightrec_cstate *state, const struct block *block out_flags |= REG_ZEXT; jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, REG_EXT); - rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, out_flags); + + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.i.rs, c.i.rt, REG_EXT, out_flags, &rs, &rt); jit_new_node_www(code, rt, rs, (s32)(s16) c.i.imm); @@ -398,10 +485,11 @@ static void rec_alu_special(struct lightrec_cstate *state, const struct block *b u8 rd, rt, rs; jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, REG_EXT); + rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, REG_EXT); - rd = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rd, - out_ext ? REG_EXT | REG_ZEXT : 0); + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.r.rs, c.r.rd, REG_EXT, + out_ext ? 
REG_EXT | REG_ZEXT : 0, &rs, &rd); jit_new_node_www(code, rd, rs, rt); @@ -419,17 +507,17 @@ static void rec_alu_shiftv(struct lightrec_cstate *state, const struct block *bl u8 rd, rt, rs, temp, flags = 0; jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0); if (code == jit_code_rshr) flags = REG_EXT; else if (code == jit_code_rshr_u) flags = REG_ZEXT; - rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, flags); - rd = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rd, flags); + rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0); + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.r.rt, c.r.rd, flags, flags, &rt, &rd); - if (rs != rd && rt != rd) { + if (rt != rd) { jit_andi(rd, rs, 0x1f); jit_new_node_www(code, rd, rt, rd); } else { @@ -451,14 +539,18 @@ static void rec_movi(struct lightrec_cstate *state, union code c = block->opcode_list[offset].c; jit_state_t *_jit = block->_jit; u16 flags = REG_EXT; + s32 value = (s32)(s16) c.i.imm; u8 rt; - if (!(c.i.imm & 0x8000)) + if (block->opcode_list[offset].flags & LIGHTREC_MOVI) + value += (s32)((u32)state->movi_temp[c.i.rt] << 16); + + if (value >= 0) flags |= REG_ZEXT; rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags); - jit_movi(rt, (s32)(s16) c.i.imm); + jit_movi(rt, value); lightrec_free_reg(reg_cache, rt); } @@ -466,9 +558,11 @@ static void rec_movi(struct lightrec_cstate *state, static void rec_ADDIU(struct lightrec_cstate *state, const struct block *block, u16 offset) { + const struct opcode *op = &block->opcode_list[offset]; + _jit_name(block->_jit, __func__); - if (block->opcode_list[offset].c.i.rs) + if (op->i.rs && !(op->flags & LIGHTREC_MOVI)) rec_alu_imm(state, block, offset, jit_code_addi, false); else rec_movi(state, block, offset); @@ -506,9 +600,9 @@ static void rec_ANDI(struct lightrec_cstate *state, _jit_name(block->_jit, __func__); jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0); - rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, - REG_EXT | REG_ZEXT); + + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.i.rs, c.i.rt, 0, REG_EXT | REG_ZEXT, &rs, &rt); /* PSX code uses ANDI 0xff / ANDI 0xffff a lot, which are basically * casts to uint8_t / uint16_t. 
*/ @@ -532,8 +626,9 @@ static void rec_alu_or_xor(struct lightrec_cstate *state, const struct block *bl u8 rs, rt, flags; jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0); - rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, 0); + + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.i.rs, c.i.rt, 0, 0, &rs, &rt); flags = lightrec_get_reg_in_flags(reg_cache, rs); lightrec_set_reg_out_flags(reg_cache, rt, flags); @@ -548,8 +643,24 @@ static void rec_alu_or_xor(struct lightrec_cstate *state, const struct block *bl static void rec_ORI(struct lightrec_cstate *state, const struct block *block, u16 offset) { - _jit_name(block->_jit, __func__); - rec_alu_or_xor(state, block, offset, jit_code_ori); + const struct opcode *op = &block->opcode_list[offset]; + struct regcache *reg_cache = state->reg_cache; + jit_state_t *_jit = block->_jit; + s32 val; + u8 rt; + + _jit_name(_jit, __func__); + + if (op->flags & LIGHTREC_MOVI) { + rt = lightrec_alloc_reg_out(reg_cache, _jit, op->i.rt, REG_EXT); + + val = ((u32)state->movi_temp[op->i.rt] << 16) | op->i.imm; + jit_movi(rt, val); + + lightrec_free_reg(reg_cache, rt); + } else { + rec_alu_or_xor(state, block, offset, jit_code_ori); + } } static void rec_XORI(struct lightrec_cstate *state, @@ -567,6 +678,11 @@ static void rec_LUI(struct lightrec_cstate *state, jit_state_t *_jit = block->_jit; u8 rt, flags = REG_EXT; + if (block->opcode_list[offset].flags & LIGHTREC_MOVI) { + state->movi_temp[c.i.rt] = c.i.imm; + return; + } + jit_name(__func__); jit_note(__FILE__, __LINE__); @@ -620,9 +736,10 @@ static void rec_special_AND(struct lightrec_cstate *state, _jit_name(block->_jit, __func__); jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0); + rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0); - rd = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rd, 0); + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.r.rs, c.r.rd, 0, 0, &rs, &rd); flags_rs = lightrec_get_reg_in_flags(reg_cache, rs); flags_rt = lightrec_get_reg_in_flags(reg_cache, rt); @@ -654,9 +771,10 @@ static void rec_special_or_nor(struct lightrec_cstate *state, u8 rd, rt, rs, flags_rs, flags_rt, flags_rd = 0; jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0); + rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0); - rd = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rd, 0); + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.r.rs, c.r.rd, 0, 0, &rs, &rd); flags_rs = lightrec_get_reg_in_flags(reg_cache, rs); flags_rt = lightrec_get_reg_in_flags(reg_cache, rt); @@ -707,9 +825,10 @@ static void rec_special_XOR(struct lightrec_cstate *state, _jit_name(block->_jit, __func__); jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0); + rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0); - rd = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rd, 0); + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.r.rs, c.r.rd, 0, 0, &rs, &rd); flags_rs = lightrec_get_reg_in_flags(reg_cache, rs); flags_rt = lightrec_get_reg_in_flags(reg_cache, rt); @@ -770,7 +889,7 @@ static void rec_alu_shift(struct lightrec_cstate *state, const struct block *blo struct regcache *reg_cache = state->reg_cache; union code c = block->opcode_list[offset].c; jit_state_t *_jit = block->_jit; - u8 rd, rt, flags = 0; + u8 rd, rt, flags = 0, out_flags = 0; jit_note(__FILE__, __LINE__); @@ -779,13 +898,14 @@ static void rec_alu_shift(struct 
lightrec_cstate *state, const struct block *blo else if (code == jit_code_rshi_u) flags = REG_ZEXT; - rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, flags); - /* Input reg is zero-extended, if we SRL at least by one bit, we know * the output reg will be both zero-extended and sign-extended. */ + out_flags = flags; if (code == jit_code_rshi_u && c.r.imm) - flags |= REG_EXT; - rd = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rd, flags); + out_flags |= REG_EXT; + + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.r.rt, c.r.rd, flags, out_flags, &rt, &rd); jit_new_node_www(code, rd, rt, c.r.imm); @@ -824,6 +944,8 @@ static void rec_alu_mult(struct lightrec_cstate *state, u8 reg_hi = get_mult_div_hi(c); jit_state_t *_jit = block->_jit; u8 lo, hi, rs, rt, rflags = 0; + bool no_lo = op_flag_no_lo(flags); + bool no_hi = op_flag_no_hi(flags); jit_note(__FILE__, __LINE__); @@ -835,44 +957,46 @@ static void rec_alu_mult(struct lightrec_cstate *state, rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags); rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags); - if (!op_flag_no_lo(flags)) + if (!no_lo) lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0); - else if (__WORDSIZE == 32) - lo = lightrec_alloc_reg_temp(reg_cache, _jit); - if (!op_flag_no_hi(flags)) + if (!no_hi) hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, REG_EXT); if (__WORDSIZE == 32) { /* On 32-bit systems, do a 32*32->64 bit operation, or a 32*32->32 bit * operation if the MULT was detected a 32-bit only. */ - if (!op_flag_no_hi(flags)) { + if (no_lo) { if (is_signed) - jit_qmulr(lo, hi, rs, rt); + jit_hmulr(hi, rs, rt); else - jit_qmulr_u(lo, hi, rs, rt); - } else { + jit_hmulr_u(hi, rs, rt); + } else if (no_hi) { jit_mulr(lo, rs, rt); + } else if (is_signed) { + jit_qmulr(lo, hi, rs, rt); + } else { + jit_qmulr_u(lo, hi, rs, rt); } } else { /* On 64-bit systems, do a 64*64->64 bit operation. 
*/ - if (op_flag_no_lo(flags)) { + if (no_lo) { jit_mulr(hi, rs, rt); jit_rshi(hi, hi, 32); } else { jit_mulr(lo, rs, rt); /* The 64-bit output value is in $lo, store the upper 32 bits in $hi */ - if (!op_flag_no_hi(flags)) + if (!no_hi) jit_rshi(hi, lo, 32); } } lightrec_free_reg(reg_cache, rs); lightrec_free_reg(reg_cache, rt); - if (!op_flag_no_lo(flags) || __WORDSIZE == 32) + if (!no_lo) lightrec_free_reg(reg_cache, lo); - if (!op_flag_no_hi(flags)) + if (!no_hi) lightrec_free_reg(reg_cache, hi); } @@ -987,14 +1111,16 @@ static void rec_special_DIVU(struct lightrec_cstate *state, } static void rec_alu_mv_lo_hi(struct lightrec_cstate *state, - const struct block *block, u8 dst, u8 src) + const struct block *block, u16 offset, + u8 dst, u8 src) { struct regcache *reg_cache = state->reg_cache; jit_state_t *_jit = block->_jit; jit_note(__FILE__, __LINE__); - src = lightrec_alloc_reg_in(reg_cache, _jit, src, 0); - dst = lightrec_alloc_reg_out(reg_cache, _jit, dst, REG_EXT); + + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + src, dst, 0, REG_EXT, &src, &dst); jit_extr_i(dst, src); @@ -1008,7 +1134,7 @@ static void rec_special_MFHI(struct lightrec_cstate *state, union code c = block->opcode_list[offset].c; _jit_name(block->_jit, __func__); - rec_alu_mv_lo_hi(state, block, c.r.rd, REG_HI); + rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_HI); } static void rec_special_MTHI(struct lightrec_cstate *state, @@ -1017,7 +1143,7 @@ static void rec_special_MTHI(struct lightrec_cstate *state, union code c = block->opcode_list[offset].c; _jit_name(block->_jit, __func__); - rec_alu_mv_lo_hi(state, block, REG_HI, c.r.rs); + rec_alu_mv_lo_hi(state, block, offset, REG_HI, c.r.rs); } static void rec_special_MFLO(struct lightrec_cstate *state, @@ -1026,7 +1152,7 @@ static void rec_special_MFLO(struct lightrec_cstate *state, union code c = block->opcode_list[offset].c; _jit_name(block->_jit, __func__); - rec_alu_mv_lo_hi(state, block, c.r.rd, REG_LO); + rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_LO); } static void rec_special_MTLO(struct lightrec_cstate *state, @@ -1035,7 +1161,7 @@ static void rec_special_MTLO(struct lightrec_cstate *state, union code c = block->opcode_list[offset].c; _jit_name(block->_jit, __func__); - rec_alu_mv_lo_hi(state, block, REG_LO, c.r.rs); + rec_alu_mv_lo_hi(state, block, offset, REG_LO, c.r.rs); } static void call_to_c_wrapper(struct lightrec_cstate *state, @@ -1090,6 +1216,7 @@ static void rec_io(struct lightrec_cstate *state, u32 flags = block->opcode_list[offset].flags; bool is_tagged = LIGHTREC_FLAGS_GET_IO_MODE(flags); u32 lut_entry; + u8 zero; jit_note(__FILE__, __LINE__); @@ -1100,6 +1227,16 @@ static void rec_io(struct lightrec_cstate *state, else if (load_rt) lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false); + if (op_flag_load_delay(flags) && !state->no_load_delay) { + /* Clear state->in_delay_slot_n. This notifies the lightrec_rw + * wrapper that it should write the REG_TEMP register instead of + * the actual output register of the opcode. 
*/ + zero = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0); + jit_stxi_c(offsetof(struct lightrec_state, in_delay_slot_n), + LIGHTREC_REG_STATE, zero); + lightrec_free_reg(reg_cache, zero); + } + if (is_tagged) { call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_RW); } else { @@ -1109,7 +1246,7 @@ static void rec_io(struct lightrec_cstate *state, } } -static u32 rec_ram_mask(struct lightrec_state *state) +static u32 rec_ram_mask(const struct lightrec_state *state) { return (RAM_SIZE << (state->mirrors_mapped * 2)) - 1; } @@ -1118,7 +1255,7 @@ static u32 rec_io_mask(const struct lightrec_state *state) { u32 length = state->maps[PSX_MAP_HW_REGISTERS].length; - return GENMASK(31 - clz32(length - 1), 0); + return 0x1f800000 | GENMASK(31 - clz32(length - 1), 0); } static void rec_store_memory(struct lightrec_cstate *cstate, @@ -1133,19 +1270,20 @@ static void rec_store_memory(struct lightrec_cstate *cstate, struct opcode *op = &block->opcode_list[offset]; jit_state_t *_jit = block->_jit; union code c = op->c; - u8 rs, rt, tmp, tmp2, tmp3, addr_reg, addr_reg2; + u8 rs, rt, tmp = 0, tmp2 = 0, tmp3, addr_reg, addr_reg2; s16 imm = (s16)c.i.imm; s32 simm = (s32)imm << (1 - lut_is_32bit(state)); s32 lut_offt = offsetof(struct lightrec_state, code_lut); bool no_mask = op_flag_no_mask(op->flags); bool add_imm = c.i.imm && - ((!state->mirrors_mapped && !no_mask) || (invalidate && + (c.i.op == OP_META_SWU + || (!state->mirrors_mapped && !no_mask) || (invalidate && ((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt)))); - bool need_tmp = !no_mask || addr_offset || add_imm || invalidate; + bool need_tmp = !no_mask || add_imm || invalidate; bool swc2 = c.i.op == OP_SWC2; - u8 in_reg = swc2 ? REG_CP2_TEMP : c.i.rt; + u8 in_reg = swc2 ? REG_TEMP : c.i.rt; + s8 reg_imm; - rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0); rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0); if (need_tmp) tmp = lightrec_alloc_reg_temp(reg_cache, _jit); @@ -1154,6 +1292,7 @@ static void rec_store_memory(struct lightrec_cstate *cstate, if (add_imm) { jit_addi(tmp, addr_reg, (s16)c.i.imm); + lightrec_free_reg(reg_cache, rs); addr_reg = tmp; imm = 0; } else if (simm) { @@ -1161,25 +1300,42 @@ static void rec_store_memory(struct lightrec_cstate *cstate, } if (!no_mask) { - jit_andi(tmp, addr_reg, addr_mask); + reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, + addr_mask); + + jit_andr(tmp, addr_reg, reg_imm); addr_reg = tmp; + + lightrec_free_reg(reg_cache, reg_imm); } if (addr_offset) { + reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, + addr_offset); tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit); - jit_addi(tmp2, addr_reg, addr_offset); + jit_addr(tmp2, addr_reg, reg_imm); addr_reg2 = tmp2; + + lightrec_free_reg(reg_cache, reg_imm); } else { addr_reg2 = addr_reg; } + rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0); + if (is_big_endian() && swap_code && in_reg) { tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit); jit_new_node_ww(swap_code, tmp3, rt); - jit_new_node_www(code, imm, addr_reg2, tmp3); + + if (c.i.op == OP_META_SWU) + jit_unstr(addr_reg2, tmp3, LIGHTNING_UNALIGNED_32BIT); + else + jit_new_node_www(code, imm, addr_reg2, tmp3); lightrec_free_reg(reg_cache, tmp3); + } else if (c.i.op == OP_META_SWU) { + jit_unstr(addr_reg2, rt, LIGHTNING_UNALIGNED_32BIT); } else { jit_new_node_www(code, imm, addr_reg2, rt); } @@ -1202,7 +1358,7 @@ static void rec_store_memory(struct lightrec_cstate *cstate, if (addr_reg == rs && c.i.rs == 0) { addr_reg = LIGHTREC_REG_STATE; } else { - 
jit_addr(tmp, addr_reg, LIGHTREC_REG_STATE); + jit_add_state(tmp, addr_reg); addr_reg = tmp; } @@ -1226,7 +1382,7 @@ static void rec_store_ram(struct lightrec_cstate *cstate, u16 offset, jit_code_t code, jit_code_t swap_code, bool invalidate) { - struct lightrec_state *state = cstate->state; + const struct lightrec_state *state = cstate->state; _jit_note(block->_jit, __FILE__, __LINE__); @@ -1262,38 +1418,44 @@ static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate, u16 offset, jit_code_t code, jit_code_t swap_code) { - struct lightrec_state *state = cstate->state; + const struct lightrec_state *state = cstate->state; struct regcache *reg_cache = cstate->reg_cache; union code c = block->opcode_list[offset].c; jit_state_t *_jit = block->_jit; jit_node_t *to_not_ram, *to_end; bool swc2 = c.i.op == OP_SWC2; - u8 tmp, tmp2, rs, rt, in_reg = swc2 ? REG_CP2_TEMP : c.i.rt; + u8 tmp, tmp2 = 0, rs, rt, in_reg = swc2 ? REG_TEMP : c.i.rt; + u32 addr_mask; + s32 reg_imm; s16 imm; jit_note(__FILE__, __LINE__); rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0); tmp = lightrec_alloc_reg_temp(reg_cache, _jit); - if (state->offset_ram || state->offset_scratch) - tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit); + if (state->mirrors_mapped) + addr_mask = 0x1f800000 | (4 * RAM_SIZE - 1); + else + addr_mask = 0x1f800000 | (RAM_SIZE - 1); + + reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask); /* Convert to KUNSEG and avoid RAM mirrors */ - if (state->mirrors_mapped) { - imm = (s16)c.i.imm; - jit_andi(tmp, rs, 0x1f800000 | (4 * RAM_SIZE - 1)); - } else if (c.i.imm) { + if ((c.i.op == OP_META_SWU || !state->mirrors_mapped) && c.i.imm) { imm = 0; jit_addi(tmp, rs, (s16)c.i.imm); - jit_andi(tmp, tmp, 0x1f800000 | (RAM_SIZE - 1)); + jit_andr(tmp, tmp, reg_imm); } else { - imm = 0; - jit_andi(tmp, rs, 0x1f800000 | (RAM_SIZE - 1)); + imm = (s16)c.i.imm; + jit_andr(tmp, rs, reg_imm); } lightrec_free_reg(reg_cache, rs); + lightrec_free_reg(reg_cache, reg_imm); if (state->offset_ram != state->offset_scratch) { + tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit); + to_not_ram = jit_bmsi(tmp, BIT(28)); jit_movi(tmp2, state->offset_ram); @@ -1304,7 +1466,8 @@ static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate, jit_movi(tmp2, state->offset_scratch); jit_patch(to_end); } else if (state->offset_ram) { - jit_movi(tmp2, state->offset_ram); + tmp2 = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, + state->offset_ram); } if (state->offset_ram || state->offset_scratch) { @@ -1318,9 +1481,15 @@ static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate, tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit); jit_new_node_ww(swap_code, tmp2, rt); - jit_new_node_www(code, imm, tmp, tmp2); + + if (c.i.op == OP_META_SWU) + jit_unstr(tmp, tmp2, LIGHTNING_UNALIGNED_32BIT); + else + jit_new_node_www(code, imm, tmp, tmp2); lightrec_free_reg(reg_cache, tmp2); + } else if (c.i.op == OP_META_SWU) { + jit_unstr(tmp, rt, LIGHTNING_UNALIGNED_32BIT); } else { jit_new_node_www(code, imm, tmp, rt); } @@ -1332,7 +1501,7 @@ static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate, static void rec_store_direct(struct lightrec_cstate *cstate, const struct block *block, u16 offset, jit_code_t code, jit_code_t swap_code) { - struct lightrec_state *state = cstate->state; + const struct lightrec_state *state = cstate->state; u32 ram_size = state->mirrors_mapped ? 
RAM_SIZE * 4 : RAM_SIZE; struct regcache *reg_cache = cstate->reg_cache; union code c = block->opcode_list[offset].c; @@ -1340,7 +1509,10 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block jit_node_t *to_not_ram, *to_end; bool swc2 = c.i.op == OP_SWC2; u8 tmp, tmp2, tmp3, masked_reg, rs, rt; - u8 in_reg = swc2 ? REG_CP2_TEMP : c.i.rt; + u8 in_reg = swc2 ? REG_TEMP : c.i.rt; + u32 addr_mask = 0x1f800000 | (ram_size - 1); + bool different_offsets = state->offset_ram != state->offset_scratch; + s32 reg_imm; jit_note(__FILE__, __LINE__); @@ -1348,18 +1520,21 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit); tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0); + reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask); + /* Convert to KUNSEG and avoid RAM mirrors */ if (c.i.imm) { jit_addi(tmp2, rs, (s16)c.i.imm); - jit_andi(tmp2, tmp2, 0x1f800000 | (ram_size - 1)); + jit_andr(tmp2, tmp2, reg_imm); } else { - jit_andi(tmp2, rs, 0x1f800000 | (ram_size - 1)); + jit_andr(tmp2, rs, reg_imm); } lightrec_free_reg(reg_cache, rs); + lightrec_free_reg(reg_cache, reg_imm); tmp = lightrec_alloc_reg_temp(reg_cache, _jit); - if (state->offset_ram != state->offset_scratch) { + if (different_offsets) { to_not_ram = jit_bgti(tmp2, ram_size); masked_reg = tmp2; } else { @@ -1376,7 +1551,7 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block if (!lut_is_32bit(state)) jit_lshi(tmp, tmp, 1); - jit_addr(tmp, LIGHTREC_REG_STATE, tmp); + jit_add_state(tmp, tmp); /* Write NULL to the code LUT to invalidate any block that's there */ if (lut_is_32bit(state)) @@ -1384,7 +1559,19 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block else jit_stxi(offsetof(struct lightrec_state, code_lut), tmp, tmp3); - if (state->offset_ram != state->offset_scratch) { + if (c.i.op == OP_META_SWU) { + /* With a SWU opcode, we might have touched the following 32-bit + * word, so invalidate it as well */ + if (lut_is_32bit(state)) { + jit_stxi_i(offsetof(struct lightrec_state, code_lut) + 4, + tmp, tmp3); + } else { + jit_stxi(offsetof(struct lightrec_state, code_lut) + + sizeof(uintptr_t), tmp, tmp3); + } + } + + if (different_offsets) { jit_movi(tmp, state->offset_ram); to_end = jit_b(); @@ -1394,7 +1581,7 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block if (state->offset_ram || state->offset_scratch) jit_movi(tmp, state->offset_scratch); - if (state->offset_ram != state->offset_scratch) + if (different_offsets) jit_patch(to_end); if (state->offset_ram || state->offset_scratch) @@ -1409,9 +1596,15 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block tmp = lightrec_alloc_reg_temp(reg_cache, _jit); jit_new_node_ww(swap_code, tmp, rt); - jit_new_node_www(code, 0, tmp2, tmp); + + if (c.i.op == OP_META_SWU) + jit_unstr(tmp2, tmp, LIGHTNING_UNALIGNED_32BIT); + else + jit_new_node_www(code, 0, tmp2, tmp); lightrec_free_reg(reg_cache, tmp); + } else if (c.i.op == OP_META_SWU) { + jit_unstr(tmp2, rt, LIGHTNING_UNALIGNED_32BIT); } else { jit_new_node_www(code, 0, tmp2, rt); } @@ -1427,7 +1620,7 @@ static void rec_store(struct lightrec_cstate *state, u32 flags = block->opcode_list[offset].flags; u32 mode = LIGHTREC_FLAGS_GET_IO_MODE(flags); bool no_invalidate = op_flag_no_invalidate(flags) || - state->state->invalidate_from_dma_only; + (state->state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY); union code c 
= block->opcode_list[offset].c; bool is_swc2 = c.i.op == OP_SWC2; @@ -1437,7 +1630,7 @@ static void rec_store(struct lightrec_cstate *state, case LIGHTREC_IO_SCRATCH: case LIGHTREC_IO_DIRECT: case LIGHTREC_IO_DIRECT_HW: - rec_cp2_do_mfc2(state, block, offset, c.i.rt, REG_CP2_TEMP); + rec_cp2_do_mfc2(state, block, offset, c.i.rt, REG_TEMP); break; default: break; @@ -1469,7 +1662,7 @@ static void rec_store(struct lightrec_cstate *state, } if (is_swc2) - lightrec_discard_reg_if_loaded(state->reg_cache, REG_CP2_TEMP); + lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP); } static void rec_SB(struct lightrec_cstate *state, @@ -1519,14 +1712,16 @@ static void rec_load_memory(struct lightrec_cstate *cstate, { struct regcache *reg_cache = cstate->reg_cache; struct opcode *op = &block->opcode_list[offset]; + bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay; jit_state_t *_jit = block->_jit; u8 rs, rt, out_reg, addr_reg, flags = REG_EXT; bool no_mask = op_flag_no_mask(op->flags); union code c = op->c; + s8 reg_imm; s16 imm; - if (c.i.op == OP_LWC2) - out_reg = REG_CP2_TEMP; + if (load_delay || c.i.op == OP_LWC2) + out_reg = REG_TEMP; else if (c.i.rt) out_reg = c.i.rt; else @@ -1538,7 +1733,8 @@ static void rec_load_memory(struct lightrec_cstate *cstate, rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0); rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags); - if (!cstate->state->mirrors_mapped && c.i.imm && !no_mask) { + if ((op->i.op == OP_META_LWU && c.i.imm) + || (!cstate->state->mirrors_mapped && c.i.imm && !no_mask)) { jit_addi(rt, rs, (s16)c.i.imm); addr_reg = rt; imm = 0; @@ -1547,14 +1743,27 @@ static void rec_load_memory(struct lightrec_cstate *cstate, imm = (s16)c.i.imm; } + if (op->i.op == OP_META_LWU) + imm = LIGHTNING_UNALIGNED_32BIT; + if (!no_mask) { - jit_andi(rt, addr_reg, addr_mask); + reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, + addr_mask); + + jit_andr(rt, addr_reg, reg_imm); addr_reg = rt; + + lightrec_free_reg(reg_cache, reg_imm); } if (addr_offset) { - jit_addi(rt, addr_reg, addr_offset); + reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, + addr_offset); + + jit_addr(rt, addr_reg, reg_imm); addr_reg = rt; + + lightrec_free_reg(reg_cache, reg_imm); } jit_new_node_www(code, rt, addr_reg, imm); @@ -1617,16 +1826,22 @@ static void rec_load_direct(struct lightrec_cstate *cstate, jit_code_t code, jit_code_t swap_code, bool is_unsigned) { - struct lightrec_state *state = cstate->state; + const struct lightrec_state *state = cstate->state; struct regcache *reg_cache = cstate->reg_cache; - union code c = block->opcode_list[offset].c; + struct opcode *op = &block->opcode_list[offset]; + bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay; jit_state_t *_jit = block->_jit; jit_node_t *to_not_ram, *to_not_bios, *to_end, *to_end2; u8 tmp, rs, rt, out_reg, addr_reg, flags = REG_EXT; + bool different_offsets = state->offset_bios != state->offset_scratch; + union code c = op->c; + s32 addr_mask; + u32 reg_imm; + s8 offt_reg; s16 imm; - if (c.i.op == OP_LWC2) - out_reg = REG_CP2_TEMP; + if (load_delay || c.i.op == OP_LWC2) + out_reg = REG_TEMP; else if (c.i.rt) out_reg = c.i.rt; else @@ -1641,7 +1856,8 @@ static void rec_load_direct(struct lightrec_cstate *cstate, if ((state->offset_ram == state->offset_bios && state->offset_ram == state->offset_scratch && - state->mirrors_mapped) || !c.i.imm) { + state->mirrors_mapped && c.i.op != OP_META_LWU) + || !c.i.imm) { addr_reg = rs; imm = (s16)c.i.imm; } 
else { @@ -1653,21 +1869,43 @@ static void rec_load_direct(struct lightrec_cstate *cstate, lightrec_free_reg(reg_cache, rs); } + if (op->i.op == OP_META_LWU) + imm = LIGHTNING_UNALIGNED_32BIT; + tmp = lightrec_alloc_reg_temp(reg_cache, _jit); if (state->offset_ram == state->offset_bios && state->offset_ram == state->offset_scratch) { + if (!state->mirrors_mapped) + addr_mask = 0x1f800000 | (RAM_SIZE - 1); + else + addr_mask = 0x1fffffff; + + reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, + addr_mask); if (!state->mirrors_mapped) { jit_andi(tmp, addr_reg, BIT(28)); jit_rshi_u(tmp, tmp, 28 - 22); - jit_ori(tmp, tmp, 0x1f800000 | (RAM_SIZE - 1)); + jit_orr(tmp, tmp, reg_imm); jit_andr(rt, addr_reg, tmp); } else { - jit_andi(rt, addr_reg, 0x1fffffff); + jit_andr(rt, addr_reg, reg_imm); } - if (state->offset_ram) - jit_movi(tmp, state->offset_ram); + lightrec_free_reg(reg_cache, reg_imm); + + if (state->offset_ram) { + offt_reg = lightrec_get_reg_with_value(reg_cache, + state->offset_ram); + if (offt_reg < 0) { + jit_movi(tmp, state->offset_ram); + lightrec_temp_set_value(reg_cache, tmp, + state->offset_ram); + } else { + lightrec_free_reg(reg_cache, tmp); + tmp = offt_reg; + } + } } else { to_not_ram = jit_bmsi(addr_reg, BIT(28)); @@ -1681,7 +1919,7 @@ static void rec_load_direct(struct lightrec_cstate *cstate, jit_patch(to_not_ram); - if (state->offset_bios != state->offset_scratch) + if (different_offsets) to_not_bios = jit_bmci(addr_reg, BIT(22)); /* Convert to KUNSEG */ @@ -1689,7 +1927,7 @@ static void rec_load_direct(struct lightrec_cstate *cstate, jit_movi(tmp, state->offset_bios); - if (state->offset_bios != state->offset_scratch) { + if (different_offsets) { to_end2 = jit_b(); jit_patch(to_not_bios); @@ -1754,8 +1992,8 @@ static void rec_load(struct lightrec_cstate *state, const struct block *block, } if (op->i.op == OP_LWC2) { - rec_cp2_do_mtc2(state, block, offset, op->i.rt, REG_CP2_TEMP); - lightrec_discard_reg_if_loaded(state->reg_cache, REG_CP2_TEMP); + rec_cp2_do_mtc2(state, block, offset, op->i.rt, REG_TEMP); + lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP); } } @@ -1811,9 +2049,9 @@ static void rec_LW(struct lightrec_cstate *state, const struct block *block, u16 rec_load(state, block, offset, code, jit_code_bswapr_ui, false); } -static void rec_break_syscall(struct lightrec_cstate *state, - const struct block *block, u16 offset, - u32 exit_code) +static void rec_exit_early(struct lightrec_cstate *state, + const struct block *block, u16 offset, + u32 exit_code, u32 pc) { struct regcache *reg_cache = state->reg_cache; jit_state_t *_jit = block->_jit; @@ -1827,26 +2065,36 @@ static void rec_break_syscall(struct lightrec_cstate *state, jit_stxi_i(offsetof(struct lightrec_state, exit_flags), LIGHTREC_REG_STATE, tmp); + jit_ldxi_i(tmp, LIGHTREC_REG_STATE, + offsetof(struct lightrec_state, target_cycle)); + jit_subr(tmp, tmp, LIGHTREC_REG_CYCLE); + jit_movi(LIGHTREC_REG_CYCLE, 0); + jit_stxi_i(offsetof(struct lightrec_state, target_cycle), + LIGHTREC_REG_STATE, tmp); + jit_stxi_i(offsetof(struct lightrec_state, current_cycle), + LIGHTREC_REG_STATE, tmp); + lightrec_free_reg(reg_cache, tmp); - /* TODO: the return address should be "pc - 4" if we're a delay slot */ - lightrec_emit_end_of_block(state, block, offset, -1, - get_ds_pc(block, offset, 0), - 31, 0, true); + lightrec_emit_end_of_block(state, block, offset, -1, pc, 31, 0, true); } static void rec_special_SYSCALL(struct lightrec_cstate *state, const struct block *block, u16 offset) { 
_jit_name(block->_jit, __func__); - rec_break_syscall(state, block, offset, LIGHTREC_EXIT_SYSCALL); + + /* TODO: the return address should be "pc - 4" if we're a delay slot */ + rec_exit_early(state, block, offset, LIGHTREC_EXIT_SYSCALL, + get_ds_pc(block, offset, 0)); } static void rec_special_BREAK(struct lightrec_cstate *state, const struct block *block, u16 offset) { _jit_name(block->_jit, __func__); - rec_break_syscall(state, block, offset, LIGHTREC_EXIT_BREAK); + rec_exit_early(state, block, offset, LIGHTREC_EXIT_BREAK, + get_ds_pc(block, offset, 0)); } static void rec_mfc(struct lightrec_cstate *state, const struct block *block, u16 offset) @@ -1872,6 +2120,7 @@ static void rec_mtc(struct lightrec_cstate *state, const struct block *block, u1 jit_note(__FILE__, __LINE__); lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false); lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false); + lightrec_clean_reg_if_loaded(reg_cache, _jit, REG_TEMP, false); call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MTC); @@ -1901,13 +2150,16 @@ rec_mfc0(struct lightrec_cstate *state, const struct block *block, u16 offset) lightrec_free_reg(reg_cache, rt); } -static bool block_in_bios(const struct lightrec_cstate *state, - const struct block *block) +static bool block_uses_icache(const struct lightrec_cstate *state, + const struct block *block) { - const struct lightrec_mem_map *bios = &state->state->maps[PSX_MAP_BIOS]; + const struct lightrec_mem_map *map = &state->state->maps[PSX_MAP_KERNEL_USER_RAM]; u32 pc = kunseg(block->pc); - return pc >= bios->pc && pc < bios->pc + bios->length; + if (pc < map->pc || pc >= map->pc + map->length) + return false; + + return (block->pc >> 28) < 0xa; } static void @@ -1933,10 +2185,11 @@ rec_mtc0(struct lightrec_cstate *state, const struct block *block, u16 offset) break; } - if (block_in_bios(state, block) && c.r.rd == 12) { - /* If we are running code from the BIOS, handle writes to the - * Status register in C. BIOS code may toggle bit 16 which will - * map/unmap the RAM, while game code cannot do that. */ + if (!block_uses_icache(state, block) && c.r.rd == 12) { + /* If we are not running code from the RAM through kuseg or + * kseg0, handle writes to the Status register in C; as the + * code may toggle bit 16 which isolates the cache. Code + * running from kuseg or kseg0 in RAM cannot do that. */ rec_mtc(state, block, offset); return; } @@ -2010,7 +2263,7 @@ rec_mtc0(struct lightrec_cstate *state, const struct block *block, u16 offset) if (!op_flag_no_ds(block->opcode_list[offset].flags) && (c.r.rd == 12 || c.r.rd == 13)) { - state->cycles += lightrec_cycles_of_opcode(c); + state->cycles += lightrec_cycles_of_opcode(state->state, c); lightrec_emit_eob(state, block, offset + 1); } } @@ -2192,7 +2445,6 @@ static void rec_cp2_do_mtc2(struct lightrec_cstate *state, { struct regcache *reg_cache = state->reg_cache; jit_state_t *_jit = block->_jit; - jit_node_t *loop, *to_loop; u8 rt, tmp, tmp2, flags = 0; _jit_name(block->_jit, __func__); @@ -2245,30 +2497,20 @@ static void rec_cp2_do_mtc2(struct lightrec_cstate *state, break; case 30: tmp = lightrec_alloc_reg_temp(reg_cache, _jit); - tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit); /* if (rt < 0) rt = ~rt; */ jit_rshi(tmp, rt, 31); jit_xorr(tmp, rt, tmp); - /* We know the sign bit is 0. 
Left-shift by 1 to start the algorithm */ - jit_lshi(tmp, tmp, 1); - jit_movi(tmp2, 33); - - /* Decrement tmp2 and right-shift the value by 1 until it equals zero */ - loop = jit_label(); - jit_subi(tmp2, tmp2, 1); - jit_rshi_u(tmp, tmp, 1); - to_loop = jit_bnei(tmp, 0); - - jit_patch_at(to_loop, loop); + /* Count leading zeros */ + jit_clzr(tmp, tmp); + if (__WORDSIZE != 32) + jit_subi(tmp, tmp, __WORDSIZE - 32); - jit_stxi_i(cp2d_i_offset(31), LIGHTREC_REG_STATE, tmp2); - jit_stxi_i(cp2d_i_offset(30), LIGHTREC_REG_STATE, rt); + jit_stxi_i(cp2d_i_offset(31), LIGHTREC_REG_STATE, tmp); lightrec_free_reg(reg_cache, tmp); - lightrec_free_reg(reg_cache, tmp2); - break; + fallthrough; default: jit_stxi_i(cp2d_i_offset(reg), LIGHTREC_REG_STATE, rt); break; @@ -2397,42 +2639,71 @@ static void rec_meta_MOV(struct lightrec_cstate *state, union code c = op->c; jit_state_t *_jit = block->_jit; bool unload_rd; + bool unload_rs, discard_rs; u8 rs, rd; _jit_name(block->_jit, __func__); jit_note(__FILE__, __LINE__); + unload_rs = OPT_EARLY_UNLOAD + && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_UNLOAD; + discard_rs = OPT_EARLY_UNLOAD + && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_DISCARD; + + if ((unload_rs || discard_rs) && c.m.rs) { + /* If the source register is going to be unloaded or discarded, + * then we can simply mark its host register as now pointing to + * the destination register. */ + pr_debug("Remap %s to %s at offset 0x%x\n", + lightrec_reg_name(c.m.rs), lightrec_reg_name(c.m.rd), + offset << 2); + rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0); + lightrec_remap_reg(reg_cache, _jit, rs, c.m.rd, discard_rs); + lightrec_free_reg(reg_cache, rs); + return; + } + unload_rd = OPT_EARLY_UNLOAD && LIGHTREC_FLAGS_GET_RD(op->flags) == LIGHTREC_REG_UNLOAD; - if (c.r.rs || unload_rd) - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0); + if (c.m.rs && !lightrec_reg_is_loaded(reg_cache, c.m.rs)) { + /* The source register is not yet loaded - we can load its value + * from the register cache directly into the target register. */ + rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT); + + jit_ldxi_i(rd, LIGHTREC_REG_STATE, + offsetof(struct lightrec_state, regs.gpr) + (c.m.rs << 2)); - if (unload_rd) { + lightrec_free_reg(reg_cache, rd); + } else if (unload_rd) { /* If the destination register will be unloaded right after the * MOV meta-opcode, we don't actually need to write any host * register - we can just store the source register directly to * the register cache, at the offset corresponding to the * destination register. 
*/ - lightrec_discard_reg_if_loaded(reg_cache, c.r.rd); + lightrec_discard_reg_if_loaded(reg_cache, c.m.rd); + + rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0); jit_stxi_i(offsetof(struct lightrec_state, regs.gpr) - + c.r.rd << 2, LIGHTREC_REG_STATE, rs); + + (c.m.rd << 2), LIGHTREC_REG_STATE, rs); lightrec_free_reg(reg_cache, rs); } else { - rd = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rd, REG_EXT); + if (c.m.rs) + rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0); - if (c.r.rs == 0) + rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT); + + if (c.m.rs == 0) { jit_movi(rd, 0); - else + } else { jit_extr_i(rd, rs); + lightrec_free_reg(reg_cache, rs); + } lightrec_free_reg(reg_cache, rd); } - - if (c.r.rs || unload_rd) - lightrec_free_reg(reg_cache, rs); } static void rec_meta_EXTC_EXTS(struct lightrec_cstate *state, @@ -2442,21 +2713,21 @@ static void rec_meta_EXTC_EXTS(struct lightrec_cstate *state, struct regcache *reg_cache = state->reg_cache; union code c = block->opcode_list[offset].c; jit_state_t *_jit = block->_jit; - u8 rs, rt; + u8 rs, rd; _jit_name(block->_jit, __func__); jit_note(__FILE__, __LINE__); - rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0); - rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, REG_EXT); + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.m.rs, c.m.rd, 0, REG_EXT, &rs, &rd); - if (c.i.op == OP_META_EXTC) - jit_extr_c(rt, rs); + if (c.m.op == OP_META_EXTC) + jit_extr_c(rd, rs); else - jit_extr_s(rt, rs); + jit_extr_s(rd, rs); lightrec_free_reg(reg_cache, rs); - lightrec_free_reg(reg_cache, rt); + lightrec_free_reg(reg_cache, rd); } static void rec_meta_MULT2(struct lightrec_cstate *state, @@ -2506,12 +2777,19 @@ static void rec_meta_MULT2(struct lightrec_cstate *state, hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, hiflags); - if (c.r.op >= 32) + if (c.r.op >= 32) { jit_lshi(hi, rs, c.r.op - 32); - else if (is_signed) - jit_rshi(hi, rs, 32 - c.r.op); - else - jit_rshi_u(hi, rs, 32 - c.r.op); + } else if (is_signed) { + if (c.r.op) + jit_rshi(hi, rs, 32 - c.r.op); + else + jit_rshi(hi, rs, 31); + } else { + if (c.r.op) + jit_rshi_u(hi, rs, 32 - c.r.op); + else + jit_movi(hi, 0); + } lightrec_free_reg(reg_cache, hi); } @@ -2523,6 +2801,60 @@ static void rec_meta_MULT2(struct lightrec_cstate *state, jit_note(__FILE__, __LINE__); } +static void rec_meta_COM(struct lightrec_cstate *state, + const struct block *block, u16 offset) +{ + struct regcache *reg_cache = state->reg_cache; + union code c = block->opcode_list[offset].c; + jit_state_t *_jit = block->_jit; + u8 rd, rs, flags; + + jit_note(__FILE__, __LINE__); + + rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset], + c.m.rs, c.m.rd, 0, 0, &rs, &rd); + + flags = lightrec_get_reg_in_flags(reg_cache, rs); + + lightrec_set_reg_out_flags(reg_cache, rd, + flags & REG_EXT); + + jit_comr(rd, rs); + + lightrec_free_reg(reg_cache, rs); + lightrec_free_reg(reg_cache, rd); +} + +static void rec_meta_LWU(struct lightrec_cstate *state, + const struct block *block, + u16 offset) +{ + jit_code_t code; + + if (is_big_endian() && __WORDSIZE == 64) + code = jit_code_unldr_u; + else + code = jit_code_unldr; + + _jit_name(block->_jit, __func__); + rec_load(state, block, offset, code, jit_code_bswapr_ui, false); +} + +static void rec_meta_SWU(struct lightrec_cstate *state, + const struct block *block, + u16 offset) +{ + _jit_name(block->_jit, __func__); + rec_store(state, block, offset, jit_code_unstr, jit_code_bswapr_ui); +} + +static void unknown_opcode(struct 
lightrec_cstate *state, + const struct block *block, u16 offset) +{ + rec_exit_early(state, block, offset, LIGHTREC_EXIT_UNKNOWN_OP, + block->pc + (offset << 2)); +} + static const lightrec_rec_func_t rec_standard[64] = { SET_DEFAULT_ELM(rec_standard, unknown_opcode), [OP_SPECIAL] = rec_SPECIAL, @@ -2558,11 +2890,11 @@ static const lightrec_rec_func_t rec_standard[64] = { [OP_LWC2] = rec_LW, [OP_SWC2] = rec_SW, - [OP_META_MOV] = rec_meta_MOV, - [OP_META_EXTC] = rec_meta_EXTC_EXTS, - [OP_META_EXTS] = rec_meta_EXTC_EXTS, + [OP_META] = rec_META, [OP_META_MULT2] = rec_meta_MULT2, [OP_META_MULTU2] = rec_meta_MULT2, + [OP_META_LWU] = rec_meta_LWU, + [OP_META_SWU] = rec_meta_SWU, }; static const lightrec_rec_func_t rec_special[64] = { @@ -2622,6 +2954,14 @@ static const lightrec_rec_func_t rec_cp2_basic[64] = { [OP_CP2_BASIC_CTC2] = rec_cp2_basic_CTC2, }; +static const lightrec_rec_func_t rec_meta[64] = { + SET_DEFAULT_ELM(rec_meta, unknown_opcode), + [OP_META_MOV] = rec_meta_MOV, + [OP_META_EXTC] = rec_meta_EXTC_EXTS, + [OP_META_EXTS] = rec_meta_EXTC_EXTS, + [OP_META_COM] = rec_meta_COM, +}; + static void rec_SPECIAL(struct lightrec_cstate *state, const struct block *block, u16 offset) { @@ -2675,6 +3015,18 @@ static void rec_CP2(struct lightrec_cstate *state, rec_CP(state, block, offset); } +static void rec_META(struct lightrec_cstate *state, + const struct block *block, u16 offset) +{ + union code c = block->opcode_list[offset].c; + lightrec_rec_func_t f = rec_meta[c.m.op]; + + if (!HAS_DEFAULT_ELM && unlikely(!f)) + unknown_opcode(state, block, offset); + else + (*f)(state, block, offset); +} + void lightrec_rec_opcode(struct lightrec_cstate *state, const struct block *block, u16 offset) { @@ -2714,4 +3066,6 @@ void lightrec_rec_opcode(struct lightrec_cstate *state, lightrec_do_early_unload(state, block, unload_offset); } + + state->no_load_delay = false; }
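
Note on the MTC2 register-30 change above: the handler for GTE data register 30 (LZCS) drops the old one-bit-per-iteration counting loop and emits a single jit_clzr (adjusted by __WORDSIZE - 32 on 64-bit hosts) to produce the value stored into cp2d register 31 (LZCR). A minimal scalar sketch of the value that sequence computes is shown below, under the usual GTE definition (leading zeros of LZCS when non-negative, leading ones when negative); the helper name gte_lzcr is illustrative only and does not appear in the patch.

    #include <stdint.h>

    /* Reference (non-JIT) computation of LZCR from LZCS, mirroring the
     * emitted rshi/xorr + clzr sequence. Illustrative sketch only. */
    static uint32_t gte_lzcr(int32_t lzcs)
    {
        /* if (rt < 0) rt = ~rt; -- same trick as the rshi/xorr pair */
        uint32_t v = (uint32_t)(lzcs ^ (lzcs >> 31));

        /* __builtin_clz(0) is undefined in C, so map 0 and -1 to 32
         * explicitly; the emitted code leaves that case to jit_clzr. */
        return v ? (uint32_t)__builtin_clz(v) : 32;
    }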