X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=deps%2Flightrec%2Femitter.c;h=b8025aacd5809f10fabcb69f4f425f8ce404c065;hb=d75d83f79fd867214fe53d167a13d0ff2bf1e7d0;hp=b7ace1945b675dc6a3b51059a30764757dd83a86;hpb=03b78a3bf48813202e01149ae0b3c5c1f01efb4c;p=pcsx_rearmed.git diff --git a/deps/lightrec/emitter.c b/deps/lightrec/emitter.c index b7ace194..b8025aac 100644 --- a/deps/lightrec/emitter.c +++ b/deps/lightrec/emitter.c @@ -14,6 +14,8 @@ #include #include +#define LIGHTNING_UNALIGNED_32BIT 4 + typedef void (*lightrec_rec_func_t)(struct lightrec_cstate *, const struct block *, u16); /* Forward declarations */ @@ -28,13 +30,6 @@ static void rec_cp2_do_mfc2(struct lightrec_cstate *state, const struct block *block, u16 offset, u8 reg, u8 out_reg); -static void unknown_opcode(struct lightrec_cstate *state, const struct block *block, u16 offset) -{ - pr_warn("Unknown opcode: 0x%08x at PC 0x%08x\n", - block->opcode_list[offset].c.opcode, - block->pc + (offset << 2)); -} - static void lightrec_jump_to_fn(jit_state_t *_jit, void (*fn)(void)) { @@ -76,6 +71,7 @@ static void lightrec_emit_end_of_block(struct lightrec_cstate *state, const struct opcode *op = &block->opcode_list[offset], *ds = get_delay_slot(block->opcode_list, offset); u32 cycles = state->cycles + lightrec_cycles_of_opcode(state->state, op->c); + bool has_ds = has_delay_slot(op->c); jit_note(__FILE__, __LINE__); @@ -94,8 +90,7 @@ static void lightrec_emit_end_of_block(struct lightrec_cstate *state, update_ra_register(reg_cache, _jit, ra_reg, block->pc, link); } - if (has_delay_slot(op->c) && - !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) { + if (has_ds && !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) { cycles += lightrec_cycles_of_opcode(state->state, ds->c); /* Recompile the delay slot */ @@ -111,7 +106,7 @@ static void lightrec_emit_end_of_block(struct lightrec_cstate *state, pr_debug("EOB: %u cycles\n", cycles); } - if (op_flag_load_delay(ds->flags) + if (has_ds && op_flag_load_delay(ds->flags) && opcode_is_load(ds->c) && !state->no_load_delay) { /* If the delay slot is a load opcode, its target register * will be written after the first opcode of the target is @@ -949,6 +944,8 @@ static void rec_alu_mult(struct lightrec_cstate *state, u8 reg_hi = get_mult_div_hi(c); jit_state_t *_jit = block->_jit; u8 lo, hi, rs, rt, rflags = 0; + bool no_lo = op_flag_no_lo(flags); + bool no_hi = op_flag_no_hi(flags); jit_note(__FILE__, __LINE__); @@ -960,44 +957,46 @@ static void rec_alu_mult(struct lightrec_cstate *state, rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags); rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags); - if (!op_flag_no_lo(flags)) + if (!no_lo) lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0); - else if (__WORDSIZE == 32) - lo = lightrec_alloc_reg_temp(reg_cache, _jit); - if (!op_flag_no_hi(flags)) + if (!no_hi) hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, REG_EXT); if (__WORDSIZE == 32) { /* On 32-bit systems, do a 32*32->64 bit operation, or a 32*32->32 bit * operation if the MULT was detected a 32-bit only. */ - if (!op_flag_no_hi(flags)) { + if (no_lo) { if (is_signed) - jit_qmulr(lo, hi, rs, rt); + jit_hmulr(hi, rs, rt); else - jit_qmulr_u(lo, hi, rs, rt); - } else { + jit_hmulr_u(hi, rs, rt); + } else if (no_hi) { jit_mulr(lo, rs, rt); + } else if (is_signed) { + jit_qmulr(lo, hi, rs, rt); + } else { + jit_qmulr_u(lo, hi, rs, rt); } } else { /* On 64-bit systems, do a 64*64->64 bit operation. 
*/ - if (op_flag_no_lo(flags)) { + if (no_lo) { jit_mulr(hi, rs, rt); jit_rshi(hi, hi, 32); } else { jit_mulr(lo, rs, rt); /* The 64-bit output value is in $lo, store the upper 32 bits in $hi */ - if (!op_flag_no_hi(flags)) + if (!no_hi) jit_rshi(hi, lo, 32); } } lightrec_free_reg(reg_cache, rs); lightrec_free_reg(reg_cache, rt); - if (!op_flag_no_lo(flags) || __WORDSIZE == 32) + if (!no_lo) lightrec_free_reg(reg_cache, lo); - if (!op_flag_no_hi(flags)) + if (!no_hi) lightrec_free_reg(reg_cache, hi); } @@ -1247,7 +1246,7 @@ static void rec_io(struct lightrec_cstate *state, } } -static u32 rec_ram_mask(struct lightrec_state *state) +static u32 rec_ram_mask(const struct lightrec_state *state) { return (RAM_SIZE << (state->mirrors_mapped * 2)) - 1; } @@ -1271,13 +1270,14 @@ static void rec_store_memory(struct lightrec_cstate *cstate, struct opcode *op = &block->opcode_list[offset]; jit_state_t *_jit = block->_jit; union code c = op->c; - u8 rs, rt, tmp, tmp2, tmp3, addr_reg, addr_reg2; + u8 rs, rt, tmp = 0, tmp2 = 0, tmp3, addr_reg, addr_reg2; s16 imm = (s16)c.i.imm; s32 simm = (s32)imm << (1 - lut_is_32bit(state)); s32 lut_offt = offsetof(struct lightrec_state, code_lut); bool no_mask = op_flag_no_mask(op->flags); bool add_imm = c.i.imm && - ((!state->mirrors_mapped && !no_mask) || (invalidate && + (c.i.op == OP_META_SWU + || (!state->mirrors_mapped && !no_mask) || (invalidate && ((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt)))); bool need_tmp = !no_mask || add_imm || invalidate; bool swc2 = c.i.op == OP_SWC2; @@ -1327,9 +1327,15 @@ static void rec_store_memory(struct lightrec_cstate *cstate, tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit); jit_new_node_ww(swap_code, tmp3, rt); - jit_new_node_www(code, imm, addr_reg2, tmp3); + + if (c.i.op == OP_META_SWU) + jit_unstr(addr_reg2, tmp3, LIGHTNING_UNALIGNED_32BIT); + else + jit_new_node_www(code, imm, addr_reg2, tmp3); lightrec_free_reg(reg_cache, tmp3); + } else if (c.i.op == OP_META_SWU) { + jit_unstr(addr_reg2, rt, LIGHTNING_UNALIGNED_32BIT); } else { jit_new_node_www(code, imm, addr_reg2, rt); } @@ -1376,7 +1382,7 @@ static void rec_store_ram(struct lightrec_cstate *cstate, u16 offset, jit_code_t code, jit_code_t swap_code, bool invalidate) { - struct lightrec_state *state = cstate->state; + const struct lightrec_state *state = cstate->state; _jit_note(block->_jit, __FILE__, __LINE__); @@ -1412,7 +1418,7 @@ static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate, u16 offset, jit_code_t code, jit_code_t swap_code) { - struct lightrec_state *state = cstate->state; + const struct lightrec_state *state = cstate->state; struct regcache *reg_cache = cstate->reg_cache; union code c = block->opcode_list[offset].c; jit_state_t *_jit = block->_jit; @@ -1435,7 +1441,7 @@ static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate, reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask); /* Convert to KUNSEG and avoid RAM mirrors */ - if (!state->mirrors_mapped && c.i.imm) { + if ((c.i.op == OP_META_SWU || !state->mirrors_mapped) && c.i.imm) { imm = 0; jit_addi(tmp, rs, (s16)c.i.imm); jit_andr(tmp, tmp, reg_imm); @@ -1475,9 +1481,15 @@ static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate, tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit); jit_new_node_ww(swap_code, tmp2, rt); - jit_new_node_www(code, imm, tmp, tmp2); + + if (c.i.op == OP_META_SWU) + jit_unstr(tmp, tmp2, LIGHTNING_UNALIGNED_32BIT); + else + jit_new_node_www(code, imm, tmp, tmp2); lightrec_free_reg(reg_cache, 
tmp2); + } else if (c.i.op == OP_META_SWU) { + jit_unstr(tmp, rt, LIGHTNING_UNALIGNED_32BIT); } else { jit_new_node_www(code, imm, tmp, rt); } @@ -1489,7 +1501,7 @@ static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate, static void rec_store_direct(struct lightrec_cstate *cstate, const struct block *block, u16 offset, jit_code_t code, jit_code_t swap_code) { - struct lightrec_state *state = cstate->state; + const struct lightrec_state *state = cstate->state; u32 ram_size = state->mirrors_mapped ? RAM_SIZE * 4 : RAM_SIZE; struct regcache *reg_cache = cstate->reg_cache; union code c = block->opcode_list[offset].c; @@ -1499,6 +1511,7 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block u8 tmp, tmp2, tmp3, masked_reg, rs, rt; u8 in_reg = swc2 ? REG_TEMP : c.i.rt; u32 addr_mask = 0x1f800000 | (ram_size - 1); + bool different_offsets = state->offset_ram != state->offset_scratch; s32 reg_imm; jit_note(__FILE__, __LINE__); @@ -1521,7 +1534,7 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block lightrec_free_reg(reg_cache, reg_imm); tmp = lightrec_alloc_reg_temp(reg_cache, _jit); - if (state->offset_ram != state->offset_scratch) { + if (different_offsets) { to_not_ram = jit_bgti(tmp2, ram_size); masked_reg = tmp2; } else { @@ -1546,7 +1559,19 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block else jit_stxi(offsetof(struct lightrec_state, code_lut), tmp, tmp3); - if (state->offset_ram != state->offset_scratch) { + if (c.i.op == OP_META_SWU) { + /* With a SWU opcode, we might have touched the following 32-bit + * word, so invalidate it as well */ + if (lut_is_32bit(state)) { + jit_stxi_i(offsetof(struct lightrec_state, code_lut) + 4, + tmp, tmp3); + } else { + jit_stxi(offsetof(struct lightrec_state, code_lut) + + sizeof(uintptr_t), tmp, tmp3); + } + } + + if (different_offsets) { jit_movi(tmp, state->offset_ram); to_end = jit_b(); @@ -1556,7 +1581,7 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block if (state->offset_ram || state->offset_scratch) jit_movi(tmp, state->offset_scratch); - if (state->offset_ram != state->offset_scratch) + if (different_offsets) jit_patch(to_end); if (state->offset_ram || state->offset_scratch) @@ -1571,9 +1596,15 @@ static void rec_store_direct(struct lightrec_cstate *cstate, const struct block tmp = lightrec_alloc_reg_temp(reg_cache, _jit); jit_new_node_ww(swap_code, tmp, rt); - jit_new_node_www(code, 0, tmp2, tmp); + + if (c.i.op == OP_META_SWU) + jit_unstr(tmp2, tmp, LIGHTNING_UNALIGNED_32BIT); + else + jit_new_node_www(code, 0, tmp2, tmp); lightrec_free_reg(reg_cache, tmp); + } else if (c.i.op == OP_META_SWU) { + jit_unstr(tmp2, rt, LIGHTNING_UNALIGNED_32BIT); } else { jit_new_node_www(code, 0, tmp2, rt); } @@ -1702,7 +1733,8 @@ static void rec_load_memory(struct lightrec_cstate *cstate, rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0); rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags); - if (!cstate->state->mirrors_mapped && c.i.imm && !no_mask) { + if ((op->i.op == OP_META_LWU && c.i.imm) + || (!cstate->state->mirrors_mapped && c.i.imm && !no_mask)) { jit_addi(rt, rs, (s16)c.i.imm); addr_reg = rt; imm = 0; @@ -1711,6 +1743,9 @@ static void rec_load_memory(struct lightrec_cstate *cstate, imm = (s16)c.i.imm; } + if (op->i.op == OP_META_LWU) + imm = LIGHTNING_UNALIGNED_32BIT; + if (!no_mask) { reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask); @@ -1791,13 +1826,14 @@ static void 
rec_load_direct(struct lightrec_cstate *cstate, jit_code_t code, jit_code_t swap_code, bool is_unsigned) { - struct lightrec_state *state = cstate->state; + const struct lightrec_state *state = cstate->state; struct regcache *reg_cache = cstate->reg_cache; struct opcode *op = &block->opcode_list[offset]; bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay; jit_state_t *_jit = block->_jit; jit_node_t *to_not_ram, *to_not_bios, *to_end, *to_end2; u8 tmp, rs, rt, out_reg, addr_reg, flags = REG_EXT; + bool different_offsets = state->offset_bios != state->offset_scratch; union code c = op->c; s32 addr_mask; u32 reg_imm; @@ -1820,7 +1856,8 @@ static void rec_load_direct(struct lightrec_cstate *cstate, if ((state->offset_ram == state->offset_bios && state->offset_ram == state->offset_scratch && - state->mirrors_mapped) || !c.i.imm) { + state->mirrors_mapped && c.i.op != OP_META_LWU) + || !c.i.imm) { addr_reg = rs; imm = (s16)c.i.imm; } else { @@ -1832,6 +1869,9 @@ static void rec_load_direct(struct lightrec_cstate *cstate, lightrec_free_reg(reg_cache, rs); } + if (op->i.op == OP_META_LWU) + imm = LIGHTNING_UNALIGNED_32BIT; + tmp = lightrec_alloc_reg_temp(reg_cache, _jit); if (state->offset_ram == state->offset_bios && @@ -1879,7 +1919,7 @@ static void rec_load_direct(struct lightrec_cstate *cstate, jit_patch(to_not_ram); - if (state->offset_bios != state->offset_scratch) + if (different_offsets) to_not_bios = jit_bmci(addr_reg, BIT(22)); /* Convert to KUNSEG */ @@ -1887,7 +1927,7 @@ static void rec_load_direct(struct lightrec_cstate *cstate, jit_movi(tmp, state->offset_bios); - if (state->offset_bios != state->offset_scratch) { + if (different_offsets) { to_end2 = jit_b(); jit_patch(to_not_bios); @@ -2009,9 +2049,9 @@ static void rec_LW(struct lightrec_cstate *state, const struct block *block, u16 rec_load(state, block, offset, code, jit_code_bswapr_ui, false); } -static void rec_break_syscall(struct lightrec_cstate *state, - const struct block *block, u16 offset, - u32 exit_code) +static void rec_exit_early(struct lightrec_cstate *state, + const struct block *block, u16 offset, + u32 exit_code, u32 pc) { struct regcache *reg_cache = state->reg_cache; jit_state_t *_jit = block->_jit; @@ -2036,24 +2076,25 @@ static void rec_break_syscall(struct lightrec_cstate *state, lightrec_free_reg(reg_cache, tmp); - /* TODO: the return address should be "pc - 4" if we're a delay slot */ - lightrec_emit_end_of_block(state, block, offset, -1, - get_ds_pc(block, offset, 0), - 31, 0, true); + lightrec_emit_end_of_block(state, block, offset, -1, pc, 31, 0, true); } static void rec_special_SYSCALL(struct lightrec_cstate *state, const struct block *block, u16 offset) { _jit_name(block->_jit, __func__); - rec_break_syscall(state, block, offset, LIGHTREC_EXIT_SYSCALL); + + /* TODO: the return address should be "pc - 4" if we're a delay slot */ + rec_exit_early(state, block, offset, LIGHTREC_EXIT_SYSCALL, + get_ds_pc(block, offset, 0)); } static void rec_special_BREAK(struct lightrec_cstate *state, const struct block *block, u16 offset) { _jit_name(block->_jit, __func__); - rec_break_syscall(state, block, offset, LIGHTREC_EXIT_BREAK); + rec_exit_early(state, block, offset, LIGHTREC_EXIT_BREAK, + get_ds_pc(block, offset, 0)); } static void rec_mfc(struct lightrec_cstate *state, const struct block *block, u16 offset) @@ -2736,12 +2777,19 @@ static void rec_meta_MULT2(struct lightrec_cstate *state, hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, hiflags); - if (c.r.op >= 32) + if (c.r.op >= 32) 
{ jit_lshi(hi, rs, c.r.op - 32); - else if (is_signed) - jit_rshi(hi, rs, 32 - c.r.op); - else - jit_rshi_u(hi, rs, 32 - c.r.op); + } else if (is_signed) { + if (c.r.op) + jit_rshi(hi, rs, 32 - c.r.op); + else + jit_rshi(hi, rs, 31); + } else { + if (c.r.op) + jit_rshi_u(hi, rs, 32 - c.r.op); + else + jit_movi(hi, 0); + } lightrec_free_reg(reg_cache, hi); } @@ -2777,6 +2825,36 @@ static void rec_meta_COM(struct lightrec_cstate *state, lightrec_free_reg(reg_cache, rd); } +static void rec_meta_LWU(struct lightrec_cstate *state, + const struct block *block, + u16 offset) +{ + jit_code_t code; + + if (is_big_endian() && __WORDSIZE == 64) + code = jit_code_unldr_u; + else + code = jit_code_unldr; + + _jit_name(block->_jit, __func__); + rec_load(state, block, offset, code, jit_code_bswapr_ui, false); +} + +static void rec_meta_SWU(struct lightrec_cstate *state, + const struct block *block, + u16 offset) +{ + _jit_name(block->_jit, __func__); + rec_store(state, block, offset, jit_code_unstr, jit_code_bswapr_ui); +} + +static void unknown_opcode(struct lightrec_cstate *state, + const struct block *block, u16 offset) +{ + rec_exit_early(state, block, offset, LIGHTREC_EXIT_UNKNOWN_OP, + block->pc + (offset << 2)); +} + static const lightrec_rec_func_t rec_standard[64] = { SET_DEFAULT_ELM(rec_standard, unknown_opcode), [OP_SPECIAL] = rec_SPECIAL, @@ -2815,6 +2893,8 @@ static const lightrec_rec_func_t rec_standard[64] = { [OP_META] = rec_META, [OP_META_MULT2] = rec_meta_MULT2, [OP_META_MULTU2] = rec_meta_MULT2, + [OP_META_LWU] = rec_meta_LWU, + [OP_META_SWU] = rec_meta_SWU, }; static const lightrec_rec_func_t rec_special[64] = {
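
Note on the new OP_META_LWU / OP_META_SWU paths in this diff: they emit GNU Lightning's unaligned load/store nodes (jit_unldr / jit_unstr), passing LIGHTNING_UNALIGNED_32BIT (defined as 4 above) as the access size, and rec_store_direct additionally invalidates the next entry in the code LUT because an unaligned 32-bit store may span two aligned words. The snippet below is only a minimal plain-C sketch of the memory semantics those meta-opcodes stand for; the helper names are hypothetical and are not part of lightrec or Lightning.

#include <stdint.h>
#include <string.h>

/* Hypothetical helpers: portable-C equivalent of the unaligned 32-bit
 * access that OP_META_LWU / OP_META_SWU represent. memcpy() is the
 * standard way to express an access that may cross a 4-byte boundary
 * without invoking undefined behaviour. */
static uint32_t unaligned_load32(const void *addr)
{
	uint32_t val;

	memcpy(&val, addr, sizeof(val));
	return val;
}

static void unaligned_store32(void *addr, uint32_t val)
{
	memcpy(addr, &val, sizeof(val));
}

Since such a store can touch bytes in both the aligned word containing the address and the word that follows it, clearing only one code-LUT slot would not be enough, which is why the SWU path above also clears the slot at +4 (32-bit LUT) or +sizeof(uintptr_t).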