static void rec_REGIMM(struct lightrec_cstate *state, const struct block *block, u16 offset);
static void rec_CP0(struct lightrec_cstate *state, const struct block *block, u16 offset);
static void rec_CP2(struct lightrec_cstate *state, const struct block *block, u16 offset);
+static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
+ const struct block *block, u16 offset, u8 reg, u8 in_reg);
+static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
+ const struct block *block, u16 offset,
+ u8 reg, u8 out_reg);
static void unknown_opcode(struct lightrec_cstate *state, const struct block *block, u16 offset)
{
block->pc + (offset << 2));
}
+static void
+lightrec_jump_to_eob(struct lightrec_cstate *state, jit_state_t *_jit)
+{
+ /* Prevent jit_jmpi() from using our cycles register as a temporary */
+ jit_live(LIGHTREC_REG_CYCLE);
+
+ jit_patch_abs(jit_jmpi(), state->state->eob_wrapper_func);
+}
+
+static void update_ra_register(struct regcache *reg_cache, jit_state_t *_jit,
+ u8 ra_reg, u32 pc, u32 link)
+{
+ u8 link_reg;
+
+ link_reg = lightrec_alloc_reg_out(reg_cache, _jit, ra_reg, 0);
+ lightrec_load_imm(reg_cache, _jit, link_reg, pc, link);
+ lightrec_free_reg(reg_cache, link_reg);
+}
+
static void lightrec_emit_end_of_block(struct lightrec_cstate *state,
const struct block *block, u16 offset,
s8 reg_new_pc, u32 imm, u8 ra_reg,
const struct opcode *op = &block->opcode_list[offset],
*next = &block->opcode_list[offset + 1];
u32 cycles = state->cycles + lightrec_cycles_of_opcode(op->c);
- u16 offset_after_eob;
jit_note(__FILE__, __LINE__);
- if (link) {
- /* Update the $ra register */
- u8 link_reg = lightrec_alloc_reg_out(reg_cache, _jit, ra_reg, 0);
- jit_movi(link_reg, link);
- lightrec_free_reg(reg_cache, link_reg);
- }
+ if (link && ra_reg != reg_new_pc)
+ update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
- if (reg_new_pc < 0) {
- reg_new_pc = lightrec_alloc_reg(reg_cache, _jit, JIT_V0);
- lightrec_lock_reg(reg_cache, _jit, reg_new_pc);
+ if (reg_new_pc < 0)
+ lightrec_load_next_pc_imm(reg_cache, _jit, block->pc, imm);
+ else
+ lightrec_load_next_pc(reg_cache, _jit, reg_new_pc);
- jit_movi(reg_new_pc, imm);
+ if (link && ra_reg == reg_new_pc) {
+ /* Handle the special case: JALR $r0, $r0
+ * In that case the target PC should be the old value of the
+ * register. */
+ update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
}
if (has_delay_slot(op->c) &&
/* Clean the remaining registers */
lightrec_clean_regs(reg_cache, _jit);
- jit_movr(JIT_V0, reg_new_pc);
-
if (cycles && update_cycles) {
jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
pr_debug("EOB: %u cycles\n", cycles);
}
- offset_after_eob = offset + 1 +
- (has_delay_slot(op->c) && !op_flag_no_ds(op->flags));
-
- if (offset_after_eob < block->nb_ops)
- state->branches[state->nb_branches++] = jit_b();
+ lightrec_jump_to_eob(state, _jit);
}
-void lightrec_emit_eob(struct lightrec_cstate *state, const struct block *block,
- u16 offset, bool after_op)
+void lightrec_emit_eob(struct lightrec_cstate *state,
+ const struct block *block, u16 offset)
{
struct regcache *reg_cache = state->reg_cache;
jit_state_t *_jit = block->_jit;
- union code c = block->opcode_list[offset].c;
- u32 cycles = state->cycles;
-
- if (after_op)
- cycles += lightrec_cycles_of_opcode(c);
lightrec_clean_regs(reg_cache, _jit);
- jit_movi(JIT_V0, block->pc + (offset << 2));
- jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
+ lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
+ block->pc + (offset << 2));
+ jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
- state->branches[state->nb_branches++] = jit_b();
-}
-
-static u8 get_jr_jalr_reg(struct lightrec_cstate *state, const struct block *block, u16 offset)
-{
- struct regcache *reg_cache = state->reg_cache;
- jit_state_t *_jit = block->_jit;
- const struct opcode *op = &block->opcode_list[offset];
- u8 rs;
-
- rs = lightrec_request_reg_in(reg_cache, _jit, op->r.rs, JIT_V0);
- lightrec_lock_reg(reg_cache, _jit, rs);
-
- return rs;
+ lightrec_jump_to_eob(state, _jit);
}
/* Recompile JR: end the block, jumping to the address held in $rs. */
static void rec_special_JR(struct lightrec_cstate *state, const struct block *block, u16 offset)
{
	union code op = block->opcode_list[offset].c;

	_jit_name(block->_jit, __func__);
	lightrec_emit_end_of_block(state, block, offset, op.r.rs, 0, 31, 0, true);
}
/* Recompile JALR: jump to the address in $rs, storing the return
 * address into $rd. */
static void rec_special_JALR(struct lightrec_cstate *state, const struct block *block, u16 offset)
{
	union code op = block->opcode_list[offset].c;

	_jit_name(block->_jit, __func__);
	lightrec_emit_end_of_block(state, block, offset, op.r.rs, 0, op.r.rd,
				   get_branch_pc(block, offset, 2), true);
}
}
static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 offset,
- jit_code_t code, u32 link, bool unconditional, bool bz)
+ jit_code_t code, jit_code_t code2, u32 link, bool unconditional, bool bz)
{
struct regcache *reg_cache = state->reg_cache;
struct native_register *regs_backup;
const struct opcode *op = &block->opcode_list[offset],
*next = &block->opcode_list[offset + 1];
jit_node_t *addr;
- u8 link_reg, rs, rt;
bool is_forward = (s16)op->i.imm >= -1;
int op_cycles = lightrec_cycles_of_opcode(op->c);
u32 target_offset, cycles = state->cycles + op_cycles;
+ bool no_indirection = false;
u32 next_pc;
+ u8 rs, rt;
jit_note(__FILE__, __LINE__);
/* Unload dead registers before evaluating the branch */
if (OPT_EARLY_UNLOAD)
lightrec_do_early_unload(state, block, offset);
+
+ if (op_flag_local_branch(op->flags) &&
+ (op_flag_no_ds(op->flags) || !next->opcode) &&
+ is_forward && !lightrec_has_dirty_regs(reg_cache))
+ no_indirection = true;
+
+ if (no_indirection)
+ pr_debug("Using no indirection for branch at offset 0x%hx\n", offset << 2);
}
if (cycles)
if (!unconditional) {
/* Generate the branch opcode */
- addr = jit_new_node_pww(code, NULL, rs, rt);
+ if (!no_indirection)
+ addr = jit_new_node_pww(code, NULL, rs, rt);
lightrec_free_regs(reg_cache);
regs_backup = lightrec_regcache_enter_branch(reg_cache);
if (op_flag_local_branch(op->flags)) {
/* Recompile the delay slot */
- if (next && next->opcode && !op_flag_no_ds(op->flags))
+ if (!op_flag_no_ds(op->flags) && next->opcode)
lightrec_rec_opcode(state, block, offset + 1);
- if (link) {
- /* Update the $ra register */
- link_reg = lightrec_alloc_reg_out(reg_cache, _jit, 31, 0);
- jit_movi(link_reg, link);
- lightrec_free_reg(reg_cache, link_reg);
- }
+ if (link)
+ update_ra_register(reg_cache, _jit, 31, block->pc, link);
/* Clean remaining registers */
lightrec_clean_regs(reg_cache, _jit);
state->nb_local_branches++];
branch->target = target_offset;
- if (is_forward)
+
+ if (no_indirection)
+ branch->branch = jit_new_node_pww(code2, NULL, rs, rt);
+ else if (is_forward)
branch->branch = jit_b();
else
branch->branch = jit_bgti(LIGHTREC_REG_CYCLE, 0);
}
if (!unconditional) {
- jit_patch(addr);
+ if (!no_indirection)
+ jit_patch(addr);
+
lightrec_regcache_leave_branch(reg_cache, regs_backup);
- if (bz && link) {
- /* Update the $ra register */
- link_reg = lightrec_alloc_reg_out(reg_cache, _jit,
- 31, REG_EXT);
- jit_movi(link_reg, (s32)link);
- lightrec_free_reg(reg_cache, link_reg);
- }
+ if (bz && link)
+ update_ra_register(reg_cache, _jit, 31, block->pc, link);
if (!op_flag_no_ds(op->flags) && next->opcode)
lightrec_rec_opcode(state, block, offset + 1);
_jit_name(block->_jit, __func__);
if (c.i.rt == 0)
- rec_b(state, block, offset, jit_code_beqi, 0, false, true);
+ rec_b(state, block, offset, jit_code_beqi, jit_code_bnei, 0, false, true);
else
- rec_b(state, block, offset, jit_code_beqr, 0, false, false);
+ rec_b(state, block, offset, jit_code_beqr, jit_code_bner, 0, false, false);
}
static void rec_BEQ(struct lightrec_cstate *state,
_jit_name(block->_jit, __func__);
if (c.i.rt == 0)
- rec_b(state, block, offset, jit_code_bnei, 0, c.i.rs == 0, true);
+ rec_b(state, block, offset, jit_code_bnei, jit_code_beqi, 0, c.i.rs == 0, true);
else
- rec_b(state, block, offset, jit_code_bner, 0, c.i.rs == c.i.rt, false);
+ rec_b(state, block, offset, jit_code_bner, jit_code_beqr, 0, c.i.rs == c.i.rt, false);
}
static void rec_BLEZ(struct lightrec_cstate *state,
union code c = block->opcode_list[offset].c;
_jit_name(block->_jit, __func__);
- rec_b(state, block, offset, jit_code_bgti, 0, c.i.rs == 0, true);
+ rec_b(state, block, offset, jit_code_bgti, jit_code_blei, 0, c.i.rs == 0, true);
}
/* Recompile BGTZ: branch if $rs > 0. Passes the inverse condition
 * (blei) as the skip branch and the direct condition (bgti) as code2,
 * following rec_b()'s convention. */
static void rec_BGTZ(struct lightrec_cstate *state,
		     const struct block *block, u16 offset)
{
	_jit_name(block->_jit, __func__);
	rec_b(state, block, offset, jit_code_blei, jit_code_bgti, 0, false, true);
}
/* Recompile BLTZ: branch if $rs < 0 (inverse condition bgei as skip
 * branch, direct condition blti as code2). */
static void rec_regimm_BLTZ(struct lightrec_cstate *state,
			    const struct block *block, u16 offset)
{
	_jit_name(block->_jit, __func__);
	rec_b(state, block, offset, jit_code_bgei, jit_code_blti, 0, false, true);
}
/* Recompile BLTZAL: branch-if-less-than-zero and link — same condition
 * codes as BLTZ, plus the return address passed as the link value. */
static void rec_regimm_BLTZAL(struct lightrec_cstate *state,
			      const struct block *block, u16 offset)
{
	_jit_name(block->_jit, __func__);
	rec_b(state, block, offset, jit_code_bgei, jit_code_blti,
	      get_branch_pc(block, offset, 2), false, true);
}
union code c = block->opcode_list[offset].c;
_jit_name(block->_jit, __func__);
- rec_b(state, block, offset, jit_code_blti, 0, !c.i.rs, true);
+ rec_b(state, block, offset, jit_code_blti, jit_code_bgei, 0, !c.i.rs, true);
}
static void rec_regimm_BGEZAL(struct lightrec_cstate *state,
{
const struct opcode *op = &block->opcode_list[offset];
_jit_name(block->_jit, __func__);
- rec_b(state, block, offset, jit_code_blti,
+ rec_b(state, block, offset, jit_code_blti, jit_code_bgei,
get_branch_pc(block, offset, 2),
!op->i.rs, true);
}
if (!nor)
flags_rd = REG_ZEXT & flags_rs & flags_rt;
- /* E(rd) = (E(rs) & E(rt)) | (E(rt) & !Z(rt)) | (E(rs) & !Z(rs)) */
- if ((REG_EXT & flags_rs & flags_rt) ||
- (flags_rt & (REG_EXT | REG_ZEXT) == REG_EXT) ||
- (flags_rs & (REG_EXT | REG_ZEXT) == REG_EXT))
+ /* E(rd) = E(rs) & E(rt) */
+ if (REG_EXT & flags_rs & flags_rt)
flags_rd |= REG_EXT;
lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
if (!op_flag_no_lo(flags)) {
if (is_signed) {
- jit_lti(lo, rs, 0);
+ jit_ltr(lo, rs, rt);
jit_lshi(lo, lo, 1);
jit_subi(lo, lo, 1);
} else {
- jit_movi(lo, 0xffffffff);
+ jit_subi(lo, rt, 1);
}
}
rec_alu_mv_lo_hi(state, block, REG_LO, c.r.rs);
}
-static void call_to_c_wrapper(struct lightrec_cstate *state, const struct block *block,
- u32 arg, bool with_arg, enum c_wrappers wrapper)
+static void call_to_c_wrapper(struct lightrec_cstate *state,
+ const struct block *block, u32 arg,
+ enum c_wrappers wrapper)
{
struct regcache *reg_cache = state->reg_cache;
jit_state_t *_jit = block->_jit;
- u8 tmp;
+ s8 tmp, tmp2;
- tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
- jit_ldxi(tmp, LIGHTREC_REG_STATE,
- offsetof(struct lightrec_state, wrappers_eps[wrapper]));
+ /* Make sure JIT_R1 is not mapped; it will be used in the C wrapper. */
+ tmp2 = lightrec_alloc_reg(reg_cache, _jit, JIT_R1);
- if (with_arg) {
- jit_prepare();
- jit_pushargi(arg);
+ tmp = lightrec_get_reg_with_value(reg_cache,
+ (intptr_t) state->state->wrappers_eps[wrapper]);
+ if (tmp < 0) {
+ tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
+ jit_ldxi(tmp, LIGHTREC_REG_STATE,
+ offsetof(struct lightrec_state, wrappers_eps[wrapper]));
+
+ lightrec_temp_set_value(reg_cache, tmp,
+ (intptr_t) state->state->wrappers_eps[wrapper]);
}
+ lightrec_free_reg(reg_cache, tmp2);
+
+#ifdef __mips__
+ /* On MIPS, register t9 is always used as the target register for JALR.
+ * Therefore if it does not contain the target address we must
+ * invalidate it. */
+ if (tmp != _T9)
+ lightrec_unload_reg(reg_cache, _jit, _T9);
+#endif
+
+ jit_prepare();
+ jit_pushargi(arg);
+
lightrec_regcache_mark_live(reg_cache, _jit);
jit_callr(tmp);
lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
if (is_tagged) {
- call_to_c_wrapper(state, block, c.opcode, true, C_WRAPPER_RW);
+ call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_RW);
} else {
lut_entry = lightrec_get_lut_entry(block);
call_to_c_wrapper(state, block, (lut_entry << 16) | offset,
- true, C_WRAPPER_RW_GENERIC);
+ C_WRAPPER_RW_GENERIC);
}
}
return (RAM_SIZE << (state->mirrors_mapped * 2)) - 1;
}
+static u32 rec_io_mask(const struct lightrec_state *state)
+{
+ u32 length = state->maps[PSX_MAP_HW_REGISTERS].length;
+
+ return GENMASK(31 - clz32(length - 1), 0);
+}
+
static void rec_store_memory(struct lightrec_cstate *cstate,
const struct block *block,
u16 offset, jit_code_t code,
bool add_imm = c.i.imm &&
((!state->mirrors_mapped && !no_mask) || (invalidate &&
((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt))));
- bool need_tmp = !no_mask || addr_offset || add_imm;
- bool need_tmp2 = addr_offset || invalidate;
+ bool need_tmp = !no_mask || addr_offset || add_imm || invalidate;
+ bool swc2 = c.i.op == OP_SWC2;
+ u8 in_reg = swc2 ? REG_CP2_TEMP : c.i.rt;
- rt = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rt, 0);
+ rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
if (need_tmp)
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
addr_reg = tmp;
}
- if (need_tmp2)
- tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
-
if (addr_offset) {
+ tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_addi(tmp2, addr_reg, addr_offset);
addr_reg2 = tmp2;
} else {
addr_reg2 = addr_reg;
}
- if (is_big_endian() && swap_code && c.i.rt) {
+ if (is_big_endian() && swap_code && in_reg) {
tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp3, rt);
tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
if (c.i.op != OP_SW) {
- jit_andi(tmp2, addr_reg, ~3);
- addr_reg = tmp2;
+ jit_andi(tmp, addr_reg, ~3);
+ addr_reg = tmp;
}
if (!lut_is_32bit(state)) {
- jit_lshi(tmp2, addr_reg, 1);
- addr_reg = tmp2;
+ jit_lshi(tmp, addr_reg, 1);
+ addr_reg = tmp;
}
if (addr_reg == rs && c.i.rs == 0) {
addr_reg = LIGHTREC_REG_STATE;
} else {
- jit_addr(tmp2, addr_reg, LIGHTREC_REG_STATE);
- addr_reg = tmp2;
+ jit_addr(tmp, addr_reg, LIGHTREC_REG_STATE);
+ addr_reg = tmp;
}
if (lut_is_32bit(state))
lightrec_free_reg(reg_cache, tmp3);
}
- if (need_tmp2)
+ if (addr_offset)
lightrec_free_reg(reg_cache, tmp2);
if (need_tmp)
lightrec_free_reg(reg_cache, tmp);
0x1fffffff, false);
}
+static void rec_store_io(struct lightrec_cstate *cstate,
+ const struct block *block, u16 offset,
+ jit_code_t code, jit_code_t swap_code)
+{
+ _jit_note(block->_jit, __FILE__, __LINE__);
+
+ return rec_store_memory(cstate, block, offset, code, swap_code,
+ cstate->state->offset_io,
+ rec_io_mask(cstate->state), false);
+}
+
static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate,
const struct block *block,
u16 offset, jit_code_t code,
union code c = block->opcode_list[offset].c;
jit_state_t *_jit = block->_jit;
jit_node_t *to_not_ram, *to_end;
- u8 tmp, tmp2, rs, rt;
+ bool swc2 = c.i.op == OP_SWC2;
+ u8 tmp, tmp2, rs, rt, in_reg = swc2 ? REG_CP2_TEMP : c.i.rt;
s16 imm;
jit_note(__FILE__, __LINE__);
rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
- rt = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rt, 0);
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
if (state->offset_ram || state->offset_scratch)
lightrec_free_reg(reg_cache, tmp2);
}
- if (is_big_endian() && swap_code && c.i.rt) {
+ rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
+
+ if (is_big_endian() && swap_code && in_reg) {
tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp2, rt);
union code c = block->opcode_list[offset].c;
jit_state_t *_jit = block->_jit;
jit_node_t *to_not_ram, *to_end;
- u8 tmp, tmp2, tmp3, rs, rt;
+ bool swc2 = c.i.op == OP_SWC2;
+ u8 tmp, tmp2, tmp3, masked_reg, rs, rt;
+ u8 in_reg = swc2 ? REG_CP2_TEMP : c.i.rt;
jit_note(__FILE__, __LINE__);
lightrec_free_reg(reg_cache, rs);
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
- to_not_ram = jit_bgti(tmp2, ram_size);
+ if (state->offset_ram != state->offset_scratch) {
+ to_not_ram = jit_bgti(tmp2, ram_size);
+ masked_reg = tmp2;
+ } else {
+ jit_lti_u(tmp, tmp2, ram_size);
+ jit_movnr(tmp, tmp2, tmp);
+ masked_reg = tmp;
+ }
/* Compute the offset to the code LUT */
- jit_andi(tmp, tmp2, (RAM_SIZE - 1) & ~3);
+ if (c.i.op == OP_SW)
+ jit_andi(tmp, masked_reg, RAM_SIZE - 1);
+ else
+ jit_andi(tmp, masked_reg, (RAM_SIZE - 1) & ~3);
+
if (!lut_is_32bit(state))
jit_lshi(tmp, tmp, 1);
jit_addr(tmp, LIGHTREC_REG_STATE, tmp);
jit_movi(tmp, state->offset_ram);
to_end = jit_b();
+ jit_patch(to_not_ram);
}
- jit_patch(to_not_ram);
-
if (state->offset_ram || state->offset_scratch)
jit_movi(tmp, state->offset_scratch);
lightrec_free_reg(reg_cache, tmp);
lightrec_free_reg(reg_cache, tmp3);
- rt = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rt, 0);
+ rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
- if (is_big_endian() && swap_code && c.i.rt) {
+ if (is_big_endian() && swap_code && in_reg) {
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp, rt);
jit_code_t code, jit_code_t swap_code)
{
u32 flags = block->opcode_list[offset].flags;
+ u32 mode = LIGHTREC_FLAGS_GET_IO_MODE(flags);
bool no_invalidate = op_flag_no_invalidate(flags) ||
state->state->invalidate_from_dma_only;
+ union code c = block->opcode_list[offset].c;
+ bool is_swc2 = c.i.op == OP_SWC2;
+
+ if (is_swc2) {
+ switch (mode) {
+ case LIGHTREC_IO_RAM:
+ case LIGHTREC_IO_SCRATCH:
+ case LIGHTREC_IO_DIRECT:
+ case LIGHTREC_IO_DIRECT_HW:
+ rec_cp2_do_mfc2(state, block, offset, c.i.rt, REG_CP2_TEMP);
+ break;
+ default:
+ break;
+ }
+ }
- switch (LIGHTREC_FLAGS_GET_IO_MODE(flags)) {
+ switch (mode) {
case LIGHTREC_IO_RAM:
rec_store_ram(state, block, offset, code,
swap_code, !no_invalidate);
rec_store_direct(state, block, offset, code, swap_code);
}
break;
+ case LIGHTREC_IO_DIRECT_HW:
+ rec_store_io(state, block, offset, code, swap_code);
+ break;
default:
rec_io(state, block, offset, true, false);
- break;
+ return;
}
+
+ if (is_swc2)
+ lightrec_discard_reg_if_loaded(state->reg_cache, REG_CP2_TEMP);
}
static void rec_SB(struct lightrec_cstate *state,
const struct block *block, u16 offset)
{
- _jit_name(block->_jit, __func__);
+ union code c = block->opcode_list[offset].c;
+
+ _jit_name(block->_jit, c.i.op == OP_SWC2 ? "rec_SWC2" : "rec_SW");
rec_store(state, block, offset,
jit_code_stxi_i, jit_code_bswapr_ui);
}
rec_io(state, block, offset, true, false);
}
-static void rec_SWC2(struct lightrec_cstate *state,
- const struct block *block, u16 offset)
-{
- _jit_name(block->_jit, __func__);
- rec_io(state, block, offset, false, false);
-}
-
static void rec_load_memory(struct lightrec_cstate *cstate,
const struct block *block, u16 offset,
jit_code_t code, jit_code_t swap_code, bool is_unsigned,
struct regcache *reg_cache = cstate->reg_cache;
struct opcode *op = &block->opcode_list[offset];
jit_state_t *_jit = block->_jit;
- u8 rs, rt, addr_reg, flags = REG_EXT;
+ u8 rs, rt, out_reg, addr_reg, flags = REG_EXT;
bool no_mask = op_flag_no_mask(op->flags);
union code c = op->c;
s16 imm;
- if (!c.i.rt)
+ if (c.i.op == OP_LWC2)
+ out_reg = REG_CP2_TEMP;
+ else if (c.i.rt)
+ out_reg = c.i.rt;
+ else
return;
if (is_unsigned)
flags |= REG_ZEXT;
rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
- rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
+ rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
if (!cstate->state->mirrors_mapped && c.i.imm && !no_mask) {
jit_addi(rt, rs, (s16)c.i.imm);
cstate->state->offset_scratch, 0x1fffffff);
}
+static void rec_load_io(struct lightrec_cstate *cstate,
+ const struct block *block, u16 offset,
+ jit_code_t code, jit_code_t swap_code, bool is_unsigned)
+{
+ _jit_note(block->_jit, __FILE__, __LINE__);
+
+ rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
+ cstate->state->offset_io, rec_io_mask(cstate->state));
+}
+
static void rec_load_direct(struct lightrec_cstate *cstate,
const struct block *block, u16 offset,
jit_code_t code, jit_code_t swap_code,
union code c = block->opcode_list[offset].c;
jit_state_t *_jit = block->_jit;
jit_node_t *to_not_ram, *to_not_bios, *to_end, *to_end2;
- u8 tmp, rs, rt, addr_reg, flags = REG_EXT;
+ u8 tmp, rs, rt, out_reg, addr_reg, flags = REG_EXT;
s16 imm;
- if (!c.i.rt)
+ if (c.i.op == OP_LWC2)
+ out_reg = REG_CP2_TEMP;
+ else if (c.i.rt)
+ out_reg = c.i.rt;
+ else
return;
if (is_unsigned)
jit_note(__FILE__, __LINE__);
rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
- rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
+ rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
if ((state->offset_ram == state->offset_bios &&
state->offset_ram == state->offset_scratch &&
u16 offset, jit_code_t code, jit_code_t swap_code,
bool is_unsigned)
{
- u32 flags = block->opcode_list[offset].flags;
+ const struct opcode *op = &block->opcode_list[offset];
+ u32 flags = op->flags;
switch (LIGHTREC_FLAGS_GET_IO_MODE(flags)) {
case LIGHTREC_IO_RAM:
case LIGHTREC_IO_SCRATCH:
rec_load_scratch(state, block, offset, code, swap_code, is_unsigned);
break;
+ case LIGHTREC_IO_DIRECT_HW:
+ rec_load_io(state, block, offset, code, swap_code, is_unsigned);
+ break;
case LIGHTREC_IO_DIRECT:
rec_load_direct(state, block, offset, code, swap_code, is_unsigned);
break;
default:
rec_io(state, block, offset, false, true);
- break;
+ return;
+ }
+
+ if (op->i.op == OP_LWC2) {
+ rec_cp2_do_mtc2(state, block, offset, op->i.rt, REG_CP2_TEMP);
+ lightrec_discard_reg_if_loaded(state->reg_cache, REG_CP2_TEMP);
}
}
/* Recompile LH: 16-bit signed load. */
static void rec_LH(struct lightrec_cstate *state, const struct block *block, u16 offset)
{
	jit_code_t code;

	/* On big-endian hosts the value is loaded unsigned, since a
	 * byte-swap (jit_code_bswapr_us) follows the load.
	 * NOTE(review): sign extension is presumably applied after the
	 * swap inside rec_load_memory() — confirm. */
	if (is_big_endian())
		code = jit_code_ldxi_us;
	else
		code = jit_code_ldxi_s;

	_jit_name(block->_jit, __func__);
	rec_load(state, block, offset, code, jit_code_bswapr_us, false);
}
static void rec_LHU(struct lightrec_cstate *state, const struct block *block, u16 offset)
/* Recompile LW, also used as the LWC2 handler: 32-bit load.
 * The opcode field distinguishes the two for the JIT disassembly name. */
static void rec_LW(struct lightrec_cstate *state, const struct block *block, u16 offset)
{
	union code c = block->opcode_list[offset].c;
	jit_code_t code = (is_big_endian() && __WORDSIZE == 64)
		? jit_code_ldxi_ui : jit_code_ldxi_i;

	_jit_name(block->_jit, c.i.op == OP_LWC2 ? "rec_LWC2" : "rec_LW");
	rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
}
static void rec_break_syscall(struct lightrec_cstate *state,
- const struct block *block, u16 offset, bool is_break)
+ const struct block *block, u16 offset,
+ u32 exit_code)
{
+ struct regcache *reg_cache = state->reg_cache;
+ jit_state_t *_jit = block->_jit;
+ u8 tmp;
+
_jit_note(block->_jit, __FILE__, __LINE__);
- if (is_break)
- call_to_c_wrapper(state, block, 0, false, C_WRAPPER_BREAK);
- else
- call_to_c_wrapper(state, block, 0, false, C_WRAPPER_SYSCALL);
+ tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
+
+ jit_movi(tmp, exit_code);
+ jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
+ LIGHTREC_REG_STATE, tmp);
+
+ lightrec_free_reg(reg_cache, tmp);
/* TODO: the return address should be "pc - 4" if we're a delay slot */
lightrec_emit_end_of_block(state, block, offset, -1,
const struct block *block, u16 offset)
{
_jit_name(block->_jit, __func__);
- rec_break_syscall(state, block, offset, false);
+ rec_break_syscall(state, block, offset, LIGHTREC_EXIT_SYSCALL);
}
/* Recompile BREAK: record the BREAK exit code and end the block. */
static void rec_special_BREAK(struct lightrec_cstate *state,
			      const struct block *block, u16 offset)
{
	_jit_name(block->_jit, __func__);
	rec_break_syscall(state, block, offset, LIGHTREC_EXIT_BREAK);
}
+
+static void rec_mfc(struct lightrec_cstate *state, const struct block *block, u16 offset)
+{
+ struct regcache *reg_cache = state->reg_cache;
+ union code c = block->opcode_list[offset].c;
+ jit_state_t *_jit = block->_jit;
+
+ jit_note(__FILE__, __LINE__);
+
+ if (c.i.op != OP_SWC2)
+ lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
+
+ call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MFC);
}
static void rec_mtc(struct lightrec_cstate *state, const struct block *block, u16 offset)
lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
- call_to_c_wrapper(state, block, c.opcode, true, C_WRAPPER_MTC);
+ call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MTC);
if (c.i.op == OP_CP0 &&
!op_flag_no_ds(block->opcode_list[offset].flags) &&
const union code c = block->opcode_list[offset].c;
jit_state_t *_jit = block->_jit;
u8 rt, tmp = 0, tmp2, status;
+ jit_node_t *to_end;
jit_note(__FILE__, __LINE__);
jit_orr(tmp, tmp, tmp2);
}
+ lightrec_free_reg(reg_cache, rt);
+
if (c.r.rd == 12 || c.r.rd == 13) {
- jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
- LIGHTREC_REG_STATE, tmp);
+ to_end = jit_beqi(tmp, 0);
- lightrec_free_reg(reg_cache, tmp);
- lightrec_free_reg(reg_cache, tmp2);
- }
+ jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
+ offsetof(struct lightrec_state, target_cycle));
+ jit_subr(tmp2, tmp2, LIGHTREC_REG_CYCLE);
+ jit_movi(LIGHTREC_REG_CYCLE, 0);
+ jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
+ LIGHTREC_REG_STATE, tmp2);
+ jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
+ LIGHTREC_REG_STATE, tmp2);
- lightrec_free_reg(reg_cache, rt);
+
+ jit_patch(to_end);
+ }
if (!op_flag_no_ds(block->opcode_list[offset].flags) &&
- (c.r.rd == 12 || c.r.rd == 13))
- lightrec_emit_eob(state, block, offset + 1, true);
+ (c.r.rd == 12 || c.r.rd == 13)) {
+ state->cycles += lightrec_cycles_of_opcode(c);
+ lightrec_emit_eob(state, block, offset + 1);
+ }
}
static void rec_cp0_MFC0(struct lightrec_cstate *state,
return cp2c_i_offset(reg) + is_big_endian() * 2;
}
-static void rec_cp2_basic_MFC2(struct lightrec_cstate *state,
- const struct block *block, u16 offset)
+static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
+ const struct block *block, u16 offset,
+ u8 reg, u8 out_reg)
{
struct regcache *reg_cache = state->reg_cache;
- const union code c = block->opcode_list[offset].c;
jit_state_t *_jit = block->_jit;
const u32 zext_regs = 0x300f0080;
u8 rt, tmp, tmp2, tmp3, out, flags;
- u8 reg = c.r.rd == 15 ? 14 : c.r.rd;
unsigned int i;
_jit_name(block->_jit, __func__);
+ if (state->state->ops.cop2_notify) {
+ /* We must call cop2_notify, handle that in C. */
+ rec_mfc(state, block, offset);
+ return;
+ }
+
flags = (zext_regs & BIT(reg)) ? REG_ZEXT : REG_EXT;
- rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, flags);
+ rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
+
+ if (reg == 15)
+ reg = 14;
switch (reg) {
case 1:
lightrec_free_reg(reg_cache, rt);
}
+static void rec_cp2_basic_MFC2(struct lightrec_cstate *state,
+ const struct block *block, u16 offset)
+{
+ const union code c = block->opcode_list[offset].c;
+
+ rec_cp2_do_mfc2(state, block, offset, c.r.rd, c.r.rt);
+}
+
static void rec_cp2_basic_CFC2(struct lightrec_cstate *state,
const struct block *block, u16 offset)
{
_jit_name(block->_jit, __func__);
+ if (state->state->ops.cop2_notify) {
+ /* We must call cop2_notify, handle that in C. */
+ rec_mfc(state, block, offset);
+ return;
+ }
+
switch (c.r.rd) {
case 4:
case 12:
break;
default:
rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_ZEXT);
- jit_ldxi_i(rt, LIGHTREC_REG_STATE, cp2c_i_offset(c.r.rd));
+ jit_ldxi_ui(rt, LIGHTREC_REG_STATE, cp2c_i_offset(c.r.rd));
break;
}
lightrec_free_reg(reg_cache, rt);
}
-static void rec_cp2_basic_MTC2(struct lightrec_cstate *state,
- const struct block *block, u16 offset)
+static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
+ const struct block *block, u16 offset,
+ u8 reg, u8 in_reg)
{
struct regcache *reg_cache = state->reg_cache;
- const union code c = block->opcode_list[offset].c;
jit_state_t *_jit = block->_jit;
jit_node_t *loop, *to_loop;
u8 rt, tmp, tmp2, flags = 0;
_jit_name(block->_jit, __func__);
- if (c.r.rd == 31)
+ if (state->state->ops.cop2_notify) {
+ /* We must call cop2_notify, handle that in C. */
+ rec_mtc(state, block, offset);
+ return;
+ }
+
+ if (reg == 31)
return;
- if (c.r.rd == 30)
+ if (reg == 30)
flags |= REG_EXT;
- rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, flags);
+ rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, flags);
- switch (c.r.rd) {
+ switch (reg) {
case 15:
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_ldxi_i(tmp, LIGHTREC_REG_STATE, cp2d_i_offset(13));
lightrec_free_reg(reg_cache, tmp2);
break;
default:
- jit_stxi_i(cp2d_i_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
+ jit_stxi_i(cp2d_i_offset(reg), LIGHTREC_REG_STATE, rt);
break;
}
lightrec_free_reg(reg_cache, rt);
}
+static void rec_cp2_basic_MTC2(struct lightrec_cstate *state,
+ const struct block *block, u16 offset)
+{
+ const union code c = block->opcode_list[offset].c;
+
+ rec_cp2_do_mtc2(state, block, offset, c.r.rd, c.r.rt);
+}
+
static void rec_cp2_basic_CTC2(struct lightrec_cstate *state,
const struct block *block, u16 offset)
{
_jit_name(block->_jit, __func__);
+ if (state->state->ops.cop2_notify) {
+ /* We must call cop2_notify, handle that in C. */
+ rec_mtc(state, block, offset);
+ return;
+ }
+
rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
switch (c.r.rd) {
jit_name(__func__);
jit_note(__FILE__, __LINE__);
- call_to_c_wrapper(state, block, c.opcode, true, C_WRAPPER_CP);
+ call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_CP);
}
/* Recompile the MOV meta-opcode: rd = rs, sign-extended to the host
 * register width. When the destination is flagged for early unload,
 * skip the host-register write entirely and store rs straight into the
 * register cache slot of rd. */
static void rec_meta_MOV(struct lightrec_cstate *state,
			 const struct block *block, u16 offset)
{
	struct regcache *reg_cache = state->reg_cache;
	const struct opcode *op = &block->opcode_list[offset];
	union code c = op->c;
	jit_state_t *_jit = block->_jit;
	bool unload_rd;
	u8 rs, rd;

	_jit_name(block->_jit, __func__);
	jit_note(__FILE__, __LINE__);

	unload_rd = OPT_EARLY_UNLOAD
		&& LIGHTREC_FLAGS_GET_RD(op->flags) == LIGHTREC_REG_UNLOAD;

	if (c.r.rs || unload_rd)
		rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0);

	if (unload_rd) {
		/* If the destination register will be unloaded right after the
		 * MOV meta-opcode, we don't actually need to write any host
		 * register - we can just store the source register directly to
		 * the register cache, at the offset corresponding to the
		 * destination register. */
		lightrec_discard_reg_if_loaded(reg_cache, c.r.rd);

		/* Fix: the shift must be parenthesized. '+' binds tighter
		 * than '<<', so 'offsetof(...) + c.r.rd << 2' evaluated as
		 * '(offsetof(...) + c.r.rd) << 2', storing to the wrong
		 * offset inside the state structure. */
		jit_stxi_i(offsetof(struct lightrec_state, regs.gpr)
			   + (c.r.rd << 2), LIGHTREC_REG_STATE, rs);
	} else {
		rd = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rd, REG_EXT);

		if (c.r.rs == 0)
			jit_movi(rd, 0);
		else
			jit_extr_i(rd, rs);

		lightrec_free_reg(reg_cache, rd);
	}

	/* rs was allocated iff this condition held above; free it once. */
	if (c.r.rs || unload_rd)
		lightrec_free_reg(reg_cache, rs);
}
static void rec_meta_EXTC_EXTS(struct lightrec_cstate *state,
lightrec_free_reg(reg_cache, rt);
}
+static void rec_meta_MULT2(struct lightrec_cstate *state,
+ const struct block *block,
+ u16 offset)
+{
+ struct regcache *reg_cache = state->reg_cache;
+ union code c = block->opcode_list[offset].c;
+ jit_state_t *_jit = block->_jit;
+ u8 reg_lo = get_mult_div_lo(c);
+ u8 reg_hi = get_mult_div_hi(c);
+ u32 flags = block->opcode_list[offset].flags;
+ bool is_signed = c.i.op == OP_META_MULT2;
+ u8 rs, lo, hi, rflags = 0, hiflags = 0;
+ unsigned int i;
+
+ if (!op_flag_no_hi(flags) && c.r.op < 32) {
+ rflags = is_signed ? REG_EXT : REG_ZEXT;
+ hiflags = is_signed ? REG_EXT : (REG_EXT | REG_ZEXT);
+ }
+
+ _jit_name(block->_jit, __func__);
+ jit_note(__FILE__, __LINE__);
+
+ rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, rflags);
+
+ /*
+ * We must handle the case where one of the output registers is our rs
+ * input register. Thanksfully, computing LO/HI can be done in any
+ * order. Here, we make sure that the computation that overwrites the
+ * input register is always performed last.
+ */
+ for (i = 0; i < 2; i++) {
+ if ((!i ^ (reg_lo == c.i.rs)) && !op_flag_no_lo(flags)) {
+ lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
+
+ if (c.r.op < 32)
+ jit_lshi(lo, rs, c.r.op);
+ else
+ jit_movi(lo, 0);
+
+ lightrec_free_reg(reg_cache, lo);
+ continue;
+ }
+
+ if ((!!i ^ (reg_lo == c.i.rs)) && !op_flag_no_hi(flags)) {
+ hi = lightrec_alloc_reg_out(reg_cache, _jit,
+ reg_hi, hiflags);
+
+ if (c.r.op >= 32)
+ jit_lshi(hi, rs, c.r.op - 32);
+ else if (is_signed)
+ jit_rshi(hi, rs, 32 - c.r.op);
+ else
+ jit_rshi_u(hi, rs, 32 - c.r.op);
+
+ lightrec_free_reg(reg_cache, hi);
+ }
+ }
+
+ lightrec_free_reg(reg_cache, rs);
+
+ _jit_name(block->_jit, __func__);
+ jit_note(__FILE__, __LINE__);
+}
+
static const lightrec_rec_func_t rec_standard[64] = {
SET_DEFAULT_ELM(rec_standard, unknown_opcode),
[OP_SPECIAL] = rec_SPECIAL,
[OP_SWL] = rec_SWL,
[OP_SW] = rec_SW,
[OP_SWR] = rec_SWR,
- [OP_LWC2] = rec_LWC2,
- [OP_SWC2] = rec_SWC2,
+ [OP_LWC2] = rec_LW,
+ [OP_SWC2] = rec_SW,
[OP_META_MOV] = rec_meta_MOV,
[OP_META_EXTC] = rec_meta_EXTC_EXTS,
[OP_META_EXTS] = rec_meta_EXTC_EXTS,
+ [OP_META_MULT2] = rec_meta_MULT2,
+ [OP_META_MULTU2] = rec_meta_MULT2,
};
static const lightrec_rec_func_t rec_special[64] = {