#include <stdbool.h>
#include <stddef.h>
+#define LIGHTNING_UNALIGNED_32BIT 4
+
typedef void (*lightrec_rec_func_t)(struct lightrec_cstate *, const struct block *, u16);
/* Forward declarations */
const struct opcode *op = &block->opcode_list[offset],
*ds = get_delay_slot(block->opcode_list, offset);
u32 cycles = state->cycles + lightrec_cycles_of_opcode(state->state, op->c);
+ bool has_ds = has_delay_slot(op->c);
jit_note(__FILE__, __LINE__);
update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
}
- if (has_delay_slot(op->c) &&
- !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) {
+ if (has_ds && !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) {
cycles += lightrec_cycles_of_opcode(state->state, ds->c);
/* Recompile the delay slot */
pr_debug("EOB: %u cycles\n", cycles);
}
- if (op_flag_load_delay(ds->flags)
+ if (has_ds && op_flag_load_delay(ds->flags)
&& opcode_is_load(ds->c) && !state->no_load_delay) {
/* If the delay slot is a load opcode, its target register
* will be written after the first opcode of the target is
u8 reg_hi = get_mult_div_hi(c);
jit_state_t *_jit = block->_jit;
u8 lo, hi, rs, rt, rflags = 0;
+ bool no_lo = op_flag_no_lo(flags);
+ bool no_hi = op_flag_no_hi(flags);
jit_note(__FILE__, __LINE__);
rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
- if (!op_flag_no_lo(flags))
+ if (!no_lo)
lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
- else if (__WORDSIZE == 32)
- lo = lightrec_alloc_reg_temp(reg_cache, _jit);
- if (!op_flag_no_hi(flags))
+ if (!no_hi)
hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, REG_EXT);
if (__WORDSIZE == 32) {
/* On 32-bit systems, do a 32*32->64 bit operation, or a 32*32->32 bit
* operation if the MULT was detected a 32-bit only. */
- if (!op_flag_no_hi(flags)) {
+ if (no_lo) {
if (is_signed)
- jit_qmulr(lo, hi, rs, rt);
+ jit_hmulr(hi, rs, rt);
else
- jit_qmulr_u(lo, hi, rs, rt);
- } else {
+ jit_hmulr_u(hi, rs, rt);
+ } else if (no_hi) {
jit_mulr(lo, rs, rt);
+ } else if (is_signed) {
+ jit_qmulr(lo, hi, rs, rt);
+ } else {
+ jit_qmulr_u(lo, hi, rs, rt);
}
} else {
/* On 64-bit systems, do a 64*64->64 bit operation. */
- if (op_flag_no_lo(flags)) {
+ if (no_lo) {
jit_mulr(hi, rs, rt);
jit_rshi(hi, hi, 32);
} else {
jit_mulr(lo, rs, rt);
/* The 64-bit output value is in $lo, store the upper 32 bits in $hi */
- if (!op_flag_no_hi(flags))
+ if (!no_hi)
jit_rshi(hi, lo, 32);
}
}
lightrec_free_reg(reg_cache, rs);
lightrec_free_reg(reg_cache, rt);
- if (!op_flag_no_lo(flags) || __WORDSIZE == 32)
+ if (!no_lo)
lightrec_free_reg(reg_cache, lo);
- if (!op_flag_no_hi(flags))
+ if (!no_hi)
lightrec_free_reg(reg_cache, hi);
}
}
}
-static u32 rec_ram_mask(struct lightrec_state *state)
+/* Return the address mask that wraps a KUNSEG address into emulated RAM.
+ * When RAM mirrors are mapped (state->mirrors_mapped), the window covers
+ * the mirrored region as well (RAM_SIZE << 2); read-only, hence const. */
+static u32 rec_ram_mask(const struct lightrec_state *state)
{
	return (RAM_SIZE << (state->mirrors_mapped * 2)) - 1;
}
struct opcode *op = &block->opcode_list[offset];
jit_state_t *_jit = block->_jit;
union code c = op->c;
- u8 rs, rt, tmp, tmp2, tmp3, addr_reg, addr_reg2;
+ u8 rs, rt, tmp = 0, tmp2 = 0, tmp3, addr_reg, addr_reg2;
s16 imm = (s16)c.i.imm;
s32 simm = (s32)imm << (1 - lut_is_32bit(state));
s32 lut_offt = offsetof(struct lightrec_state, code_lut);
bool no_mask = op_flag_no_mask(op->flags);
bool add_imm = c.i.imm &&
- ((!state->mirrors_mapped && !no_mask) || (invalidate &&
+ (c.i.op == OP_META_SWU
+ || (!state->mirrors_mapped && !no_mask) || (invalidate &&
((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt))));
bool need_tmp = !no_mask || add_imm || invalidate;
bool swc2 = c.i.op == OP_SWC2;
tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp3, rt);
- jit_new_node_www(code, imm, addr_reg2, tmp3);
+
+ if (c.i.op == OP_META_SWU)
+ jit_unstr(addr_reg2, tmp3, LIGHTNING_UNALIGNED_32BIT);
+ else
+ jit_new_node_www(code, imm, addr_reg2, tmp3);
lightrec_free_reg(reg_cache, tmp3);
+ } else if (c.i.op == OP_META_SWU) {
+ jit_unstr(addr_reg2, rt, LIGHTNING_UNALIGNED_32BIT);
} else {
jit_new_node_www(code, imm, addr_reg2, rt);
}
u16 offset, jit_code_t code,
jit_code_t swap_code, bool invalidate)
{
- struct lightrec_state *state = cstate->state;
+ const struct lightrec_state *state = cstate->state;
_jit_note(block->_jit, __FILE__, __LINE__);
u16 offset, jit_code_t code,
jit_code_t swap_code)
{
- struct lightrec_state *state = cstate->state;
+ const struct lightrec_state *state = cstate->state;
struct regcache *reg_cache = cstate->reg_cache;
union code c = block->opcode_list[offset].c;
jit_state_t *_jit = block->_jit;
reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
/* Convert to KUNSEG and avoid RAM mirrors */
- if (!state->mirrors_mapped && c.i.imm) {
+ if ((c.i.op == OP_META_SWU || !state->mirrors_mapped) && c.i.imm) {
imm = 0;
jit_addi(tmp, rs, (s16)c.i.imm);
jit_andr(tmp, tmp, reg_imm);
tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp2, rt);
- jit_new_node_www(code, imm, tmp, tmp2);
+
+ if (c.i.op == OP_META_SWU)
+ jit_unstr(tmp, tmp2, LIGHTNING_UNALIGNED_32BIT);
+ else
+ jit_new_node_www(code, imm, tmp, tmp2);
lightrec_free_reg(reg_cache, tmp2);
+ } else if (c.i.op == OP_META_SWU) {
+ jit_unstr(tmp, rt, LIGHTNING_UNALIGNED_32BIT);
} else {
jit_new_node_www(code, imm, tmp, rt);
}
static void rec_store_direct(struct lightrec_cstate *cstate, const struct block *block,
u16 offset, jit_code_t code, jit_code_t swap_code)
{
- struct lightrec_state *state = cstate->state;
+ const struct lightrec_state *state = cstate->state;
u32 ram_size = state->mirrors_mapped ? RAM_SIZE * 4 : RAM_SIZE;
struct regcache *reg_cache = cstate->reg_cache;
union code c = block->opcode_list[offset].c;
u8 tmp, tmp2, tmp3, masked_reg, rs, rt;
u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
u32 addr_mask = 0x1f800000 | (ram_size - 1);
+ bool different_offsets = state->offset_ram != state->offset_scratch;
s32 reg_imm;
jit_note(__FILE__, __LINE__);
lightrec_free_reg(reg_cache, reg_imm);
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
- if (state->offset_ram != state->offset_scratch) {
+ if (different_offsets) {
to_not_ram = jit_bgti(tmp2, ram_size);
masked_reg = tmp2;
} else {
else
jit_stxi(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
- if (state->offset_ram != state->offset_scratch) {
+ if (c.i.op == OP_META_SWU) {
+ /* With a SWU opcode, we might have touched the following 32-bit
+ * word, so invalidate it as well */
+ if (lut_is_32bit(state)) {
+ jit_stxi_i(offsetof(struct lightrec_state, code_lut) + 4,
+ tmp, tmp3);
+ } else {
+ jit_stxi(offsetof(struct lightrec_state, code_lut)
+ + sizeof(uintptr_t), tmp, tmp3);
+ }
+ }
+
+ if (different_offsets) {
jit_movi(tmp, state->offset_ram);
to_end = jit_b();
if (state->offset_ram || state->offset_scratch)
jit_movi(tmp, state->offset_scratch);
- if (state->offset_ram != state->offset_scratch)
+ if (different_offsets)
jit_patch(to_end);
if (state->offset_ram || state->offset_scratch)
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp, rt);
- jit_new_node_www(code, 0, tmp2, tmp);
+
+ if (c.i.op == OP_META_SWU)
+ jit_unstr(tmp2, tmp, LIGHTNING_UNALIGNED_32BIT);
+ else
+ jit_new_node_www(code, 0, tmp2, tmp);
lightrec_free_reg(reg_cache, tmp);
+ } else if (c.i.op == OP_META_SWU) {
+ jit_unstr(tmp2, rt, LIGHTNING_UNALIGNED_32BIT);
} else {
jit_new_node_www(code, 0, tmp2, rt);
}
rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
- if (!cstate->state->mirrors_mapped && c.i.imm && !no_mask) {
+ if ((op->i.op == OP_META_LWU && c.i.imm)
+ || (!cstate->state->mirrors_mapped && c.i.imm && !no_mask)) {
jit_addi(rt, rs, (s16)c.i.imm);
addr_reg = rt;
imm = 0;
imm = (s16)c.i.imm;
}
+ if (op->i.op == OP_META_LWU)
+ imm = LIGHTNING_UNALIGNED_32BIT;
+
if (!no_mask) {
reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
addr_mask);
jit_code_t code, jit_code_t swap_code,
bool is_unsigned)
{
- struct lightrec_state *state = cstate->state;
+ const struct lightrec_state *state = cstate->state;
struct regcache *reg_cache = cstate->reg_cache;
struct opcode *op = &block->opcode_list[offset];
bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
jit_state_t *_jit = block->_jit;
jit_node_t *to_not_ram, *to_not_bios, *to_end, *to_end2;
u8 tmp, rs, rt, out_reg, addr_reg, flags = REG_EXT;
+ bool different_offsets = state->offset_bios != state->offset_scratch;
union code c = op->c;
s32 addr_mask;
u32 reg_imm;
if ((state->offset_ram == state->offset_bios &&
state->offset_ram == state->offset_scratch &&
- state->mirrors_mapped) || !c.i.imm) {
+ state->mirrors_mapped && c.i.op != OP_META_LWU)
+ || !c.i.imm) {
addr_reg = rs;
imm = (s16)c.i.imm;
} else {
lightrec_free_reg(reg_cache, rs);
}
+ if (op->i.op == OP_META_LWU)
+ imm = LIGHTNING_UNALIGNED_32BIT;
+
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
if (state->offset_ram == state->offset_bios &&
jit_patch(to_not_ram);
- if (state->offset_bios != state->offset_scratch)
+ if (different_offsets)
to_not_bios = jit_bmci(addr_reg, BIT(22));
/* Convert to KUNSEG */
jit_movi(tmp, state->offset_bios);
- if (state->offset_bios != state->offset_scratch) {
+ if (different_offsets) {
to_end2 = jit_b();
jit_patch(to_not_bios);
hi = lightrec_alloc_reg_out(reg_cache, _jit,
reg_hi, hiflags);
- if (c.r.op >= 32)
+ if (c.r.op >= 32) {
jit_lshi(hi, rs, c.r.op - 32);
- else if (is_signed)
- jit_rshi(hi, rs, 32 - c.r.op);
- else
- jit_rshi_u(hi, rs, 32 - c.r.op);
+ } else if (is_signed) {
+ if (c.r.op)
+ jit_rshi(hi, rs, 32 - c.r.op);
+ else
+ jit_rshi(hi, rs, 31);
+ } else {
+ if (c.r.op)
+ jit_rshi_u(hi, rs, 32 - c.r.op);
+ else
+ jit_movi(hi, 0);
+ }
lightrec_free_reg(reg_cache, hi);
}
lightrec_free_reg(reg_cache, rd);
}
+/* Emit native code for the META_LWU (unaligned 32-bit load) meta-opcode.
+ * Selects the unaligned-load JIT primitive to hand to rec_load(): on
+ * big-endian 64-bit hosts the unsigned variant (unldr_u) is used —
+ * presumably so the following byte swap (bswapr_ui) operates on the
+ * value held in the low 32 bits; otherwise plain unldr suffices. */
+static void rec_meta_LWU(struct lightrec_cstate *state,
+			 const struct block *block,
+			 u16 offset)
+{
+	jit_code_t code;
+
+	if (is_big_endian() && __WORDSIZE == 64)
+		code = jit_code_unldr_u;
+	else
+		code = jit_code_unldr;
+
+	_jit_name(block->_jit, __func__);
+	rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
+}
+
+/* Emit native code for the META_SWU (unaligned 32-bit store) meta-opcode.
+ * Thin wrapper: delegates to rec_store() with the unaligned-store JIT
+ * primitive (unstr) and the 32-bit byte-swap code for byte-swapped maps. */
+static void rec_meta_SWU(struct lightrec_cstate *state,
+			 const struct block *block,
+			 u16 offset)
+{
+	_jit_name(block->_jit, __func__);
+	rec_store(state, block, offset, jit_code_unstr, jit_code_bswapr_ui);
+}
+
static void unknown_opcode(struct lightrec_cstate *state,
const struct block *block, u16 offset)
{
[OP_META] = rec_META,
[OP_META_MULT2] = rec_meta_MULT2,
[OP_META_MULTU2] = rec_meta_MULT2,
+ [OP_META_LWU] = rec_meta_LWU,
+ [OP_META_SWU] = rec_meta_SWU,
};
static const lightrec_rec_func_t rec_special[64] = {