#include <stdbool.h>
#include <stddef.h>
+#define LIGHTNING_UNALIGNED_32BIT 4
+
typedef void (*lightrec_rec_func_t)(struct lightrec_cstate *, const struct block *, u16);
/* Forward declarations */
u8 reg_hi = get_mult_div_hi(c);
jit_state_t *_jit = block->_jit;
u8 lo, hi, rs, rt, rflags = 0;
+ bool no_lo = op_flag_no_lo(flags);
+ bool no_hi = op_flag_no_hi(flags);
jit_note(__FILE__, __LINE__);
rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
- if (!op_flag_no_lo(flags))
+ if (!no_lo)
lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
- else if (__WORDSIZE == 32)
- lo = lightrec_alloc_reg_temp(reg_cache, _jit);
- if (!op_flag_no_hi(flags))
+ if (!no_hi)
hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, REG_EXT);
if (__WORDSIZE == 32) {
 /* On 32-bit systems, do a 32*32->64 bit operation, or a 32*32->32 bit
  * operation (computing only the needed half) if the MULT was detected
  * as 32-bit only. */
- if (!op_flag_no_hi(flags)) {
+ if (no_lo) {
if (is_signed)
- jit_qmulr(lo, hi, rs, rt);
+ jit_hmulr(hi, rs, rt);
else
- jit_qmulr_u(lo, hi, rs, rt);
- } else {
+ jit_hmulr_u(hi, rs, rt);
+ } else if (no_hi) {
jit_mulr(lo, rs, rt);
+ } else if (is_signed) {
+ jit_qmulr(lo, hi, rs, rt);
+ } else {
+ jit_qmulr_u(lo, hi, rs, rt);
}
} else {
/* On 64-bit systems, do a 64*64->64 bit operation. */
- if (op_flag_no_lo(flags)) {
+ if (no_lo) {
jit_mulr(hi, rs, rt);
jit_rshi(hi, hi, 32);
} else {
jit_mulr(lo, rs, rt);
 /* The 64-bit output value is in $lo; store the upper 32 bits in $hi */
- if (!op_flag_no_hi(flags))
+ if (!no_hi)
jit_rshi(hi, lo, 32);
}
}
lightrec_free_reg(reg_cache, rs);
lightrec_free_reg(reg_cache, rt);
- if (!op_flag_no_lo(flags) || __WORDSIZE == 32)
+ if (!no_lo)
lightrec_free_reg(reg_cache, lo);
- if (!op_flag_no_hi(flags))
+ if (!no_hi)
lightrec_free_reg(reg_cache, hi);
}
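
/*
 * Illustration (not part of the patch): the MULT/MULTU semantics the emitter
 * above implements, written as plain C. The no_lo/no_hi flags mark MULTs
 * whose HI or LO result is never read, which is what lets the emitter pick
 * the cheaper single-output forms. The helper name is made up for this
 * sketch.
 */
static inline void mult_semantics(u32 rs, u32 rt, u32 *lo, u32 *hi,
				  bool is_signed)
{
	u64 res;

	if (is_signed)
		res = (u64)((s64)(s32)rs * (s32)rt);
	else
		res = (u64)rs * rt;

	*lo = (u32)res;		/* low word: jit_mulr / jit_qmulr */
	*hi = (u32)(res >> 32);	/* high word: jit_hmulr / jit_qmulr */
}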
s32 lut_offt = offsetof(struct lightrec_state, code_lut);
bool no_mask = op_flag_no_mask(op->flags);
bool add_imm = c.i.imm &&
- ((!state->mirrors_mapped && !no_mask) || (invalidate &&
+ (c.i.op == OP_META_SWU
+ || (!state->mirrors_mapped && !no_mask) || (invalidate &&
((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt))));
bool need_tmp = !no_mask || add_imm || invalidate;
bool swc2 = c.i.op == OP_SWC2;
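 /* Note: jit_unstr() takes (base, value, length) and has no displacement
  * operand, so an OP_META_SWU with a nonzero immediate must fold the offset
  * into the address register up front; that is why OP_META_SWU forces
  * add_imm above. */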
tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp3, rt);
- jit_new_node_www(code, imm, addr_reg2, tmp3);
+
+ if (c.i.op == OP_META_SWU)
+ jit_unstr(addr_reg2, tmp3, LIGHTNING_UNALIGNED_32BIT);
+ else
+ jit_new_node_www(code, imm, addr_reg2, tmp3);
lightrec_free_reg(reg_cache, tmp3);
+ } else if (c.i.op == OP_META_SWU) {
+ jit_unstr(addr_reg2, rt, LIGHTNING_UNALIGNED_32BIT);
} else {
jit_new_node_www(code, imm, addr_reg2, rt);
}
reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
/* Convert to KUNSEG and avoid RAM mirrors */
- if (!state->mirrors_mapped && c.i.imm) {
+ if ((c.i.op == OP_META_SWU || !state->mirrors_mapped) && c.i.imm) {
imm = 0;
jit_addi(tmp, rs, (s16)c.i.imm);
jit_andr(tmp, tmp, reg_imm);
tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp2, rt);
- jit_new_node_www(code, imm, tmp, tmp2);
+
+ if (c.i.op == OP_META_SWU)
+ jit_unstr(tmp, tmp2, LIGHTNING_UNALIGNED_32BIT);
+ else
+ jit_new_node_www(code, imm, tmp, tmp2);
lightrec_free_reg(reg_cache, tmp2);
+ } else if (c.i.op == OP_META_SWU) {
+ jit_unstr(tmp, rt, LIGHTNING_UNALIGNED_32BIT);
} else {
jit_new_node_www(code, imm, tmp, rt);
}
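
/*
 * Illustration (not part of the patch): "convert to KUNSEG" strips the
 * KSEG0/KSEG1 segment bits, so e.g. 0x80001234 and 0xa0001234 both resolve
 * to physical 0x00001234; the AND against the address mask then collapses
 * the RAM mirrors. A plain-C sketch (helper name made up):
 */
static inline u32 to_kunseg(u32 addr)
{
	if (addr >= 0xa0000000)		/* KSEG1 */
		return addr - 0xa0000000;

	return addr & ~0x80000000;	/* KUSEG / KSEG0 */
}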
else
jit_stxi(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
+ if (c.i.op == OP_META_SWU) {
+ /* With a SWU opcode, we might have touched the following 32-bit
+ * word, so invalidate it as well */
+ if (lut_is_32bit(state)) {
+ jit_stxi_i(offsetof(struct lightrec_state, code_lut) + 4,
+ tmp, tmp3);
+ } else {
+ jit_stxi(offsetof(struct lightrec_state, code_lut)
+ + sizeof(uintptr_t), tmp, tmp3);
+ }
+ }
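
/*
 * Illustration (not part of the patch): the four bytes addr..addr+3 of an
 * unaligned 32-bit store land in a second aligned word whenever addr is
 * misaligned, e.g. addr = 0x1ffffe dirties both 0x1ffffc and 0x200000.
 * The emitted code clears both LUT entries unconditionally, which is
 * cheaper than a runtime alignment check.
 */
static inline bool swu_spans_two_words(u32 addr)
{
	return ((addr + 3) & ~0x3) != (addr & ~0x3);
}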
+
if (different_offsets) {
jit_movi(tmp, state->offset_ram);
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
jit_new_node_ww(swap_code, tmp, rt);
- jit_new_node_www(code, 0, tmp2, tmp);
+
+ if (c.i.op == OP_META_SWU)
+ jit_unstr(tmp2, tmp, LIGHTNING_UNALIGNED_32BIT);
+ else
+ jit_new_node_www(code, 0, tmp2, tmp);
lightrec_free_reg(reg_cache, tmp);
+ } else if (c.i.op == OP_META_SWU) {
+ jit_unstr(tmp2, rt, LIGHTNING_UNALIGNED_32BIT);
} else {
jit_new_node_www(code, 0, tmp2, rt);
}
rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
- if (!cstate->state->mirrors_mapped && c.i.imm && !no_mask) {
+ if ((op->i.op == OP_META_LWU && c.i.imm)
+ || (!cstate->state->mirrors_mapped && c.i.imm && !no_mask)) {
jit_addi(rt, rs, (s16)c.i.imm);
addr_reg = rt;
imm = 0;
imm = (s16)c.i.imm;
}
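+	/* For OP_META_LWU the "imm" operand is reused to pass the access size
+	 * to the unaligned load (LIGHTNING_UNALIGNED_32BIT == 4 bytes), which
+	 * is why any real offset was added into the address register above. */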
+ if (op->i.op == OP_META_LWU)
+ imm = LIGHTNING_UNALIGNED_32BIT;
+
if (!no_mask) {
reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
addr_mask);
if ((state->offset_ram == state->offset_bios &&
state->offset_ram == state->offset_scratch &&
- state->mirrors_mapped) || !c.i.imm) {
+ state->mirrors_mapped && c.i.op != OP_META_LWU)
+ || !c.i.imm) {
addr_reg = rs;
imm = (s16)c.i.imm;
} else {
lightrec_free_reg(reg_cache, rs);
}
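+	/* As above: for OP_META_LWU, imm doubles as the unaligned access
+	 * size rather than an addressing offset. */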
+ if (op->i.op == OP_META_LWU)
+ imm = LIGHTNING_UNALIGNED_32BIT;
+
tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
if (state->offset_ram == state->offset_bios &&
lightrec_free_reg(reg_cache, rd);
}
+static void rec_meta_LWU(struct lightrec_cstate *state,
+ const struct block *block,
+ u16 offset)
+{
+ jit_code_t code;
+
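+	/* Assumption about GNU Lightning semantics: on big-endian 64-bit
+	 * hosts, pick the zero-extending unaligned load so the 32-bit byte
+	 * swap applied by rec_load() operates on a zero-extended value. */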
+ if (is_big_endian() && __WORDSIZE == 64)
+ code = jit_code_unldr_u;
+ else
+ code = jit_code_unldr;
+
+ _jit_name(block->_jit, __func__);
+ rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
+}
+
+static void rec_meta_SWU(struct lightrec_cstate *state,
+ const struct block *block,
+ u16 offset)
+{
+ _jit_name(block->_jit, __func__);
+ rec_store(state, block, offset, jit_code_unstr, jit_code_bswapr_ui);
+}
+
static void unknown_opcode(struct lightrec_cstate *state,
const struct block *block, u16 offset)
{
[OP_META] = rec_META,
[OP_META_MULT2] = rec_meta_MULT2,
[OP_META_MULTU2] = rec_meta_MULT2,
+ [OP_META_LWU] = rec_meta_LWU,
+ [OP_META_SWU] = rec_meta_SWU,
};
static const lightrec_rec_func_t rec_special[64] = {
return LE32TOH(*(u32 *)host);
}
+static u32 lightrec_default_lwu(struct lightrec_state *state,
+ u32 opcode, void *host, u32 addr)
+{
+ u32 val;
+
+ memcpy(&val, host, 4);
+
+ return LE32TOH(val);
+}
+
+static void lightrec_default_swu(struct lightrec_state *state, u32 opcode,
+ void *host, u32 addr, u32 data)
+{
+ data = HTOLE32(data);
+
+ memcpy(host, &data, 4);
+
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
+ lightrec_invalidate(state, addr & ~0x3, 8);
+}
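+
+/* The fixed-size memcpy() in the two helpers above is the portable C idiom
+ * for an unaligned 32-bit access; compilers typically lower it to a single
+ * unaligned load/store where the target allows it. The 8-byte invalidation
+ * from the aligned base covers both words the store may have touched. */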
+
static const struct lightrec_mem_map_ops lightrec_default_ops = {
.sb = lightrec_default_sb,
.sh = lightrec_default_sh,
.lb = lightrec_default_lb,
.lh = lightrec_default_lh,
.lw = lightrec_default_lw,
+ .lwu = lightrec_default_lwu,
+ .swu = lightrec_default_swu,
};
static void __segfault_cb(struct lightrec_state *state, u32 addr,
return lightrec_lwl(state, ops, opcode, host, addr, data);
case OP_LWR:
return lightrec_lwr(state, ops, opcode, host, addr, data);
+ case OP_META_LWU:
+ return ops->lwu(state, opcode, host, addr);
+ case OP_META_SWU:
+ ops->swu(state, opcode, host, addr, data);
+ return 0;
case OP_LW:
default:
return ops->lw(state, opcode, host, addr);
case OP_LWL:
case OP_LWR:
case OP_LW:
+ case OP_META_LWU:
if (OPT_HANDLE_LOAD_DELAYS && unlikely(!state->in_delay_slot_n)) {
state->temp_reg = ret;
state->in_delay_slot_n = 0xff;
case OP_SWR:
case OP_LWC2:
case OP_SWC2:
+ case OP_META_LWU:
+ case OP_META_SWU:
if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
return false;
fallthrough;