+ if (addr_offset) {
+ tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
+ jit_addi(tmp2, addr_reg, addr_offset);
+ addr_reg2 = tmp2;
+ } else {
+ addr_reg2 = addr_reg;
+ }
+
+ if (is_big_endian() && swap_code && in_reg) {
+ tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
+
+ jit_new_node_ww(swap_code, tmp3, rt);
+ jit_new_node_www(code, imm, addr_reg2, tmp3);
+
+ lightrec_free_reg(reg_cache, tmp3);
+ } else {
+ jit_new_node_www(code, imm, addr_reg2, rt);
+ }
+
+ lightrec_free_reg(reg_cache, rt);
+
+ if (invalidate) {
+ tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
+
+ if (c.i.op != OP_SW) {
+ jit_andi(tmp, addr_reg, ~3);
+ addr_reg = tmp;
+ }
+
+ if (!lut_is_32bit(state)) {
+ jit_lshi(tmp, addr_reg, 1);
+ addr_reg = tmp;
+ }
+
+ if (addr_reg == rs && c.i.rs == 0) {
+ addr_reg = LIGHTREC_REG_STATE;
+ } else {
+ jit_add_state(tmp, addr_reg);
+ addr_reg = tmp;
+ }
+
+ if (lut_is_32bit(state))
+ jit_stxi_i(lut_offt, addr_reg, tmp3);
+ else
+ jit_stxi(lut_offt, addr_reg, tmp3);
+
+ lightrec_free_reg(reg_cache, tmp3);
+ }
+
+ if (addr_offset)
+ lightrec_free_reg(reg_cache, tmp2);
+ if (need_tmp)
+ lightrec_free_reg(reg_cache, tmp);
+ lightrec_free_reg(reg_cache, rs);
+}
+
+/* Emit a store that targets main RAM: forwards to rec_store_memory()
+ * with the RAM base offset and RAM mirror mask, optionally invalidating
+ * the code LUT entry covering the written address. */
+static void rec_store_ram(struct lightrec_cstate *cstate,
+			  const struct block *block,
+			  u16 offset, jit_code_t code,
+			  jit_code_t swap_code, bool invalidate)
+{
+	struct lightrec_state *state = cstate->state;
+
+	_jit_note(block->_jit, __FILE__, __LINE__);
+
+	/* Plain call, not "return <void expr>;" — a return statement with
+	 * an expression is invalid in a void function (C11 6.8.6.4p1). */
+	rec_store_memory(cstate, block, offset, code, swap_code,
+			 state->offset_ram, rec_ram_mask(state),
+			 invalidate);
+}
+
+/* Emit a store that targets the scratchpad: forwards to rec_store_memory()
+ * with the scratchpad base offset and a KUSEG address mask. Scratchpad
+ * never holds recompiled code, so no invalidation is requested. */
+static void rec_store_scratch(struct lightrec_cstate *cstate,
+			      const struct block *block, u16 offset,
+			      jit_code_t code, jit_code_t swap_code)
+{
+	_jit_note(block->_jit, __FILE__, __LINE__);
+
+	/* Plain call, not "return <void expr>;" — a return statement with
+	 * an expression is invalid in a void function (C11 6.8.6.4p1). */
+	rec_store_memory(cstate, block, offset, code, swap_code,
+			 cstate->state->offset_scratch,
+			 0x1fffffff, false);
+}
+
+/* Emit a store that targets the I/O region: forwards to rec_store_memory()
+ * with the I/O base offset and I/O address mask. I/O space holds no
+ * recompiled code, so no invalidation is requested. */
+static void rec_store_io(struct lightrec_cstate *cstate,
+			 const struct block *block, u16 offset,
+			 jit_code_t code, jit_code_t swap_code)
+{
+	_jit_note(block->_jit, __FILE__, __LINE__);
+
+	/* Plain call, not "return <void expr>;" — a return statement with
+	 * an expression is invalid in a void function (C11 6.8.6.4p1). */
+	rec_store_memory(cstate, block, offset, code, swap_code,
+			 cstate->state->offset_io,
+			 rec_io_mask(cstate->state), false);
+}
+
+static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate,
+ const struct block *block,
+ u16 offset, jit_code_t code,
+ jit_code_t swap_code)
+{
+ struct lightrec_state *state = cstate->state;
+ struct regcache *reg_cache = cstate->reg_cache;
+ union code c = block->opcode_list[offset].c;
+ jit_state_t *_jit = block->_jit;
+ jit_node_t *to_not_ram, *to_end;
+ bool swc2 = c.i.op == OP_SWC2;
+ bool offset_ram_or_scratch = state->offset_ram || state->offset_scratch;
+ u8 tmp, tmp2, rs, rt, in_reg = swc2 ? REG_TEMP : c.i.rt;
+ s16 imm;
+
+ jit_note(__FILE__, __LINE__);
+ rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
+ tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
+
+ if (offset_ram_or_scratch)
+ tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
+
+ /* Convert to KUNSEG and avoid RAM mirrors */
+ if (state->mirrors_mapped) {
+ imm = (s16)c.i.imm;
+ jit_andi(tmp, rs, 0x1f800000 | (4 * RAM_SIZE - 1));
+ } else if (c.i.imm) {
+ imm = 0;
+ jit_addi(tmp, rs, (s16)c.i.imm);
+ jit_andi(tmp, tmp, 0x1f800000 | (RAM_SIZE - 1));
+ } else {
+ imm = 0;
+ jit_andi(tmp, rs, 0x1f800000 | (RAM_SIZE - 1));
+ }
+
+ lightrec_free_reg(reg_cache, rs);
+
+ if (state->offset_ram != state->offset_scratch) {
+ to_not_ram = jit_bmsi(tmp, BIT(28));
+
+ jit_movi(tmp2, state->offset_ram);
+
+ to_end = jit_b();
+ jit_patch(to_not_ram);
+
+ jit_movi(tmp2, state->offset_scratch);
+ jit_patch(to_end);
+ } else if (state->offset_ram) {
+ jit_movi(tmp2, state->offset_ram);
+ }
+
+ if (offset_ram_or_scratch) {