+ addr_reg2 = addr_reg;
+ }
+
+ rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
+
+ if (is_big_endian() && swap_code && in_reg) {
+ tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
+
+ jit_new_node_ww(swap_code, tmp3, rt);
+ jit_new_node_www(code, imm, addr_reg2, tmp3);
+
+ lightrec_free_reg(reg_cache, tmp3);
+ } else {
+ jit_new_node_www(code, imm, addr_reg2, rt);
+ }
+
+ lightrec_free_reg(reg_cache, rt);
+
+ if (invalidate) {
+ tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
+
+ if (c.i.op != OP_SW) {
+ jit_andi(tmp, addr_reg, ~3);
+ addr_reg = tmp;
+ }
+
+ if (!lut_is_32bit(state)) {
+ jit_lshi(tmp, addr_reg, 1);
+ addr_reg = tmp;
+ }
+
+ if (addr_reg == rs && c.i.rs == 0) {
+ addr_reg = LIGHTREC_REG_STATE;
+ } else {
+ jit_add_state(tmp, addr_reg);
+ addr_reg = tmp;
+ }
+
+ if (lut_is_32bit(state))
+ jit_stxi_i(lut_offt, addr_reg, tmp3);
+ else
+ jit_stxi(lut_offt, addr_reg, tmp3);
+
+ lightrec_free_reg(reg_cache, tmp3);
+ }
+
+ if (addr_offset)
+ lightrec_free_reg(reg_cache, tmp2);
+ if (need_tmp)
+ lightrec_free_reg(reg_cache, tmp);
+ lightrec_free_reg(reg_cache, rs);
+}
+
+/* Emit a store targeting main RAM.
+ *
+ * Thin wrapper around rec_store_memory(): supplies the RAM base offset
+ * and the RAM mirror mask from the lightrec state, and forwards the
+ * caller's 'invalidate' flag so stores to RAM can invalidate any
+ * compiled block covering the written address.
+ */
+static void rec_store_ram(struct lightrec_cstate *cstate,
+			  const struct block *block,
+			  u16 offset, jit_code_t code,
+			  jit_code_t swap_code, bool invalidate)
+{
+	struct lightrec_state *state = cstate->state;
+
+	_jit_note(block->_jit, __FILE__, __LINE__);
+
+	/* No 'return' here: returning a void expression from a void
+	 * function is a C constraint violation (C11 6.8.6.4p1). */
+	rec_store_memory(cstate, block, offset, code, swap_code,
+			 state->offset_ram, rec_ram_mask(state),
+			 invalidate);
+}
+
+/* Emit a store targeting the scratchpad.
+ *
+ * Thin wrapper around rec_store_memory(): supplies the scratchpad base
+ * offset and a fixed KUSEG-style address mask (0x1fffffff). Scratchpad
+ * writes never invalidate compiled code, hence 'invalidate' is false.
+ */
+static void rec_store_scratch(struct lightrec_cstate *cstate,
+			      const struct block *block, u16 offset,
+			      jit_code_t code, jit_code_t swap_code)
+{
+	_jit_note(block->_jit, __FILE__, __LINE__);
+
+	/* No 'return' here: returning a void expression from a void
+	 * function is a C constraint violation (C11 6.8.6.4p1). */
+	rec_store_memory(cstate, block, offset, code, swap_code,
+			 cstate->state->offset_scratch,
+			 0x1fffffff, false);
+}
+
+/* Emit a store targeting the I/O region.
+ *
+ * Thin wrapper around rec_store_memory(): supplies the I/O base offset
+ * and the I/O address mask from the lightrec state. I/O writes never
+ * invalidate compiled code, hence 'invalidate' is false.
+ */
+static void rec_store_io(struct lightrec_cstate *cstate,
+			 const struct block *block, u16 offset,
+			 jit_code_t code, jit_code_t swap_code)
+{
+	_jit_note(block->_jit, __FILE__, __LINE__);
+
+	/* No 'return' here: returning a void expression from a void
+	 * function is a C constraint violation (C11 6.8.6.4p1). */
+	rec_store_memory(cstate, block, offset, code, swap_code,
+			 cstate->state->offset_io,
+			 rec_io_mask(cstate->state), false);
+}
+
+static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate,
+ const struct block *block,
+ u16 offset, jit_code_t code,
+ jit_code_t swap_code)
+{
+ struct lightrec_state *state = cstate->state;
+ struct regcache *reg_cache = cstate->reg_cache;
+ union code c = block->opcode_list[offset].c;
+ jit_state_t *_jit = block->_jit;
+ jit_node_t *to_not_ram, *to_end;
+ bool swc2 = c.i.op == OP_SWC2;
+ u8 tmp, tmp2 = 0, rs, rt, in_reg = swc2 ? REG_TEMP : c.i.rt;
+ u32 addr_mask;
+ s32 reg_imm;
+ s16 imm;
+
+ jit_note(__FILE__, __LINE__);
+ rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
+ tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
+
+ if (state->mirrors_mapped)
+ addr_mask = 0x1f800000 | (4 * RAM_SIZE - 1);
+ else
+ addr_mask = 0x1f800000 | (RAM_SIZE - 1);
+
+ reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
+
+ /* Convert to KUNSEG and avoid RAM mirrors */
+ if (!state->mirrors_mapped && c.i.imm) {