+
+ lightrec_discard_reg_if_loaded(cache, reg_out);
+
+ nreg = lightning_reg_to_lightrec(cache, jit_reg);
+ clean_reg(_jit, nreg, jit_reg, !discard);
+
+ nreg->output = true;
+ nreg->emulated_register = reg_out;
+ nreg->extend = nreg->extended;
+ nreg->zero_extend = nreg->zero_extended;
+}
+
+static bool reg_pc_is_mapped(struct regcache *cache)
+{
+ struct native_register *nreg = lightning_reg_to_lightrec(cache, JIT_V0);
+
+ return nreg->prio == REG_IS_LOADED && nreg->emulated_register == REG_PC;
+}
+
+void lightrec_load_imm(struct regcache *cache,
+ jit_state_t *_jit, u8 jit_reg, u32 pc, u32 imm)
+{
+ s32 delta = imm - pc;
+
+ if (!reg_pc_is_mapped(cache) || !can_sign_extend(delta, 16))
+ jit_movi(jit_reg, imm);
+ else if (jit_reg != JIT_V0 || delta)
+ jit_addi(jit_reg, JIT_V0, delta);
+}
+
/* Record the constant next-PC value 'imm' for the block being compiled.
 *
 * Two storage strategies, selected by lightrec_store_next_pc():
 *  - store mode: write 'imm' to state->next_pc through a temporary
 *    native register, then release the temporary;
 *  - register mode: leave 'imm' in JIT_V0 and pin it there.
 *
 * 'pc' is the address of the instruction, used by lightrec_load_imm()
 * to emit a short PC-relative add when JIT_V0 already maps the PC.
 */
void lightrec_load_next_pc_imm(struct regcache *cache,
			       jit_state_t *_jit, u32 pc, u32 imm)
{
	struct native_register *nreg = lightning_reg_to_lightrec(cache, JIT_V0);
	u8 reg = JIT_V0;

	/* In store mode, don't clobber JIT_V0: use a scratch register. */
	if (lightrec_store_next_pc())
		reg = lightrec_alloc_reg_temp(cache, _jit);

	if (reg_pc_is_mapped(cache)) {
		/* JIT_V0 contains next PC - so we can overwrite it */
		lightrec_load_imm(cache, _jit, reg, pc, imm);
	} else {
		/* JIT_V0 contains something else - invalidate it */
		if (reg == JIT_V0)
			lightrec_unload_reg(cache, _jit, JIT_V0);

		jit_movi(reg, imm);
	}

	if (lightrec_store_next_pc()) {
		/* Store mode: spill the value to state->next_pc and free
		 * the temporary register allocated above. */
		jit_stxi_i(offsetof(struct lightrec_state, next_pc),
			   LIGHTREC_REG_STATE, reg);
		lightrec_free_reg(cache, reg);
	} else {
		/* Register mode: JIT_V0 now holds next-PC; mark it loaded
		 * with no backing emulated register, and lock it so the
		 * allocator won't evict it.
		 * NOTE(review): -1 appears to be the "no register" sentinel
		 * for emulated_register — confirm against the struct decl. */
		nreg->prio = REG_IS_LOADED;
		nreg->emulated_register = -1;
		nreg->locked = true;
	}
}
+
+void lightrec_load_next_pc(struct regcache *cache, jit_state_t *_jit, u8 reg)
+{
+ struct native_register *nreg_v0, *nreg;