+ lightrec_free_reg(reg_cache, rd);
+}
+
+/*
+ * Emit native code for the meta MULT2/MULTU2 opcode: multiply register rs
+ * by 2^op, writing the low 32 bits of the 64-bit result to LO and the high
+ * 32 bits to HI (each side skipped when flagged unused).
+ *
+ * state:  per-thread compilation state (owns the register cache)
+ * block:  block being compiled; provides the opcode list and jit state
+ * offset: index of the opcode inside block->opcode_list
+ */
+static void rec_meta_MULT2(struct lightrec_cstate *state,
+			   const struct block *block,
+			   u16 offset)
+{
+	struct regcache *reg_cache = state->reg_cache;
+	union code c = block->opcode_list[offset].c;
+	jit_state_t *_jit = block->_jit;
+	u8 reg_lo = get_mult_div_lo(c);
+	u8 reg_hi = get_mult_div_hi(c);
+	u32 flags = block->opcode_list[offset].flags;
+	bool is_signed = c.i.op == OP_META_MULT2;
+	u8 rs, lo, hi, rflags = 0, hiflags = 0;
+	unsigned int i;
+
+	/* Extension flags only matter when HI is computed from rs itself
+	 * (shift amount < 32); for op >= 32, HI is built by a left shift
+	 * and the extension state of rs is irrelevant. */
+	if (!op_flag_no_hi(flags) && c.r.op < 32) {
+		rflags = is_signed ? REG_EXT : REG_ZEXT;
+		hiflags = is_signed ? REG_EXT : (REG_EXT | REG_ZEXT);
+	}
+
+	_jit_name(block->_jit, __func__);
+	jit_note(__FILE__, __LINE__);
+
+	rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, rflags);
+
+	/*
+	 * We must handle the case where one of the output registers is our rs
+	 * input register. Thankfully, computing LO/HI can be done in any
+	 * order. Here, we make sure that the computation that overwrites the
+	 * input register is always performed last.
+	 */
+	for (i = 0; i < 2; i++) {
+		/* LO goes first unless it aliases rs (then it goes last). */
+		if ((!i ^ (reg_lo == c.i.rs)) && !op_flag_no_lo(flags)) {
+			lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
+
+			if (c.r.op < 32)
+				jit_lshi(lo, rs, c.r.op);
+			else
+				jit_movi(lo, 0); /* all bits shifted into HI */
+
+			lightrec_free_reg(reg_cache, lo);
+			continue;
+		}
+
+		if ((!!i ^ (reg_lo == c.i.rs)) && !op_flag_no_hi(flags)) {
+			hi = lightrec_alloc_reg_out(reg_cache, _jit,
+						    reg_hi, hiflags);
+
+			if (c.r.op >= 32) {
+				jit_lshi(hi, rs, c.r.op - 32);
+			} else if (is_signed) {
+				/* op == 0: HI is the sign extension of rs,
+				 * obtained by an arithmetic shift of 31. */
+				if (c.r.op)
+					jit_rshi(hi, rs, 32 - c.r.op);
+				else
+					jit_rshi(hi, rs, 31);
+			} else {
+				if (c.r.op)
+					jit_rshi_u(hi, rs, 32 - c.r.op);
+				else
+					jit_movi(hi, 0);
+			}
+
+			lightrec_free_reg(reg_cache, hi);
+		}
+	}
+
+	lightrec_free_reg(reg_cache, rs);
+
+	/* NOTE: the original ended with a second _jit_name()/jit_note()
+	 * pair, an exact duplicate of the calls made at function entry;
+	 * removed as redundant copy-paste leftover. */
+}
+
+/*
+ * Emit native code for the meta COM opcode: rd = ~rs (one's complement,
+ * via jit_comr).
+ */
+static void rec_meta_COM(struct lightrec_cstate *state,
+			 const struct block *block, u16 offset)
+{
+	struct regcache *reg_cache = state->reg_cache;
+	union code c = block->opcode_list[offset].c;
+	jit_state_t *_jit = block->_jit;
+	u8 rd, rs;
+
+	jit_note(__FILE__, __LINE__);
+
+	rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
+			c.m.rs, c.m.rd, 0, 0, &rs, &rd);
+
+	/* Forward only the REG_EXT bit of the input register's flags to
+	 * the output — presumably because a bitwise NOT of a sign-extended
+	 * value remains sign-extended (TODO confirm against regcache
+	 * flag semantics). */
+	lightrec_set_reg_out_flags(reg_cache, rd,
+				   lightrec_get_reg_in_flags(reg_cache, rs)
+				   & REG_EXT);
+
+	jit_comr(rd, rs);
+
+	lightrec_free_reg(reg_cache, rs);
+	lightrec_free_reg(reg_cache, rd);
+}
+
+/*
+ * Emit native code for the meta LWU opcode by delegating to rec_load()
+ * with an unaligned-load jit opcode and a 32-bit byte-swap opcode.
+ */
+static void rec_meta_LWU(struct lightrec_cstate *state,
+			 const struct block *block,
+			 u16 offset)
+{
+	/* On 64-bit big-endian hosts the zero-extending unaligned-load
+	 * variant (jit_code_unldr_u) is selected; everywhere else the
+	 * plain jit_code_unldr is used. */
+	jit_code_t code = (is_big_endian() && __WORDSIZE == 64)
+		? jit_code_unldr_u : jit_code_unldr;
+
+	_jit_name(block->_jit, __func__);
+	rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
+}
+
+/*
+ * Emit native code for the meta SWU opcode: delegates to rec_store()
+ * with the unaligned-store jit opcode (jit_code_unstr) and the 32-bit
+ * byte-swap opcode (jit_code_bswapr_ui).
+ */
+static void rec_meta_SWU(struct lightrec_cstate *state,
+ const struct block *block,
+ u16 offset)
+{
+ _jit_name(block->_jit, __func__);
+ rec_store(state, block, offset, jit_code_unstr, jit_code_bswapr_ui);
+}
+
+static void unknown_opcode(struct lightrec_cstate *state,
+ const struct block *block, u16 offset)
+{
+ rec_exit_early(state, block, offset, LIGHTREC_EXIT_UNKNOWN_OP,
+ block->pc + (offset << 2));