/* Compile 'block' to native code using the given compiler state.
 * NOTE(review): implementation not visible here; return value is presumably
 * 0 on success / negative error code — confirm against the definition. */
int lightrec_compile_block(struct lightrec_cstate *cstate, struct block *block);

/* Release an opcode list; ownership of 'list' passes to this function. */
void lightrec_free_opcode_list(struct lightrec_state *state,
			       struct opcode *list);

/* Emulated-CPU cycle count charged for executing 'code'. */
unsigned int lightrec_cycles_of_opcode(const struct lightrec_state *state,
				       union code code);
+
+static inline u8 get_mult_div_lo(union code c)
+{
+ return (OPT_FLAG_MULT_DIV && c.r.rd) ? c.r.rd : REG_LO;
+}
+
+static inline u8 get_mult_div_hi(union code c)
+{
+ return (OPT_FLAG_MULT_DIV && c.r.imm) ? c.r.imm : REG_HI;
+}
+
+static inline s16 s16_max(s16 a, s16 b)
+{
+ return a > b ? a : b;
+}
+
+static inline _Bool block_has_flag(struct block *block, u8 flag)
+{
+#if ENABLE_THREADED_COMPILER
+ return atomic_load_explicit(&block->flags, memory_order_relaxed) & flag;
+#else
+ return block->flags & flag;
+#endif
+}
+
+static inline u8 block_set_flags(struct block *block, u8 mask)
+{
+#if ENABLE_THREADED_COMPILER
+ return atomic_fetch_or_explicit(&block->flags, mask,
+ memory_order_relaxed);
+#else
+ u8 flags = block->flags;
+
+ block->flags |= mask;
+
+ return flags;
+#endif
+}
+
+static inline u8 block_clear_flags(struct block *block, u8 mask)
+{
+#if ENABLE_THREADED_COMPILER
+ return atomic_fetch_and_explicit(&block->flags, ~mask,
+ memory_order_relaxed);
+#else
+ u8 flags = block->flags;
+
+ block->flags &= ~mask;
+
+ return flags;
+#endif
+}
+
+static inline _Bool can_sign_extend(s32 value, u8 order)
+{
+ return ((u32)(value >> (order - 1)) + 1) < 2;
+}
+
+static inline _Bool can_zero_extend(u32 value, u8 order)
+{
+ return (value >> order) == 0;
+}
+
+static inline const struct opcode *
+get_delay_slot(const struct opcode *list, u16 i)
+{
+ return op_flag_no_ds(list[i].flags) ? &list[i - 1] : &list[i + 1];
+}
+
+static inline _Bool lightrec_store_next_pc(void)
+{
+ return NUM_REGS + NUM_TEMPS <= 4;
+}