+/*
+ * Emit `code` preceded by a back-patched ARM conditional branch that
+ * jumps over it.  The branch condition is NE when `is_nonzero` is true,
+ * EQ otherwise, so `code` is SKIPPED whenever that condition holds for
+ * the flags set by the preceding test.
+ * Mechanics: one 32-bit instruction slot is reserved at the current
+ * tcache position, `code` is emitted after it, then the slot is filled
+ * in with B<cond> to the end of `code` (0x0a000000 is the ARM B opcode,
+ * cond in bits 31:28, 24-bit word offset in bits 23:0).
+ * NOTE(review): multi-statement macro in bare braces -- an if/else
+ * around a call site would mis-parse; confirm callers only use it as a
+ * plain statement before reshaping it.
+ */
+#define EMITH_CONDITIONAL(code, is_nonzero) { \
+ u32 val, cond, *ptr; \
+ cond = (is_nonzero) ? A_COND_NE : A_COND_EQ; \
+ ptr = (void *)tcache_ptr; \
+ /* reserve one instruction slot for the branch */ \
+ tcache_ptr = (void *)(ptr + 1); \
+ code; \
+ /* offset in words relative to ptr+2: ARM PC reads 8 bytes ahead */ \
+ val = (u32 *)tcache_ptr - (ptr + 2); \
+ EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | (val & 0xffffff)); \
+}
+
+/* register-to-register move */
+#define emith_move_r_r(d, s) EOP_MOV_REG_SIMPLE(d, s)
+
+/* move an immediate into a register (unconditional: A_COND_AL) */
+#define emith_move_r_imm(rd, val) emith_op_imm(A_COND_AL, A_OP_MOV, rd, val)
+
+/* add an immediate to a register (unconditional: A_COND_AL) */
+#define emith_add_r_imm(rd, val) emith_op_imm(A_COND_AL, A_OP_ADD, rd, val)
+
+/* subtract an immediate from a register (unconditional: A_COND_AL) */
+#define emith_sub_r_imm(rd, val) emith_op_imm(A_COND_AL, A_OP_SUB, rd, val)
+
+/* load a register from the CPU context struct at byte offset `offs` */
+#define emith_ctx_read(rd, offs) EOP_LDR_IMM(rd, CONTEXT_REG, offs)
+
+/* store a register into the CPU context struct at byte offset `offs` */
+#define emith_ctx_write(rs, offs) EOP_STR_IMM(rs, CONTEXT_REG, offs)
+
+#define emith_ctx_sub(val, offs) { \
+ emith_ctx_read(0, offs); \
+ emith_sub_r_imm(0, val); \
+ emith_ctx_write(0, offs); \
+}
+
+// up to 4 args
+/*
+ * Pass a register value as function-call argument #arg.
+ * Delegates to emith_move_r_r (same expansion as the previous direct
+ * EOP_MOV_REG_SIMPLE) for consistency with emith_pass_arg_imm, which
+ * delegates to emith_move_r_imm.
+ */
+#define emith_pass_arg_r(arg, reg) \
+ emith_move_r_r(arg, reg)
+
+/* pass an immediate value as function-call argument #arg */
+#define emith_pass_arg_imm(arg, imm) emith_move_r_imm(arg, imm)
+
+/* conditional call: emith_xbranch with its last argument 1 (call form) */
+#define emith_call_cond(cond, target) emith_xbranch(cond, target, 1)
+
+/* conditional jump: emith_xbranch with its last argument 0 (jump form) */
+#define emith_jump_cond(cond, target) emith_xbranch(cond, target, 0)
+
+/* unconditional call */
+#define emith_call(target) emith_call_cond(A_COND_AL, target)
+
+/* unconditional jump */
+#define emith_jump(target) emith_jump_cond(A_COND_AL, target)
+
+/* SH2 drc specific */
+#define emith_test_t() { \
+ int r = reg_map_g2h[SHR_SR]; \
+ if (r == -1) { \
+ emith_ctx_read(0, SHR_SR * 4); \
+ r = 0; \
+ } \
+ EOP_TST_IMM(r, 0, 1); \
+}
+
+