+ emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)
+
+#define emith_bic_r_imm(r, imm) \
+ emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)
+
+#define emith_and_r_imm(r, imm) \
+ emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)
+
+#define emith_or_r_imm(r, imm) \
+ emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)
+
+#define emith_eor_r_imm(r, imm) \
+ emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)
+
+// note: only use 8bit imm for these
+#define emith_tst_r_imm(r, imm) \
+ emith_top_imm(A_COND_AL, A_OP_TST, r, imm)
+
+// compare with immediate; small negative values are flipped to CMN
+#define emith_cmp_r_imm(r, imm) { \
+ u32 op = A_OP_CMP, imm_ = imm; \
+ if (~imm_ < 0x100) { \
+ imm_ = -imm_; \
+ op = A_OP_CMN; \
+ } \
+ emith_top_imm(A_COND_AL, op, r, imm_); \
+}
+
+#define emith_subf_r_imm(r, imm) \
+ emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)
+
+#define emith_move_r_imm_c(cond, r, imm) \
+ emith_op_imm(cond, 0, A_OP_MOV, r, imm)
+
+#define emith_add_r_imm_c(cond, r, imm) \
+ emith_op_imm(cond, 0, A_OP_ADD, r, imm)
+
+#define emith_sub_r_imm_c(cond, r, imm) \
+ emith_op_imm(cond, 0, A_OP_SUB, r, imm)
+
+#define emith_or_r_imm_c(cond, r, imm) \
+ emith_op_imm(cond, 0, A_OP_ORR, r, imm)
+
+#define emith_eor_r_imm_c(cond, r, imm) \
+ emith_op_imm(cond, 0, A_OP_EOR, r, imm)
+
+#define emith_bic_r_imm_c(cond, r, imm) \
+ emith_op_imm(cond, 0, A_OP_BIC, r, imm)
+
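+// load a sign-extended 8-bit immediate: negative values are emitted as MVN of the complement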
+#define emith_move_r_imm_s8(r, imm) { \
+ if ((imm) & 0x80) \
+ EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \
+ else \
+ EOP_MOV_IMM(r, 0, imm); \
+}
+
+#define emith_and_r_r_imm(d, s, imm) \
+ emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)
+
+#define emith_add_r_r_imm(d, s, imm) \
+ emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)
+
+#define emith_add_r_r_ptr_imm(d, s, imm) \
+ emith_add_r_r_imm(d, s, imm)
+
+#define emith_sub_r_r_imm(d, s, imm) \
+ emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)
+
+#define emith_neg_r_r(d, s) \
+ EOP_RSB_IMM(d, s, 0, 0)
+
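+// shifts/rotates by immediate, implemented as MOV with the barrel shifter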
+#define emith_lsl(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)
+
+#define emith_lsr(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)
+
+#define emith_asr(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)
+
+#define emith_ror_c(cond, d, s, cnt) \
+ EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)
+
+#define emith_ror(d, s, cnt) \
+ emith_ror_c(A_COND_AL, d, s, cnt)
+
+#define emith_rol(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))
+
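+// flag-setting shift/rotate variants (S bit set; C comes from the shifter carry-out)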
+#define emith_lslf(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)
+
+#define emith_lsrf(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)
+
+#define emith_asrf(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)
+
+// note: only C flag updated correctly
+#define emith_rolf(d, s, cnt) { \
+ EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
+ /* we don't have ROL so we shift to get the right carry */ \
+ EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
+}
+
+#define emith_rorf(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)
+
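+// rotates through carry by 1: left via ADC d,d (d+d+C), right via RRX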
+#define emith_rolcf(d) \
+ emith_adcf_r_r(d, d)
+
+#define emith_rorcf(d) \
+ EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */
+
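+// negate with borrow: RSCS d, s, #0 computes 0 - s - !C and updates the flags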
+#define emith_negcf_r_r(d, s) \
+ EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)
+
+#define emith_mul(d, s1, s2) { \
+ if ((d) != (s1)) /* rd != rm limitation */ \
+ EOP_MUL(d, s1, s2); \
+ else \
+ EOP_MUL(d, s2, s1); \
+}
+
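+// 32x32 -> 64 bit multiplies; dlo/dhi receive the low/high result halves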
+#define emith_mul_u64(dlo, dhi, s1, s2) \
+ EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)
+
+#define emith_mul_s64(dlo, dhi, s1, s2) \
+ EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)
+
+#define emith_mula_s64(dlo, dhi, s1, s2) \
+ EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)
+
+// misc
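+// loads (LDR/LDRB/LDRH) with an immediate offset; _c variants take an explicit condition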
+#define emith_read_r_r_offs_c(cond, r, rs, offs) \
+ EOP_LDR_IMM2(cond, r, rs, offs)
+
+#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
+ EOP_LDRB_IMM2(cond, r, rs, offs)
+
+#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
+ EOP_LDRH_IMM2(cond, r, rs, offs)
+
+#define emith_read_r_r_offs(r, rs, offs) \
+ emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)
+
+#define emith_read8_r_r_offs(r, rs, offs) \
+ emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)
+
+#define emith_read16_r_r_offs(r, rs, offs) \
+ emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)