#define EOP_MSR_REG(rm) EOP_C_MSR_REG(A_COND_AL,rm)
-// XXX: AND, RSB, *C, MVN will break if 1 insn is not enough
+// XXX: AND, RSB, *C will break if 1 insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
int ror2;
u32 v;
- if (op == A_OP_MOV) {
+ switch (op) {
+ case A_OP_MOV:
rn = 0;
- if (~imm < 0x100) {
+ if (~imm < 0x10000) {
imm = ~imm;
op = A_OP_MVN;
}
- } else if (imm == 0)
- return;
+ break;
+
+ case A_OP_EOR:
+ case A_OP_SUB:
+ case A_OP_ADD:
+ case A_OP_ORR:
+ case A_OP_BIC:
+ if (s == 0 && imm == 0)
+ return;
+ break;
+ }
- for (v = imm, ror2 = 0; v != 0 || op == A_OP_MOV; v >>= 8, ror2 -= 8/2) {
+ for (v = imm, ror2 = 0; ; ror2 -= 8/2) {
/* shift down to get 'best' rot2 */
for (; v && !(v & 3); v >>= 2)
ror2--;
EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);
+ v >>= 8;
+ if (v == 0)
+ break;
if (op == A_OP_MOV)
op = A_OP_ORR;
+ if (op == A_OP_MVN)
+ op = A_OP_BIC;
rn = rd;
}
}
#define emith_add_r_imm(r, imm) \
emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)
+#define emith_adc_r_imm(r, imm) \
+ emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)
+
#define emith_sub_r_imm(r, imm) \
emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)
#define emith_jump(target) \
emith_jump_cond(A_COND_AL, target)
+#define emith_jump_patchable(target) \
+ emith_jump(target)
+
#define emith_jump_cond(cond, target) \
emith_xbranch(cond, target, 0)
-#define emith_jump_patchable(cond) \
- emith_jump_cond(cond, 0)
+#define emith_jump_cond_patchable(cond, target) \
+ emith_jump_cond(cond, target)
#define emith_jump_patch(ptr, target) do { \
u32 *ptr_ = ptr; \
- u32 val = (u32 *)(target) - (u32 *)ptr_ - 2; \
- *ptr_ = (*ptr_ & 0xff000000) | (val & 0x00ffffff); \
+ u32 val_ = (u32 *)(target) - ptr_ - 2; \
+ *ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
} while (0)
#define emith_jump_reg_c(cond, r) \