do { \
*(u32 *)ptr = x; \
ptr = (void *)((u8 *)ptr + sizeof(u32)); \
+ COUNT_OP; \
} while (0)
#define EMIT(x) EMIT_PTR(tcache_ptr, x)
static void emith_op_imm(int cond, int op, int r, unsigned int imm)
{
- u32 v, ror2;
+ /* rn: first operand of each emitted insn; MOV encodes no Rn, later chunks merge into r */
+ int ror2, rn = r;
+ u32 v;
- if (imm == 0 && op != A_OP_MOV)
+ if (op == A_OP_MOV)
+ rn = 0;
+ else if (imm == 0)
+ /* for non-MOV ops a zero immediate is a no-op, emit nothing */
return;
- /* shift down to get starting rot2 */
- for (v = imm, ror2 = 0; v && !(v & 3); v >>= 2)
- ror2++;
- ror2 = 16 - ror2;
+ /* emit one data-processing insn per 8-bit chunk of imm, each rotated into place; */
+ /* the 'op == A_OP_MOV' term forces at least one insn even when imm == 0 */
+ for (v = imm, ror2 = 0; v != 0 || op == A_OP_MOV; v >>= 8, ror2 -= 8/2) {
+ /* shift down to get 'best' rot2 */
+ for (; v && !(v & 3); v >>= 2)
+ ror2--;
- EOP_C_DOP_IMM(cond, op, 0, op == A_OP_MOV ? 0 : r, r, ror2 & 0x0f, v & 0xff);
- if (op == A_OP_MOV)
- op = A_OP_ORR;
-
- v >>= 8;
- if (v & 0xff)
- EOP_C_DOP_IMM(cond, op, 0, r, r, (ror2 - 8/2) & 0x0f, v & 0xff);
- v >>= 8;
- if (v & 0xff)
- EOP_C_DOP_IMM(cond, op, 0, r, r, (ror2 - 8/2) & 0x0f, v & 0xff);
- v >>= 8;
- if (v & 0xff)
- EOP_C_DOP_IMM(cond, op, 0, r, r, (ror2 - 8/2) & 0x0f, v & 0xff);
+ /* ror2 counts 2-bit rotate-right units (4-bit field); negative values wrap via & 0x0f */
+ EOP_C_DOP_IMM(cond, op, 0, rn, r, ror2 & 0x0f, v & 0xff);
+
+ if (op == A_OP_MOV) {
+ /* after the initial MOV, merge the remaining bytes into r with ORR */
+ op = A_OP_ORR;
+ rn = r;
+ }
+ }
}
#define is_offset_24(val) \
return (u32 *)tcache_ptr - start_ptr;
}
-static void handle_caches(void)
-{
-#ifdef ARM
- extern void cache_flush_d_inval_i(const void *start_addr, const void *end_addr);
- cache_flush_d_inval_i(tcache, tcache_ptr);
-#endif
-}
-
#define EMITH_CONDITIONAL(code, is_nonzero) { \
u32 val, cond, *ptr; \
#define emith_ctx_write(r, offs) \
EOP_STR_IMM(r, CONTEXT_REG, offs)
-#define emith_ctx_sub(val, offs) { \
- emith_ctx_read(0, offs); \
- emith_sub_r_imm(0, val); \
- emith_ctx_write(0, offs); \
-}
-
// upto 4 args
#define emith_pass_arg_r(arg, reg) \
EOP_MOV_REG_SIMPLE(arg, reg)
/* SH2 drc specific */
#define emith_test_t() { \
- int r = reg_map_g2h[SHR_SR]; \
- if (r == -1) { \
- emith_ctx_read(0, SHR_SR * 4); \
- r = 0; \
- } \
+ /* NOTE(review): rcache_get_reg presumably yields a host reg holding SHR_SR, */ \
+ /* loading it from the context when not already cached — confirm against rcache impl */ \
+ int r = rcache_get_reg(SHR_SR, RC_GR_READ); \
+ /* TST r, #1 — tests bit 0 of SR (presumably the SH2 T flag) */ \
EOP_TST_IMM(r, 0, 1); \
}