-// Basic macros to emit ARM instructions and some utils
-
-// (c) Copyright 2008-2009, Grazvydas "notaz" Ignotas
-// Free for non-commercial use.
-
+/*
+ * Basic macros to emit ARM instructions and some utils
+ * Copyright (C) 2008,2009,2010 notaz
+ *
+ * This work is licensed under the terms of MAME license.
+ * See COPYING file in the top-level directory.
+ */
#define CONTEXT_REG 11
+#define RET_REG 0
// XXX: tcache_ptr type differs between the SVP and SH2 compilers
#define EMIT_PTR(ptr, x) \
#define A_R9M (1 << 9)
#define A_R10M (1 << 10)
#define A_R11M (1 << 11)
+#define A_R12M (1 << 12)
#define A_R14M (1 << 14)
#define A_R15M (1 << 15)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm) EOP_C_MSR_REG(A_COND_AL,rm)
+#define EOP_MOVW(rd,imm) \
+ EMIT(0xe3000000 | ((rd)<<12) | ((imm)&0xfff) | (((imm)<<4)&0xf0000))
+
+#define EOP_MOVT(rd,imm) \
+ EMIT(0xe3400000 | ((rd)<<12) | (((imm)>>16)&0xfff) | (((imm)>>12)&0xf0000))
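+
+/* informative example: with rd=0, imm=0x12345678, EOP_MOVW emits
+ * 0xe3050678 (movw r0, #0x5678) from the low halfword and EOP_MOVT
+ * emits 0xe3410234 (movt r0, #0x1234) from the high one, so the pair
+ * loads any 32-bit constant in two insns on ARMv7 */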
// XXX: AND, RSB, *C will break if one insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
imm = ~imm;
op = A_OP_MVN;
}
+#ifdef HAVE_ARMV7
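+	/* shift out trailing pairs of zero bits; if the significant bits
+	 * then span more than 8, imm has no single 8-bit-rotated encoding */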
+ for (v = imm, ror2 = 0; v && !(v & 3); v >>= 2)
+ ror2--;
+ if (v >> 8) {
+ /* 2+ insns needed - prefer movw/movt */
+ if (op == A_OP_MVN)
+ imm = ~imm;
+ EOP_MOVW(rd, imm);
+ if (imm & 0xffff0000)
+ EOP_MOVT(rd, imm);
+ return;
+ }
+#endif
break;
case A_OP_EOR:
tcache_ptr += sizeof(u32)
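+
+/* B displacements are in words relative to PC+8 (the ARM pipeline reads
+ * PC two insns ahead), hence the -2 in the offset math below */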
#define JMP_EMIT(cond, ptr) { \
- int val = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
- EOP_C_B_PTR(ptr, cond, 0, val & 0xffffff); \
+ u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
+ EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
}
#define EMITH_JMP_START(cond) { \
#define EMITH_NOTHING1(cond) \
(void)(cond)
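+
+/* "simple" conditional blocks are only a few insns long on this target,
+ * so they can lean on ARM conditional execution instead of real jumps */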
+#define EMITH_SJMP_DECL_()
+#define EMITH_SJMP_START_(cond) EMITH_NOTHING1(cond)
+#define EMITH_SJMP_END_(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP_START(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP_END(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP3_START(cond) EMITH_NOTHING1(cond)
#define emith_move_r_r(d, s) \
EOP_MOV_REG_SIMPLE(d, s)
+#define emith_move_r_r_ptr(d, s) \
+ emith_move_r_r(d, s)
+
#define emith_mvn_r_r(d, s) \
EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)
+#define emith_add_r_r_r_lsl(d, s1, s2, lslimm) \
+ EOP_ADD_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
+
#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
#define emith_eor_r_r_lsr(d, s, lsrimm) \
emith_eor_r_r_r_lsr(d, d, s, lsrimm)
+#define emith_add_r_r_r(d, s1, s2) \
+ emith_add_r_r_r_lsl(d, s1, s2, 0)
+
#define emith_or_r_r_r(d, s1, s2) \
emith_or_r_r_r_lsl(d, s1, s2, 0)
emith_eor_r_r_r_lsl(d, s1, s2, 0)
#define emith_add_r_r(d, s) \
- EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
+ emith_add_r_r_r(d, d, s)
#define emith_sub_r_r(d, s) \
EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
#define emith_tst_r_r(d, s) \
EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)
+#define emith_tst_r_r_ptr(d, s) \
+ emith_tst_r_r(d, s)
+
#define emith_teq_r_r(d, s) \
EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)
#define emith_add_r_r_imm(d, s, imm) \
emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)
+#define emith_add_r_r_ptr_imm(d, s, imm) \
+ emith_add_r_r_imm(d, s, imm)
+
#define emith_sub_r_r_imm(d, s, imm) \
emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)
#define emith_ctx_read(r, offs) \
emith_read_r_r_offs(r, CONTEXT_REG, offs)
+#define emith_ctx_read_ptr(r, offs) \
+ emith_ctx_read(r, offs)
+
#define emith_ctx_write(r, offs) \
EOP_STR_IMM(r, CONTEXT_REG, offs)
EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}
-#define host_arg2reg(rd, arg) \
- rd = arg
+#define emith_do_caller_regs(mask, func) { \
+ u32 _reg_mask = (mask) & 0x500f; \
+ if (_reg_mask) { \
+ if (__builtin_parity(_reg_mask) == 1) \
+ _reg_mask |= 0x10; /* eabi align */ \
+ func(_reg_mask); \
+ } \
+}
+
+#define emith_save_caller_regs(mask) \
+ emith_do_caller_regs(mask, EOP_STMFD_SP)
+
+#define emith_restore_caller_regs(mask) \
+ emith_do_caller_regs(mask, EOP_LDMFD_SP)
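+
+/* informative example: emith_save_caller_regs(1 << 0) masks to r0 alone;
+ * an odd register count would break the 8-byte stack alignment, so r4
+ * (0x10) is added as padding and stmfd sp!, {r0, r4} is emitted */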
// up to 4 args
#define emith_pass_arg_r(arg, reg) \
*ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
} while (0)
+#define emith_jump_at(ptr, target) { \
+ u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
+ EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
+}
+
#define emith_jump_reg_c(cond, r) \
EOP_C_BX(cond, r)
#define emith_ret_to_ctx(offs) \
emith_ctx_write(14, offs)
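+
+/* emith_push_ret stacks lr so emith_pop_and_ret can later pop it straight
+ * into pc, performing the return in a single ldm */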
+#define emith_push_ret() \
+ EOP_STMFD_SP(A_R14M)
+
+#define emith_pop_and_ret() \
+ EOP_LDMFD_SP(A_R15M)
+
+#define host_instructions_updated(base, end) \
+ cache_flush_d_inval_i(base, end)
+
+#define host_arg2reg(rd, arg) \
+ rd = arg
+
/* SH2 drc specific */
+/* pushes r12 to keep the EABI 8-byte stack alignment */
#define emith_sh2_drc_entry() \
- EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R14M)
+ EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R12M|A_R14M)
#define emith_sh2_drc_exit() \
- EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R15M)
-
-#define emith_sh2_wcall(a, tab, ret_ptr) { \
- int val_ = (char *)(ret_ptr) - (char *)tcache_ptr - 2*4; \
- if (val_ >= 0) \
- emith_add_r_r_imm(14, 15, val_); \
- else if (val_ < 0) \
- emith_sub_r_r_imm(14, 15, -val_); \
+ EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R12M|A_R15M)
+
+#define emith_sh2_wcall(a, tab) { \
emith_lsr(12, a, SH2_WRITE_SHIFT); \
EOP_LDR_REG_LSL(A_COND_AL,12,tab,12,2); \
- emith_ctx_read(2, offsetof(SH2, is_slave)); \
+ emith_move_r_r(2, CONTEXT_REG); \
emith_jump_reg(12); \
}
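+
+/* informative sketch of the sequence emitted above, assuming tab holds the
+ * write-handler table pointer: the handler is picked by a >> SH2_WRITE_SHIFT,
+ * the SH2 context goes in r2 (3rd arg), and the jump through r12 leaves lr
+ * untouched, so the handler returns to whatever lr already holds:
+ *   lsr  r12, rA, #SH2_WRITE_SHIFT
+ *   ldr  r12, [rTAB, r12, lsl #2]
+ *   mov  r2, r11        @ CONTEXT_REG
+ *   bx   r12
+ * (rA/rTAB stand for the registers passed as a/tab) */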