// Basic macros to emit ARM instructions and some utils
// (c) Copyright 2008-2009, Grazvydas "notaz" Ignotas
// Free for non-commercial use.
// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
	do { \
		*(u32 *)ptr = x; \
		ptr = (void *)((u8 *)ptr + sizeof(u32)); \
	} while (0)

#define EMIT(x) EMIT_PTR(tcache_ptr, x)
#define A_R4M  (1 << 4)
#define A_R5M  (1 << 5)
#define A_R6M  (1 << 6)
#define A_R7M  (1 << 7)
#define A_R8M  (1 << 8)
#define A_R9M  (1 << 9)
#define A_R10M (1 << 10)
#define A_R11M (1 << 11)
#define A_R14M (1 << 14)
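
// ARM condition field values (architectural encodings; only the ones this
// file uses are defined)
#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_LS 0x9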

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL

/* addressing mode 1 */
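// shift operator encodings for the shifter operand (architectural values,
// restored here since the macros below depend on them)
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3
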
#define A_AM1_IMM(ror2,imm8)                  (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm)        (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
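// the hardware rotates imm8 right by 2*ror2, so e.g. A_AM1_IMM(4,0xff)
// encodes the 32-bit constant 0xff000000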

/* data processing op */
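// data processing opcode values (architectural; only the ones used here)
#define A_OP_AND 0x0
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe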

#define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
	EMIT(((cond)<<28) | ((op)<<21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))

#define EOP_C_DOP_IMM(     cond,op,s,rn,rd,ror2,imm8)             EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8))
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm))
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs,       shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs,shift_op,rm))

#define EOP_MOV_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_IMM_C(cond,rd,   ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd,   rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm)           EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

#define EOP_ORR_REG_SIMPLE(rd,rm)           EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm)           EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)

#define EOP_TST_REG_SIMPLE(rn,rm)           EOP_TST_REG(A_COND_AL,rn,rm,A_AM1_LSL,0) /* tst rn, rm */

#define EOP_MOV_REG2_LSL(rd,   rm,rs) EOP_MOV_REG2(0,rd,   rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd,   rm,rs) EOP_MOV_REG2(0,rd,   rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs) EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs) EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)

/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
	EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12))

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
	EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
		((s)<<6) | ((h)<<5) | (immed_reg))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))

#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm)       EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)

#define EOP_LDR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12)
#define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12)
#define EOP_LDR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12)
#define EOP_STR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

#define EOP_LDRH_IMM(   rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8)
#define EOP_LDRH_SIMPLE(rd,rn)          EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG(   rd,rn,rm)       EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM(   rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8)
#define EOP_STRH_SIMPLE(rd,rn)          EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG(   rd,rn,rm)       EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)

#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
	EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list))

#define EOP_STMFD_ST(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list)
#define EOP_LDMFD_ST(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list)
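// e.g. EOP_STMFD_ST(A_R4M|A_R14M) assembles to stmfd sp!, {r4,lr}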

#define EOP_C_BX(cond,rm) \
	EMIT(((cond)<<28) | 0x012fff10 | (rm))

#define EOP_BX(rm) EOP_C_BX(A_COND_AL,rm)

#define EOP_C_B(cond,l,signed_immed_24) \
	EMIT(((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)

#define EOP_C_MUL(cond,s,rd,rs,rm) \
	EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
	EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
	EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
	EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f

#define EOP_MRS(rd)           EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm)       EOP_C_MSR_REG(A_COND_AL,rm)

static void emith_op_imm(int cond, int s, int op, int r, unsigned int imm)
{
	int ror2, rd = r, rn = r;
	u32 v;

	if (op == A_OP_MOV)
		rn = 0;
	else if (op == A_OP_TST || op == A_OP_TEQ)
		rd = 0;

	for (v = imm, ror2 = 0; v != 0 || op == A_OP_MOV; v >>= 8, ror2 -= 8/2) {
		/* shift down to get 'best' ror2 */
		for (; v && !(v & 3); v >>= 2)
			ror2--;

		EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);

		if (op == A_OP_MOV) {
			/* further chunks are OR'ed in on top of the mov */
			op = A_OP_ORR;
			rn = r;
		}
	}
}
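
/* Illustrative (not byte-exact) expansion:
 * emith_op_imm(A_COND_AL, 0, A_OP_MOV, 0, 0x12345678) emits a mov of the
 * lowest non-zero 8-bit chunk followed by orr's of the remaining chunks,
 * so an arbitrary 32-bit constant costs up to 4 insns, while anything
 * expressible as a single rotated imm8 (e.g. 0xff000000) costs one. */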

#define is_offset_24(val) \
	((val) >= (int)0xff000000 && (val) <= 0x00ffffff)
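// b/bl hold a signed 24-bit word offset, giving roughly +/-32MB of reach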

static int emith_xbranch(int cond, void *target, int is_call)
{
	int val = (u32 *)target - (u32 *)tcache_ptr - 2;
	int direct = is_offset_24(val);
	u32 *start_ptr = (u32 *)tcache_ptr;

	if (direct)
	{
		EOP_C_B(cond,is_call,val & 0xffffff);	// b, bl target
	}
	else
	{
#ifdef __EPOC32__
//		elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
		if (is_call)
			EOP_ADD_IMM(14,15,0,8);		// add lr,pc,#8
		EOP_C_AM2_IMM(cond,1,0,1,15,15,0);	// ldrcc pc,[pc]
		EOP_MOV_REG_SIMPLE(15,15);		// mov pc, pc (skips the literal if cond is false)
		EMIT((u32)target);
#else
		// should never happen
		elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", target, tcache_ptr);
		exit(1);
#endif
	}

	return (u32 *)tcache_ptr - start_ptr;
}

// fake "simple" or "short" jump - using cond insns instead;
// on ARM nothing is emitted here, callers predicate the insns in
// between via the _c() emitter variants
#define EMITH_SJMP_START(cond) \
	(void)(cond)

#define EMITH_SJMP_END(cond) \
	(void)(cond)

#define EMITH_CONDITIONAL(code, is_nonzero) { \
	u32 val, cond, *ptr; \
	cond = (is_nonzero) ? A_COND_EQ : A_COND_NE; /* skip code on the inverse condition */ \
	ptr = (void *)tcache_ptr; \
	tcache_ptr = (void *)(ptr + 1); /* leave a slot for the branch */ \
	code; \
	val = (u32 *)tcache_ptr - (ptr + 2); \
	EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | (val & 0xffffff)); \
}
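
/* Usage sketch: EMITH_CONDITIONAL(emith_add_r_imm(0, 1), 1) reserves one
 * slot, emits the add, then backpatches the slot with a branch that skips
 * the add when the zero flag is set; the -2 above accounts for the ARM pc
 * reading two insns ahead. */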

#define emith_move_r_r(d, s) \
	EOP_MOV_REG_SIMPLE(d, s)

#define emith_add_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_sub_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_teq_r_r(d, s) \
	EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_subf_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)

#define emith_bic_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)

// note: use 8bit imm only - a wider constant would expand to several
// tst insns, and only the last one's flags would survive
#define emith_tst_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 1, A_OP_TST, r, imm)

#define emith_subf_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_add_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_SUB, r, imm)

#define emith_or_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ORR, r, imm)

#define emith_lsl(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)

#define emith_lsr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)

#define emith_asrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

#define emith_mul(d, s1, s2) { \
	if ((d) != (s1)) /* rd != rm limitation */ \
		EOP_MUL(d, s1, s2); \
	else \
		EOP_MUL(d, s2, s1); \
}

#define emith_ctx_read(r, offs) \
	EOP_LDR_IMM(r, CONTEXT_REG, offs)

#define emith_ctx_write(r, offs) \
	EOP_STR_IMM(r, CONTEXT_REG, offs)

#define emith_clear_msb(d, s, count) { \
	u32 t; \
	if ((count) <= 8) { \
		t = 8 - (count); /* reconstructed: position the mask in the top byte */ \
		t = (0xff << t) & 0xff; \
		EOP_BIC_IMM(d,s,8/2,t); \
	} else if ((count) >= 24) { \
		t = 0xff >> ((count) - 24); /* reconstructed: keep the low 32-count bits */ \
		EOP_AND_IMM(d,s,0,t); \
	} else { \
		EOP_MOV_REG_LSL(d,s,count); \
		EOP_MOV_REG_LSR(d,d,count); \
	} \
}
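
/* e.g. emith_clear_msb(0, 1, 16) takes the lsl #16 / lsr #16 path, while
 * emith_clear_msb(0, 1, 8) becomes a single bic r0, r1, #0xff000000 */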

#define emith_sext(d, s, bits) { \
	EOP_MOV_REG_LSL(d,s,32 - (bits)); \
	EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}
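
/* e.g. emith_sext(0, 0, 16) emits lsl #16 / asr #16, sign-extending a
 * halfword in r0 to a full word */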

#define host_arg2reg(rd, arg) \
	rd = arg /* args 0-3 are passed in r0-r3 on ARM */

#define emith_pass_arg_r(arg, reg) \
	EOP_MOV_REG_SIMPLE(arg, reg)

#define emith_pass_arg_imm(arg, imm) \
	emith_move_r_imm(arg, imm)

#define emith_call_cond(cond, target) \
	emith_xbranch(cond, target, 1)

#define emith_jump_cond(cond, target) \
	emith_xbranch(cond, target, 0)

#define emith_call(target) \
	emith_call_cond(A_COND_AL, target)

#define emith_jump(target) \
	emith_jump_cond(A_COND_AL, target)

/* SH2 drc specific */
#define emith_sh2_test_t() { \
	int r = rcache_get_reg(SHR_SR, RC_GR_READ); \
	EOP_TST_IMM(r, 0, 1); \
}
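
/* What follows appears to burn a whole "dt rn; bf loop" pair at once:
 * take the cycle budget kept in the upper bits of SR, knock rn down by
 * that many iterations, and if rn underflows first, return the unused
 * cycles and set the T bit (interpretation of the code below, not taken
 * from other docs). */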
#define emith_sh2_dtbf_loop() { \
	int cr, rn; /* tmp, op and cycles come from the caller's scope */ \
	tmp = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
	emith_sub_r_imm(rn, 1);                /* sub rn, #1 */ \
	emith_bic_r_imm(cr, 1);                /* bic cr, #1 */ \
	emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
	cycles = 0; \
	emith_asrf(tmp, cr, 2+12);             /* movs tmp, cr, asr #2+12 */ \
	EOP_MOV_IMM_C(A_COND_MI,tmp,0,0);      /* movmi tmp, #0 */ \
	emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */ \
	emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */ \
	emith_subf_r_r(rn, tmp);               /* subs rn, tmp */ \
	EOP_RSB_IMM_C(A_COND_LS,tmp,rn,0,0);   /* rsbls tmp, rn, #0 */ \
	EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp,A_AM1_LSL,12+2); /* orrls cr,tmp,lsl #12+2 */ \
	EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */ \
	EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */ \
	rcache_free_tmp(tmp); \
}