// Basic macros to emit ARM instructions and some utils
// (c) Copyright 2008-2009, Grazvydas "notaz" Ignotas
// Free for non-commercial use.

// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
	do { \
		*(u32 *)ptr = x; \
		ptr = (void *)((u8 *)ptr + sizeof(u32)); \
	} while (0)

#define EMIT(x) EMIT_PTR(tcache_ptr, x)
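
// Illustration: each EMIT() stores one 32-bit opcode at tcache_ptr and
// advances it by 4, e.g. EMIT(0xe1a00000) appends "mov r0, r0" (the
// canonical ARM nop) to the translation cache.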

#define A_R4M  (1 << 4)
#define A_R5M  (1 << 5)
#define A_R6M  (1 << 6)
#define A_R7M  (1 << 7)
#define A_R8M  (1 << 8)
#define A_R9M  (1 << 9)
#define A_R10M (1 << 10)
#define A_R11M (1 << 11)
#define A_R14M (1 << 14)
#define A_R15M (1 << 15)

#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_HS 0x2
#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_VS 0x6
#define A_COND_VC 0x7
#define A_COND_HI 0x8
#define A_COND_LS 0x9
#define A_COND_GE 0xa
#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
#define A_COND_CS A_COND_HS
#define A_COND_CC A_COND_LO

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
#define DCOND_HI A_COND_HI
#define DCOND_HS A_COND_HS
#define DCOND_LO A_COND_LO
#define DCOND_GE A_COND_GE
#define DCOND_GT A_COND_GT
#define DCOND_LT A_COND_LT
#define DCOND_LS A_COND_LS
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC

/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3

#define A_AM1_IMM(ror2,imm8)                  (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm)        (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
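
// The AM1 immediate form encodes imm8 rotated right by 2*ror2 bits, e.g.
// A_AM1_IMM(8,1) is the constant 1 ror 16 = 0x00010000. Only values
// expressible as a byte at an even rotation fit a single insn;
// emith_op_imm2() below splits everything else into several insns.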

/* data processing op */
#define A_OP_AND 0x0
#define A_OP_EOR 0x1
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_ADC 0x5
#define A_OP_SBC 0x6
#define A_OP_RSC 0x7
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_CMN 0xb
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
#define A_OP_MVN 0xf

#define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
	EMIT(((cond)<<28) | ((op)<<21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))

#define EOP_C_DOP_IMM(     cond,op,s,rn,rd,ror2,imm8)             EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8))
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm))
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs,shift_op,rm)        EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs,shift_op,rm))
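
// Worked example: EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,1,0,0,0x10) emits
// 0xe2810010, i.e. "add r0, r1, #0x10" -
// 0xe<<28 | 0x4<<21 | 1<<16 | 0<<12 | A_AM1_IMM(0,0x10).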

#define EOP_MOV_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_MVN_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_IMM_C(cond,rd,   ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_MVN_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_CMP_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd,   rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm)         EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

#define EOP_ORR_REG_SIMPLE(rd,rm)           EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm)           EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)

#define EOP_TST_REG_SIMPLE(rn,rm)           EOP_TST_REG(A_COND_AL,rn,rm,A_AM1_LSL,0)

#define EOP_MOV_REG2_LSL(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs)       EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs)       EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)

/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
	EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12))

#define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \
	EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
	     ((shift_imm)<<7) | ((shift_op)<<5) | (rm))
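
// Example: EOP_C_AM2_IMM(A_COND_AL,1,0,1,1,0,4) emits 0xe5910004,
// i.e. "ldr r0, [r1, #4]" (u=1 add offset, b=0 word access, l=1 load).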

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
	EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
	     ((s)<<6) | ((h)<<5) | (immed_reg))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))

#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm)       EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)
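
// AM3 immediates are split into two nibbles: offset 0x14 becomes 0x104 in the
// encoding, so EOP_LDRH_IMM(0,1,0x14) emits 0xe1d101b4, "ldrh r0, [r1, #0x14]".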

#define EOP_LDR_IMM2(cond,rd,rn,offset_12)  EOP_C_AM2_IMM(cond,1,0,1,rn,rd,offset_12)
#define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,1,1,1,rn,rd,offset_12)

#define EOP_LDR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12)
#define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12)
#define EOP_LDR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12)
#define EOP_STR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

#define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm)

#define EOP_LDRH_IMM2(cond,rd,rn,offset_8)  EOP_C_AM3_IMM(cond,1,1,rn,rd,0,1,offset_8)

#define EOP_LDRH_IMM(   rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8)
#define EOP_LDRH_SIMPLE(rd,rn)          EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG(   rd,rn,rm)       EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM(   rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8)
#define EOP_STRH_SIMPLE(rd,rn)          EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG(   rd,rn,rm)       EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)

#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
	EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list))

#define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
#define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)

#define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list)
#define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list)
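
// Example: EOP_STMFD_SP(A_R4M|A_R14M) emits 0xe92d4010, "stmfd sp!, {r4, lr}" -
// the usual function-entry push; EOP_LDMFD_SP(A_R4M|A_R15M) is the matching
// pop that returns by loading pc.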

#define EOP_C_BX(cond,rm) \
	EMIT(((cond)<<28) | 0x012fff10 | (rm))

#define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
	EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_C_B(cond,l,signed_immed_24) \
	EOP_C_B_PTR(tcache_ptr,cond,l,signed_immed_24)

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)
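
// B/BL offsets are counted in words relative to pc, which is 8 bytes ahead of
// the branch insn, hence the "- 2" word adjustment used by emith_xbranch(),
// JMP_EMIT() and emith_jump_patch() below.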

#define EOP_C_MUL(cond,s,rd,rs,rm) \
	EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
	EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
	EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
	EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f

#define EOP_MRS(rd)           EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm)       EOP_C_MSR_REG(A_COND_AL,rm)

// XXX: AND, RSB, *C will break if 1 insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
	int ror2;
	u32 v;

	switch (op) {
	case A_OP_MOV:
		rn = 0;
		if (~imm < 0x10000) {
			imm = ~imm;
			op = A_OP_MVN; /* MVN reaches constants MOV can't */
		}
		break;
	case A_OP_EOR: case A_OP_SUB: case A_OP_ADD:
	case A_OP_ORR: case A_OP_BIC:
		if (s == 0 && imm == 0)
			return; /* no-op; ADC/SBC must not be skipped (carry) */
		break;
	}

	for (v = imm, ror2 = 0; ; ror2 -= 8/2) {
		/* shift down to get 'best' rot2 */
		for (; v && !(v & 3); v >>= 2)
			ror2--;

		EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);

		v >>= 8;
		if (v == 0)
			break;
		if (op == A_OP_MOV)
			op = A_OP_ORR; /* chain remaining bytes onto the result */
		if (op == A_OP_MVN)
			op = A_OP_BIC;
		rn = rd;
	}
}

#define emith_op_imm(cond, s, op, r, imm) \
	emith_op_imm2(cond, s, op, r, r, imm)
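
// Example decomposition: emith_move_r_imm(r, 0x00ff00ff) doesn't fit a single
// rotated imm8, so emith_op_imm2() above emits two insns:
//   mov r, #0xff
//   orr r, r, #0xff0000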

#define emith_top_imm(cond, op, r, imm) do { \
	u32 ror2, v; \
	for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
		ror2--; \
	EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
} while (0)

#define is_offset_24(val) \
	((val) >= (int)0xff000000 && (val) <= 0x00ffffff)

static int emith_xbranch(int cond, void *target, int is_call)
{
	int val = (u32 *)target - (u32 *)tcache_ptr - 2;
	int direct = is_offset_24(val);
	u32 *start_ptr = (u32 *)tcache_ptr;

	if (direct)
	{
		EOP_C_B(cond,is_call,val & 0xffffff);	// b, bl target
	}
	else
	{
#ifdef __EPOC32__
//		elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
		if (is_call)
			EOP_ADD_IMM(14,15,0,8);		// add lr,pc,#8
		EOP_C_AM2_IMM(cond,1,0,1,15,15,0);	// ldrcc pc,[pc]
		EOP_MOV_REG_SIMPLE(15,15);		// mov pc, pc
		EMIT((u32)target);			// literal pool entry
#else
		// should never happen
		elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", target, tcache_ptr);
		exit(1);
#endif
	}

	return (u32 *)tcache_ptr - start_ptr;
}

#define JMP_POS(ptr) \
	ptr = tcache_ptr; \
	tcache_ptr += sizeof(u32)

#define JMP_EMIT(cond, ptr) { \
	u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
	EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
}
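
// Usage sketch: JMP_POS() reserves a slot for a forward branch whose target
// isn't known yet; once the target is reached, JMP_EMIT() backpatches the
// slot with a conditional branch, e.g.
//   void *jmp0;
//   JMP_POS(jmp0);             // reserve, will branch over the block
//   ... emit block ...
//   JMP_EMIT(A_COND_EQ, jmp0); // patch: beq <here>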

#define EMITH_JMP_START(cond) { \
	void *cond_ptr; \
	JMP_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
	JMP_EMIT(cond, cond_ptr); \
}

// fake "simple" or "short" jump - using cond insns instead
#define EMITH_NOTHING1(cond) \
	(void)(cond)

#define EMITH_SJMP_START(cond)  EMITH_NOTHING1(cond)
#define EMITH_SJMP_END(cond)    EMITH_NOTHING1(cond)
#define EMITH_SJMP3_START(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP3_MID(cond)   EMITH_NOTHING1(cond)
#define EMITH_SJMP3_END()

#define emith_move_r_r(d, s) \
	EOP_MOV_REG_SIMPLE(d, s)

#define emith_mvn_r_r(d, s) \
	EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)

#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)

#define emith_or_r_r_lsl(d, s, lslimm) \
	emith_or_r_r_r_lsl(d, d, s, lslimm)

#define emith_eor_r_r_lsr(d, s, lsrimm) \
	emith_eor_r_r_r_lsr(d, d, s, lsrimm)

#define emith_or_r_r_r(d, s1, s2) \
	emith_or_r_r_r_lsl(d, s1, s2, 0)

#define emith_eor_r_r_r(d, s1, s2) \
	emith_eor_r_r_r_lsl(d, s1, s2, 0)

#define emith_add_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_sub_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_adc_r_r(d, s) \
	EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_and_r_r(d, s) \
	EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_or_r_r(d, s) \
	emith_or_r_r_r(d, d, s)

#define emith_eor_r_r(d, s) \
	emith_eor_r_r_r(d, d, s)

#define emith_tst_r_r(d, s) \
	EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_teq_r_r(d, s) \
	EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_cmp_r_r(d, s) \
	EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_addf_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_subf_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_adcf_r_r(d, s) \
	EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_sbcf_r_r(d, s) \
	EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_eorf_r_r(d, s) \
	EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)

#define emith_adc_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)

#define emith_bic_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)

#define emith_and_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)

// note: only use 8bit imm for these
#define emith_tst_r_imm(r, imm) \
	emith_top_imm(A_COND_AL, A_OP_TST, r, imm)

#define emith_cmp_r_imm(r, imm) { \
	u32 op = A_OP_CMP, imm_ = imm; \
	if (~imm_ < 0x100) { \
		imm_ = ~imm_; \
		op = A_OP_CMN; /* cmp rn,-x -> cmn rn,x fits imm8 */ \
	} \
	emith_top_imm(A_COND_AL, op, r, imm_); \
}

#define emith_subf_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_move_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_SUB, r, imm)

#define emith_or_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_EOR, r, imm)

#define emith_bic_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_BIC, r, imm)

#define emith_move_r_imm_s8(r, imm) { \
	if ((imm) & 0x80) \
		EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \
	else \
		EOP_MOV_IMM(r, 0, imm); \
}

#define emith_and_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)

#define emith_add_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)

#define emith_sub_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)

#define emith_neg_r_r(d, s) \
	EOP_RSB_IMM(d, s, 0, 0)

#define emith_lsl(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)

#define emith_lsr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)

#define emith_asr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)

#define emith_ror_c(cond, d, s, cnt) \
	EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)

#define emith_ror(d, s, cnt) \
	emith_ror_c(A_COND_AL, d, s, cnt)

#define emith_rol(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))
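
// ARM has no ROL: rotating left by cnt equals rotating right by 32-cnt,
// which is what emith_rol()/emith_rolf() use (valid for cnt = 1..31).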

#define emith_lslf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)

#define emith_lsrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)

#define emith_asrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

// note: only C flag updated correctly
#define emith_rolf(d, s, cnt) { \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
	/* we don't have ROL so we shift to get the right carry */ \
	EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
}

#define emith_rorf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)

#define emith_rolcf(d) \
	emith_adcf_r_r(d, d) /* d = d*2 + C -> rotate left through carry */

#define emith_rorcf(d) \
	EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */

#define emith_negcf_r_r(d, s) \
	EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)

#define emith_mul(d, s1, s2) { \
	if ((d) != (s1)) /* rd != rm limitation */ \
		EOP_MUL(d, s1, s2); \
	else \
		EOP_MUL(d, s2, s1); \
}

#define emith_mul_u64(dlo, dhi, s1, s2) \
	EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mul_s64(dlo, dhi, s1, s2) \
	EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mula_s64(dlo, dhi, s1, s2) \
	EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_read_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDR_IMM2(cond, r, rs, offs)

#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRB_IMM2(cond, r, rs, offs)

#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRH_IMM2(cond, r, rs, offs)

#define emith_read_r_r_offs(r, rs, offs) \
	emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read8_r_r_offs(r, rs, offs) \
	emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read16_r_r_offs(r, rs, offs) \
	emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_ctx_read(r, offs) \
	emith_read_r_r_offs(r, CONTEXT_REG, offs)

#define emith_ctx_write(r, offs) \
	EOP_STR_IMM(r, CONTEXT_REG, offs)

#define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
	int v_, r_ = r, c_ = count, b_ = CONTEXT_REG; \
	for (v_ = 0; c_; c_--, r_++) \
		v_ |= 1 << r_; \
	if ((offs) != 0) { \
		/* 30/2 encodes "ror #30", i.e. imm8 shifted left by 2 */ \
		EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2); \
		b_ = tmpr; \
	} \
	op(b_,v_); \
} while (0)

#define emith_ctx_read_multiple(r, offs, count, tmpr) \
	emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)

#define emith_ctx_write_multiple(r, offs, count, tmpr) \
	emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)
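
// Usage sketch: emith_ctx_read_multiple(0, 0x10, 2, 12) builds the register
// list {r0,r1}, points r12 at CONTEXT_REG+0x10 and emits a single
// "ldmia r12, {r0, r1}" instead of two separate loads.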

#define emith_clear_msb_c(cond, d, s, count) { \
	u32 t; \
	if ((count) <= 8) { \
		t = 8 - (count); \
		t = (0xff << t) & 0xff; \
		EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
	} else if ((count) >= 24) { \
		t = (count) - 24; \
		t = 0xff >> t; \
		EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
	} else { \
		EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
		EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
	} \
}

#define emith_clear_msb(d, s, count) \
	emith_clear_msb_c(A_COND_AL, d, s, count)
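
// Example: emith_clear_msb(r, r, 4) emits "bic r, r, #0xf0000000" (mask byte
// 0xf0 at rotation 8); mid-range counts fall back to the lsl+lsr pair.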

#define emith_sext(d, s, bits) { \
	EOP_MOV_REG_LSL(d,s,32 - (bits)); \
	EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}

#define emith_pass_arg_r(arg, reg) \
	EOP_MOV_REG_SIMPLE(arg, reg)

#define emith_pass_arg_imm(arg, imm) \
	emith_move_r_imm(arg, imm)

#define emith_jump(target) \
	emith_jump_cond(A_COND_AL, target)

#define emith_jump_patchable(target) \
	emith_jump(target)

#define emith_jump_cond(cond, target) \
	emith_xbranch(cond, target, 0)

#define emith_jump_cond_patchable(cond, target) \
	emith_jump_cond(cond, target)

#define emith_jump_patch(ptr, target) do { \
	u32 *ptr_ = (u32 *)(ptr); \
	u32 val_ = (u32 *)(target) - ptr_ - 2; \
	*ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
} while (0)

#define emith_jump_at(ptr, target) { \
	u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
	EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
}

#define emith_jump_reg_c(cond, r) \
	EOP_C_BX(cond, r)

#define emith_jump_reg(r) \
	emith_jump_reg_c(A_COND_AL, r)

#define emith_jump_ctx_c(cond, offs) \
	EOP_LDR_IMM2(cond,15,CONTEXT_REG,offs)

#define emith_jump_ctx(offs) \
	emith_jump_ctx_c(A_COND_AL, offs)

#define emith_call_cond(cond, target) \
	emith_xbranch(cond, target, 1)

#define emith_call(target) \
	emith_call_cond(A_COND_AL, target)

#define emith_call_ctx(offs) { \
	emith_move_r_r(14, 15); /* pc reads 2 insns ahead: lr = return addr */ \
	emith_jump_ctx(offs); \
}

#define emith_ret_c(cond) \
	emith_jump_reg_c(cond, 14)

#define emith_ret() \
	emith_ret_c(A_COND_AL)

#define emith_ret_to_ctx(offs) \
	emith_ctx_write(14, offs)

#define emith_push_ret() \
	EOP_STMFD_SP(A_R14M)

#define emith_pop_and_ret() \
	EOP_LDMFD_SP(A_R15M)

#define host_instructions_updated(base, end) \
	cache_flush_d_inval_i(base, end)

#define host_arg2reg(rd, arg) \
	rd = arg

/* SH2 drc specific */
#define emith_sh2_drc_entry() \
	EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R14M)

#define emith_sh2_drc_exit() \
	EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R15M)

#define emith_sh2_wcall(a, tab, ret_ptr) { \
	int val_ = (char *)(ret_ptr) - (char *)tcache_ptr - 2*4; \
	if (val_ >= 0) \
		emith_add_r_r_imm(14, 15, val_); /* lr = ret_ptr */ \
	else \
		emith_sub_r_r_imm(14, 15, -val_); \
	emith_lsr(12, a, SH2_WRITE_SHIFT); \
	EOP_LDR_REG_LSL(A_COND_AL,12,tab,12,2); \
	emith_ctx_read(2, offsetof(SH2, is_slave)); \
	emith_jump_reg(12); \
}

#define emith_sh2_dtbf_loop() { \
	int cr, rn; \
	int tmp_ = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
	emith_sub_r_imm(rn, 1);                /* sub rn, #1 */ \
	emith_bic_r_imm(cr, 1);                /* bic cr, #1 */ \
	emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
	cycles = 0; \
	emith_asrf(tmp_, cr, 2+12);            /* movs tmp_, cr, asr #2+12 */ \
	EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0);     /* movmi tmp_, #0 */ \
	emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */ \
	emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */ \
	emith_subf_r_r(rn, tmp_);              /* subs rn, tmp_ */ \
	EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0);  /* rsbls tmp_, rn, #0 */ \
	EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */ \
	EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */ \
	EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */ \
	rcache_free_tmp(tmp_); \
}

#define emith_write_sr(sr, srcr) { \
	emith_lsr(sr, sr, 10); \
	emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
	emith_ror(sr, sr, 22); \
}
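
// How the splice works: sr>>10 drops the flag bits, the ORR places srcr's low
// 10 bits above them (bits 22..31), and the ROR by 22 puts everything back,
// so the net effect is sr = (sr & ~0x3ff) | (srcr & 0x3ff).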

#define emith_carry_to_t(srr, is_sub) { \
	if (is_sub) { /* has inverted C on ARM */ \
		emith_or_r_imm_c(A_COND_CC, srr, 1); \
		emith_bic_r_imm_c(A_COND_CS, srr, 1); \
	} else { \
		emith_or_r_imm_c(A_COND_CS, srr, 1); \
		emith_bic_r_imm_c(A_COND_CC, srr, 1); \
	} \
}

#define emith_tpop_carry(sr, is_sub) { \
	if (is_sub) \
		emith_eor_r_imm(sr, 1); \
	emith_lsrf(sr, sr, 1); \
}

#define emith_tpush_carry(sr, is_sub) { \
	emith_adc_r_r(sr, sr); \
	if (is_sub) \
		emith_eor_r_imm(sr, 1); \
}

/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) { \
	void *jmp0, *jmp1; \
	emith_tst_r_imm(sr, Q);    /* if (Q ^ M) */ \
	JMP_POS(jmp0);             /* beq do_sub */ \
	emith_addf_r_r(rn, rm); \
	emith_eor_r_imm_c(A_COND_CS, sr, T); \
	JMP_POS(jmp1);             /* b done */ \
	JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */ \
	emith_subf_r_r(rn, rm); \
	emith_eor_r_imm_c(A_COND_CC, sr, T); \
	JMP_EMIT(A_COND_AL, jmp1); /* done: */ \
}