// Basic macros to emit ARM instructions and some utils

// (c) Copyright 2008-2009, Grazvydas "notaz" Ignotas
// Free for non-commercial use.

#define CONTEXT_REG 7

// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
	do { \
		*(u32 *)ptr = x; \
		ptr = (void *)((u8 *)ptr + sizeof(u32)); \
		COUNT_OP; \
	} while (0)

#define EMIT(x) EMIT_PTR(tcache_ptr, x)
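/*
 * Usage sketch (assuming tcache_ptr points into a writable code buffer):
 * EMIT(0xe1a00000) stores "mov r0, r0" (the canonical ARM nop) at
 * tcache_ptr and advances it by one 32-bit word.
 */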

#define A_R4M  (1 << 4)
#define A_R5M  (1 << 5)
#define A_R6M  (1 << 6)
#define A_R7M  (1 << 7)
#define A_R8M  (1 << 8)
#define A_R9M  (1 << 9)
#define A_R10M (1 << 10)
#define A_R11M (1 << 11)
#define A_R14M (1 << 14)

#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_HS 0x2
#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_VS 0x6
#define A_COND_VC 0x7
#define A_COND_HI 0x8
#define A_COND_LS 0x9
#define A_COND_GE 0xa
#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
#define A_COND_CS A_COND_HS
#define A_COND_CC A_COND_LO

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
#define DCOND_HI A_COND_HI
#define DCOND_HS A_COND_HS
#define DCOND_LO A_COND_LO
#define DCOND_GE A_COND_GE
#define DCOND_GT A_COND_GT
#define DCOND_LT A_COND_LT
#define DCOND_LS A_COND_LS
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC

/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3

#define A_AM1_IMM(ror2,imm8)                  (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm)        (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
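/*
 * An AM1 immediate is an 8-bit value rotated right by 2*ror2 bits;
 * e.g. A_AM1_IMM(10, 0x01) encodes 0x01 ror 20 = 0x00001000.
 */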

/* data processing op */
#define A_OP_AND 0x0
#define A_OP_EOR 0x1
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_ADC 0x5
#define A_OP_SBC 0x6
#define A_OP_RSC 0x7
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
#define A_OP_MVN 0xf

#define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
	EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))

#define EOP_C_DOP_IMM(     cond,op,s,rn,rd,ror2,imm8)             EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8))
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm))
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs,       shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs,       shift_op,rm))
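/*
 * Worked example: EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,1,0,0,2) emits
 * 0xe2810002, which disassembles to "add r0, r1, #2".
 */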

#define EOP_MOV_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_MVN_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_IMM_C(cond,rd,   ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_MVN_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_CMP_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd,   rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm)           EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

#define EOP_ORR_REG_SIMPLE(rd,rm)           EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm)           EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)

#define EOP_TST_REG_SIMPLE(rd,rm)           EOP_TST_REG(A_COND_AL,  rd,  rm,A_AM1_LSL,0)

#define EOP_MOV_REG2_LSL(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs)       EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs)       EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)

/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
	EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12))

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
	EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
			((s)<<6) | ((h)<<5) | (immed_reg))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))

#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm)       EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)
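/*
 * AM3 immediates carry an 8-bit offset split into two nibbles (immH:immL);
 * e.g. EOP_C_AM3_IMM(A_COND_AL,1,1,1,0,0,1,0x24) emits 0xe1d102b4,
 * i.e. "ldrh r0, [r1, #0x24]".
 */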

/* ldr and str */
#define EOP_LDR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12)
#define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12)
#define EOP_LDR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12)
#define EOP_STR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

#define EOP_LDRH_IMM(   rd,rn,offset_8)  EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8)
#define EOP_LDRH_SIMPLE(rd,rn)           EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG(   rd,rn,rm)        EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM(   rd,rn,offset_8)  EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8)
#define EOP_STRH_SIMPLE(rd,rn)           EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG(   rd,rn,rm)        EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)

/* ldm and stm */
#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
	EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list))

#define EOP_STMFD_ST(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list)
#define EOP_LDMFD_ST(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list)

/* branches */
#define EOP_C_BX(cond,rm) \
	EMIT(((cond)<<28) | 0x012fff10 | (rm))

#define EOP_BX(rm) EOP_C_BX(A_COND_AL,rm)

#define EOP_C_B(cond,l,signed_immed_24) \
	EMIT(((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)

/* misc */
#define EOP_C_MUL(cond,s,rd,rs,rm) \
	EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
	EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
	EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
	EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f

#define EOP_MRS(rd)           EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm)       EOP_C_MSR_REG(A_COND_AL,rm)


static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
	int ror2;
	u32 v;

	if (op == A_OP_MOV)
		rn = 0;
	else if (imm == 0)
		return;

	for (v = imm, ror2 = 0; v != 0 || op == A_OP_MOV; v >>= 8, ror2 -= 8/2) {
		/* shift down to get 'best' ror2 */
		for (; v && !(v & 3); v >>= 2)
			ror2--;

		EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);

		if (op == A_OP_MOV) {
			op = A_OP_ORR;
			rn = rd;
		}
	}
}
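/*
 * Example: 0x00ff00ff has no single-insn AM1 encoding, so
 * emith_op_imm2(A_COND_AL, 0, A_OP_MOV, 0, 0, 0x00ff00ff) expands to
 * "mov r0, #0xff" followed by "orr r0, r0, #0xff0000".
 */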

#define emith_op_imm(cond, s, op, r, imm) \
	emith_op_imm2(cond, s, op, r, r, imm)

// test op
#define emith_top_imm(cond, op, r, imm) { \
	u32 ror2, v; \
	for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
		ror2--; \
	EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
}

#define is_offset_24(val) \
	((val) >= (int)0xff000000 && (val) <= 0x00ffffff)

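/*
 * b/bl immediates are word offsets relative to the pipelined pc (current
 * insn + 8 bytes), hence the "- 2" below; a branch to the immediately
 * following insn encodes signed_immed_24 = -1 (0xffffff).
 */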
static int emith_xbranch(int cond, void *target, int is_call)
{
	int val = (u32 *)target - (u32 *)tcache_ptr - 2;
	int direct = is_offset_24(val);
	u32 *start_ptr = (u32 *)tcache_ptr;

	if (direct)
	{
		EOP_C_B(cond,is_call,val & 0xffffff);		// b, bl target
	}
	else
	{
#ifdef __EPOC32__
//		elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
		if (is_call)
			EOP_ADD_IMM(14,15,0,8);			// add lr,pc,#8
		EOP_C_AM2_IMM(cond,1,0,1,15,15,0);		// ldrcc pc,[pc]
		EOP_MOV_REG_SIMPLE(15,15);			// mov pc, pc
		EMIT((u32)target);
#else
		// should never happen
		elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", target, tcache_ptr);
		exit(1);
#endif
	}

	return (u32 *)tcache_ptr - start_ptr;
}


// fake "simple" or "short" jump - using cond insns instead
#define EMITH_SJMP_START(cond) \
	(void)(cond)

#define EMITH_SJMP_END(cond) \
	(void)(cond)

#define emith_move_r_r(d, s) \
	EOP_MOV_REG_SIMPLE(d, s)

#define emith_mvn_r_r(d, s) \
	EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)

#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_or_r_r_r(d, s1, s2) \
	emith_or_r_r_r_lsl(d, s1, s2, 0)

#define emith_eor_r_r_r(d, s1, s2) \
	emith_eor_r_r_r_lsl(d, s1, s2, 0)

#define emith_add_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_sub_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_and_r_r(d, s) \
	EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_or_r_r(d, s) \
	emith_or_r_r_r(d, d, s)

#define emith_eor_r_r(d, s) \
	emith_eor_r_r_r(d, d, s)

#define emith_tst_r_r(d, s) \
	EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_teq_r_r(d, s) \
	EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_cmp_r_r(d, s) \
	EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_addf_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_subf_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_adcf_r_r(d, s) \
	EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_sbcf_r_r(d, s) \
	EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)

#define emith_bic_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)

#define emith_and_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)

// note: only use 8bit imm for these
#define emith_tst_r_imm(r, imm) \
	emith_top_imm(A_COND_AL, A_OP_TST, r, imm)

#define emith_cmp_r_imm(r, imm) \
	emith_top_imm(A_COND_AL, A_OP_CMP, r, imm)

#define emith_subf_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_add_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_SUB, r, imm)

#define emith_or_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ORR, r, imm)

#define emith_bic_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_BIC, r, imm)

#define emith_move_r_imm_s8(r, imm) { \
	if ((imm) & 0x80) \
		EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \
	else \
		EOP_MOV_IMM(r, 0, imm); \
}
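/* e.g. emith_move_r_imm_s8(r, 0xfe) emits "mvn r, #1", i.e. r = -2 */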

#define emith_and_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)

#define emith_neg_r_r(d, s) \
	EOP_RSB_IMM(d, s, 0, 0)

#define emith_lsl(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)

#define emith_lsr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)

#define emith_ror(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,cnt)

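/* note: cnt must be 1..31; 32-(cnt) with cnt==0 would not fit the 5-bit shift_imm field */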
#define emith_rol(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))

#define emith_lslf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)

#define emith_lsrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)

#define emith_asrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

// note: only C flag updated correctly
#define emith_rolf(d, s, cnt) { \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
	/* we don't have ROL so we shift to get the right carry */ \
	EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
}

#define emith_rorf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)

#define emith_rolcf(d) \
	emith_adcf_r_r(d, d)

#define emith_rorcf(d) \
	EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */

#define emith_negcf_r_r(d, s) \
	EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)

#define emith_mul(d, s1, s2) { \
	if ((d) != (s1)) /* rd != rm limitation */ \
		EOP_MUL(d, s1, s2); \
	else \
		EOP_MUL(d, s2, s1); \
}

#define emith_mul_u64(dlo, dhi, s1, s2) \
	EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mul_s64(dlo, dhi, s1, s2) \
	EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)

// misc
#define emith_ctx_read(r, offs) \
	EOP_LDR_IMM(r, CONTEXT_REG, offs)

#define emith_ctx_write(r, offs) \
	EOP_STR_IMM(r, CONTEXT_REG, offs)

#define emith_clear_msb(d, s, count) { \
	u32 t; \
	if ((count) <= 8) { \
		t = 8 - (count); \
		t = (0xff << t) & 0xff; \
		EOP_BIC_IMM(d,s,8/2,t); \
	} else if ((count) >= 24) { \
		t = (count) - 24; \
		t = 0xff >> t; \
		EOP_AND_IMM(d,s,0,t); \
	} else { \
		EOP_MOV_REG_LSL(d,s,count); \
		EOP_MOV_REG_LSR(d,d,count); \
	} \
}
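/* e.g. emith_clear_msb(d, s, 8) emits "bic d, s, #0xff000000" */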

#define emith_sext(d, s, bits) { \
	EOP_MOV_REG_LSL(d,s,32 - (bits)); \
	EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}
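/* e.g. emith_sext(d, s, 16) sign-extends the low 16 bits of s into d */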

// put bit0 of r0 to carry
#define emith_set_carry(r0) \
	EOP_TST_REG(A_COND_AL,r0,r0,A_AM1_LSR,1) /* shift out to carry */

// put bit0 of r0 to carry (for subtraction, inverted on ARM)
#define emith_set_carry_sub(r0) { \
	int t = rcache_get_tmp(); \
	EOP_EOR_IMM(t,r0,0,1); /* invert */ \
	EOP_MOV_REG(A_COND_AL,1,t,t,A_AM1_LSR,1); /* shift out to carry */ \
	rcache_free_tmp(t); \
}

#define host_arg2reg(rd, arg) \
	rd = arg

// up to 4 args
#define emith_pass_arg_r(arg, reg) \
	EOP_MOV_REG_SIMPLE(arg, reg)

#define emith_pass_arg_imm(arg, imm) \
	emith_move_r_imm(arg, imm)

#define emith_call_cond(cond, target) \
	emith_xbranch(cond, target, 1)

#define emith_jump_cond(cond, target) \
	emith_xbranch(cond, target, 0)

#define emith_call(target) \
	emith_call_cond(A_COND_AL, target)

#define emith_jump(target) \
	emith_jump_cond(A_COND_AL, target)
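
/*
 * Illustrative only: a minimal sketch of emitting a tiny function with the
 * macros above, assuming tcache_ptr and COUNT_OP are set up by the drc;
 * example_emit_inc is a hypothetical name, not part of this file's API.
 */
#if 0
static void example_emit_inc(void)
{
	EOP_ADD_IMM(0, 0, 0, 1);	/* add r0, r0, #1 */
	EOP_BX(14);			/* bx lr */
}
#endif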

/* SH2 drc specific */
#define emith_sh2_test_t() { \
	int r = rcache_get_reg(SHR_SR, RC_GR_READ); \
	EOP_TST_IMM(r, 0, 1); \
}

#define emith_sh2_dtbf_loop() { \
	int cr, rn;                                                          \
	int tmp_ = rcache_get_tmp();                                         \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW);                              \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);                    \
	emith_sub_r_imm(rn, 1);                /* sub rn, #1 */              \
	emith_bic_r_imm(cr, 1);                /* bic cr, #1 */              \
	emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
	cycles = 0;                                                          \
	emith_asrf(tmp_, cr, 2+12);            /* movs tmp_, cr, asr #2+12 */\
	EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0);     /* movmi tmp_, #0 */          \
	emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */     \
	emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */     \
	emith_subf_r_r(rn, tmp_);              /* subs rn, tmp_ */           \
	EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0);  /* rsbls tmp_, rn, #0 */      \
	EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */\
	EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */            \
	EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */            \
	rcache_free_tmp(tmp_);                                               \
}

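/* replace the low 12 bits of SR with srcr's, keeping the upper 20 bits */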
#define emith_write_sr(srcr) { \
	int srr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	emith_lsr(srr, srr, 12); \
	emith_or_r_r_r_lsl(srr, srr, srcr, 20); \
	emith_ror(srr, srr, 20); \
}

#define emith_carry_to_t(srr, is_sub) { \
	if (is_sub) { /* has inverted C on ARM */ \
		emith_or_r_imm_c(A_COND_CC, srr, 1); \
		emith_bic_r_imm_c(A_COND_CS, srr, 1); \
	} else { \
		emith_or_r_imm_c(A_COND_CS, srr, 1); \
		emith_bic_r_imm_c(A_COND_CC, srr, 1); \
	} \
}