32x: drc: self-reentrant blocks
[picodrive.git] cpu/drc/emit_arm.c
1 // Basic macros to emit ARM instructions and some utils
2
3 // (c) Copyright 2008-2009, Grazvydas "notaz" Ignotas
4 // Free for non-commercial use.
5
6 #define CONTEXT_REG 11
7
8 // XXX: tcache_ptr type for SVP and SH2 compilers differs..
9 #define EMIT_PTR(ptr, x) \
10         do { \
11                 *(u32 *)ptr = x; \
12                 ptr = (void *)((u8 *)ptr + sizeof(u32)); \
13                 COUNT_OP; \
14         } while (0)
15
16 #define EMIT(x) EMIT_PTR(tcache_ptr, x)
17
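/*
 * Illustrative sketch (not from the original source): EMIT() writes one
 * 32-bit opcode at tcache_ptr and advances it, so a code generator simply
 * calls the EOP_* macros below in sequence, e.g.
 *
 *   EMIT(0xe1a00000);          // mov r0, r0  (nop), raw word
 *   EOP_MOV_REG_SIMPLE(0, 1);  // mov r0, r1, built from the macros below
 *
 * after which tcache_ptr has moved on by 8 bytes.
 */
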
18 #define A_R4M  (1 << 4)
19 #define A_R5M  (1 << 5)
20 #define A_R6M  (1 << 6)
21 #define A_R7M  (1 << 7)
22 #define A_R8M  (1 << 8)
23 #define A_R9M  (1 << 9)
24 #define A_R10M (1 << 10)
25 #define A_R11M (1 << 11)
26 #define A_R14M (1 << 14)
27 #define A_R15M (1 << 15)
28
29 #define A_COND_AL 0xe
30 #define A_COND_EQ 0x0
31 #define A_COND_NE 0x1
32 #define A_COND_HS 0x2
33 #define A_COND_LO 0x3
34 #define A_COND_MI 0x4
35 #define A_COND_PL 0x5
36 #define A_COND_VS 0x6
37 #define A_COND_VC 0x7
38 #define A_COND_HI 0x8
39 #define A_COND_LS 0x9
40 #define A_COND_GE 0xa
41 #define A_COND_LT 0xb
42 #define A_COND_GT 0xc
43 #define A_COND_LE 0xd
44 #define A_COND_CS A_COND_HS
45 #define A_COND_CC A_COND_LO
46
47 /* unified conditions */
48 #define DCOND_EQ A_COND_EQ
49 #define DCOND_NE A_COND_NE
50 #define DCOND_MI A_COND_MI
51 #define DCOND_PL A_COND_PL
52 #define DCOND_HI A_COND_HI
53 #define DCOND_HS A_COND_HS
54 #define DCOND_LO A_COND_LO
55 #define DCOND_GE A_COND_GE
56 #define DCOND_GT A_COND_GT
57 #define DCOND_LT A_COND_LT
58 #define DCOND_LS A_COND_LS
59 #define DCOND_LE A_COND_LE
60 #define DCOND_VS A_COND_VS
61 #define DCOND_VC A_COND_VC
62
63 /* addressing mode 1 */
64 #define A_AM1_LSL 0
65 #define A_AM1_LSR 1
66 #define A_AM1_ASR 2
67 #define A_AM1_ROR 3
68
69 #define A_AM1_IMM(ror2,imm8)                  (((ror2)<<8) | (imm8) | 0x02000000)
70 #define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
71 #define A_AM1_REG_XREG(rs,shift_op,rm)        (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
72
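/*
 * Example (added for illustration): the AM1 immediate form holds an 8-bit
 * value rotated right by 2*ror2, so only constants expressible that way fit
 * a single instruction:
 *
 *   A_AM1_IMM(0, 0x7f)  -> #0x7f
 *   A_AM1_IMM(4, 0xff)  -> #0xff000000   (0xff ror 8)
 *   A_AM1_IMM(12, 1)    -> #0x100        (0x01 ror 24)
 *
 * Constants that do not fit are split into several instructions by
 * emith_op_imm2() further down.
 */
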
73 /* data processing op */
74 #define A_OP_AND 0x0
75 #define A_OP_EOR 0x1
76 #define A_OP_SUB 0x2
77 #define A_OP_RSB 0x3
78 #define A_OP_ADD 0x4
79 #define A_OP_ADC 0x5
80 #define A_OP_SBC 0x6
81 #define A_OP_RSC 0x7
82 #define A_OP_TST 0x8
83 #define A_OP_TEQ 0x9
84 #define A_OP_CMP 0xa
85 #define A_OP_CMN 0xb
86 #define A_OP_ORR 0xc
87 #define A_OP_MOV 0xd
88 #define A_OP_BIC 0xe
89 #define A_OP_MVN 0xf
90
91 #define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
92         EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))
93
94 #define EOP_C_DOP_IMM(     cond,op,s,rn,rd,ror2,imm8)             EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8))
95 #define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm))
96 #define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs,       shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs,       shift_op,rm))
97
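/*
 * Worked example (added for illustration): "add r0, r1, #1" is cond=0xe,
 * op=A_OP_ADD (0x4), s=0, rn=1, rd=0, shifter_op=A_AM1_IMM(0,1)=0x02000001,
 * so EOP_ADD_IMM(0,1,0,1) below emits
 *
 *   0xe0000000 | (0x4<<21) | (1<<16) | 0x02000001  ==  0xe2810001
 *
 * which disassembles back to "add r0, r1, #1".
 */
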
98 #define EOP_MOV_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
99 #define EOP_MVN_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
100 #define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
101 #define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
102 #define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
103 #define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
104 #define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
105 #define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
106 #define EOP_TST_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
107 #define EOP_CMP_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
108 #define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)
109
110 #define EOP_MOV_IMM_C(cond,rd,   ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
111 #define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
112 #define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)
113
114 #define EOP_MOV_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
115 #define EOP_MVN_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
116 #define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
117 #define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
118 #define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
119 #define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
120 #define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
121 #define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
122 #define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
123 #define EOP_CMP_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
124 #define EOP_TST_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
125 #define EOP_TEQ_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)
126
127 #define EOP_MOV_REG2(s,rd,   rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
128 #define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
129 #define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)
130
131 #define EOP_MOV_REG_SIMPLE(rd,rm)           EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
132 #define EOP_MOV_REG_LSL(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
133 #define EOP_MOV_REG_LSR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
134 #define EOP_MOV_REG_ASR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
135 #define EOP_MOV_REG_ROR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)
136
137 #define EOP_ORR_REG_SIMPLE(rd,rm)           EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
138 #define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
139 #define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
140 #define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
141 #define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)
142
143 #define EOP_ADD_REG_SIMPLE(rd,rm)           EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
144 #define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
145 #define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
146
147 #define EOP_TST_REG_SIMPLE(rn,rm)           EOP_TST_REG(A_COND_AL,  rn,  rm,A_AM1_LSL, 0)
148
149 #define EOP_MOV_REG2_LSL(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_LSL,rs)
150 #define EOP_MOV_REG2_ROR(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_ROR,rs)
151 #define EOP_ADD_REG2_LSL(rd,rn,rm,rs)       EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
152 #define EOP_SUB_REG2_LSL(rd,rn,rm,rs)       EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
153
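/*
 * Example (added for illustration): the register forms fold a shift into the
 * second operand, so one ARM instruction does shift-and-operate:
 *
 *   EOP_MOV_REG_LSL(0, 1, 2);      // mov r0, r1, lsl #2   -> 0xe1a00101
 *   EOP_ADD_REG_LSR(2, 2, 3, 16);  // add r2, r2, r3, lsr #16
 */
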
154 /* addressing mode 2 */
155 #define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
156         EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12))
157
158 /* addressing mode 3 */
159 #define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
160         EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
161                         ((s)<<6) | ((h)<<5) | (immed_reg))
162
163 #define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))
164
165 #define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm)       EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)
166
167 /* ldr and str */
168 #define EOP_LDR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12)
169 #define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12)
170 #define EOP_LDR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
171 #define EOP_STR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12)
172 #define EOP_STR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)
173
174 #define EOP_LDRH_IMM(   rd,rn,offset_8)  EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8)
175 #define EOP_LDRH_SIMPLE(rd,rn)           EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
176 #define EOP_LDRH_REG(   rd,rn,rm)        EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
177 #define EOP_STRH_IMM(   rd,rn,offset_8)  EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8)
178 #define EOP_STRH_SIMPLE(rd,rn)           EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
179 #define EOP_STRH_REG(   rd,rn,rm)        EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)
180
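/*
 * Example (added for illustration):
 *
 *   EOP_LDR_IMM(0, 11, 8);   // ldr  r0, [r11, #8]  -> 0xe59b0008
 *   EOP_STRH_SIMPLE(1, 2);   // strh r1, [r2]
 *
 * AM2 carries a 12-bit byte offset; the AM3 halfword forms only get 8 bits,
 * split into two nibbles, which EOP_C_AM3_IMM reassembles above.
 */
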
181 /* ldm and stm */
182 #define EOP_XXM(cond,p,u,s,w,l,rn,list) \
183         EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list))
184
185 #define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
186 #define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)
187
188 #define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list)
189 #define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list)
190
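/*
 * Example (added for illustration): the FD forms are the usual
 * full-descending-stack push/pop:
 *
 *   EOP_STMFD_SP(A_R4M|A_R14M);  // stmfd sp!, {r4, lr}  -> 0xe92d4010
 *   EOP_LDMFD_SP(A_R4M|A_R15M);  // ldmfd sp!, {r4, pc}
 *
 * emith_sh2_drc_entry()/_exit() near the end of this file use exactly this
 * to save and restore the callee-saved registers.
 */
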
191 /* branches */
192 #define EOP_C_BX(cond,rm) \
193         EMIT(((cond)<<28) | 0x012fff10 | (rm))
194
195 #define EOP_BX(rm) EOP_C_BX(A_COND_AL,rm)
196
197 #define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
198         EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))
199
200 #define EOP_C_B(cond,l,signed_immed_24) \
201         EOP_C_B_PTR(tcache_ptr,cond,l,signed_immed_24)
202
203 #define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
204 #define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)
205
206 /* misc */
207 #define EOP_C_MUL(cond,s,rd,rs,rm) \
208         EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))
209
210 #define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
211         EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))
212
213 #define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
214         EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))
215
216 #define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
217         EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))
218
219 #define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm
220
221 #define EOP_C_MRS(cond,rd) \
222         EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12))
223
224 #define EOP_C_MSR_IMM(cond,ror2,imm) \
225         EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f
226
227 #define EOP_C_MSR_REG(cond,rm) \
228         EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f
229
230 #define EOP_MRS(rd)           EOP_C_MRS(A_COND_AL,rd)
231 #define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
232 #define EOP_MSR_REG(rm)       EOP_C_MSR_REG(A_COND_AL,rm)
233
234
235 static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
236 {
237         int ror2;
238         u32 v;
239
240         if (op == A_OP_MOV) {
241                 rn = 0;
242                 if (~imm < 0x100) {
243                         imm = ~imm;
244                         op = A_OP_MVN;
245                 }
246         } else if (imm == 0)
247                 return;
248
249         for (v = imm, ror2 = 0; v != 0 || op == A_OP_MOV; v >>= 8, ror2 -= 8/2) {
250                 /* shift down to get 'best' ror2 */
251                 for (; v && !(v & 3); v >>= 2)
252                         ror2--;
253
254                 EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);
255
256                 if (op == A_OP_MOV) {
257                         op = A_OP_ORR;
258                         rn = rd;
259                 }
260         }
261 }
262
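/*
 * Worked example (added for illustration): emith_op_imm2() consumes the
 * constant 8 bits at a time, rotating each chunk into an encodable AM1
 * immediate, so a value that does not fit one instruction becomes a MOV
 * followed by ORRs.  For emith_move_r_imm(rd, 0x12345678), defined below,
 * it emits:
 *
 *   mov rd, #0x278
 *   orr rd, rd, #0x5400
 *   orr rd, rd, #0x2340000
 *   orr rd, rd, #0x10000000
 *
 * (the four immediates OR together to 0x12345678).  Small values and values
 * whose complement fits in 8 bits come out as a single MOV or MVN.
 */
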
263 #define emith_op_imm(cond, s, op, r, imm) \
264         emith_op_imm2(cond, s, op, r, r, imm)
265
266 // test op
267 #define emith_top_imm(cond, op, r, imm) do { \
268         u32 ror2, v; \
269         for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
270                 ror2--; \
271         EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
272 } while (0)
273
274 #define is_offset_24(val) \
275         ((val) >= (int)0xff000000 && (val) <= 0x00ffffff)
276
277 static int emith_xbranch(int cond, void *target, int is_call)
278 {
279         int val = (u32 *)target - (u32 *)tcache_ptr - 2;
280         int direct = is_offset_24(val);
281         u32 *start_ptr = (u32 *)tcache_ptr;
282
283         if (direct)
284         {
285                 EOP_C_B(cond,is_call,val & 0xffffff);           // b, bl target
286         }
287         else
288         {
289 #ifdef __EPOC32__
290 //              elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
291                 if (is_call)
292                         EOP_ADD_IMM(14,15,0,8);                 // add lr,pc,#8
293                 EOP_C_AM2_IMM(cond,1,0,1,15,15,0);              // ldrcc pc,[pc]
294                 EOP_MOV_REG_SIMPLE(15,15);                      // mov pc, pc
295                 EMIT((u32)target);
296 #else
297                 // should never happen
298                 elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", target, tcache_ptr);
299                 exit(1);
300 #endif
301         }
302
303         return (u32 *)tcache_ptr - start_ptr;
304 }
305
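/*
 * Note on the offset math (added for illustration): an ARM branch holds a
 * signed word offset relative to PC, and PC reads as the instruction address
 * plus 8, hence the "- 2" words above (and in JMP_EMIT()/emith_jump_patch()
 * below).  A branch at 0x1000 targeting 0x1010 therefore encodes
 *
 *   val = (0x1010 - 0x1000)/4 - 2 = 2   ->   "b 0x1010" == 0xea000002
 */
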
306 #define JMP_POS(ptr) \
307         ptr = tcache_ptr; \
308         tcache_ptr += sizeof(u32)
309
310 #define JMP_EMIT(cond, ptr) { \
311         int val = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
312         EOP_C_B_PTR(ptr, cond, 0, val & 0xffffff); \
313 }
314
315 #define EMITH_JMP_START(cond) { \
316         void *cond_ptr; \
317         JMP_POS(cond_ptr)
318
319 #define EMITH_JMP_END(cond) \
320         JMP_EMIT(cond, cond_ptr); \
321 }
322
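/*
 * Usage sketch (added for illustration): JMP_POS() reserves one word in the
 * tcache and JMP_EMIT() later back-patches it with a branch to the current
 * tcache_ptr, so a forward "skip this code when cond holds" looks like:
 *
 *   EMITH_JMP_START(DCOND_EQ);   // branch over the body if EQ
 *   ... emit the not-equal case ...
 *   EMITH_JMP_END(DCOND_EQ);     // patch the reserved slot to land here
 */
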
323 // fake "simple" or "short" jump - using cond insns instead
324 #define EMITH_SJMP_START(cond) \
325         (void)(cond)
326
327 #define EMITH_SJMP_END(cond) \
328         (void)(cond)
329
330 #define emith_move_r_r(d, s) \
331         EOP_MOV_REG_SIMPLE(d, s)
332
333 #define emith_mvn_r_r(d, s) \
334         EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)
335
336 #define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
337         EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
338
339 #define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
340         EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
341
342 #define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
343         EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)
344
345 #define emith_or_r_r_lsl(d, s, lslimm) \
346         emith_or_r_r_r_lsl(d, d, s, lslimm)
347
348 #define emith_eor_r_r_lsr(d, s, lsrimm) \
349         emith_eor_r_r_r_lsr(d, d, s, lsrimm)
350
351 #define emith_or_r_r_r(d, s1, s2) \
352         emith_or_r_r_r_lsl(d, s1, s2, 0)
353
354 #define emith_eor_r_r_r(d, s1, s2) \
355         emith_eor_r_r_r_lsl(d, s1, s2, 0)
356
357 #define emith_add_r_r(d, s) \
358         EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
359
360 #define emith_sub_r_r(d, s) \
361         EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
362
363 #define emith_adc_r_r(d, s) \
364         EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
365
366 #define emith_and_r_r(d, s) \
367         EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
368
369 #define emith_or_r_r(d, s) \
370         emith_or_r_r_r(d, d, s)
371
372 #define emith_eor_r_r(d, s) \
373         emith_eor_r_r_r(d, d, s)
374
375 #define emith_tst_r_r(d, s) \
376         EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)
377
378 #define emith_teq_r_r(d, s) \
379         EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)
380
381 #define emith_cmp_r_r(d, s) \
382         EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)
383
384 #define emith_addf_r_r(d, s) \
385         EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
386
387 #define emith_subf_r_r(d, s) \
388         EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
389
390 #define emith_adcf_r_r(d, s) \
391         EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
392
393 #define emith_sbcf_r_r(d, s) \
394         EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
395
396 #define emith_eorf_r_r(d, s) \
397         EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
398
399 #define emith_move_r_imm(r, imm) \
400         emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)
401
402 #define emith_add_r_imm(r, imm) \
403         emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)
404
405 #define emith_sub_r_imm(r, imm) \
406         emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)
407
408 #define emith_bic_r_imm(r, imm) \
409         emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)
410
411 #define emith_and_r_imm(r, imm) \
412         emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)
413
414 #define emith_or_r_imm(r, imm) \
415         emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)
416
417 #define emith_eor_r_imm(r, imm) \
418         emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)
419
420 // note: only use 8bit imm for these
421 #define emith_tst_r_imm(r, imm) \
422         emith_top_imm(A_COND_AL, A_OP_TST, r, imm)
423
424 #define emith_cmp_r_imm(r, imm) { \
425         u32 op = A_OP_CMP, imm_ = imm; \
426         if (~imm_ < 0x100) { \
427                 imm_ = ~imm_; \
428                 op = A_OP_CMN; \
429         } \
430         emith_top_imm(A_COND_AL, op, r, imm_); \
431 }
432
433 #define emith_subf_r_imm(r, imm) \
434         emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)
435
436 #define emith_move_r_imm_c(cond, r, imm) \
437         emith_op_imm(cond, 0, A_OP_MOV, r, imm)
438
439 #define emith_add_r_imm_c(cond, r, imm) \
440         emith_op_imm(cond, 0, A_OP_ADD, r, imm)
441
442 #define emith_sub_r_imm_c(cond, r, imm) \
443         emith_op_imm(cond, 0, A_OP_SUB, r, imm)
444
445 #define emith_or_r_imm_c(cond, r, imm) \
446         emith_op_imm(cond, 0, A_OP_ORR, r, imm)
447
448 #define emith_eor_r_imm_c(cond, r, imm) \
449         emith_op_imm(cond, 0, A_OP_EOR, r, imm)
450
451 #define emith_bic_r_imm_c(cond, r, imm) \
452         emith_op_imm(cond, 0, A_OP_BIC, r, imm)
453
454 #define emith_move_r_imm_s8(r, imm) { \
455         if ((imm) & 0x80) \
456                 EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \
457         else \
458                 EOP_MOV_IMM(r, 0, imm); \
459 }
460
461 #define emith_and_r_r_imm(d, s, imm) \
462         emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)
463
464 #define emith_neg_r_r(d, s) \
465         EOP_RSB_IMM(d, s, 0, 0)
466
467 #define emith_lsl(d, s, cnt) \
468         EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)
469
470 #define emith_lsr(d, s, cnt) \
471         EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)
472
473 #define emith_asr(d, s, cnt) \
474         EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)
475
476 #define emith_ror(d, s, cnt) \
477         EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,cnt)
478
479 #define emith_rol(d, s, cnt) \
480         EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))
481
482 #define emith_lslf(d, s, cnt) \
483         EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)
484
485 #define emith_lsrf(d, s, cnt) \
486         EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)
487
488 #define emith_asrf(d, s, cnt) \
489         EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)
490
491 // note: only C flag updated correctly
492 #define emith_rolf(d, s, cnt) { \
493         EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
494         /* we don't have ROL so we shift to get the right carry */ \
495         EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
496 }
497
498 #define emith_rorf(d, s, cnt) \
499         EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)
500
501 #define emith_rolcf(d) \
502         emith_adcf_r_r(d, d)
503
504 #define emith_rorcf(d) \
505         EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */
506
507 #define emith_negcf_r_r(d, s) \
508         EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)
509
510 #define emith_mul(d, s1, s2) { \
511         if ((d) != (s1)) /* rd != rm limitation */ \
512                 EOP_MUL(d, s1, s2); \
513         else \
514                 EOP_MUL(d, s2, s1); \
515 }
516
517 #define emith_mul_u64(dlo, dhi, s1, s2) \
518         EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)
519
520 #define emith_mul_s64(dlo, dhi, s1, s2) \
521         EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)
522
523 #define emith_mula_s64(dlo, dhi, s1, s2) \
524         EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)
525
526 // misc
527 #define emith_ctx_read(r, offs) \
528         EOP_LDR_IMM(r, CONTEXT_REG, offs)
529
530 #define emith_ctx_write(r, offs) \
531         EOP_STR_IMM(r, CONTEXT_REG, offs)
532
533 #define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
534         int v_, r_ = r, c_ = count, b_ = CONTEXT_REG;        \
535         for (v_ = 0; c_; c_--, r_++)                         \
536                 v_ |= 1 << r_;                               \
537         if ((offs) != 0) {                                   \
538                 EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2);\
539                 b_ = tmpr;                                   \
540         }                                                    \
541         op(b_,v_);                                           \
542 } while(0)
543
544 #define emith_ctx_read_multiple(r, offs, count, tmpr) \
545         emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)
546
547 #define emith_ctx_write_multiple(r, offs, count, tmpr) \
548         emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)
549
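/*
 * Example (added for illustration, register choices are arbitrary):
 * emith_ctx_read_multiple(4, 0x20, 3, 12) loads three consecutive host
 * registers from the context block in two instructions:
 *
 *   add   r12, r11, #0x20     @ r11 = CONTEXT_REG, offs must be word-aligned
 *   ldmia r12, {r4-r6}
 *
 * With offs == 0 the add is skipped and CONTEXT_REG is used directly.
 */
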
550 #define emith_clear_msb_c(cond, d, s, count) { \
551         u32 t; \
552         if ((count) <= 8) { \
553                 t = (count) - 8; \
554                 t = (0xff << t) & 0xff; \
556                 EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
557         } else if ((count) >= 24) { \
558                 t = (count) - 24; \
559                 t = 0xff >> t; \
561                 EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
562         } else { \
563                 EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
564                 EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
565         } \
566 }
567
568 #define emith_clear_msb(d, s, count) \
569         emith_clear_msb_c(A_COND_AL, d, s, count)
570
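/*
 * Example (added for illustration) of the three strategies above:
 *
 *   emith_clear_msb(0, 1, 24)  ->  and r0, r1, #0xff
 *   emith_clear_msb(0, 1, 8)   ->  bic r0, r1, #0xff000000
 *   emith_clear_msb(0, 1, 16)  ->  mov r0, r1, lsl #16 ; mov r0, r0, lsr #16
 */
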
571 #define emith_sext(d, s, bits) { \
572         EOP_MOV_REG_LSL(d,s,32 - (bits)); \
573         EOP_MOV_REG_ASR(d,d,32 - (bits)); \
574 }
575
576 #define host_arg2reg(rd, arg) \
577         rd = arg
578
579 // up to 4 args
580 #define emith_pass_arg_r(arg, reg) \
581         EOP_MOV_REG_SIMPLE(arg, reg)
582
583 #define emith_pass_arg_imm(arg, imm) \
584         emith_move_r_imm(arg, imm)
585
586 #define emith_call_cond(cond, target) \
587         emith_xbranch(cond, target, 1)
588
589 #define emith_jump_cond(cond, target) \
590         emith_xbranch(cond, target, 0)
591
592 #define emith_call(target) \
593         emith_call_cond(A_COND_AL, target)
594
595 #define emith_jump(target) \
596         emith_jump_cond(A_COND_AL, target)
597
598 #define emith_jump_patchable(cond) \
599         emith_jump_cond(cond, 0)
600
601 #define emith_jump_patch(ptr, target) do { \
602         u32 *ptr_ = ptr; \
603         u32 val = (u32 *)(target) - (u32 *)ptr_ - 2; \
604         *ptr_ = (*ptr_ & 0xff000000) | (val & 0x00ffffff); \
605 } while (0)
606
607 #define emith_jump_reg(r) \
608         EOP_BX(r)
609
610 /* SH2 drc specific */
611 #define emith_sh2_drc_entry() \
612         EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R14M)
613
614 #define emith_sh2_drc_exit() \
615         EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R15M)
616
617 #define emith_sh2_dtbf_loop() { \
618         int cr, rn;                                                          \
619         int tmp_ = rcache_get_tmp();                                         \
620         cr = rcache_get_reg(SHR_SR, RC_GR_RMW);                              \
621         rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);                    \
622         emith_sub_r_imm(rn, 1);                /* sub rn, #1 */              \
623         emith_bic_r_imm(cr, 1);                /* bic cr, #1 */              \
624         emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
625         cycles = 0;                                                          \
626         emith_asrf(tmp_, cr, 2+12);            /* movs tmp_, cr, asr #2+12 */\
627         EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0);     /* movmi tmp_, #0 */          \
628         emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */     \
629         emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */     \
630         emith_subf_r_r(rn, tmp_);              /* subs rn, tmp_ */           \
631         EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0);  /* rsbls tmp_, rn, #0 */      \
632         EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */\
633         EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */            \
634         EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */            \
635         rcache_free_tmp(tmp_);                                               \
636 }
637
638 #define emith_write_sr(sr, srcr) { \
639         emith_lsr(sr, sr, 10); \
640         emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
641         emith_ror(sr, sr, 22); \
642 }
643
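/*
 * What the shift sequence above does (added for illustration): it replaces
 * the low 10 bits of sr with the low 10 bits of srcr while preserving sr's
 * upper 22 bits, without needing a mask constant:
 *
 *   sr >> 10        @ upper 22 bits now at the bottom
 *   | srcr << 22    @ new low 10 bits parked at the top
 *   ror #22         @ rotate everything back into place
 */
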
644 #define emith_carry_to_t(srr, is_sub) { \
645         if (is_sub) { /* has inverted C on ARM */ \
646                 emith_or_r_imm_c(A_COND_CC, srr, 1); \
647                 emith_bic_r_imm_c(A_COND_CS, srr, 1); \
648         } else { \
649                 emith_or_r_imm_c(A_COND_CS, srr, 1); \
650                 emith_bic_r_imm_c(A_COND_CC, srr, 1); \
651         } \
652 }
653
654 #define emith_tpop_carry(sr, is_sub) {  \
655         if (is_sub)                     \
656                 emith_eor_r_imm(sr, 1); \
657         emith_lsrf(sr, sr, 1);          \
658 }
659
660 #define emith_tpush_carry(sr, is_sub) { \
661         emith_adc_r_r(sr, sr);          \
662         if (is_sub)                     \
663                 emith_eor_r_imm(sr, 1); \
664 }
665
666 /*
667  * if Q
668  *   t = carry(Rn += Rm)
669  * else
670  *   t = carry(Rn -= Rm)
671  * T ^= t
672  */
673 #define emith_sh2_div1_step(rn, rm, sr) {         \
674         void *jmp0, *jmp1;                        \
675         emith_tst_r_imm(sr, Q);  /* if (Q ^ M) */ \
676         JMP_POS(jmp0);           /* beq do_sub */ \
677         emith_addf_r_r(rn, rm);                   \
678         emith_eor_r_imm_c(A_COND_CS, sr, T);      \
679         JMP_POS(jmp1);           /* b done */     \
680         JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */  \
681         emith_subf_r_r(rn, rm);                   \
682         emith_eor_r_imm_c(A_COND_CC, sr, T);      \
683         JMP_EMIT(A_COND_AL, jmp1); /* done: */    \
684 }
685