/*
 * Basic macros to emit ARM instructions and some utils
 * Copyright (C) 2008,2009,2010 notaz
 * Copyright (C) 2016      lentillog
 * Copyright (C) 2016      Daniel De Matteis
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */
#define CONTEXT_REG 11

// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
        do { \
                *(u32 *)ptr = x; \
                ptr = (void *)((u8 *)ptr + sizeof(u32)); \
                COUNT_OP; \
        } while (0)

#define EMIT(x) EMIT_PTR(tcache_ptr, x)

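/*
 * Example (a sketch; encoding hand-checked against the ARM ARM, not
 * taken from the original comments): EMIT() writes one 32-bit opcode
 * at tcache_ptr and advances it, so
 *   EMIT(0xe1a00001);  // mov r0, r1
 * appends a single instruction to the translation cache.
 */
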
#define A_R4M  (1 << 4)
#define A_R5M  (1 << 5)
#define A_R6M  (1 << 6)
#define A_R7M  (1 << 7)
#define A_R8M  (1 << 8)
#define A_R9M  (1 << 9)
#define A_R10M (1 << 10)
#define A_R11M (1 << 11)
#define A_R12M (1 << 12)
#define A_R14M (1 << 14)
#define A_R15M (1 << 15)

#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_HS 0x2
#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_VS 0x6
#define A_COND_VC 0x7
#define A_COND_HI 0x8
#define A_COND_LS 0x9
#define A_COND_GE 0xa
#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
#define A_COND_CS A_COND_HS
#define A_COND_CC A_COND_LO

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
#define DCOND_HI A_COND_HI
#define DCOND_HS A_COND_HS
#define DCOND_LO A_COND_LO
#define DCOND_GE A_COND_GE
#define DCOND_GT A_COND_GT
#define DCOND_LT A_COND_LT
#define DCOND_LS A_COND_LS
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC

/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3

#define A_AM1_IMM(ror2,imm8)                  (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm)        (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))

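/*
 * A_AM1_IMM encodes ARM's rotated 8-bit immediate: the operand is
 * imm8 rotated right by 2*ror2 bits. For example (hand-checked
 * against the encoding above, not from the original source):
 *   A_AM1_IMM(12, 0x1f)  // 0x1f ror 24 == 0x00001f00
 */
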
/* data processing op */
#define A_OP_AND 0x0
#define A_OP_EOR 0x1
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_ADC 0x5
#define A_OP_SBC 0x6
#define A_OP_RSC 0x7
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_CMN 0xb
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
#define A_OP_MVN 0xf

#define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
        EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))

#define EOP_C_DOP_IMM(     cond,op,s,rn,rd,ror2,imm8)             EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8))
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm))
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs,       shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs,       shift_op,rm))

#define EOP_MOV_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_MVN_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)

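/*
 * Worked example (encoding verified by hand, not from the original
 * comments): EOP_ADD_IMM(0,1,0,0x10) assembles "add r0, r1, #0x10":
 *   (0xe<<28)|(A_OP_ADD<<21)|(1<<16)|(0<<12)|A_AM1_IMM(0,0x10)
 *     == 0xe2810010
 */
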
#define EOP_MOV_IMM_C(cond,rd,   ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_MVN_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_CMP_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd,   rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm)           EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

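/*
 * For instance, EOP_MOV_REG_LSL(0,1,4) emits "mov r0, r1, lsl #4"
 * (0xe1a00201, hand-assembled from EOP_C_DOP_REG_XIMM above); a
 * shift_imm of 0 with A_AM1_LSL is a plain register move.
 */
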
#define EOP_ORR_REG_SIMPLE(rd,rm)           EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm)           EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)

#define EOP_TST_REG_SIMPLE(rn,rm)           EOP_TST_REG(A_COND_AL,  rn,  rm,A_AM1_LSL,   0)

#define EOP_MOV_REG2_LSL(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs)       EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs)       EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)

/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
        EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12))

#define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \
        EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
                ((shift_imm)<<7) | ((shift_op)<<5) | (rm))

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
        EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
                        ((s)<<6) | ((h)<<5) | (immed_reg))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))

#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm)       EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)

/* ldr and str */
#define EOP_LDR_IMM2(cond,rd,rn,offset_12)  EOP_C_AM2_IMM(cond,1,0,1,rn,rd,offset_12)
#define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,1,1,1,rn,rd,offset_12)

#define EOP_LDR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12)
#define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12)
#define EOP_LDR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12)
#define EOP_STR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

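/*
 * Example (hand-assembled): EOP_LDR_IMM(0,11,0x10) produces
 * "ldr r0, [r11, #0x10]" == 0xe59b0010; the u bit selects an added
 * (1) or subtracted (0) offset, hence EOP_LDR_NEGIMM.
 */
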
#define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm)

#define EOP_LDRH_IMM2(cond,rd,rn,offset_8)  EOP_C_AM3_IMM(cond,1,1,rn,rd,0,1,offset_8)

#define EOP_LDRH_IMM(   rd,rn,offset_8)  EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8)
#define EOP_LDRH_SIMPLE(rd,rn)           EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG(   rd,rn,rm)        EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM(   rd,rn,offset_8)  EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8)
#define EOP_STRH_SIMPLE(rd,rn)           EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG(   rd,rn,rm)        EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)

/* ldm and stm */
#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
        EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list))

#define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
#define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)

#define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list)
#define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list)

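/*
 * The register list is a bitmask built from the A_R*M defines, e.g.
 * EOP_STMFD_SP(A_R4M|A_R14M) emits "stmfd sp!, {r4, lr}"
 * (0xe92d4010, hand-checked against EOP_XXM).
 */
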
/* branches */
#define EOP_C_BX(cond,rm) \
        EMIT(((cond)<<28) | 0x012fff10 | (rm))

#define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
        EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_C_B(cond,l,signed_immed_24) \
        EOP_C_B_PTR(tcache_ptr,cond,l,signed_immed_24)

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)

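/*
 * signed_immed_24 counts in words relative to PC+8, as elsewhere in
 * this file (see the "- 2" in emith_xbranch below). For example,
 * EOP_B(-2 & 0xffffff) branches to itself: 0xeafffffe == "b .".
 */
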
/* misc */
#define EOP_C_MUL(cond,s,rd,rs,rm) \
        EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
        EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
        EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
        EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
        EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
        EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
        EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f

#define EOP_MRS(rd)           EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm)       EOP_C_MSR_REG(A_COND_AL,rm)


// XXX: AND, RSB and the *C ops will break if one insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
        int ror2;
        u32 v;

        switch (op) {
        case A_OP_MOV:
                rn = 0;
                if (~imm < 0x10000) {
                        imm = ~imm;
                        op = A_OP_MVN;
                }
                break;

        case A_OP_EOR:
        case A_OP_SUB:
        case A_OP_ADD:
        case A_OP_ORR:
        case A_OP_BIC:
                if (s == 0 && imm == 0)
                        return;
                break;
        }

        for (v = imm, ror2 = 0; ; ror2 -= 8/2) {
                /* shift down to get 'best' rot2 */
                for (; v && !(v & 3); v >>= 2)
                        ror2--;

                EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);

                v >>= 8;
                if (v == 0)
                        break;
                if (op == A_OP_MOV)
                        op = A_OP_ORR;
                if (op == A_OP_MVN)
                        op = A_OP_BIC;
                rn = rd;
        }
}

#define emith_op_imm(cond, s, op, r, imm) \
        emith_op_imm2(cond, s, op, r, r, imm)

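/*
 * Sketch of the expansion (derived from the loop above, not from the
 * original comments): constants that do not fit a single rotated
 * imm8 are emitted in chunks, switching MOV->ORR (or MVN->BIC) after
 * the first insn, e.g. emith_move_r_imm(0, 0x120034) yields:
 *   mov r0, #0x34
 *   orr r0, r0, #0x120000
 */
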
// test op
#define emith_top_imm(cond, op, r, imm) do { \
        u32 ror2, v; \
        for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
                ror2--; \
        EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
} while (0)

#define is_offset_24(val) \
        ((val) >= (int)0xff000000 && (val) <= 0x00ffffff)

static int emith_xbranch(int cond, void *target, int is_call)
{
        int val = (u32 *)target - (u32 *)tcache_ptr - 2;
        int direct = is_offset_24(val);
        u32 *start_ptr = (u32 *)tcache_ptr;

        if (direct)
        {
                EOP_C_B(cond,is_call,val & 0xffffff);           // b, bl target
        }
        else
        {
#ifdef __EPOC32__
//              elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
                if (is_call)
                        EOP_ADD_IMM(14,15,0,8);                 // add lr,pc,#8
                EOP_C_AM2_IMM(cond,1,0,1,15,15,0);              // ldrcc pc,[pc]
                EOP_MOV_REG_SIMPLE(15,15);                      // mov pc, pc
                EMIT((u32)target);
#else
                // should never happen
                elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", tcache_ptr, target);
                exit(1);
#endif
        }

        return (u32 *)tcache_ptr - start_ptr;
}

#define JMP_POS(ptr) \
        ptr = tcache_ptr; \
        tcache_ptr += sizeof(u32)

#define JMP_EMIT(cond, ptr) { \
        u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
        EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
}

#define EMITH_JMP_START(cond) { \
        void *cond_ptr; \
        JMP_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
        JMP_EMIT(cond, cond_ptr); \
}

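/*
 * Typical use (a sketch): JMP_POS reserves a slot for a forward
 * branch whose target is not yet known; JMP_EMIT back-patches it
 * once the fall-through code has been emitted:
 *   EMITH_JMP_START(DCOND_EQ);   // branch over the block when EQ
 *   ...                          // code taken when NE
 *   EMITH_JMP_END(DCOND_EQ);     // back-patch the "beq" here
 */
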
// fake "simple" or "short" jump - using cond insns instead
#define EMITH_NOTHING1(cond) \
        (void)(cond)

#define EMITH_SJMP_START(cond)  EMITH_NOTHING1(cond)
#define EMITH_SJMP_END(cond)    EMITH_NOTHING1(cond)
#define EMITH_SJMP3_START(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP3_MID(cond)   EMITH_NOTHING1(cond)
#define EMITH_SJMP3_END()

#define emith_move_r_r(d, s) \
        EOP_MOV_REG_SIMPLE(d, s)

#define emith_mvn_r_r(d, s) \
        EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)

#define emith_add_r_r_r_lsl(d, s1, s2, lslimm) \
        EOP_ADD_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
        EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
        EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
        EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)

#define emith_or_r_r_lsl(d, s, lslimm) \
        emith_or_r_r_r_lsl(d, d, s, lslimm)

#define emith_eor_r_r_lsr(d, s, lsrimm) \
        emith_eor_r_r_r_lsr(d, d, s, lsrimm)

#define emith_add_r_r_r(d, s1, s2) \
        emith_add_r_r_r_lsl(d, s1, s2, 0)

#define emith_or_r_r_r(d, s1, s2) \
        emith_or_r_r_r_lsl(d, s1, s2, 0)

#define emith_eor_r_r_r(d, s1, s2) \
        emith_eor_r_r_r_lsl(d, s1, s2, 0)

#define emith_add_r_r(d, s) \
        emith_add_r_r_r(d, d, s)

#define emith_sub_r_r(d, s) \
        EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_adc_r_r(d, s) \
        EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_and_r_r(d, s) \
        EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_or_r_r(d, s) \
        emith_or_r_r_r(d, d, s)

#define emith_eor_r_r(d, s) \
        emith_eor_r_r_r(d, d, s)

#define emith_tst_r_r(d, s) \
        EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_teq_r_r(d, s) \
        EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_cmp_r_r(d, s) \
        EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_addf_r_r(d, s) \
        EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_subf_r_r(d, s) \
        EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_adcf_r_r(d, s) \
        EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_sbcf_r_r(d, s) \
        EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_eorf_r_r(d, s) \
        EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)

#define emith_adc_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)

#define emith_sub_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)

#define emith_bic_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)

#define emith_and_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)

#define emith_or_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)

// note: only use 8-bit imm for these
#define emith_tst_r_imm(r, imm) \
        emith_top_imm(A_COND_AL, A_OP_TST, r, imm)

#define emith_cmp_r_imm(r, imm) { \
        u32 op = A_OP_CMP, imm_ = imm; \
        if (~imm_ < 0x100) { \
                imm_ = ~imm_; \
                op = A_OP_CMN; \
        } \
        emith_top_imm(A_COND_AL, op, r, imm_); \
}

#define emith_subf_r_imm(r, imm) \
        emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_move_r_imm_c(cond, r, imm) \
        emith_op_imm(cond, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm_c(cond, r, imm) \
        emith_op_imm(cond, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm_c(cond, r, imm) \
        emith_op_imm(cond, 0, A_OP_SUB, r, imm)

#define emith_or_r_imm_c(cond, r, imm) \
        emith_op_imm(cond, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm_c(cond, r, imm) \
        emith_op_imm(cond, 0, A_OP_EOR, r, imm)

#define emith_bic_r_imm_c(cond, r, imm) \
        emith_op_imm(cond, 0, A_OP_BIC, r, imm)

#define emith_move_r_imm_s8(r, imm) { \
        if ((imm) & 0x80) \
                EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \
        else \
                EOP_MOV_IMM(r, 0, imm); \
}

#define emith_and_r_r_imm(d, s, imm) \
        emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)

#define emith_add_r_r_imm(d, s, imm) \
        emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)

#define emith_sub_r_r_imm(d, s, imm) \
        emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)

#define emith_neg_r_r(d, s) \
        EOP_RSB_IMM(d, s, 0, 0)

#define emith_lsl(d, s, cnt) \
        EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)

#define emith_lsr(d, s, cnt) \
        EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)

#define emith_asr(d, s, cnt) \
        EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)

#define emith_ror_c(cond, d, s, cnt) \
        EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)

#define emith_ror(d, s, cnt) \
        emith_ror_c(A_COND_AL, d, s, cnt)

#define emith_rol(d, s, cnt) \
        EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))

#define emith_lslf(d, s, cnt) \
        EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)

#define emith_lsrf(d, s, cnt) \
        EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)

#define emith_asrf(d, s, cnt) \
        EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

// note: only C flag updated correctly
#define emith_rolf(d, s, cnt) { \
        EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
        /* we don't have ROL so we shift to get the right carry */ \
        EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
}

#define emith_rorf(d, s, cnt) \
        EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)

#define emith_rolcf(d) \
        emith_adcf_r_r(d, d)

#define emith_rorcf(d) \
        EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */

#define emith_negcf_r_r(d, s) \
        EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)

#define emith_mul(d, s1, s2) { \
        if ((d) != (s1)) /* rd != rm limitation */ \
                EOP_MUL(d, s1, s2); \
        else \
                EOP_MUL(d, s2, s1); \
}

#define emith_mul_u64(dlo, dhi, s1, s2) \
        EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mul_s64(dlo, dhi, s1, s2) \
        EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mula_s64(dlo, dhi, s1, s2) \
        EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)

// misc
#define emith_read_r_r_offs_c(cond, r, rs, offs) \
        EOP_LDR_IMM2(cond, r, rs, offs)

#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
        EOP_LDRB_IMM2(cond, r, rs, offs)

#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
        EOP_LDRH_IMM2(cond, r, rs, offs)

#define emith_read_r_r_offs(r, rs, offs) \
        emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read8_r_r_offs(r, rs, offs) \
        emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read16_r_r_offs(r, rs, offs) \
        emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_ctx_read(r, offs) \
        emith_read_r_r_offs(r, CONTEXT_REG, offs)

#define emith_ctx_write(r, offs) \
        EOP_STR_IMM(r, CONTEXT_REG, offs)

#define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
        int v_, r_ = r, c_ = count, b_ = CONTEXT_REG;        \
        for (v_ = 0; c_; c_--, r_++)                         \
                v_ |= 1 << r_;                               \
        if ((offs) != 0) {                                   \
                /* imm8 = offs>>2, rotated right by 2*15 = 30 \
                 * (i.e. left by 2), recreates a word-aligned \
                 * offs below 0x400 */                       \
                EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2);\
                b_ = tmpr;                                   \
        }                                                    \
        op(b_,v_);                                           \
} while(0)

#define emith_ctx_read_multiple(r, offs, count, tmpr) \
        emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)

#define emith_ctx_write_multiple(r, offs, count, tmpr) \
        emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)

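/*
 * For example (behavior inferred from the macro above, not an
 * original comment): emith_ctx_read_multiple(4, 0, 3, 0) builds the
 * mask r4|r5|r6 and, since offs == 0, emits a single
 *   ldmia r11, {r4-r6}   // r11 == CONTEXT_REG
 */
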
#define emith_clear_msb_c(cond, d, s, count) { \
        u32 t; \
        if ((count) <= 8) { \
                t = (count) - 8; \
                t = (0xff << t) & 0xff; \
                EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
        } else if ((count) >= 24) { \
                t = (count) - 24; \
                t = 0xff >> t; \
                EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
        } else { \
                EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
                EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
        } \
}

#define emith_clear_msb(d, s, count) \
        emith_clear_msb_c(A_COND_AL, d, s, count)

#define emith_sext(d, s, bits) { \
        EOP_MOV_REG_LSL(d,s,32 - (bits)); \
        EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}

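/*
 * E.g. emith_sext(0, 1, 16) sign-extends the low halfword of r1
 * into r0 via "mov r0, r1, lsl #16; mov r0, r0, asr #16", the
 * usual shift-pair idiom (no assumptions beyond this file).
 */
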
#define emith_do_caller_regs(mask, func) { \
        u32 _reg_mask = (mask) & 0x500f; \
        if (_reg_mask) { \
                if (__builtin_parity(_reg_mask) == 1) \
                        _reg_mask |= 0x10; /* eabi align */ \
                func(_reg_mask); \
        } \
}

#define emith_save_caller_regs(mask) \
        emith_do_caller_regs(mask, EOP_STMFD_SP)

#define emith_restore_caller_regs(mask) \
        emith_do_caller_regs(mask, EOP_LDMFD_SP)

// up to 4 args
#define emith_pass_arg_r(arg, reg) \
        EOP_MOV_REG_SIMPLE(arg, reg)

#define emith_pass_arg_imm(arg, imm) \
        emith_move_r_imm(arg, imm)

#define emith_jump(target) \
        emith_jump_cond(A_COND_AL, target)

#define emith_jump_patchable(target) \
        emith_jump(target)

#define emith_jump_cond(cond, target) \
        emith_xbranch(cond, target, 0)

#define emith_jump_cond_patchable(cond, target) \
        emith_jump_cond(cond, target)

#define emith_jump_patch(ptr, target) do { \
        u32 *ptr_ = ptr; \
        u32 val_ = (u32 *)(target) - ptr_ - 2; \
        *ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
} while (0)

#define emith_jump_at(ptr, target) { \
        u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
        EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
}

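/*
 * emith_jump_patch rewrites only the low 24 bits of an already
 * emitted b/bl, so a branch emitted via emith_jump_patchable() can
 * later be retargeted in place ("stub" and "block" are hypothetical
 * targets, not names from this file):
 *   void *site = tcache_ptr;
 *   emith_jump_patchable(stub);
 *   ...
 *   emith_jump_patch(site, block);  // redirect stub -> block
 */
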
#define emith_jump_reg_c(cond, r) \
        EOP_C_BX(cond, r)

#define emith_jump_reg(r) \
        emith_jump_reg_c(A_COND_AL, r)

#define emith_jump_ctx_c(cond, offs) \
        EOP_LDR_IMM2(cond,15,CONTEXT_REG,offs)

#define emith_jump_ctx(offs) \
        emith_jump_ctx_c(A_COND_AL, offs)

#define emith_call_cond(cond, target) \
        emith_xbranch(cond, target, 1)

#define emith_call(target) \
        emith_call_cond(A_COND_AL, target)

#define emith_call_ctx(offs) { \
        emith_move_r_r(14, 15); \
        emith_jump_ctx(offs); \
}

#define emith_ret_c(cond) \
        emith_jump_reg_c(cond, 14)

#define emith_ret() \
        emith_ret_c(A_COND_AL)

#define emith_ret_to_ctx(offs) \
        emith_ctx_write(14, offs)

#define emith_push_ret() \
        EOP_STMFD_SP(A_R14M)

#define emith_pop_and_ret() \
        EOP_LDMFD_SP(A_R15M)

#define host_instructions_updated(base, end) \
        cache_flush_d_inval_i(base, end)

#define host_arg2reg(rd, arg) \
        rd = arg

/* SH2 drc specific */
/* pushes r12 for eabi alignment */
#define emith_sh2_drc_entry() \
        EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R12M|A_R14M)

#define emith_sh2_drc_exit() \
        EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R12M|A_R15M)

#define emith_sh2_wcall(a, tab) { \
        emith_lsr(12, a, SH2_WRITE_SHIFT); \
        EOP_LDR_REG_LSL(A_COND_AL,12,tab,12,2); \
        emith_move_r_r(2, CONTEXT_REG); \
        emith_jump_reg(12); \
}

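/*
 * Reading of the above (inferred from the code, not an original
 * comment): emith_sh2_wcall indexes the handler table "tab" with the
 * address shifted down by SH2_WRITE_SHIFT, passes the SH2 context in
 * r2 and tail-jumps via bx r12, so the handler returns on the
 * caller's behalf.
 */
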
#define emith_sh2_dtbf_loop() { \
        int cr, rn;                                                          \
        int tmp_ = rcache_get_tmp();                                         \
        cr = rcache_get_reg(SHR_SR, RC_GR_RMW);                              \
        rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);                    \
        emith_sub_r_imm(rn, 1);                /* sub rn, #1 */              \
        emith_bic_r_imm(cr, 1);                /* bic cr, #1 */              \
        emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
        cycles = 0;                                                          \
        emith_asrf(tmp_, cr, 2+12);            /* movs tmp_, cr, asr #2+12 */\
        EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0);     /* movmi tmp_, #0 */          \
        emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */     \
        emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */     \
        emith_subf_r_r(rn, tmp_);              /* subs rn, tmp_ */           \
        EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0);  /* rsbls tmp_, rn, #0 */      \
        EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */\
        EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */            \
        EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */            \
        rcache_free_tmp(tmp_);                                               \
}

/* sr = (sr & ~0x3ff) | (srcr & 0x3ff), done with a shift/rotate pair */
#define emith_write_sr(sr, srcr) { \
        emith_lsr(sr, sr, 10); \
        emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
        emith_ror(sr, sr, 22); \
}

#define emith_carry_to_t(srr, is_sub) { \
        if (is_sub) { /* has inverted C on ARM */ \
                emith_or_r_imm_c(A_COND_CC, srr, 1); \
                emith_bic_r_imm_c(A_COND_CS, srr, 1); \
        } else { \
                emith_or_r_imm_c(A_COND_CS, srr, 1); \
                emith_bic_r_imm_c(A_COND_CC, srr, 1); \
        } \
}

#define emith_tpop_carry(sr, is_sub) {  \
        if (is_sub)                     \
                emith_eor_r_imm(sr, 1); \
        emith_lsrf(sr, sr, 1);          \
}

#define emith_tpush_carry(sr, is_sub) { \
        emith_adc_r_r(sr, sr);          \
        if (is_sub)                     \
                emith_eor_r_imm(sr, 1); \
}

/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) {         \
        void *jmp0, *jmp1;                        \
        emith_tst_r_imm(sr, Q);  /* if (Q ^ M) */ \
        JMP_POS(jmp0);           /* beq do_sub */ \
        emith_addf_r_r(rn, rm);                   \
        emith_eor_r_imm_c(A_COND_CS, sr, T);      \
        JMP_POS(jmp1);           /* b done */     \
        JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */  \
        emith_subf_r_r(rn, rm);                   \
        emith_eor_r_imm_c(A_COND_CC, sr, T);      \
        JMP_EMIT(A_COND_AL, jmp1); /* done: */    \
}