drc: rework block tracking and lookup
picodrive.git: cpu/drc/emit_arm.c
/*
 * Basic macros to emit ARM instructions and some utils
 * Copyright (C) 2008,2009,2010 notaz
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 */
#define CONTEXT_REG 11

// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
	do { \
		*(u32 *)ptr = x; \
		ptr = (void *)((u8 *)ptr + sizeof(u32)); \
		COUNT_OP; \
	} while (0)

#define EMIT(x) EMIT_PTR(tcache_ptr, x)

#define A_R4M  (1 << 4)
#define A_R5M  (1 << 5)
#define A_R6M  (1 << 6)
#define A_R7M  (1 << 7)
#define A_R8M  (1 << 8)
#define A_R9M  (1 << 9)
#define A_R10M (1 << 10)
#define A_R11M (1 << 11)
#define A_R12M (1 << 12)
#define A_R14M (1 << 14)
#define A_R15M (1 << 15)

#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
#define A_COND_HS 0x2
#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
#define A_COND_VS 0x6
#define A_COND_VC 0x7
#define A_COND_HI 0x8
#define A_COND_LS 0x9
#define A_COND_GE 0xa
#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
#define A_COND_CS A_COND_HS
#define A_COND_CC A_COND_LO

/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
#define DCOND_HI A_COND_HI
#define DCOND_HS A_COND_HS
#define DCOND_LO A_COND_LO
#define DCOND_GE A_COND_GE
#define DCOND_GT A_COND_GT
#define DCOND_LT A_COND_LT
#define DCOND_LS A_COND_LS
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC

/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_AM1_LSR 1
#define A_AM1_ASR 2
#define A_AM1_ROR 3

#define A_AM1_IMM(ror2,imm8)                  (((ror2)<<8) | (imm8) | 0x02000000)
#define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm))
#define A_AM1_REG_XREG(rs,shift_op,rm)        (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm))
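
/*
 * Informative example of the am1 rotated-immediate form: the hardware
 * rotates imm8 right by 2*ror2 bits, so A_AM1_IMM(5, 0xff) encodes
 * 0xff ror 10 == 0x3fc00000, and A_AM1_IMM(0, n) is just #n (n < 256).
 */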

/* data processing op */
#define A_OP_AND 0x0
#define A_OP_EOR 0x1
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
#define A_OP_ADC 0x5
#define A_OP_SBC 0x6
#define A_OP_RSC 0x7
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_CMN 0xb
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
#define A_OP_MVN 0xf

#define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
	EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))

#define EOP_C_DOP_IMM(     cond,op,s,rn,rd,ror2,imm8)             EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8))
#define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm))
#define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs,       shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs,       shift_op,rm))

#define EOP_MOV_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_MVN_IMM(rd,   ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8)
#define EOP_TST_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8)
#define EOP_CMP_IMM(   rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8)
#define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8)
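
/*
 * Informative example: EOP_ADD_IMM(0,1,0,1) expands to the word 0xe2810001,
 * i.e. "add r0, r1, #1" (cond=AL, I=1, op=ADD, S=0, rn=1, rd=0, imm=1).
 */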

#define EOP_MOV_IMM_C(cond,rd,   ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8)
#define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8)

#define EOP_MOV_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_MVN_REG(cond,s,rd,   rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_CMP_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond,     rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)

#define EOP_MOV_REG2(s,rd,   rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm)
#define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm)
#define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm)

#define EOP_MOV_REG_SIMPLE(rd,rm)           EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0)
#define EOP_MOV_REG_LSL(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm)
#define EOP_MOV_REG_LSR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm)
#define EOP_MOV_REG_ASR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm)
#define EOP_MOV_REG_ROR(rd,   rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm)

#define EOP_ORR_REG_SIMPLE(rd,rm)           EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)
#define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm)
#define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm)

#define EOP_ADD_REG_SIMPLE(rd,rm)           EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0)
#define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm)
#define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm)

#define EOP_TST_REG_SIMPLE(rn,rm)           EOP_TST_REG(A_COND_AL,  rn,  rm,A_AM1_LSL,0)

#define EOP_MOV_REG2_LSL(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_LSL,rs)
#define EOP_MOV_REG2_ROR(rd,   rm,rs)       EOP_MOV_REG2(0,rd,   rm,A_AM1_ROR,rs)
#define EOP_ADD_REG2_LSL(rd,rn,rm,rs)       EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs)
#define EOP_SUB_REG2_LSL(rd,rn,rm,rs)       EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs)

/* addressing mode 2 */
#define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \
	EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12))

#define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \
	EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
		((shift_imm)<<7) | ((shift_op)<<5) | (rm))

/* addressing mode 3 */
#define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \
	EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \
			((s)<<6) | ((h)<<5) | (immed_reg))

#define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf))

#define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm)       EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm)
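
/*
 * Informative note: am3 immediates are split into two nibbles, e.g.
 * EOP_C_AM3_IMM with offset_8 == 0x34 places 0x3 in bits [11:8] and
 * 0x4 in bits [3:0], as the ldrh/strh encodings require.
 */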

/* ldr and str */
#define EOP_LDR_IMM2(cond,rd,rn,offset_12)  EOP_C_AM2_IMM(cond,1,0,1,rn,rd,offset_12)
#define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,1,1,1,rn,rd,offset_12)

#define EOP_LDR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12)
#define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12)
#define EOP_LDR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0)
#define EOP_STR_IMM(   rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12)
#define EOP_STR_SIMPLE(rd,rn)           EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0)

#define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm)

#define EOP_LDRH_IMM2(cond,rd,rn,offset_8)  EOP_C_AM3_IMM(cond,1,1,rn,rd,0,1,offset_8)

#define EOP_LDRH_IMM(   rd,rn,offset_8)  EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8)
#define EOP_LDRH_SIMPLE(rd,rn)           EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0)
#define EOP_LDRH_REG(   rd,rn,rm)        EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm)
#define EOP_STRH_IMM(   rd,rn,offset_8)  EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8)
#define EOP_STRH_SIMPLE(rd,rn)           EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0)
#define EOP_STRH_REG(   rd,rn,rm)        EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm)

/* ldm and stm */
#define EOP_XXM(cond,p,u,s,w,l,rn,list) \
	EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list))

#define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list)
#define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list)

#define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list)
#define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list)

/* branches */
#define EOP_C_BX(cond,rm) \
	EMIT(((cond)<<28) | 0x012fff10 | (rm))

#define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \
	EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24))

#define EOP_C_B(cond,l,signed_immed_24) \
	EOP_C_B_PTR(tcache_ptr,cond,l,signed_immed_24)

#define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24)
#define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24)

/* misc */
#define EOP_C_MUL(cond,s,rd,rs,rm) \
	EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \
	EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))

#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm

#define EOP_C_MRS(cond,rd) \
	EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12))

#define EOP_C_MSR_IMM(cond,ror2,imm) \
	EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f

#define EOP_C_MSR_REG(cond,rm) \
	EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f

#define EOP_MRS(rd)           EOP_C_MRS(A_COND_AL,rd)
#define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm)
#define EOP_MSR_REG(rm)       EOP_C_MSR_REG(A_COND_AL,rm)


// XXX: AND, RSB, *C, will break if 1 insn is not enough
static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm)
{
	int ror2;
	u32 v;

	switch (op) {
	case A_OP_MOV:
		rn = 0;
		if (~imm < 0x10000) {
			imm = ~imm;
			op = A_OP_MVN;
		}
		break;

	case A_OP_EOR:
	case A_OP_SUB:
	case A_OP_ADD:
	case A_OP_ORR:
	case A_OP_BIC:
		if (s == 0 && imm == 0)
			return;
		break;
	}

	for (v = imm, ror2 = 0; ; ror2 -= 8/2) {
		/* shift down to get 'best' rot2 */
		for (; v && !(v & 3); v >>= 2)
			ror2--;

		EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff);

		v >>= 8;
		if (v == 0)
			break;
		if (op == A_OP_MOV)
			op = A_OP_ORR;
		if (op == A_OP_MVN)
			op = A_OP_BIC;
		rn = rd;
	}
}
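
/*
 * Informative trace: emith_op_imm2(A_COND_AL, 0, A_OP_MOV, 0, 0, 0x12345678)
 * splits the constant into rotated 8-bit chunks and emits
 *   mov r0, #0x278; orr r0, r0, #0x5400;
 *   orr r0, r0, #0x2340000; orr r0, r0, #0x10000000
 * This is also why the XXX above applies: ops without a chaining form
 * (AND, RSB, the carry variants) are only correct when one insn suffices.
 */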

#define emith_op_imm(cond, s, op, r, imm) \
	emith_op_imm2(cond, s, op, r, r, imm)

// test op
#define emith_top_imm(cond, op, r, imm) do { \
	u32 ror2, v; \
	for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
		ror2--; \
	EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
} while (0)

#define is_offset_24(val) \
	((val) >= (int)0xff000000 && (val) <= 0x00ffffff)

static int emith_xbranch(int cond, void *target, int is_call)
{
	int val = (u32 *)target - (u32 *)tcache_ptr - 2;
	int direct = is_offset_24(val);
	u32 *start_ptr = (u32 *)tcache_ptr;

	if (direct)
	{
		EOP_C_B(cond,is_call,val & 0xffffff);		// b, bl target
	}
	else
	{
#ifdef __EPOC32__
//		elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target);
		if (is_call)
			EOP_ADD_IMM(14,15,0,8);			// add lr,pc,#8
		EOP_C_AM2_IMM(cond,1,0,1,15,15,0);		// ldrcc pc,[pc]
		EOP_MOV_REG_SIMPLE(15,15);			// mov pc, pc
		EMIT((u32)target);
#else
		// should never happen
		elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", target, tcache_ptr);
		exit(1);
#endif
	}

	return (u32 *)tcache_ptr - start_ptr;
}
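
/*
 * Informative note on the branch offset: ARM reads PC as the current insn
 * address + 8, so the "- 2" converts the word distance into the B/BL field.
 * E.g. a target 0x100 bytes ahead of tcache_ptr gives val = 64 - 2 = 0x3e.
 */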

#define JMP_POS(ptr) \
	ptr = tcache_ptr; \
	tcache_ptr += sizeof(u32)

#define JMP_EMIT(cond, ptr) { \
	u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \
	EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \
}

#define EMITH_JMP_START(cond) { \
	void *cond_ptr; \
	JMP_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
	JMP_EMIT(cond, cond_ptr); \
}
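
/*
 * Usage sketch (informative): forward jumps reserve the branch slot first
 * and patch it once the target is known, e.g.
 *   EMITH_JMP_START(DCOND_EQ);
 *   ... code that is skipped when the condition holds ...
 *   EMITH_JMP_END(DCOND_EQ);
 */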

// fake "simple" or "short" jump - using cond insns instead
#define EMITH_NOTHING1(cond) \
	(void)(cond)

#define EMITH_SJMP_START(cond)  EMITH_NOTHING1(cond)
#define EMITH_SJMP_END(cond)    EMITH_NOTHING1(cond)
#define EMITH_SJMP3_START(cond) EMITH_NOTHING1(cond)
#define EMITH_SJMP3_MID(cond)   EMITH_NOTHING1(cond)
#define EMITH_SJMP3_END()

#define emith_move_r_r(d, s) \
	EOP_MOV_REG_SIMPLE(d, s)

#define emith_mvn_r_r(d, s) \
	EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0)

#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)

#define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \
	EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm)

#define emith_or_r_r_lsl(d, s, lslimm) \
	emith_or_r_r_r_lsl(d, d, s, lslimm)

#define emith_eor_r_r_lsr(d, s, lsrimm) \
	emith_eor_r_r_r_lsr(d, d, s, lsrimm)

#define emith_or_r_r_r(d, s1, s2) \
	emith_or_r_r_r_lsl(d, s1, s2, 0)

#define emith_eor_r_r_r(d, s1, s2) \
	emith_eor_r_r_r_lsl(d, s1, s2, 0)

#define emith_add_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_sub_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_adc_r_r(d, s) \
	EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_and_r_r(d, s) \
	EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)

#define emith_or_r_r(d, s) \
	emith_or_r_r_r(d, d, s)

#define emith_eor_r_r(d, s) \
	emith_eor_r_r_r(d, d, s)

#define emith_tst_r_r(d, s) \
	EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_teq_r_r(d, s) \
	EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_cmp_r_r(d, s) \
	EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)

#define emith_addf_r_r(d, s) \
	EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_subf_r_r(d, s) \
	EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_adcf_r_r(d, s) \
	EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_sbcf_r_r(d, s) \
	EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_eorf_r_r(d, s) \
	EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)

#define emith_move_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm)

#define emith_adc_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm)

#define emith_bic_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm)

#define emith_and_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm)

// note: only use 8bit imm for these
#define emith_tst_r_imm(r, imm) \
	emith_top_imm(A_COND_AL, A_OP_TST, r, imm)

#define emith_cmp_r_imm(r, imm) { \
	u32 op = A_OP_CMP, imm_ = imm; \
	if (~imm_ < 0x100) { \
		imm_ = ~imm_; \
		op = A_OP_CMN; \
	} \
	emith_top_imm(A_COND_AL, op, r, imm_); \
}

#define emith_subf_r_imm(r, imm) \
	emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)

#define emith_move_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_MOV, r, imm)

#define emith_add_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ADD, r, imm)

#define emith_sub_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_SUB, r, imm)

#define emith_or_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_ORR, r, imm)

#define emith_eor_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_EOR, r, imm)

#define emith_bic_r_imm_c(cond, r, imm) \
	emith_op_imm(cond, 0, A_OP_BIC, r, imm)

#define emith_move_r_imm_s8(r, imm) { \
	if ((imm) & 0x80) \
		EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \
	else \
		EOP_MOV_IMM(r, 0, imm); \
}
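
/*
 * Informative example: emith_move_r_imm_s8(r, 0xfe) emits "mvn r, #1",
 * i.e. r = ~1 = 0xfffffffe, sign-extending the 8-bit value -2.
 */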

#define emith_and_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm)

#define emith_add_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm)

#define emith_sub_r_r_imm(d, s, imm) \
	emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm)

#define emith_neg_r_r(d, s) \
	EOP_RSB_IMM(d, s, 0, 0)

#define emith_lsl(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)

#define emith_lsr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)

#define emith_asr(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt)

#define emith_ror_c(cond, d, s, cnt) \
	EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt)

#define emith_ror(d, s, cnt) \
	emith_ror_c(A_COND_AL, d, s, cnt)

#define emith_rol(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt));

#define emith_lslf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)

#define emith_lsrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)

#define emith_asrf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)

// note: only C flag updated correctly
#define emith_rolf(d, s, cnt) { \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
	/* we don't have ROL so we shift to get the right carry */ \
	EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
}
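
/*
 * Informative note: the ROR #(32-cnt) already produces the rotated value;
 * the extra TST with LSR #1 only reloads C from bit 0 of the result, which
 * is the bit a real ROL would have carried out of the MSB.
 */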

#define emith_rorf(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)

#define emith_rolcf(d) \
	emith_adcf_r_r(d, d)

#define emith_rorcf(d) \
	EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */

#define emith_negcf_r_r(d, s) \
	EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0)

#define emith_mul(d, s1, s2) { \
	if ((d) != (s1)) /* rd != rm limitation */ \
		EOP_MUL(d, s1, s2); \
	else \
		EOP_MUL(d, s2, s1); \
}

#define emith_mul_u64(dlo, dhi, s1, s2) \
	EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mul_s64(dlo, dhi, s1, s2) \
	EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)

#define emith_mula_s64(dlo, dhi, s1, s2) \
	EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2)

// misc
#define emith_read_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDR_IMM2(cond, r, rs, offs)

#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRB_IMM2(cond, r, rs, offs)

#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
	EOP_LDRH_IMM2(cond, r, rs, offs)

#define emith_read_r_r_offs(r, rs, offs) \
	emith_read_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read8_r_r_offs(r, rs, offs) \
	emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_read16_r_r_offs(r, rs, offs) \
	emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs)

#define emith_ctx_read(r, offs) \
	emith_read_r_r_offs(r, CONTEXT_REG, offs)

#define emith_ctx_write(r, offs) \
	EOP_STR_IMM(r, CONTEXT_REG, offs)

#define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \
	int v_, r_ = r, c_ = count, b_ = CONTEXT_REG;        \
	for (v_ = 0; c_; c_--, r_++)                         \
		v_ |= 1 << r_;                               \
	if ((offs) != 0) {                                   \
		EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2);\
		b_ = tmpr;                                   \
	}                                                    \
	op(b_,v_);                                           \
} while(0)
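
/*
 * Informative note: the ADD above encodes (offs)>>2 with rotate field 15,
 * i.e. the immediate is rotated left by 2, so offs must be word-aligned
 * and below 0x400 for the single add to reach it.
 */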

#define emith_ctx_read_multiple(r, offs, count, tmpr) \
	emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr)

#define emith_ctx_write_multiple(r, offs, count, tmpr) \
	emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr)

#define emith_clear_msb_c(cond, d, s, count) { \
	u32 t; \
	if ((count) <= 8) { \
		t = (count) - 8; \
		t = (0xff << t) & 0xff; \
		EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
	} else if ((count) >= 24) { \
		t = (count) - 24; \
		t = 0xff >> t; \
		EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
	} else { \
		EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \
		EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \
	} \
}

#define emith_clear_msb(d, s, count) \
	emith_clear_msb_c(A_COND_AL, d, s, count)
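
/*
 * Informative example: emith_clear_msb(d, s, 24) emits "and d, s, #0xff",
 * keeping only the low 8 bits; counts between 9 and 23 fall back to the
 * lsl/lsr pair.
 */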

#define emith_sext(d, s, bits) { \
	EOP_MOV_REG_LSL(d,s,32 - (bits)); \
	EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}
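
/*
 * Informative example: emith_sext(d, s, 16) emits "mov d, s, lsl #16" then
 * "mov d, d, asr #16", sign-extending the low halfword of s into d.
 */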

// up to 4 args
#define emith_pass_arg_r(arg, reg) \
	EOP_MOV_REG_SIMPLE(arg, reg)

#define emith_pass_arg_imm(arg, imm) \
	emith_move_r_imm(arg, imm)

#define emith_jump(target) \
	emith_jump_cond(A_COND_AL, target)

#define emith_jump_patchable(target) \
	emith_jump(target)

#define emith_jump_cond(cond, target) \
	emith_xbranch(cond, target, 0)

#define emith_jump_cond_patchable(cond, target) \
	emith_jump_cond(cond, target)

#define emith_jump_patch(ptr, target) do { \
	u32 *ptr_ = ptr; \
	u32 val_ = (u32 *)(target) - ptr_ - 2; \
	*ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \
} while (0)
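
/*
 * Informative note: the patch keeps the top byte (condition and the b/bl
 * opcode bits) of the existing instruction and only rewrites the 24-bit
 * signed word offset.
 */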

#define emith_jump_at(ptr, target) { \
	u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \
	EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \
}

#define emith_jump_reg_c(cond, r) \
	EOP_C_BX(cond, r)

#define emith_jump_reg(r) \
	emith_jump_reg_c(A_COND_AL, r)

#define emith_jump_ctx_c(cond, offs) \
	EOP_LDR_IMM2(cond,15,CONTEXT_REG,offs)

#define emith_jump_ctx(offs) \
	emith_jump_ctx_c(A_COND_AL, offs)

#define emith_call_cond(cond, target) \
	emith_xbranch(cond, target, 1)

#define emith_call(target) \
	emith_call_cond(A_COND_AL, target)

#define emith_call_ctx(offs) { \
	emith_move_r_r(14, 15); \
	emith_jump_ctx(offs); \
}

#define emith_ret_c(cond) \
	emith_jump_reg_c(cond, 14)

#define emith_ret() \
	emith_ret_c(A_COND_AL)

#define emith_ret_to_ctx(offs) \
	emith_ctx_write(14, offs)

#define emith_push_ret() \
	EOP_STMFD_SP(A_R14M)

#define emith_pop_and_ret() \
	EOP_LDMFD_SP(A_R15M)

#define host_instructions_updated(base, end) \
	cache_flush_d_inval_i(base, end)

#define host_arg2reg(rd, arg) \
	rd = arg

/* SH2 drc specific */
/* pushes r12 for eabi alignment */
#define emith_sh2_drc_entry() \
	EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R12M|A_R14M)

#define emith_sh2_drc_exit() \
	EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R12M|A_R15M)

#define emith_sh2_wcall(a, tab, ret_ptr) { \
	int val_ = (char *)(ret_ptr) - (char *)tcache_ptr - 2*4; \
	if (val_ >= 0) \
		emith_add_r_r_imm(14, 15, val_); \
	else if (val_ < 0) \
		emith_sub_r_r_imm(14, 15, -val_); \
	emith_lsr(12, a, SH2_WRITE_SHIFT); \
	EOP_LDR_REG_LSL(A_COND_AL,12,tab,12,2); \
	emith_ctx_read(2, offsetof(SH2, is_slave)); \
	emith_jump_reg(12); \
}
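
/*
 * Informative note: the add/sub against r15 sets lr to ret_ptr; PC reads as
 * the current instruction + 8, which is what the "- 2*4" compensates for.
 */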

#define emith_sh2_dtbf_loop() { \
	int cr, rn;                                                          \
	int tmp_ = rcache_get_tmp();                                         \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW);                              \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);                    \
	emith_sub_r_imm(rn, 1);                /* sub rn, #1 */              \
	emith_bic_r_imm(cr, 1);                /* bic cr, #1 */              \
	emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \
	cycles = 0;                                                          \
	emith_asrf(tmp_, cr, 2+12);            /* movs tmp_, cr, asr #2+12 */\
	EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0);     /* movmi tmp_, #0 */          \
	emith_lsl(cr, cr, 20);                 /* mov cr, cr, lsl #20 */     \
	emith_lsr(cr, cr, 20);                 /* mov cr, cr, lsr #20 */     \
	emith_subf_r_r(rn, tmp_);              /* subs rn, tmp_ */           \
	EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0);  /* rsbls tmp_, rn, #0 */      \
	EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr,tmp_,lsl #12+2 */\
	EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1);    /* orrls cr, #1 */            \
	EOP_MOV_IMM_C(A_COND_LS,rn,0,0);       /* movls rn, #0 */            \
	rcache_free_tmp(tmp_);                                               \
}

#define emith_write_sr(sr, srcr) { \
	emith_lsr(sr, sr, 10); \
	emith_or_r_r_r_lsl(sr, sr, srcr, 22); \
	emith_ror(sr, sr, 22); \
}
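
/*
 * Informative note: the lsr/orr/ror sequence keeps the upper 22 bits of sr
 * (which the drc uses for other state, e.g. the cycle count) and replaces
 * the low 10 bits with those of srcr, without needing a scratch register.
 */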

#define emith_carry_to_t(srr, is_sub) { \
	if (is_sub) { /* has inverted C on ARM */ \
		emith_or_r_imm_c(A_COND_CC, srr, 1); \
		emith_bic_r_imm_c(A_COND_CS, srr, 1); \
	} else { \
		emith_or_r_imm_c(A_COND_CS, srr, 1); \
		emith_bic_r_imm_c(A_COND_CC, srr, 1); \
	} \
}
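
/*
 * Informative note: after an ARM subtraction C means "no borrow", the
 * opposite of the SH2 T bit convention, hence the swapped CS/CC pair in
 * the is_sub case.
 */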

#define emith_tpop_carry(sr, is_sub) {  \
	if (is_sub)                     \
		emith_eor_r_imm(sr, 1); \
	emith_lsrf(sr, sr, 1);          \
}

#define emith_tpush_carry(sr, is_sub) { \
	emith_adc_r_r(sr, sr);          \
	if (is_sub)                     \
		emith_eor_r_imm(sr, 1); \
}

/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) {         \
	void *jmp0, *jmp1;                        \
	emith_tst_r_imm(sr, Q);  /* if (Q ^ M) */ \
	JMP_POS(jmp0);           /* beq do_sub */ \
	emith_addf_r_r(rn, rm);                   \
	emith_eor_r_imm_c(A_COND_CS, sr, T);      \
	JMP_POS(jmp1);           /* b done */     \
	JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */  \
	emith_subf_r_r(rn, rm);                   \
	emith_eor_r_imm_c(A_COND_CC, sr, T);      \
	JMP_EMIT(A_COND_AL, jmp1); /* done: */    \
}