* note about silly things like emith_eor_r_r_r_lsl:
* these are here because the compiler was designed
* for ARM as its primary target.
// x86-32 general purpose registers, numbered as they appear in ModRM reg/rm fields
enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };
// ebp is permanently reserved to hold the emulated CPU context pointer
#define CONTEXT_REG xBP
// unified conditions (we just use rel8 jump instructions for x86)
// each DCOND_* maps 1:1 onto the matching x86 Jcc opcode constant
#define DCOND_EQ IOP_JE
#define DCOND_NE IOP_JNE
#define DCOND_MI IOP_JS      // MInus
#define DCOND_PL IOP_JNS     // PLus or zero
#define DCOND_HI IOP_JA      // higher (unsigned)
#define DCOND_HS IOP_JAE     // higher || same (unsigned)
#define DCOND_LO IOP_JB      // lower (unsigned)
#define DCOND_LS IOP_JBE     // lower || same (unsigned)
#define DCOND_GE IOP_JGE     // greater || equal (signed)
#define DCOND_GT IOP_JG      // greater (signed)
#define DCOND_LE IOP_JLE     // less || equal (signed)
#define DCOND_LT IOP_JL      // less (signed)
#define DCOND_VS IOP_JO      // oVerflow Set
#define DCOND_VC IOP_JNO     // oVerflow Clear
#define EMIT_PTR(ptr, val, type) /* store val of given type at ptr (body elided in this chunk) */ \
#define EMIT(val, type) { /* emit val into the translation cache, advance the write ptr */ \
	EMIT_PTR(tcache_ptr, val, type); \
	tcache_ptr += sizeof(type); \
#define EMIT_OP(op) { /* emit a single opcode byte (body elided in this chunk) */ \
#define EMIT_MODRM(mod,r,rm) /* ModRM byte: mod(bits 7-6) | reg(5-3) | rm(2-0) */ \
	EMIT(((mod)<<6) | ((r)<<3) | (rm), u8)
// emit an opcode byte followed by its ModRM byte
#define EMIT_OP_MODRM(op,mod,r,rm) { \
	EMIT_MODRM(mod, r, rm); \
#define JMP8_POS(ptr) /* reserve space for a 2-byte short jump, remembering its position (body elided) */ \
#define JMP8_EMIT(op, ptr) /* back-patch the reserved slot: jump opcode + rel8 to current position */ \
	EMIT_PTR(ptr, op, u8); \
	EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8) /* rel8 is relative to the end of the 2-byte insn */
// register-to-register ops (ModRM mod=3 = register direct).
// note: 0x8b MOV carries dst in the reg field; the ALU ops below carry src there.
#define emith_move_r_r(dst, src) \
	EMIT_OP_MODRM(0x8b, 3, dst, src)
// ADD d, s (01 /r)
#define emith_add_r_r(d, s) \
	EMIT_OP_MODRM(0x01, 3, s, d)
// SUB d, s (29 /r)
#define emith_sub_r_r(d, s) \
	EMIT_OP_MODRM(0x29, 3, s, d)
// ADC d, s (11 /r) - add with carry
#define emith_adc_r_r(d, s) \
	EMIT_OP_MODRM(0x11, 3, s, d)
#define emith_sbc_r_r(d, s) \
	EMIT_OP_MODRM(0x19, 3, s, d) /* SBB - subtract with borrow */
// OR d, s (09 /r)
#define emith_or_r_r(d, s) \
	EMIT_OP_MODRM(0x09, 3, s, d)
// AND d, s (21 /r)
#define emith_and_r_r(d, s) \
	EMIT_OP_MODRM(0x21, 3, s, d)
#define emith_eor_r_r(d, s) \
	EMIT_OP_MODRM(0x31, 3, s, d) /* XOR */
#define emith_tst_r_r(d, s) \
	EMIT_OP_MODRM(0x85, 3, s, d) /* TEST - sets flags, no write-back */
// CMP d, s (39 /r) - sets flags only
#define emith_cmp_r_r(d, s) \
	EMIT_OP_MODRM(0x39, 3, s, d)
// fake teq - test equivalence - get_flags(d ^ s)
// NOTE(review): lines of this macro are elided in this chunk; presumably d is
// saved/restored around the XOR so it is not clobbered - verify against full source
#define emith_teq_r_r(d, s) { \
	emith_eor_r_r(d, s); /* flags = d ^ s */ \
#define emith_eor_r_r_r(d, s1, s2) { /* d = s1 ^ s2 (aliasing cases elided in this chunk) */ \
	emith_move_r_r(d, s1); \
	emith_eor_r_r(d, s2); \
#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) { /* d = s1 | (s2 << lslimm) */ \
	if (d != s2 && d != s1) { \
		emith_lsl(d, s2, lslimm); /* safe: d aliases neither source */ \
		emith_or_r_r(d, s1); \
		emith_move_r_r(d, s1); \
		emith_lsl(s2, s2, lslimm); /* NOTE(review): s2 is clobbered on this path */ \
		emith_or_r_r(d, s2); \
#define emith_move_r_imm(r, imm) { /* MOV r, imm32 (B8+r; imm emission elided in this chunk) */ \
	EMIT_OP(0xb8 + (r)); \
#define emith_arith_r_imm(op, r, imm) { /* group-1 ALU op 81 /op with imm32 (imm emission elided) */ \
	EMIT_OP_MODRM(0x81, 3, op, r); \
// 81 /r extension values used below: 0 - add, 1 - or, 4 - and, 5 - sub, 7 - cmp
// (unused here: 2 - adc, 3 - sbb, 6 - xor)
#define emith_add_r_imm(r, imm) \
	emith_arith_r_imm(0, r, imm)
#define emith_or_r_imm(r, imm) \
	emith_arith_r_imm(1, r, imm)
#define emith_and_r_imm(r, imm) \
	emith_arith_r_imm(4, r, imm)
#define emith_sub_r_imm(r, imm) \
	emith_arith_r_imm(5, r, imm)
#define emith_cmp_r_imm(r, imm) \
	emith_arith_r_imm(7, r, imm)
#define emith_tst_r_imm(r, imm) { /* TEST r, imm32 (F7 /0; imm emission elided in this chunk) */ \
	EMIT_OP_MODRM(0xf7, 3, 0, r); \
#define emith_bic_r_imm(r, imm) /* ARM-style BIC: r &= ~imm, done as AND with inverted mask */ \
	emith_arith_r_imm(4, r, ~(imm))
// fake conditionals (using SJMP instead)
// x86 has no predicated ALU ops, so each op is wrapped in a short jump
// of the inverted condition (the SJMP lines are elided in this chunk)
#define emith_add_r_imm_c(cond, r, imm) { \
	emith_add_r_imm(r, imm); \
#define emith_or_r_imm_c(cond, r, imm) { \
	emith_or_r_imm(r, imm); \
#define emith_sub_r_imm_c(cond, r, imm) { \
	emith_sub_r_imm(r, imm); \
#define emith_bic_r_imm_c(cond, r, imm) { \
	emith_bic_r_imm(r, imm); \
#define emith_shift(op, d, s, cnt) { /* C1 /op: shift/rotate d = s by imm8 cnt */ \
	emith_move_r_r(d, s); /* NOTE(review): a line before this is elided - likely an if (d != s) guard */ \
	EMIT_OP_MODRM(0xc1, 3, op, d); \
// C1 /r extensions: 4 = SHL, 5 = SHR, 7 = SAR, 0 = ROL, 1 = ROR
#define emith_lsl(d, s, cnt) \
	emith_shift(4, d, s, cnt)
#define emith_lsr(d, s, cnt) \
	emith_shift(5, d, s, cnt)
#define emith_asr(d, s, cnt) \
	emith_shift(7, d, s, cnt)
#define emith_rol(d, s, cnt) \
	emith_shift(0, d, s, cnt)
#define emith_ror(d, s, cnt) \
	emith_shift(1, d, s, cnt)
// rotate through carry by 1: D1 /2 = RCL, D1 /3 = RCR
#define emith_rolc(r) \
	EMIT_OP_MODRM(0xd1, 3, 2, r)
#define emith_rorc(r) \
	EMIT_OP_MODRM(0xd1, 3, 3, r)
// push/pop a register on the native stack (opcode bodies elided in this chunk)
#define emith_push(r) \
#define emith_pop(r) \
#define emith_neg_r(r) /* NEG r: two's-complement negate (F7 /3) */ \
	EMIT_OP_MODRM(0xf7, 3, 3, r)
#define emith_clear_msb(d, s, count) { /* d = s with the top `count` bits cleared */ \
	emith_move_r_r(d, s); \
	emith_and_r_imm(d, t); /* NOTE(review): t is built in elided lines - presumably the low-bit mask */ \
#define emith_sext(d, s, bits) { /* sign-extend the low `bits` of s into d (shl then sar) */ \
	emith_lsl(d, s, 32 - (bits)); \
	emith_asr(d, d, 32 - (bits)); \
// put bit0 of r0 to carry
// (TEST always clears CF; STC is then skipped via short jump when bit0 == 0)
#define emith_set_carry(r0) { \
	emith_tst_r_imm(r0, 1); /* clears C */ \
	EMITH_SJMP_START(DCOND_EQ); \
	EMIT_OP(0xf9); /* STC */ \
	EMITH_SJMP_END(DCOND_EQ); \
// put bit0 of r0 to carry (for subtraction)
// the same sequence works for subtraction on x86, hence the plain alias
#define emith_set_carry_sub emith_set_carry
#define emith_mul_(op, dlo, dhi, s1, s2) { /* one-op MUL/IMUL: result lands in edx:eax */ \
	if (dlo != xAX && dhi != xAX) /* presumably save eax when not a destination (body elided) */ \
	if (dlo != xDX && dhi != xDX) /* presumably save edx likewise (body elided) */ \
	else if ((s2) == xAX) \
	emith_move_r_r(xAX, s1); /* multiplicand must be in eax */ \
	EMIT_OP_MODRM(0xf7, 3, op, rmr); /* xMUL rmr */ \
	/* XXX: using push/pop for the case of edx->eax; eax->edx */ \
	if (dhi != xDX && dhi != -1) \
	emith_move_r_r(dlo, xAX); /* low 32 bits of the product */ \
	if (dhi != xDX && dhi != -1) \
	if (dlo != xDX && dhi != xDX) \
	if (dlo != xAX && dhi != xAX) \
#define emith_mul_u64(dlo, dhi, s1, s2) /* unsigned 32x32 -> 64 */ \
	emith_mul_(4, dlo, dhi, s1, s2) /* MUL (F7 /4) */
// signed 32x32 -> 64
#define emith_mul_s64(dlo, dhi, s1, s2) \
	emith_mul_(5, dlo, dhi, s1, s2) /* IMUL (F7 /5) */
// 32-bit result only: dhi = -1 means the high half is discarded
#define emith_mul(d, s1, s2) \
	emith_mul_(4, d, -1, s1, s2)
// "flag" instructions are the same
// (x86 ALU and shift ops always update EFLAGS, unlike ARM's optional S bit)
#define emith_subf_r_imm emith_sub_r_imm
#define emith_addf_r_r emith_add_r_r
#define emith_subf_r_r emith_sub_r_r
#define emith_adcf_r_r emith_adc_r_r
#define emith_sbcf_r_r emith_sbc_r_r
#define emith_lslf emith_lsl
#define emith_lsrf emith_lsr
#define emith_asrf emith_asr
#define emith_rolf emith_rol
#define emith_rorf emith_ror
#define emith_rolcf emith_rolc
#define emith_rorcf emith_rorc
// XXX: offs is 8bit only
// (ModRM mod=1 encodes [ebp + disp8], so offs must fit in a signed byte)
#define emith_ctx_read(r, offs) { \
	EMIT_OP_MODRM(0x8b, 1, r, xBP); /* 8B = MOV r32, r/m32 */ \
	EMIT(offs, u8); /* mov tmp, [ebp+#offs] */ \
#define emith_ctx_write(r, offs) { \
	EMIT_OP_MODRM(0x89, 1, r, xBP); /* 89 = MOV r/m32, r32 */ \
	EMIT(offs, u8); /* mov [ebp+#offs], tmp */ \
#define emith_jump(ptr) { /* jump rel32; opcode emission elided - disp assumes a 5-byte insn */ \
	u32 disp = (u32)ptr - ((u32)tcache_ptr + 5); \
#define emith_call(ptr) { /* call rel32; disp is relative to the end of the 5-byte insn */ \
	u32 disp = (u32)ptr - ((u32)tcache_ptr + 5); \
// "simple" or "short" jump
#define EMITH_SJMP_START(cond) { /* reserve a Jcc rel8 over the guarded block (setup elided) */ \
#define EMITH_SJMP_END(cond) /* patch the jump reserved at START to land here */ \
	JMP8_EMIT(cond, cond_ptr); \
#define host_arg2reg(rd, arg) /* map call-argument index to its register; NOTE(review): eax/edx/ecx order looks like GCC regparm(3) - confirm */ \
	case 0: rd = xAX; break; \
	case 1: rd = xDX; break; \
	case 2: rd = xCX; break; \
#define emith_pass_arg_r(arg, reg) { /* move reg into the arg-th call-argument register */ \
	host_arg2reg(rd, arg); \
	emith_move_r_r(rd, reg); \
#define emith_pass_arg_imm(arg, imm) { /* load imm into the arg-th call-argument register */ \
	host_arg2reg(rd, arg); \
	emith_move_r_imm(rd, imm); \
/* SH2 drc specific */
// test the T bit (bit 0 of SR): F6 /0 = TEST r/m8, imm8
#define emith_sh2_test_t() { \
	int t = rcache_get_reg(SHR_SR, RC_GR_READ); \
	EMIT_OP_MODRM(0xf6, 3, 0, t); \
	EMIT(0x01, u8); /* test <reg>, byte 1 */ \
#define emith_sh2_dtbf_loop() { /* DT+BF loop: burn remaining cycles against the counter kept in SR bits 12+ */ \
	u8 *jmp0; /* negative cycles check */ \
	u8 *jmp1; /* unsigned overflow check */ \
	tmp = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);\
	emith_sub_r_imm(rn, 1); \
	emith_sub_r_imm(cr, (cycles+1) << 12); \
	emith_asr(tmp, cr, 2+12); /* tmp = remaining cycle count (cycles / 4) */ \
	JMP8_POS(jmp0); /* no negative cycles */ \
	emith_move_r_imm(tmp, 0); \
	JMP8_EMIT(IOP_JNS, jmp0); \
	emith_and_r_imm(cr, 0xffe); /* keep SR flag bits, zero the counter */ \
	emith_subf_r_r(rn, tmp); \
	JMP8_POS(jmp1); /* no overflow */ \
	emith_neg_r(rn); /* count left */ \
	emith_lsl(rn, rn, 2+12); /* move count back into SR's counter field */ \
	emith_or_r_r(cr, rn); \
	emith_or_r_imm(cr, 1); /* set T bit */ \
	emith_move_r_imm(rn, 0); \
	JMP8_EMIT(IOP_JA, jmp1); \
	rcache_free_tmp(tmp); \
#define emith_write_sr(srcr) { /* SR = (SR & ~0xfff) | (srcr & 0xfff) */ \
	int tmp = rcache_get_tmp(); \
	int srr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	emith_clear_msb(tmp, srcr, 20); /* tmp = srcr & 0xfff (clear top 20 bits) */ \
	emith_bic_r_imm(srr, 0xfff); \
	emith_or_r_r(srr, tmp); \
	rcache_free_tmp(tmp); \
#define emith_carry_to_t(srr, is_sub) { /* copy host carry flag into T (bit 0 of SR); is_sub presumably handled in elided lines */ \
	int tmp = rcache_get_tmp(); \
	EMIT_MODRM(3, 0, tmp); /* SETC - its opcode prefix bytes are elided in this chunk */ \
	emith_bic_r_imm(srr, 1); \
	EMIT_OP_MODRM(0x08, 3, tmp, srr); /* OR srrl, tmpl (08 = OR r/m8, r8) */ \
	rcache_free_tmp(tmp); \