/*
 * notes:
 *  - temp registers must be eax-edx due to use of SETcc.
 *  - silly things like emith_eor_r_r_r are here because the compiler
 *    was designed for ARM as its primary target.
 */

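/*
 * (illustrative note, not in the original) SETcc writes an 8bit
 * register, and with mod=3 an rm value of 4-7 selects ah/ch/dh/bh
 * rather than anything derived from esp/ebp/esi/edi. So e.g.
 *   EMIT_OP(0x0f); EMIT_OP_MODRM(0x92, 3, 0, xSI); // setc dh (!)
 * would corrupt edx instead of touching esi, hence the eax-edx
 * restriction above.
 */
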
enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };

#define CONTEXT_REG xBP

// unified conditions (we just use rel8 jump instructions for x86)
#define DCOND_EQ IOP_JE
#define DCOND_NE IOP_JNE
#define DCOND_MI IOP_JS   // MInus
#define DCOND_PL IOP_JNS  // PLus or zero
#define DCOND_HI IOP_JA   // higher (unsigned)
#define DCOND_HS IOP_JAE  // higher || same (unsigned)
#define DCOND_LO IOP_JB   // lower (unsigned)
#define DCOND_LS IOP_JBE  // lower || same (unsigned)
#define DCOND_GE IOP_JGE  // greater || equal (signed)
#define DCOND_GT IOP_JG   // greater (signed)
#define DCOND_LE IOP_JLE  // less || equal (signed)
#define DCOND_LT IOP_JL   // less (signed)
#define DCOND_VS IOP_JO   // oVerflow Set
#define DCOND_VC IOP_JNO  // oVerflow Clear

#define EMIT_PTR(ptr, val, type) \
	*(type *)(ptr) = val

#define EMIT(val, type) { \
	EMIT_PTR(tcache_ptr, val, type); \
	tcache_ptr += sizeof(type); \
}

#define EMIT_OP(op) { \
	COUNT_OP; \
	EMIT(op, u8); \
}

#define EMIT_MODRM(mod,r,rm) \
	EMIT(((mod)<<6) | ((r)<<3) | (rm), u8)

#define EMIT_SIB(scale,index,base) \
	EMIT(((scale)<<6) | ((index)<<3) | (base), u8)

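/*
 * worked encoding example (added for clarity): "mov eax, ecx" is
 * opcode 0x8b with mod=3 (register direct), reg=eax, rm=ecx:
 *   EMIT_OP_MODRM(0x8b, 3, xAX, xCX); // bytes 8B C1 (C1 = 11 000 001)
 * In a SIB byte, index=4 means "no index" and base=4 selects esp, so
 * EMIT_SIB(0, 4, 4) encodes a plain [esp] operand.
 */
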
#define EMIT_OP_MODRM(op,mod,r,rm) { \
	EMIT_OP(op); \
	EMIT_MODRM(mod, r, rm); \
}

#define JMP8_POS(ptr) \
	ptr = tcache_ptr; \
	tcache_ptr += 2

#define JMP8_EMIT(op, ptr) \
	EMIT_PTR(ptr, op, u8); \
	EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)

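/*
 * usage sketch (added): JMP8_POS reserves 2 bytes for a forward jump
 * whose target is not known yet; JMP8_EMIT patches in the opcode and
 * the rel8 displacement, counted from the byte after the 2-byte insn.
 * Assuming IOP_JE is the 0x74 "je rel8" opcode byte:
 *   u8 *fwd;
 *   JMP8_POS(fwd);          // leave a 2-byte hole
 *   ...                     // code that may be skipped
 *   JMP8_EMIT(IOP_JE, fwd); // fwd[0] = 0x74, fwd[1] = distance to here
 */
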
#define emith_move_r_r(dst, src) \
	EMIT_OP_MODRM(0x8b, 3, dst, src)

#define emith_add_r_r(d, s) \
	EMIT_OP_MODRM(0x01, 3, s, d)

#define emith_sub_r_r(d, s) \
	EMIT_OP_MODRM(0x29, 3, s, d)

#define emith_adc_r_r(d, s) \
	EMIT_OP_MODRM(0x11, 3, s, d)

#define emith_sbc_r_r(d, s) \
	EMIT_OP_MODRM(0x19, 3, s, d) /* SBB */

#define emith_or_r_r(d, s) \
	EMIT_OP_MODRM(0x09, 3, s, d)

#define emith_and_r_r(d, s) \
	EMIT_OP_MODRM(0x21, 3, s, d)

#define emith_eor_r_r(d, s) \
	EMIT_OP_MODRM(0x31, 3, s, d) /* XOR */

#define emith_tst_r_r(d, s) \
	EMIT_OP_MODRM(0x85, 3, s, d) /* TEST */

#define emith_cmp_r_r(d, s) \
	EMIT_OP_MODRM(0x39, 3, s, d)

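/*
 * note (added): the s/d swap in the macros above comes from using the
 * "op r/m, reg" opcode forms (0x01, 0x29, ...), where the destination
 * sits in the rm field. E.g. emith_add_r_r(xAX, xCX) emits 01 C8,
 * i.e. "add eax, ecx".
 */
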
// fake teq - test equivalence - get_flags(d ^ s)
#define emith_teq_r_r(d, s) { \
	emith_push(d); \
	emith_eor_r_r(d, s); \
	emith_pop(d); \
}

#define emith_mvn_r_r(d, s) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xf7, 3, 2, d); /* NOT d */ \
}

#define emith_negc_r_r(d, s) { \
	int tmp_ = rcache_get_tmp(); \
	emith_move_r_imm(tmp_, 0); \
	emith_sbc_r_r(tmp_, s); \
	emith_move_r_r(d, tmp_); \
	rcache_free_tmp(tmp_); \
}

#define emith_neg_r_r(d, s) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xf7, 3, 3, d); /* NEG d */ \
}

#define emith_eor_r_r_r(d, s1, s2) { \
	if (d == s1) { \
		emith_eor_r_r(d, s2); \
	} else if (d == s2) { \
		emith_eor_r_r(d, s1); \
	} else { \
		emith_move_r_r(d, s1); \
		emith_eor_r_r(d, s2); \
	} \
}

#define emith_or_r_r_lsl(d, s, lslimm) { \
	int tmp_ = rcache_get_tmp(); \
	emith_lsl(tmp_, s, lslimm); \
	emith_or_r_r(d, tmp_); \
	rcache_free_tmp(tmp_); \
}

#define emith_eor_r_r_lsr(d, s, lsrimm) { \
	emith_push(s); \
	emith_lsr(s, s, lsrimm); \
	emith_eor_r_r(d, s); \
	emith_pop(s); \
}

#define emith_move_r_imm(r, imm) { \
	EMIT_OP(0xb8 + (r)); \
	EMIT(imm, u32); \
}

#define emith_move_r_imm_s8(r, imm) \
	emith_move_r_imm(r, (u32)(signed int)(signed char)(imm))

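/*
 * example (added): the double cast sign-extends the low byte, so
 * emith_move_r_imm_s8(xAX, 0xfe) emits "mov eax, 0xfffffffe"
 * (bytes B8 FE FF FF FF).
 */
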
#define emith_arith_r_imm(op, r, imm) { \
	EMIT_OP_MODRM(0x81, 3, op, r); \
	EMIT(imm, u32); \
}

#define emith_add_r_imm(r, imm) \
	emith_arith_r_imm(0, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_arith_r_imm(1, r, imm)

#define emith_and_r_imm(r, imm) \
	emith_arith_r_imm(4, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_arith_r_imm(5, r, imm)

#define emith_eor_r_imm(r, imm) \
	emith_arith_r_imm(6, r, imm)

#define emith_cmp_r_imm(r, imm) \
	emith_arith_r_imm(7, r, imm)

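/*
 * worked example (added): 0x81 is the ALU-with-imm32 group; "op" lands
 * in the ModRM reg field and selects the operation (0=add, 1=or, 4=and,
 * 5=sub, 6=xor, 7=cmp). So emith_add_r_imm(xCX, 0x10) emits
 * 81 C1 10 00 00 00, i.e. "add ecx, 0x10".
 */
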
#define emith_tst_r_imm(r, imm) { \
	EMIT_OP_MODRM(0xf7, 3, 0, r); \
	EMIT(imm, u32); \
}

#define emith_bic_r_imm(r, imm) \
	emith_arith_r_imm(4, r, ~(imm))

// fake conditionals (using SJMP instead)
#define emith_move_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_move_r_imm(r, imm); \
}

#define emith_add_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_add_r_imm(r, imm); \
}

#define emith_or_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_or_r_imm(r, imm); \
}

#define emith_eor_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_eor_r_imm(r, imm); \
}

#define emith_sub_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_sub_r_imm(r, imm); \
}

#define emith_bic_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_bic_r_imm(r, imm); \
}

#define emith_and_r_r_imm(d, s, imm) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	emith_and_r_imm(d, imm) \
}

#define emith_shift(op, d, s, cnt) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xc1, 3, op, d); \
	EMIT(cnt, u8); \
}

#define emith_lsl(d, s, cnt) \
	emith_shift(4, d, s, cnt)

#define emith_lsr(d, s, cnt) \
	emith_shift(5, d, s, cnt)

#define emith_asr(d, s, cnt) \
	emith_shift(7, d, s, cnt)

#define emith_rol(d, s, cnt) \
	emith_shift(0, d, s, cnt)

#define emith_ror(d, s, cnt) \
	emith_shift(1, d, s, cnt)

#define emith_rolc(r) \
	EMIT_OP_MODRM(0xd1, 3, 2, r)

#define emith_rorc(r) \
	EMIT_OP_MODRM(0xd1, 3, 3, r)

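/*
 * worked example (added): 0xc1 is the shift-by-imm8 group, again with
 * the subop in the reg field (4=shl, 5=shr, 7=sar, 0=rol, 1=ror). So
 * emith_lsl(xAX, xCX, 4) emits "mov eax, ecx" (8B C1) followed by
 * "shl eax, 4" (C1 E0 04). The 0xd1 forms above are the shift-by-1
 * group, used here for rcl/rcr through the carry flag.
 */
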
#define emith_push(r) \
	EMIT_OP(0x50 + (r))

#define emith_pop(r) \
	EMIT_OP(0x58 + (r))

#define emith_neg_r(r) \
	EMIT_OP_MODRM(0xf7, 3, 3, r)

#define emith_clear_msb(d, s, count) { \
	u32 t = (u32)-1; \
	t >>= count; \
	if (d != s) \
		emith_move_r_r(d, s); \
	emith_and_r_imm(d, t); \
}

#define emith_clear_msb_c(cond, d, s, count) { \
	(void)(cond); \
	emith_clear_msb(d, s, count); \
}

#define emith_sext(d, s, bits) { \
	emith_lsl(d, s, 32 - (bits)); \
	emith_asr(d, d, 32 - (bits)); \
}

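/*
 * example (added): emith_sext(d, s, 8) shifts left by 24 then
 * arithmetic-shifts right by 24, replicating bit 7 of s through
 * bits 8-31 of d; the classic sign-extension trick when movsx is
 * not being emitted.
 */
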
#define emith_setc(r) { \
	EMIT_OP(0x0f); \
	EMIT_OP_MODRM(0x92, 3, 0, r); /* SETC r */ \
}

#define emith_mul_(op, dlo, dhi, s1, s2) { \
	int rmr; \
	if (dlo != xAX && dhi != xAX) \
		emith_push(xAX); \
	if (dlo != xDX && dhi != xDX) \
		emith_push(xDX); \
	if ((s1) == xAX) \
		rmr = s2; \
	else if ((s2) == xAX) \
		rmr = s1; \
	else { \
		emith_move_r_r(xAX, s1); \
		rmr = s2; \
	} \
	EMIT_OP_MODRM(0xf7, 3, op, rmr); /* xMUL rmr */ \
	/* XXX: using push/pop for the case of edx->eax; eax->edx */ \
	if (dhi != xDX && dhi != -1) \
		emith_push(xDX); \
	if (dlo != xAX) \
		emith_move_r_r(dlo, xAX); \
	if (dhi != xDX && dhi != -1) \
		emith_pop(dhi); \
	if (dlo != xDX && dhi != xDX) \
		emith_pop(xDX); \
	if (dlo != xAX && dhi != xAX) \
		emith_pop(xAX); \
}

#define emith_mul_u64(dlo, dhi, s1, s2) \
	emith_mul_(4, dlo, dhi, s1, s2) /* MUL */

#define emith_mul_s64(dlo, dhi, s1, s2) \
	emith_mul_(5, dlo, dhi, s1, s2) /* IMUL */

#define emith_mul(d, s1, s2) \
	emith_mul_(4, d, -1, s1, s2)

// (dlo,dhi) += signed(s1) * signed(s2)
#define emith_mula_s64(dlo, dhi, s1, s2) { \
	emith_push(dhi); \
	emith_push(dlo); \
	emith_mul_(5, dlo, dhi, s1, s2); \
	EMIT_OP_MODRM(0x03, 0, dlo, 4); \
	EMIT_SIB(0, 4, 4); /* add dlo, [esp] */ \
	EMIT_OP_MODRM(0x13, 1, dhi, 4); \
	EMIT_SIB(0, 4, 4); \
	EMIT(4, u8); /* adc dhi, [esp+4] */ \
	emith_add_r_imm(xSP, 4*2); \
}

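/*
 * layout note (added): the two pushes leave the old pair at
 * [esp] = dlo and [esp+4] = dhi, so after the fresh multiply the
 * memory-operand add/adc accumulate the previous value into dlo:dhi
 * and "add esp, 8" drops the saved copy.
 */
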
357 // "flag" instructions are the same
358 #define emith_subf_r_imm emith_sub_r_imm
359 #define emith_addf_r_r emith_add_r_r
360 #define emith_subf_r_r emith_sub_r_r
361 #define emith_adcf_r_r emith_adc_r_r
362 #define emith_sbcf_r_r emith_sbc_r_r
363 #define emith_eorf_r_r emith_eor_r_r
364 #define emith_negcf_r_r emith_negc_r_r
366 #define emith_lslf emith_lsl
367 #define emith_lsrf emith_lsr
368 #define emith_asrf emith_asr
369 #define emith_rolf emith_rol
370 #define emith_rorf emith_ror
371 #define emith_rolcf emith_rolc
372 #define emith_rorcf emith_rorc
// XXX: offs is 8bit only
#define emith_ctx_read(r, offs) do { \
	EMIT_OP_MODRM(0x8b, 1, r, xBP); \
	EMIT(offs, u8); /* mov tmp, [ebp+#offs] */ \
} while (0)

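/*
 * worked example (added): mod=1 selects [reg+disp8], which is why the
 * XXX above limits offs to 8 bits. emith_ctx_read(xAX, 0x10) emits
 * 8B 45 10, i.e. "mov eax, [ebp+0x10]", ebp being CONTEXT_REG.
 */
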
#define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
	int r_ = r, offs_ = offs, cnt_ = cnt; \
	for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
		emith_ctx_read(r_, offs_); \
} while (0)

#define emith_ctx_write(r, offs) do { \
	EMIT_OP_MODRM(0x89, 1, r, xBP); \
	EMIT(offs, u8); /* mov [ebp+#offs], tmp */ \
} while (0)

#define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
	int r_ = r, offs_ = offs, cnt_ = cnt; \
	for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
		emith_ctx_write(r_, offs_); \
} while (0)

#define emith_jump(ptr) { \
	u32 disp = (u32)ptr - ((u32)tcache_ptr + 5); \
	EMIT_OP(0xe9); \
	EMIT(disp, u32); \
}

#define emith_call(ptr) { \
	u32 disp = (u32)ptr - ((u32)tcache_ptr + 5); \
	EMIT_OP(0xe8); \
	EMIT(disp, u32); \
}

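/*
 * worked example (added): both encodings are 5 bytes (opcode + rel32),
 * and rel32 is relative to the end of the instruction, hence the +5.
 * With tcache_ptr at 0x1000 and a target of 0x1100, disp comes out as
 * 0x1100 - 0x1005 = 0xfb, giving E9 FB 00 00 00 for the jump.
 */
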
#define emith_call_cond(cond, ptr) \
	emith_call(ptr)

#define emith_jump_reg(r) \
	EMIT_OP_MODRM(0xff, 3, 4, r)

#define EMITH_JMP_START(cond) { \
	u8 *cond_ptr; \
	JMP8_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
	JMP8_EMIT(cond, cond_ptr); \
}

423 // "simple" jump (no more then a few insns)
424 #define EMITH_SJMP_START EMITH_JMP_START
425 #define EMITH_SJMP_END EMITH_JMP_END
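/*
 * usage sketch (added): the "fake conditional" *_c macros above pair
 * with SJMP like this, with the caller supplying the inverted
 * condition to jump around the operation:
 *   EMITH_SJMP_START(DCOND_NE);         // skip the move if not equal
 *   emith_move_r_imm_c(DCOND_EQ, r, 1); // cond is only documentation
 *   EMITH_SJMP_END(DCOND_NE);
 */
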
#define host_arg2reg(rd, arg) \
	switch (arg) { \
	case 0: rd = xAX; break; \
	case 1: rd = xDX; break; \
	case 2: rd = xCX; break; \
	}

#define emith_pass_arg_r(arg, reg) { \
	int rd = 0; \
	host_arg2reg(rd, arg); \
	emith_move_r_r(rd, reg); \
}

#define emith_pass_arg_imm(arg, imm) { \
	int rd = 0; \
	host_arg2reg(rd, arg); \
	emith_move_r_imm(rd, imm); \
}

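/*
 * note (added, assumption): host_arg2reg maps arguments 0-2 to eax,
 * edx, ecx, which matches gcc's regparm(3) convention; callees invoked
 * through emith_call are presumably declared accordingly.
 */
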
/* SH2 drc specific */
#define emith_sh2_drc_entry() { \
	emith_push(xBX); \
	emith_push(xBP); \
	emith_push(xSI); \
	emith_push(xDI); \
}

#define emith_sh2_drc_exit() { \
	emith_pop(xDI); \
	emith_pop(xSI); \
	emith_pop(xBP); \
	emith_pop(xBX); \
	EMIT_OP(0xc3); /* ret */ \
}

#define emith_sh2_test_t() { \
	int t = rcache_get_reg(SHR_SR, RC_GR_READ); \
	EMIT_OP(0x66); \
	EMIT_OP_MODRM(0xf7, 3, 0, t); \
	EMIT(0x01, u16); /* test <reg>, word 1 */ \
}

#define emith_sh2_dtbf_loop() { \
	u8 *jmp0; /* negative cycles check */ \
	u8 *jmp1; /* unsigned overflow check */ \
	int cr, rn; \
	int tmp_ = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
	emith_sub_r_imm(rn, 1); \
	emith_sub_r_imm(cr, (cycles+1) << 12); \
	cycles = 0; \
	emith_asr(tmp_, cr, 2+12); \
	JMP8_POS(jmp0); /* no negative cycles */ \
	emith_move_r_imm(tmp_, 0); \
	JMP8_EMIT(IOP_JNS, jmp0); \
	emith_and_r_imm(cr, 0xffe); \
	emith_subf_r_r(rn, tmp_); \
	JMP8_POS(jmp1); /* no overflow */ \
	emith_neg_r(rn); /* count left */ \
	emith_lsl(rn, rn, 2+12); \
	emith_or_r_r(cr, rn); \
	emith_or_r_imm(cr, 1); \
	emith_move_r_imm(rn, 0); \
	JMP8_EMIT(IOP_JA, jmp1); \
	rcache_free_tmp(tmp_); \
}

#define emith_write_sr(srcr) { \
	int tmp_ = rcache_get_tmp(); \
	int srr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	emith_clear_msb(tmp_, srcr, 20); \
	emith_bic_r_imm(srr, 0xfff); \
	emith_or_r_r(srr, tmp_); \
	rcache_free_tmp(tmp_); \
}

#define emith_tpop_carry(sr, is_sub) \
	emith_lsr(sr, sr, 1)

#define emith_tpush_carry(sr, is_sub) \
	emith_adc_r_r(sr, sr)

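/*
 * worked example (added): T is bit 0 of SR. "shr sr, 1" pops it into
 * the host carry flag so an adc/sbb chain can consume it; "adc sr, sr"
 * shifts SR back left and pulls the resulting carry into bit 0 again.
 * is_sub is unused because x86 carry is already a borrow after sub,
 * unlike ARM's inverted carry, so no fixup is needed.
 */
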
/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) { \
	u8 *jmp0, *jmp1; \
	int tmp_ = rcache_get_tmp(); \
	emith_eor_r_r(tmp_, tmp_); \
	emith_tst_r_imm(sr, Q);  /* if (Q ^ M) */ \
	JMP8_POS(jmp0);          /* je do_sub */ \
	emith_add_r_r(rn, rm); \
	JMP8_POS(jmp1);          /* jmp done */ \
	JMP8_EMIT(IOP_JE, jmp0); /* do_sub: */ \
	emith_sub_r_r(rn, rm); \
	JMP8_EMIT(IOP_JMP, jmp1);/* done: */ \
	emith_adc_r_r(tmp_, tmp_); \
	EMIT_OP_MODRM(0x31, 3, tmp_, sr); /* T = Q1 ^ Q2 */ \
	rcache_free_tmp(tmp_); \
}

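/*
 * note (added): the xor that zeroes tmp_ above also clears the carry
 * flag; after the add or sub path runs, "adc tmp_, tmp_" turns the
 * resulting carry/borrow into 0 or 1, and the final "xor sr, tmp_"
 * flips the T bit (bit 0) exactly when it occurred.
 */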