/*
 * note about silly things like emith_eor_r_r_r:
 * these are here because the compiler was designed
 * for ARM as its primary target.
 */

enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };

#define CONTEXT_REG xBP
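
/* xAX..xDI follow the x86-32 register numbering used in ModRM/SIB bytes;
 * CONTEXT_REG (EBP) holds the SH2 context pointer, which is why
 * emith_ctx_read/emith_ctx_write below address it as [ebp+disp8]. */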

#define IOP_JMP 0xeb
#define IOP_JO  0x70
#define IOP_JNO 0x71
#define IOP_JB  0x72
#define IOP_JAE 0x73
#define IOP_JE  0x74
#define IOP_JNE 0x75
#define IOP_JBE 0x76
#define IOP_JA  0x77
#define IOP_JS  0x78
#define IOP_JNS 0x79
#define IOP_JL  0x7c
#define IOP_JGE 0x7d
#define IOP_JLE 0x7e
#define IOP_JG  0x7f

// unified conditions (we just use rel8 jump instructions for x86)
#define DCOND_EQ IOP_JE
#define DCOND_NE IOP_JNE
#define DCOND_MI IOP_JS      // MInus
#define DCOND_PL IOP_JNS     // PLus or zero
#define DCOND_HI IOP_JA      // higher (unsigned)
#define DCOND_HS IOP_JAE     // higher || same (unsigned)
#define DCOND_LO IOP_JB      // lower (unsigned)
#define DCOND_LS IOP_JBE     // lower || same (unsigned)
#define DCOND_GE IOP_JGE     // greater || equal (signed)
#define DCOND_GT IOP_JG      // greater (signed)
#define DCOND_LE IOP_JLE     // less || equal (signed)
#define DCOND_LT IOP_JL      // less (signed)
#define DCOND_VS IOP_JO      // oVerflow Set
#define DCOND_VC IOP_JNO     // oVerflow Clear

#define EMIT_PTR(ptr, val, type) \
	*(type *)(ptr) = val

#define EMIT(val, type) { \
	EMIT_PTR(tcache_ptr, val, type); \
	tcache_ptr += sizeof(type); \
}

#define EMIT_OP(op) { \
	EMIT(op, u8); \
}

#define EMIT_MODRM(mod,r,rm) \
	EMIT(((mod)<<6) | ((r)<<3) | (rm), u8)

#define EMIT_SIB(scale,index,base) \
	EMIT(((scale)<<6) | ((index)<<3) | (base), u8)

#define EMIT_OP_MODRM(op,mod,r,rm) { \
	EMIT_OP(op); \
	EMIT_MODRM(mod, r, rm); \
}

#define JMP8_POS(ptr) \
	ptr = tcache_ptr; \
	tcache_ptr += 2

#define JMP8_EMIT(op, ptr) \
	EMIT_PTR(ptr, op, u8); \
	EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)
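
/* JMP8_POS reserves 2 bytes (opcode + rel8) for a forward jump whose target
 * isn't known yet; JMP8_EMIT later patches in the jump opcode and the 8-bit
 * displacement, counted from the byte following the 2-byte jump. */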

#define emith_move_r_r(dst, src) \
	EMIT_OP_MODRM(0x8b, 3, dst, src)

#define emith_add_r_r(d, s) \
	EMIT_OP_MODRM(0x01, 3, s, d)

#define emith_sub_r_r(d, s) \
	EMIT_OP_MODRM(0x29, 3, s, d)

#define emith_adc_r_r(d, s) \
	EMIT_OP_MODRM(0x11, 3, s, d)

#define emith_sbc_r_r(d, s) \
	EMIT_OP_MODRM(0x19, 3, s, d) /* SBB */

#define emith_or_r_r(d, s) \
	EMIT_OP_MODRM(0x09, 3, s, d)

#define emith_and_r_r(d, s) \
	EMIT_OP_MODRM(0x21, 3, s, d)

#define emith_eor_r_r(d, s) \
	EMIT_OP_MODRM(0x31, 3, s, d) /* XOR */

#define emith_tst_r_r(d, s) \
	EMIT_OP_MODRM(0x85, 3, s, d) /* TEST */

#define emith_cmp_r_r(d, s) \
	EMIT_OP_MODRM(0x39, 3, s, d)
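
/* the 0x01/0x29/... opcodes above are the "op r/m32, r32" encodings: with
 * mod=3 the rm field is the destination register and reg is the source,
 * hence the (s, d) argument order passed to EMIT_OP_MODRM; 0x8b
 * (mov r32, r/m32) goes the other way, so move takes (dst, src) directly. */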

// fake teq - test equivalence - get_flags(d ^ s)
#define emith_teq_r_r(d, s) { \
	emith_push(d); \
	emith_eor_r_r(d, s); \
	emith_pop(d); \
}

#define emith_mvn_r_r(d, s) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xf7, 3, 2, d); /* NOT d */ \
}

#define emith_negc_r_r(d, s) { \
	int tmp_ = rcache_get_tmp(); \
	emith_move_r_imm(tmp_, 0); \
	emith_sbc_r_r(tmp_, s); \
	emith_move_r_r(d, tmp_); \
	rcache_free_tmp(tmp_); \
}

#define emith_neg_r_r(d, s) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xf7, 3, 3, d); /* NEG d */ \
}

#define emith_eor_r_r_r(d, s1, s2) { \
	if (d == s1) { \
		emith_eor_r_r(d, s2); \
	} else if (d == s2) { \
		emith_eor_r_r(d, s1); \
	} else { \
		emith_move_r_r(d, s1); \
		emith_eor_r_r(d, s2); \
	} \
}

#define emith_or_r_r_lsl(d, s, lslimm) { \
	int tmp_ = rcache_get_tmp(); \
	emith_lsl(tmp_, s, lslimm); \
	emith_or_r_r(d, tmp_); \
	rcache_free_tmp(tmp_); \
}

// d != s
#define emith_eor_r_r_lsr(d, s, lsrimm) { \
	emith_push(s); \
	emith_lsr(s, s, lsrimm); \
	emith_eor_r_r(d, s); \
	emith_pop(s); \
}

#define emith_move_r_imm(r, imm) { \
	EMIT_OP(0xb8 + (r)); \
	EMIT(imm, u32); \
}

#define emith_move_r_imm_s8(r, imm) \
	emith_move_r_imm(r, (u32)(signed int)(signed char)(imm))

#define emith_arith_r_imm(op, r, imm) { \
	EMIT_OP_MODRM(0x81, 3, op, r); \
	EMIT(imm, u32); \
}

#define emith_add_r_imm(r, imm) \
	emith_arith_r_imm(0, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_arith_r_imm(1, r, imm)

#define emith_and_r_imm(r, imm) \
	emith_arith_r_imm(4, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_arith_r_imm(5, r, imm)

#define emith_eor_r_imm(r, imm) \
	emith_arith_r_imm(6, r, imm)

#define emith_cmp_r_imm(r, imm) \
	emith_arith_r_imm(7, r, imm)

#define emith_tst_r_imm(r, imm) { \
	EMIT_OP_MODRM(0xf7, 3, 0, r); \
	EMIT(imm, u32); \
}

#define emith_bic_r_imm(r, imm) \
	emith_arith_r_imm(4, r, ~(imm))
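
/* there is no AND-NOT (BIC) instruction on x86, so it is faked by ANDing
 * with the complemented immediate (0x81 /4). */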

// fake conditionals (using SJMP instead)
#define emith_move_r_imm_c(cond, r, imm) { \
	emith_move_r_imm(r, imm); \
}

#define emith_add_r_imm_c(cond, r, imm) { \
	emith_add_r_imm(r, imm); \
}

#define emith_or_r_imm_c(cond, r, imm) { \
	emith_or_r_imm(r, imm); \
}

#define emith_eor_r_imm_c(cond, r, imm) { \
	emith_eor_r_imm(r, imm); \
}

#define emith_sub_r_imm_c(cond, r, imm) { \
	emith_sub_r_imm(r, imm); \
}

#define emith_bic_r_imm_c(cond, r, imm) { \
	emith_bic_r_imm(r, imm); \
}

#define emith_and_r_r_imm(d, s, imm) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	emith_and_r_imm(d, imm) \
}

#define emith_shift(op, d, s, cnt) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xc1, 3, op, d); \
	EMIT(cnt, u8); \
}

#define emith_lsl(d, s, cnt) \
	emith_shift(4, d, s, cnt)

#define emith_lsr(d, s, cnt) \
	emith_shift(5, d, s, cnt)

#define emith_asr(d, s, cnt) \
	emith_shift(7, d, s, cnt)

#define emith_rol(d, s, cnt) \
	emith_shift(0, d, s, cnt)

#define emith_ror(d, s, cnt) \
	emith_shift(1, d, s, cnt)

#define emith_rolc(r) \
	EMIT_OP_MODRM(0xd1, 3, 2, r)

#define emith_rorc(r) \
	EMIT_OP_MODRM(0xd1, 3, 3, r)
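
/* 0xd1 /2 and /3 encode RCL/RCR by 1 - rotate through the carry flag. */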

#define emith_push(r) \
	EMIT_OP(0x50 + (r))

#define emith_pop(r) \
	EMIT_OP(0x58 + (r))

#define emith_neg_r(r) \
	EMIT_OP_MODRM(0xf7, 3, 3, r)

#define emith_clear_msb(d, s, count) { \
	u32 t = (u32)-1; \
	t >>= count; \
	if (d != s) \
		emith_move_r_r(d, s); \
	emith_and_r_imm(d, t); \
}

#define emith_clear_msb_c(cond, d, s, count) { \
	emith_clear_msb(d, s, count); \
}

#define emith_sext(d, s, bits) { \
	emith_lsl(d, s, 32 - (bits)); \
	emith_asr(d, d, 32 - (bits)); \
}

#define emith_setc(r) { \
	EMIT_OP(0x0f); \
	EMIT(0x92, u8); \
	EMIT_MODRM(3, 0, r); /* SETC r */ \
}

// put bit0 of r0 to carry
#define emith_set_carry(r0) { \
	emith_tst_r_imm(r0, 1); /* clears C */ \
	EMITH_SJMP_START(DCOND_EQ); \
	EMIT_OP(0xf9); /* STC */ \
	EMITH_SJMP_END(DCOND_EQ); \
}

// put bit0 of r0 to carry (for subtraction)
#define emith_set_carry_sub emith_set_carry
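
/* no inversion is needed for the subtraction case: x86 SBB treats CF as a
 * borrow, so bit0 can be loaded into CF the same way for both the
 * add-with-carry and subtract-with-carry paths (on ARM the carry would have
 * to be inverted, which is presumably why a separate name exists at all). */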

#define emith_mul_(op, dlo, dhi, s1, s2) { \
	int rmr; \
	if (dlo != xAX && dhi != xAX) \
		emith_push(xAX); \
	if (dlo != xDX && dhi != xDX) \
		emith_push(xDX); \
	if ((s1) == xAX) \
		rmr = s2; \
	else if ((s2) == xAX) \
		rmr = s1; \
	else { \
		emith_move_r_r(xAX, s1); \
		rmr = s2; \
	} \
	EMIT_OP_MODRM(0xf7, 3, op, rmr); /* xMUL rmr */ \
	/* XXX: using push/pop for the case of edx->eax; eax->edx */ \
	if (dhi != xDX && dhi != -1) \
		emith_push(xDX); \
	if (dlo != xAX) \
		emith_move_r_r(dlo, xAX); \
	if (dhi != xDX && dhi != -1) \
		emith_pop(dhi); \
	if (dlo != xDX && dhi != xDX) \
		emith_pop(xDX); \
	if (dlo != xAX && dhi != xAX) \
		emith_pop(xAX); \
}

#define emith_mul_u64(dlo, dhi, s1, s2) \
	emith_mul_(4, dlo, dhi, s1, s2) /* MUL */

#define emith_mul_s64(dlo, dhi, s1, s2) \
	emith_mul_(5, dlo, dhi, s1, s2) /* IMUL */

#define emith_mul(d, s1, s2) \
	emith_mul_(4, d, -1, s1, s2)
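
/* the one-operand MUL/IMUL forms (0xf7 /4, /5) always take one operand in
 * EAX and write the 64-bit result to EDX:EAX, which is why emith_mul_()
 * above has to save, shuffle and restore EAX/EDX around the multiply. */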

// (dlo,dhi) += signed(s1) * signed(s2)
#define emith_mula_s64(dlo, dhi, s1, s2) { \
	emith_push(dhi); \
	emith_push(dlo); \
	emith_mul_(5, dlo, dhi, s1, s2); \
	EMIT_OP_MODRM(0x03, 0, dlo, 4); \
	EMIT_SIB(0, 4, 4); /* add dlo, [esp] */ \
	EMIT_OP_MODRM(0x13, 1, dhi, 4); \
	EMIT_SIB(0, 4, 4); \
	EMIT(4, u8); /* adc dhi, [esp+4] */ \
	emith_add_r_imm(xSP, 4*2); \
}

// "flag" instructions are the same (x86 ALU ops always update EFLAGS)
#define emith_subf_r_imm emith_sub_r_imm
#define emith_addf_r_r   emith_add_r_r
#define emith_subf_r_r   emith_sub_r_r
#define emith_adcf_r_r   emith_adc_r_r
#define emith_sbcf_r_r   emith_sbc_r_r
#define emith_eorf_r_r   emith_eor_r_r
#define emith_negcf_r_r  emith_negc_r_r

#define emith_lslf  emith_lsl
#define emith_lsrf  emith_lsr
#define emith_asrf  emith_asr
#define emith_rolf  emith_rol
#define emith_rorf  emith_ror
#define emith_rolcf emith_rolc
#define emith_rorcf emith_rorc

// XXX: offs is 8bit only
#define emith_ctx_read(r, offs) do { \
	EMIT_OP_MODRM(0x8b, 1, r, xBP); \
	EMIT(offs, u8); /* mov tmp, [ebp+#offs] */ \
} while (0)

#define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
	int r_ = r, offs_ = offs, cnt_ = cnt; \
	for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
		emith_ctx_read(r_, offs_); \
} while (0)

#define emith_ctx_write(r, offs) do { \
	EMIT_OP_MODRM(0x89, 1, r, xBP); \
	EMIT(offs, u8); /* mov [ebp+#offs], tmp */ \
} while (0)

#define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
	int r_ = r, offs_ = offs, cnt_ = cnt; \
	for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
		emith_ctx_write(r_, offs_); \
} while (0)
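
/* note: mod=1 encodes a sign-extended disp8, so context offsets must stay
 * within -128..127 bytes of the context pointer (hence the XXX above). */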

#define emith_jump(ptr) { \
	u32 disp = (u32)ptr - ((u32)tcache_ptr + 5); \
	EMIT_OP(0xe9); \
	EMIT(disp, u32); \
}

#define emith_call(ptr) { \
	u32 disp = (u32)ptr - ((u32)tcache_ptr + 5); \
	EMIT_OP(0xe8); \
	EMIT(disp, u32); \
}

#define emith_call_cond(cond, ptr) \
	emith_call(ptr)

#define emith_jump_reg(r) \
	EMIT_OP_MODRM(0xff, 3, 4, r)

#define EMITH_JMP_START(cond) { \
	u8 *cond_ptr; \
	JMP8_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
	JMP8_EMIT(cond, cond_ptr); \
}

// "simple" jump (no more than a few insns)
#define EMITH_SJMP_START EMITH_JMP_START
#define EMITH_SJMP_END EMITH_JMP_END
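
/* the Jcc patched in by EMITH_JMP_END skips the guarded code when cond is
 * true, so callers pass the inverse of the condition under which the code
 * should run (see emith_set_carry above); the rel8 displacement also limits
 * the guarded body to +/-127 bytes, hence "no more than a few insns". */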

#define host_arg2reg(rd, arg) \
	switch (arg) { \
	case 0: rd = xAX; break; \
	case 1: rd = xDX; break; \
	case 2: rd = xCX; break; \
	}
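
/* argument registers are EAX, EDX, ECX - the GCC regparm(3) order, which the
 * DRC's C helper functions are presumably declared with. */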

#define emith_pass_arg_r(arg, reg) { \
	int rd = 7; \
	host_arg2reg(rd, arg); \
	emith_move_r_r(rd, reg); \
}

#define emith_pass_arg_imm(arg, imm) { \
	int rd = 7; \
	host_arg2reg(rd, arg); \
	emith_move_r_imm(rd, imm); \
}

/* SH2 drc specific */
// assumption: the generated code clobbers xBX and xBP (CONTEXT_REG),
// so entry/exit save and restore those two callee-saved registers
#define emith_sh2_drc_entry() { \
	emith_push(xBX); \
	emith_push(xBP); \
}

#define emith_sh2_drc_exit() { \
	emith_pop(xBP); \
	emith_pop(xBX); \
	EMIT_OP(0xc3); /* ret */ \
}

#define emith_sh2_test_t() { \
	int t = rcache_get_reg(SHR_SR, RC_GR_READ); \
	EMIT_OP_MODRM(0xf6, 3, 0, t); \
	EMIT(0x01, u8); /* test <reg>, byte 1 */ \
}

#define emith_sh2_dtbf_loop() { \
	u8 *jmp0; /* negative cycles check */ \
	u8 *jmp1; /* unsigned overflow check */ \
	int cr, rn; \
	int tmp_ = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
	emith_sub_r_imm(rn, 1); \
	emith_sub_r_imm(cr, (cycles+1) << 12); \
	cycles = 0; \
	emith_asr(tmp_, cr, 2+12); \
	JMP8_POS(jmp0); /* no negative cycles */ \
	emith_move_r_imm(tmp_, 0); \
	JMP8_EMIT(IOP_JNS, jmp0); \
	emith_and_r_imm(cr, 0xffe); \
	emith_subf_r_r(rn, tmp_); \
	JMP8_POS(jmp1); /* no overflow */ \
	emith_neg_r(rn); /* count left */ \
	emith_lsl(rn, rn, 2+12); \
	emith_or_r_r(cr, rn); \
	emith_or_r_imm(cr, 1); \
	emith_move_r_imm(rn, 0); \
	JMP8_EMIT(IOP_JA, jmp1); \
	rcache_free_tmp(tmp_); \
}

#define emith_write_sr(srcr) { \
	int tmp_ = rcache_get_tmp(); \
	int srr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	emith_clear_msb(tmp_, srcr, 20); \
	emith_bic_r_imm(srr, 0xfff); \
	emith_or_r_r(srr, tmp_); \
	rcache_free_tmp(tmp_); \
}

#define emith_carry_to_t(srr, is_sub) { \
	int tmp_ = rcache_get_tmp(); \
	emith_setc(tmp_); \
	emith_bic_r_imm(srr, 1); \
	EMIT_OP_MODRM(0x08, 3, tmp_, srr); /* OR srrl, tmpl */ \
	rcache_free_tmp(tmp_); \
}

/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) { \
	u8 *jmp0, *jmp1; \
	int tmp_ = rcache_get_tmp(); \
	emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
	JMP8_POS(jmp0); /* je do_sub */ \
	emith_add_r_r(rn, rm); \
	JMP8_POS(jmp1); /* jmp done */ \
	JMP8_EMIT(IOP_JE, jmp0); /* do_sub: */ \
	emith_sub_r_r(rn, rm); \
	JMP8_EMIT(IOP_JMP, jmp1); /* done: */ \
	emith_setc(tmp_); \
	EMIT_OP_MODRM(0x30, 3, tmp_, sr); /* T = Q1 ^ Q2 (byte) */ \
	rcache_free_tmp(tmp_); \
}