/*
 * temp registers must be eax-edx due to use of SETcc.
 * note about silly things like emith_eor_r_r_r:
 * these are here because the compiler was designed
 * for ARM as its primary target.
 */

enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };

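/* note (added, informative): these enum values are exactly the IA-32
 * register numbers used in ModRM reg/rm fields (eax=0, ecx=1, ... edi=7),
 * so they can be dropped straight into EMIT_MODRM/EMIT_SIB below. */
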
#define CONTEXT_REG xBP

#define ICOND_JO  0x00
#define ICOND_JNO 0x01
#define ICOND_JB  0x02
#define ICOND_JAE 0x03
#define ICOND_JE  0x04
#define ICOND_JNE 0x05
#define ICOND_JBE 0x06
#define ICOND_JA  0x07
#define ICOND_JS  0x08
#define ICOND_JNS 0x09
#define ICOND_JL  0x0c
#define ICOND_JGE 0x0d
#define ICOND_JLE 0x0e
#define ICOND_JG  0x0f

#define IOP_JMP   0xeb

// unified conditions (we just use rel8 jump instructions for x86)
#define DCOND_EQ ICOND_JE
#define DCOND_NE ICOND_JNE
#define DCOND_MI ICOND_JS      // MInus
#define DCOND_PL ICOND_JNS     // PLus or zero
#define DCOND_HI ICOND_JA      // higher (unsigned)
#define DCOND_HS ICOND_JAE     // higher || same (unsigned)
#define DCOND_LO ICOND_JB      // lower (unsigned)
#define DCOND_LS ICOND_JBE     // lower || same (unsigned)
#define DCOND_GE ICOND_JGE     // greater || equal (signed)
#define DCOND_GT ICOND_JG      // greater (signed)
#define DCOND_LE ICOND_JLE     // less || equal (signed)
#define DCOND_LT ICOND_JL      // less (signed)
#define DCOND_VS ICOND_JO      // oVerflow Set
#define DCOND_VC ICOND_JNO     // oVerflow Clear

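/* illustrative example (added): a DCOND_* value is the 4-bit x86 condition
 * code, usable in both jump encodings this file emits. E.g. DCOND_EQ is
 * ICOND_JE = 0x4, so a short branch is 0x70|0x4 = 0x74 (je rel8) and a
 * near branch is 0x0f, 0x80|0x4 = 0f 84 (je rel32). */
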
#define EMIT_PTR(ptr, val, type) \
        *(type *)(ptr) = val

#define EMIT(val, type) { \
        EMIT_PTR(tcache_ptr, val, type); \
        tcache_ptr += sizeof(type); \
}

#define EMIT_OP(op) { \
        EMIT(op, u8); \
}

#define EMIT_MODRM(mod,r,rm) \
        EMIT(((mod)<<6) | ((r)<<3) | (rm), u8)

#define EMIT_SIB(scale,index,base) \
        EMIT(((scale)<<6) | ((index)<<3) | (base), u8)

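/* worked example (added): EMIT_MODRM(3, 0, xCX) packs mod=11b, reg=000b,
 * rm=001b into 0xc1 (register-direct, r/m = ecx); EMIT_SIB(0, 4, 4) packs
 * scale=1, index=none (100b), base=esp into 0x24, the esp-relative SIB
 * used by emith_mula_s64 below. */
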
#define EMIT_OP_MODRM(op,mod,r,rm) { \
        EMIT_OP(op); \
        EMIT_MODRM(mod, r, rm); \
}

#define JMP8_POS(ptr) \
        ptr = tcache_ptr; \
        tcache_ptr += 2

#define JMP8_EMIT(op, ptr) \
        EMIT_PTR(ptr, 0x70|(op), u8); \
        EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)

#define JMP8_EMIT_NC(ptr) \
        EMIT_PTR(ptr, IOP_JMP, u8); \
        EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)

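/* usage sketch (added, illustrative): the JMP8_* pair implements
 * reserve-then-backpatch for rel8 branches whose target isn't known yet:
 *
 *   u8 *skip;
 *   JMP8_POS(skip);             // reserve 2 bytes (opcode + rel8)
 *   ...                         // emit code to be jumped over
 *   JMP8_EMIT(ICOND_JNE, skip); // patch in "jne <current tcache_ptr>"
 *
 * EMITH_JMP_START/END further down wrap exactly this pattern. */
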
#define emith_move_r_r(dst, src) \
        EMIT_OP_MODRM(0x8b, 3, dst, src)

#define emith_add_r_r(d, s) \
        EMIT_OP_MODRM(0x01, 3, s, d)

#define emith_sub_r_r(d, s) \
        EMIT_OP_MODRM(0x29, 3, s, d)

#define emith_adc_r_r(d, s) \
        EMIT_OP_MODRM(0x11, 3, s, d)

#define emith_sbc_r_r(d, s) \
        EMIT_OP_MODRM(0x19, 3, s, d) /* SBB */

#define emith_or_r_r(d, s) \
        EMIT_OP_MODRM(0x09, 3, s, d)

#define emith_and_r_r(d, s) \
        EMIT_OP_MODRM(0x21, 3, s, d)

#define emith_eor_r_r(d, s) \
        EMIT_OP_MODRM(0x31, 3, s, d) /* XOR */

#define emith_tst_r_r(d, s) \
        EMIT_OP_MODRM(0x85, 3, s, d) /* TEST */

#define emith_cmp_r_r(d, s) \
        EMIT_OP_MODRM(0x39, 3, s, d)

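/* note (added, informative): the opcodes above are the x86 "MR" forms
 * (e.g. 0x01 is ADD r/m32, r32), where the ModRM reg field carries the
 * source and rm the destination - hence the swapped (s, d) argument order.
 * For example emith_add_r_r(xAX, xDX) emits 01 d0, i.e. add eax, edx. */
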
// fake teq - test equivalence - get_flags(d ^ s)
#define emith_teq_r_r(d, s) { \
        emith_push(d); \
        emith_eor_r_r(d, s); \
        emith_pop(d); /* restore d; pop does not affect flags */ \
}

#define emith_mvn_r_r(d, s) { \
        if (d != s) \
                emith_move_r_r(d, s); \
        EMIT_OP_MODRM(0xf7, 3, 2, d); /* NOT d */ \
}

#define emith_negc_r_r(d, s) { \
        int tmp_ = rcache_get_tmp(); \
        emith_move_r_imm(tmp_, 0); \
        emith_sbc_r_r(tmp_, s); \
        emith_move_r_r(d, tmp_); \
        rcache_free_tmp(tmp_); \
}

#define emith_neg_r_r(d, s) { \
        if (d != s) \
                emith_move_r_r(d, s); \
        EMIT_OP_MODRM(0xf7, 3, 3, d); /* NEG d */ \
}

#define emith_eor_r_r_r(d, s1, s2) { \
        if (d == s1) { \
                emith_eor_r_r(d, s2); \
        } else if (d == s2) { \
                emith_eor_r_r(d, s1); \
        } else { \
                emith_move_r_r(d, s1); \
                emith_eor_r_r(d, s2); \
        } \
}

#define emith_or_r_r_lsl(d, s, lslimm) { \
        int tmp_ = rcache_get_tmp(); \
        emith_lsl(tmp_, s, lslimm); \
        emith_or_r_r(d, tmp_); \
        rcache_free_tmp(tmp_); \
}

// note: s is temporarily clobbered by the shift and restored via push/pop
#define emith_eor_r_r_lsr(d, s, lsrimm) { \
        emith_push(s); \
        emith_lsr(s, s, lsrimm); \
        emith_eor_r_r(d, s); \
        emith_pop(s); \
}

#define emith_move_r_imm(r, imm) { \
        EMIT_OP(0xb8 + (r)); \
        EMIT(imm, u32); \
}

#define emith_move_r_imm_s8(r, imm) \
        emith_move_r_imm(r, (u32)(signed int)(signed char)(imm))

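/* worked example (added): emith_move_r_imm(xCX, 0x12345678) emits
 * b9 78 56 34 12 (mov ecx, 0x12345678; opcode 0xb8+reg, imm32 stored
 * little-endian). The _s8 variant only widens the constant at compile
 * time: emith_move_r_imm_s8(r, 0x80) stores 0xffffff80. */
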
#define emith_arith_r_imm(op, r, imm) do { \
        EMIT_OP_MODRM(0x81, 3, op, r); \
        EMIT(imm, u32); \
} while (0)

#define emith_add_r_imm(r, imm) \
        emith_arith_r_imm(0, r, imm)

#define emith_or_r_imm(r, imm) \
        emith_arith_r_imm(1, r, imm)

#define emith_and_r_imm(r, imm) \
        emith_arith_r_imm(4, r, imm)

#define emith_sub_r_imm(r, imm) \
        emith_arith_r_imm(5, r, imm)

#define emith_eor_r_imm(r, imm) \
        emith_arith_r_imm(6, r, imm)

#define emith_cmp_r_imm(r, imm) \
        emith_arith_r_imm(7, r, imm)

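/* note (added, informative): the first argument of emith_arith_r_imm is
 * the /digit of x86 immediate group 1 (0=ADD 1=OR 2=ADC 3=SBB 4=AND 5=SUB
 * 6=XOR 7=CMP). E.g. emith_cmp_r_imm(xBX, 0x100) emits 81 fb 00 01 00 00,
 * i.e. cmp ebx, 0x100. */
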
#define emith_tst_r_imm(r, imm) do { \
        EMIT_OP_MODRM(0xf7, 3, 0, r); \
        EMIT(imm, u32); \
} while (0)

#define emith_bic_r_imm(r, imm) \
        emith_arith_r_imm(4, r, ~(imm))

// fake conditionals (using SJMP instead)
#define emith_move_r_imm_c(cond, r, imm) { \
        (void)(cond); \
        emith_move_r_imm(r, imm); \
}

#define emith_add_r_imm_c(cond, r, imm) { \
        (void)(cond); \
        emith_add_r_imm(r, imm); \
}

#define emith_or_r_imm_c(cond, r, imm) { \
        (void)(cond); \
        emith_or_r_imm(r, imm); \
}

#define emith_eor_r_imm_c(cond, r, imm) { \
        (void)(cond); \
        emith_eor_r_imm(r, imm); \
}

#define emith_sub_r_imm_c(cond, r, imm) { \
        (void)(cond); \
        emith_sub_r_imm(r, imm); \
}

#define emith_bic_r_imm_c(cond, r, imm) { \
        (void)(cond); \
        emith_bic_r_imm(r, imm); \
}

#define emith_and_r_r_imm(d, s, imm) { \
        if (d != s) \
                emith_move_r_r(d, s); \
        emith_and_r_imm(d, imm); \
}

#define emith_shift(op, d, s, cnt) { \
        if (d != s) \
                emith_move_r_r(d, s); \
        EMIT_OP_MODRM(0xc1, 3, op, d); \
        EMIT(cnt, u8); \
}

#define emith_lsl(d, s, cnt) \
        emith_shift(4, d, s, cnt)

#define emith_lsr(d, s, cnt) \
        emith_shift(5, d, s, cnt)

#define emith_asr(d, s, cnt) \
        emith_shift(7, d, s, cnt)

#define emith_rol(d, s, cnt) \
        emith_shift(0, d, s, cnt)

#define emith_ror(d, s, cnt) \
        emith_shift(1, d, s, cnt)

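/* worked example (added): the shift op numbers are the /digit of the
 * 0xc1 shift group (0=ROL 1=ROR 4=SHL 5=SHR 7=SAR), so
 * emith_lsr(xDX, xDX, 16) emits c1 ea 10 (shr edx, 16). */
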
#define emith_rolc(r) \
        EMIT_OP_MODRM(0xd1, 3, 2, r) /* RCL r, 1 */

#define emith_rorc(r) \
        EMIT_OP_MODRM(0xd1, 3, 3, r) /* RCR r, 1 */

#define emith_push(r) \
        EMIT_OP(0x50 + (r))

#define emith_pop(r) \
        EMIT_OP(0x58 + (r))

#define emith_neg_r(r) \
        EMIT_OP_MODRM(0xf7, 3, 3, r)

#define emith_clear_msb(d, s, count) { \
        u32 t = (u32)-1; \
        t >>= count; \
        if (d != s) \
                emith_move_r_r(d, s); \
        emith_and_r_imm(d, t); \
}

#define emith_clear_msb_c(cond, d, s, count) { \
        (void)(cond); \
        emith_clear_msb(d, s, count); \
}

#define emith_sext(d, s, bits) { \
        emith_lsl(d, s, 32 - (bits)); \
        emith_asr(d, d, 32 - (bits)); \
}

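/* worked example (added): emith_sext(xAX, xAX, 16) emits c1 e0 10
 * (shl eax, 16) then c1 f8 10 (sar eax, 16), replicating bit 15 through
 * the upper half - the usual two-shift sign extension. */
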
#define emith_setc(r) { \
        EMIT_OP(0x0f); \
        EMIT_OP_MODRM(0x92, 3, 0, r); /* SETC r */ \
}

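/* note (added, informative): SETC is 0f 92 /r and writes an 8-bit
 * register; with mod=3, rm values 0-3 name al/cl/dl/bl, while 4-7 would
 * pick ah/ch/dh/bh rather than the intended register's low byte. This is
 * the SETcc restriction the header comment refers to: temporaries passed
 * here must be one of eax-edx. */
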
#define emith_mul_(op, dlo, dhi, s1, s2) { \
        int rmr; \
        if (dlo != xAX && dhi != xAX) \
                emith_push(xAX); \
        if (dlo != xDX && dhi != xDX) \
                emith_push(xDX); \
        if ((s1) == xAX) \
                rmr = s2; \
        else if ((s2) == xAX) \
                rmr = s1; \
        else { \
                emith_move_r_r(xAX, s1); \
                rmr = s2; \
        } \
        EMIT_OP_MODRM(0xf7, 3, op, rmr); /* xMUL rmr */ \
        /* XXX: using push/pop for the case of edx->eax; eax->edx */ \
        if (dhi != xDX && dhi != -1) \
                emith_push(xDX); \
        if (dlo != xAX) \
                emith_move_r_r(dlo, xAX); \
        if (dhi != xDX && dhi != -1) \
                emith_pop(dhi); \
        if (dlo != xDX && dhi != xDX) \
                emith_pop(xDX); \
        if (dlo != xAX && dhi != xAX) \
                emith_pop(xAX); \
}

#define emith_mul_u64(dlo, dhi, s1, s2) \
        emith_mul_(4, dlo, dhi, s1, s2) /* MUL */

#define emith_mul_s64(dlo, dhi, s1, s2) \
        emith_mul_(5, dlo, dhi, s1, s2) /* IMUL */

#define emith_mul(d, s1, s2) \
        emith_mul_(4, d, -1, s1, s2)

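/* note (added, informative): one-operand MUL/IMUL (f7 /4, /5) have fixed
 * operands - they multiply eax by r/m32 and leave the 64-bit product in
 * edx:eax. emith_mul_ hides that by saving/restoring eax/edx around the
 * multiply and moving the halves into (dlo, dhi); dhi == -1 means the
 * high half is not wanted (plain 32-bit emith_mul). */
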
// (dlo,dhi) += signed(s1) * signed(s2)
#define emith_mula_s64(dlo, dhi, s1, s2) { \
        emith_push(dhi); \
        emith_push(dlo); \
        emith_mul_(5, dlo, dhi, s1, s2); \
        EMIT_OP_MODRM(0x03, 0, dlo, 4); \
        EMIT_SIB(0, 4, 4); /* add dlo, [esp] */ \
        EMIT_OP_MODRM(0x13, 1, dhi, 4); \
        EMIT_SIB(0, 4, 4); \
        EMIT(4, u8); /* adc dhi, [esp+4] */ \
        emith_add_r_imm(xSP, 4*2); \
}

362 // "flag" instructions are the same
#define emith_subf_r_imm emith_sub_r_imm
#define emith_addf_r_r   emith_add_r_r
#define emith_subf_r_r   emith_sub_r_r
#define emith_adcf_r_r   emith_adc_r_r
#define emith_sbcf_r_r   emith_sbc_r_r
#define emith_eorf_r_r   emith_eor_r_r
#define emith_negcf_r_r  emith_negc_r_r

#define emith_lslf  emith_lsl
#define emith_lsrf  emith_lsr
#define emith_asrf  emith_asr
#define emith_rolf  emith_rol
#define emith_rorf  emith_ror
#define emith_rolcf emith_rolc
#define emith_rorcf emith_rorc

// XXX: offs is 8bit only
#define emith_ctx_read(r, offs) do { \
        EMIT_OP_MODRM(0x8b, 1, r, xBP); \
        EMIT(offs, u8); /* mov tmp, [ebp+#offs] */ \
} while (0)

#define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
        int r_ = r, offs_ = offs, cnt_ = cnt; \
        for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
                emith_ctx_read(r_, offs_); \
} while (0)

#define emith_ctx_write(r, offs) do { \
        EMIT_OP_MODRM(0x89, 1, r, xBP); \
        EMIT(offs, u8); /* mov [ebp+#offs], tmp */ \
} while (0)

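/* worked example (added): emith_ctx_read(xAX, 0x10) emits 8b 45 10
 * (mov eax, [ebp+0x10]) - mod=01 selects a disp8 and rm=5 is ebp, the
 * context register. emith_ctx_write is the mirror-image 89 store. Per
 * the XXX above, offs must fit in a sign-extended 8-bit displacement. */
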
#define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
        int r_ = r, offs_ = offs, cnt_ = cnt; \
        for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
                emith_ctx_write(r_, offs_); \
} while (0)

#define emith_jump(ptr) { \
        u32 disp = (u32)(ptr) - ((u32)tcache_ptr + 5); \
        EMIT_OP(0xe9); \
        EMIT(disp, u32); \
}

#define emith_jump_cond(cond, ptr) { \
        u32 disp = (u32)(ptr) - ((u32)tcache_ptr + 6); \
        EMIT_OP(0x0f); \
        EMIT_OP(0x80 | (cond)); \
        EMIT(disp, u32); \
}

#define emith_jump_patchable(cond) \
        emith_jump_cond(cond, 0)

#define emith_jump_patch(ptr, target) do { \
        u32 disp = (u32)(target) - ((u32)(ptr) + 6); \
        EMIT_PTR((u8 *)(ptr) + 2, disp, u32); \
} while (0)

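/* note (added, informative): rel32 displacements count from the end of
 * the instruction, hence the +5 for e9 (1 opcode + 4 disp bytes) and +6
 * for the two-byte 0f 8x form. emith_jump_patch assumes the 6-byte
 * conditional jump laid down by emith_jump_patchable, so it stores the
 * new displacement at ptr + 2. */
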
#define emith_call(ptr) { \
        u32 disp = (u32)(ptr) - ((u32)tcache_ptr + 5); \
        EMIT_OP(0xe8); \
        EMIT(disp, u32); \
}

// fake conditional call - emitted unconditionally, caller guards with SJMP
#define emith_call_cond(cond, ptr) \
        emith_call(ptr)

#define emith_jump_reg(r) \
        EMIT_OP_MODRM(0xff, 3, 4, r)

#define EMITH_JMP_START(cond) { \
        u8 *cond_ptr; \
        JMP8_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
        JMP8_EMIT(cond, cond_ptr); \
}

443 // "simple" jump (no more then a few insns)
444 #define EMITH_SJMP_START EMITH_JMP_START
445 #define EMITH_SJMP_END EMITH_JMP_END
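/* usage sketch (added, illustrative): how the "fake conditional" _c ops
 * above combine with SJMP on x86, where there is no predication:
 *
 *   emith_tst_r_imm(r, 1);
 *   EMITH_SJMP_START(DCOND_NE);         // reserve "jne <end>"
 *   emith_add_r_imm_c(DCOND_EQ, r, 4);  // plain add, reached only on eq
 *   EMITH_SJMP_END(DCOND_NE);           // backpatch the jne target
 */
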
#define host_arg2reg(rd, arg) \
        switch (arg) { \
        case 0: rd = xAX; break; \
        case 1: rd = xDX; break; \
        case 2: rd = xCX; break; \
        }

#define emith_pass_arg_r(arg, reg) { \
        int rd = 0; \
        host_arg2reg(rd, arg); \
        emith_move_r_r(rd, reg); \
}

#define emith_pass_arg_imm(arg, imm) { \
        int rd = 0; \
        host_arg2reg(rd, arg); \
        emith_move_r_imm(rd, imm); \
}

/* SH2 drc specific */
#define emith_sh2_drc_entry() { \
        emith_push(xBX); \
        emith_push(xBP); \
        emith_push(xSI); \
        emith_push(xDI); \
}

#define emith_sh2_drc_exit() { \
        emith_pop(xDI); \
        emith_pop(xSI); \
        emith_pop(xBP); \
        emith_pop(xBX); \
        EMIT_OP(0xc3); /* ret */ \
}

#define emith_sh2_dtbf_loop() { \
        u8 *jmp0; /* negative cycles check */ \
        u8 *jmp1; /* unsigned overflow check */ \
        int cr, rn; \
        int tmp_ = rcache_get_tmp(); \
        cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
        rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \
        emith_sub_r_imm(rn, 1); \
        emith_sub_r_imm(cr, (cycles+1) << 12); \
        cycles = 0; \
        emith_asr(tmp_, cr, 2+12); \
        JMP8_POS(jmp0); /* no negative cycles */ \
        emith_move_r_imm(tmp_, 0); \
        JMP8_EMIT(ICOND_JNS, jmp0); \
        emith_and_r_imm(cr, 0xffe); \
        emith_subf_r_r(rn, tmp_); \
        JMP8_POS(jmp1); /* no overflow */ \
        emith_neg_r(rn); /* count left */ \
        emith_lsl(rn, rn, 2+12); \
        emith_or_r_r(cr, rn); \
        emith_or_r_imm(cr, 1); \
        emith_move_r_imm(rn, 0); \
        JMP8_EMIT(ICOND_JA, jmp1); \
        rcache_free_tmp(tmp_); \
}

#define emith_write_sr(sr, srcr) { \
        int tmp_ = rcache_get_tmp(); \
        emith_clear_msb(tmp_, srcr, 22); \
        emith_bic_r_imm(sr, 0x3ff); \
        emith_or_r_r(sr, tmp_); \
        rcache_free_tmp(tmp_); \
}

#define emith_tpop_carry(sr, is_sub) \
        emith_lsr(sr, sr, 1)

#define emith_tpush_carry(sr, is_sub) \
        emith_adc_r_r(sr, sr)

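/* note (added, informative): T is bit 0 of the SH2 SR. tpop_carry moves
 * it into the host carry flag (shr sr, 1) ahead of a carry-consuming op,
 * and tpush_carry shifts the resulting carry back in (adc sr, sr doubles
 * sr and adds CF). is_sub is unused here: unlike ARM, x86 sets CF as a
 * borrow on subtraction, which already matches SH2's T convention. */
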
/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */
#define emith_sh2_div1_step(rn, rm, sr) { \
        u8 *jmp0, *jmp1; \
        int tmp_ = rcache_get_tmp(); \
        emith_eor_r_r(tmp_, tmp_); \
        emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
        JMP8_POS(jmp0); /* je do_sub */ \
        emith_add_r_r(rn, rm); \
        JMP8_POS(jmp1); /* jmp done */ \
        JMP8_EMIT(ICOND_JE, jmp0); /* do_sub: */ \
        emith_sub_r_r(rn, rm); \
        JMP8_EMIT_NC(jmp1); /* done: */ \
        emith_adc_r_r(tmp_, tmp_); /* tmp_ = carry out */ \
        EMIT_OP_MODRM(0x31, 3, tmp_, sr); /* T = Q1 ^ Q2 */ \
        rcache_free_tmp(tmp_); \
}

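/* walkthrough (added, informative): tmp_ starts at 0; depending on the Q
 * bit either the add or the sub runs, adc tmp_, tmp_ then captures the
 * resulting carry/borrow in bit 0, and the final xor folds it into T
 * (bit 0 of sr) - the "T ^= t" from the comment above. Q is the SR bit
 * constant defined by the surrounding drc code. */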