/*
 * Basic macros to emit x86 instructions and some utils
 * Copyright (C) 2008,2009,2010 notaz
 *
 * This work is licensed under the terms of MAME license.
 * See COPYING file in the top-level directory.
 *
 * note:
 *  temp registers must be eax-edx due to use of SETcc and r/w 8/16.
 * note about silly things like emith_eor_r_r_r:
 *  these are here because the compiler was designed
 *  for ARM as its primary target.
 */

enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };

#define CONTEXT_REG xBP

#define ICOND_JO  0x00
#define ICOND_JNO 0x01
#define ICOND_JB  0x02
#define ICOND_JAE 0x03
#define ICOND_JE  0x04
#define ICOND_JNE 0x05
#define ICOND_JBE 0x06
#define ICOND_JA  0x07
#define ICOND_JS  0x08
#define ICOND_JNS 0x09
#define ICOND_JL  0x0c
#define ICOND_JGE 0x0d
#define ICOND_JLE 0x0e
#define ICOND_JG  0x0f

#define IOP_JMP   0xeb

// unified conditions (we just use rel8 jump instructions for x86)
#define DCOND_EQ ICOND_JE
#define DCOND_NE ICOND_JNE
#define DCOND_MI ICOND_JS       // MInus
#define DCOND_PL ICOND_JNS      // PLus or zero
#define DCOND_HI ICOND_JA       // higher (unsigned)
#define DCOND_HS ICOND_JAE      // higher || same (unsigned)
#define DCOND_LO ICOND_JB       // lower (unsigned)
#define DCOND_LS ICOND_JBE      // lower || same (unsigned)
#define DCOND_GE ICOND_JGE      // greater || equal (signed)
#define DCOND_GT ICOND_JG       // greater (signed)
#define DCOND_LE ICOND_JLE      // less || equal (signed)
#define DCOND_LT ICOND_JL       // less (signed)
#define DCOND_VS ICOND_JO       // oVerflow Set
#define DCOND_VC ICOND_JNO      // oVerflow Clear

#define EMIT_PTR(ptr, val, type) \
	*(type *)(ptr) = val

#define EMIT(val, type) do { \
	EMIT_PTR(tcache_ptr, val, type); \
	tcache_ptr += sizeof(type); \
} while (0)

#define EMIT_OP(op) do { \
	COUNT_OP; \
	EMIT(op, u8); \
} while (0)

#define EMIT_MODRM(mod,r,rm) \
	EMIT(((mod)<<6) | ((r)<<3) | (rm), u8)

#define EMIT_SIB(scale,index,base) \
	EMIT(((scale)<<6) | ((index)<<3) | (base), u8)

#define EMIT_REX(w,r,x,b) \
	EMIT(0x40 | ((w)<<3) | ((r)<<2) | ((x)<<1) | (b), u8)
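
/* Worked example (illustrative, not from the original source):
 * emith_move_r_r(xBX, xCX) below expands to EMIT_OP(0x8b) followed by
 * EMIT_MODRM(3, xBX, xCX), i.e. the bytes 8B D9 ("mov ebx, ecx"):
 * mod=3 (register direct), reg=xBX=3, rm=xCX=1, so
 * ModRM = (3<<6)|(3<<3)|1 = 0xD9. On x86-64, EMIT_REX(1,0,0,0) first
 * emits 0x48 (REX.W) to widen the operation to 64 bits. */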

#define EMIT_OP_MODRM(op,mod,r,rm) do { \
	EMIT_OP(op); \
	EMIT_MODRM(mod, r, rm); \
} while (0)

#define JMP8_POS(ptr) \
	ptr = tcache_ptr; \
	tcache_ptr += 2

#define JMP8_EMIT(op, ptr) \
	EMIT_PTR(ptr, 0x70|(op), u8); \
	EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)

#define JMP8_EMIT_NC(ptr) \
	EMIT_PTR(ptr, IOP_JMP, u8); \
	EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)
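
#if 0
/* usage sketch (illustrative, not part of the original): a forward branch
 * whose target is unknown while emitting. JMP8_POS() reserves 2 bytes and
 * remembers their address; JMP8_EMIT() later backpatches the Jcc opcode
 * (0x70 | cond) and the rel8 displacement to the current tcache_ptr. */
{
	u8 *fwd_;
	JMP8_POS(fwd_);             /* reserve space for the jump */
	emith_move_r_imm(xAX, 0);   /* code skipped when the condition holds */
	JMP8_EMIT(ICOND_JNE, fwd_); /* patch: "jne" over the move */
}
#endif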

#define emith_move_r_r(dst, src) \
	EMIT_OP_MODRM(0x8b, 3, dst, src)

#define emith_move_r_r_ptr(dst, src) do { \
	EMIT_REX_FOR_PTR(); \
	EMIT_OP_MODRM(0x8b, 3, dst, src); \
} while (0)

#define emith_add_r_r(d, s) \
	EMIT_OP_MODRM(0x01, 3, s, d)

#define emith_sub_r_r(d, s) \
	EMIT_OP_MODRM(0x29, 3, s, d)

#define emith_adc_r_r(d, s) \
	EMIT_OP_MODRM(0x11, 3, s, d)

#define emith_sbc_r_r(d, s) \
	EMIT_OP_MODRM(0x19, 3, s, d) /* SBB */

#define emith_or_r_r(d, s) \
	EMIT_OP_MODRM(0x09, 3, s, d)

#define emith_and_r_r(d, s) \
	EMIT_OP_MODRM(0x21, 3, s, d)

#define emith_eor_r_r(d, s) \
	EMIT_OP_MODRM(0x31, 3, s, d) /* XOR */

#define emith_tst_r_r(d, s) \
	EMIT_OP_MODRM(0x85, 3, s, d) /* TEST */

#define emith_cmp_r_r(d, s) \
	EMIT_OP_MODRM(0x39, 3, s, d)

// fake teq - test equivalence - get_flags(d ^ s)
#define emith_teq_r_r(d, s) do { \
	emith_push(d); \
	emith_eor_r_r(d, s); \
	emith_pop(d); \
} while (0)

#define emith_mvn_r_r(d, s) do { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xf7, 3, 2, d); /* NOT d */ \
} while (0)

#define emith_negc_r_r(d, s) do { \
	int tmp_ = rcache_get_tmp(); \
	emith_move_r_imm(tmp_, 0); \
	emith_sbc_r_r(tmp_, s); \
	emith_move_r_r(d, tmp_); \
	rcache_free_tmp(tmp_); \
} while (0)

#define emith_neg_r_r(d, s) do { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xf7, 3, 3, d); /* NEG d */ \
} while (0)

#define emith_add_r_r_r(d, s1, s2) do { \
	if (d == s1) { \
		emith_add_r_r(d, s2); \
	} else if (d == s2) { \
		emith_add_r_r(d, s1); \
	} else { \
		emith_move_r_r(d, s1); \
		emith_add_r_r(d, s2); \
	} \
} while (0)

#define emith_eor_r_r_r(d, s1, s2) do { \
	if (d == s1) { \
		emith_eor_r_r(d, s2); \
	} else if (d == s2) { \
		emith_eor_r_r(d, s1); \
	} else { \
		emith_move_r_r(d, s1); \
		emith_eor_r_r(d, s2); \
	} \
} while (0)

#define emith_or_r_r_lsl(d, s, lslimm) do { \
	int tmp_ = rcache_get_tmp(); \
	emith_lsl(tmp_, s, lslimm); \
	emith_or_r_r(d, tmp_); \
	rcache_free_tmp(tmp_); \
} while (0)

// d != s
#define emith_eor_r_r_lsr(d, s, lsrimm) do { \
	emith_push(s); \
	emith_lsr(s, s, lsrimm); \
	emith_eor_r_r(d, s); \
	emith_pop(s); \
} while (0)

#define emith_move_r_imm(r, imm) do { \
	EMIT_OP(0xb8 + (r)); \
	EMIT(imm, u32); \
} while (0)

#define emith_move_r_imm_s8(r, imm) \
	emith_move_r_imm(r, (u32)(signed int)(signed char)(imm))
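
/* e.g. emith_move_r_imm_s8(xAX, 0xfe) sign-extends the byte first and emits
 * "mov eax, 0xfffffffe" (bytes B8 FE FF FF FF), since mov r32 has no
 * sign-extending imm8 form. */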

#define emith_arith_r_imm(op, r, imm) do { \
	EMIT_OP_MODRM(0x81, 3, op, r); \
	EMIT(imm, u32); \
} while (0)

#define emith_add_r_imm(r, imm) \
	emith_arith_r_imm(0, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_arith_r_imm(1, r, imm)

#define emith_adc_r_imm(r, imm) \
	emith_arith_r_imm(2, r, imm)

#define emith_sbc_r_imm(r, imm) \
	emith_arith_r_imm(3, r, imm) // sbb

#define emith_and_r_imm(r, imm) \
	emith_arith_r_imm(4, r, imm)

/* used for sub cycles after test, so retain flags with lea */
#define emith_sub_r_imm(r, imm) do { \
	assert(r != xSP); \
	EMIT_OP_MODRM(0x8d, 2, r, r); \
	EMIT(-(s32)(imm), s32); \
} while (0)

#define emith_subf_r_imm(r, imm) \
	emith_arith_r_imm(5, r, imm)

#define emith_eor_r_imm(r, imm) \
	emith_arith_r_imm(6, r, imm)

#define emith_cmp_r_imm(r, imm) \
	emith_arith_r_imm(7, r, imm)

#define emith_tst_r_imm(r, imm) do { \
	EMIT_OP_MODRM(0xf7, 3, 0, r); \
	EMIT(imm, u32); \
} while (0)

#define emith_bic_r_imm(r, imm) \
	emith_arith_r_imm(4, r, ~(imm))
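
/* x86 has no BIC, so bits are cleared by ANDing with the complement:
 * emith_bic_r_imm(r, 7) becomes "and r, 0xfffffff8" (81 /4 imm32). */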

// fake conditionals (using SJMP instead)
#define emith_move_r_imm_c(cond, r, imm) do { \
	(void)(cond); \
	emith_move_r_imm(r, imm); \
} while (0)

#define emith_add_r_imm_c(cond, r, imm) do { \
	(void)(cond); \
	emith_add_r_imm(r, imm); \
} while (0)

#define emith_sub_r_imm_c(cond, r, imm) do { \
	(void)(cond); \
	emith_sub_r_imm(r, imm); \
} while (0)

#define emith_or_r_imm_c(cond, r, imm) \
	emith_or_r_imm(r, imm)
#define emith_eor_r_imm_c(cond, r, imm) \
	emith_eor_r_imm(r, imm)
#define emith_bic_r_imm_c(cond, r, imm) \
	emith_bic_r_imm(r, imm)
#define emith_ror_c(cond, d, s, cnt) \
	emith_ror(d, s, cnt)

#define emith_read_r_r_offs_c(cond, r, rs, offs) \
	emith_read_r_r_offs(r, rs, offs)
#define emith_write_r_r_offs_c(cond, r, rs, offs) \
	emith_write_r_r_offs(r, rs, offs)
#define emith_read8_r_r_offs_c(cond, r, rs, offs) \
	emith_read8_r_r_offs(r, rs, offs)
#define emith_write8_r_r_offs_c(cond, r, rs, offs) \
	emith_write8_r_r_offs(r, rs, offs)
#define emith_read16_r_r_offs_c(cond, r, rs, offs) \
	emith_read16_r_r_offs(r, rs, offs)
#define emith_write16_r_r_offs_c(cond, r, rs, offs) \
	emith_write16_r_r_offs(r, rs, offs)
#define emith_jump_reg_c(cond, r) \
	emith_jump_reg(r)
#define emith_jump_ctx_c(cond, offs) \
	emith_jump_ctx(offs)
#define emith_ret_c(cond) \
	emith_ret()

// _r_r_imm - use lea
#define emith_add_r_r_imm(d, s, imm) do { \
	assert(s != xSP); \
	EMIT_OP_MODRM(0x8d, 2, d, s); /* lea */ \
	EMIT(imm, s32); \
} while (0)

#define emith_add_r_r_ptr_imm(d, s, imm) do { \
	if ((s) != xSP) { \
		EMIT_REX_FOR_PTR(); \
		EMIT_OP_MODRM(0x8d, 2, d, s); /* lea */ \
	} else { \
		if (d != s) \
			emith_move_r_r_ptr(d, s); \
		EMIT_REX_FOR_PTR(); \
		EMIT_OP_MODRM(0x81, 3, 0, d); /* add */ \
	} \
	EMIT(imm, s32); \
} while (0)

#define emith_and_r_r_imm(d, s, imm) do { \
	if (d != s) \
		emith_move_r_r(d, s); \
	emith_and_r_imm(d, imm); \
} while (0)

#define emith_shift(op, d, s, cnt) do { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xc1, 3, op, d); \
	EMIT(cnt, u8); \
} while (0)

#define emith_lsl(d, s, cnt) \
	emith_shift(4, d, s, cnt)

#define emith_lsr(d, s, cnt) \
	emith_shift(5, d, s, cnt)

#define emith_asr(d, s, cnt) \
	emith_shift(7, d, s, cnt)

#define emith_rol(d, s, cnt) \
	emith_shift(0, d, s, cnt)

#define emith_ror(d, s, cnt) \
	emith_shift(1, d, s, cnt)
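
/* These all use the C1 /op shift group, e.g. emith_lsl(xAX, xAX, 4) emits
 * C1 E0 04 ("shl eax, 4"): ModRM 0xE0 = mod 3, /4 (SHL), rm xAX, followed
 * by the imm8 count. The rolc/rorc variants below use D1 /2 and D1 /3
 * ("rcl/rcr r, 1") to rotate by one through the carry flag. */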

#define emith_rolc(r) \
	EMIT_OP_MODRM(0xd1, 3, 2, r)

#define emith_rorc(r) \
	EMIT_OP_MODRM(0xd1, 3, 3, r)

#define emith_push(r) \
	EMIT_OP(0x50 + (r))

#define emith_push_imm(imm) do { \
	EMIT_OP(0x68); \
	EMIT(imm, u32); \
} while (0)

#define emith_pop(r) \
	EMIT_OP(0x58 + (r))

#define emith_neg_r(r) \
	EMIT_OP_MODRM(0xf7, 3, 3, r)

#define emith_clear_msb(d, s, count) { \
	u32 t = (u32)-1; \
	t >>= count; \
	emith_move_r_r(d, s); \
	emith_and_r_imm(d, t); \
}

#define emith_clear_msb_c(cond, d, s, count) { \
	(void)(cond); \
	emith_clear_msb(d, s, count); \
}

#define emith_sext(d, s, bits) { \
	emith_lsl(d, s, 32 - (bits)); \
	emith_asr(d, d, 32 - (bits)); \
}
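
/* e.g. emith_sext(d, s, 16) emits "shl d, 16" then "sar d, 16", replicating
 * bit 15 of s across the upper half of d, the usual shift-pair sign
 * extension. */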

#define emith_setc(r) do { \
	assert(is_abcdx(r)); \
	EMIT_OP(0x0f); \
	EMIT_OP_MODRM(0x92, 3, 0, r); /* SETC r */ \
} while (0)

#define emith_mul_(op, dlo, dhi, s1, s2) do { \
	int rmr; \
	if (dlo != xAX && dhi != xAX) \
		emith_push(xAX); \
	if (dlo != xDX && dhi != xDX) \
		emith_push(xDX); \
	if ((s1) == xAX) \
		rmr = s2; \
	else if ((s2) == xAX) \
		rmr = s1; \
	else { \
		emith_move_r_r(xAX, s1); \
		rmr = s2; \
	} \
	EMIT_OP_MODRM(0xf7, 3, op, rmr); /* xMUL rmr */ \
	/* XXX: using push/pop for the case of edx->eax; eax->edx */ \
	if (dhi != xDX && dhi != -1) \
		emith_push(xDX); \
	if (dlo != xAX) \
		emith_move_r_r(dlo, xAX); \
	if (dhi != xDX && dhi != -1) \
		emith_pop(dhi); \
	if (dlo != xDX && dhi != xDX) \
		emith_pop(xDX); \
	if (dlo != xAX && dhi != xAX) \
		emith_pop(xAX); \
} while (0)

#define emith_mul_u64(dlo, dhi, s1, s2) \
	emith_mul_(4, dlo, dhi, s1, s2) /* MUL */

#define emith_mul_s64(dlo, dhi, s1, s2) \
	emith_mul_(5, dlo, dhi, s1, s2) /* IMUL */

#define emith_mul(d, s1, s2) \
	emith_mul_(4, d, -1, s1, s2)
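
/* The F7 /4 and /5 forms fix their operands: they multiply by EAX and write
 * the result to EDX:EAX, which is what the conditional push/pop traffic in
 * emith_mul_ above works around. For instance, emith_mul(xBX, xBX, xCX)
 * saves eax/edx, runs "mul ecx" with eax = old ebx, copies the low result
 * from eax to ebx, and restores the saved registers. */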

// (dlo,dhi) += signed(s1) * signed(s2)
#define emith_mula_s64(dlo, dhi, s1, s2) do { \
	emith_push(dhi); \
	emith_push(dlo); \
	emith_mul_(5, dlo, dhi, s1, s2); \
	EMIT_OP_MODRM(0x03, 0, dlo, 4); \
	EMIT_SIB(0, 4, 4); /* add dlo, [xsp] */ \
	EMIT_OP_MODRM(0x13, 1, dhi, 4); \
	EMIT_SIB(0, 4, 4); \
	EMIT(sizeof(void *), u8); /* adc dhi, [xsp+{4,8}] */ \
	emith_add_r_r_ptr_imm(xSP, xSP, sizeof(void *) * 2); \
} while (0)
439 // "flag" instructions are the same
440 #define emith_addf_r_r emith_add_r_r
441 #define emith_subf_r_r emith_sub_r_r
442 #define emith_adcf_r_r emith_adc_r_r
443 #define emith_sbcf_r_r emith_sbc_r_r
444 #define emith_eorf_r_r emith_eor_r_r
445 #define emith_negcf_r_r emith_negc_r_r
447 #define emith_lslf emith_lsl
448 #define emith_lsrf emith_lsr
449 #define emith_asrf emith_asr
450 #define emith_rolf emith_rol
451 #define emith_rorf emith_ror
452 #define emith_rolcf emith_rolc
453 #define emith_rorcf emith_rorc

#define emith_deref_op(op, r, rs, offs) do { \
	/* mov r <-> [ebp+#offs] */ \
	if ((offs) >= 0x80) { \
		EMIT_OP_MODRM(op, 2, r, rs); \
		EMIT(offs, u32); \
	} else { \
		EMIT_OP_MODRM(op, 1, r, rs); \
		EMIT(offs, u8); \
	} \
} while (0)

#define is_abcdx(r) (xAX <= (r) && (r) <= xDX)
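
/* Plain (non-REX) byte ops can only address AL/CL/DL/BL as rm 0-3; rm 4-7
 * mean AH/CH/DH/BH instead, so e.g. xSI would encode DH. The 8-bit helpers
 * below use is_abcdx() to decide when the value must be bounced through a
 * temp register. */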

#define emith_read_r_r_offs(r, rs, offs) \
	emith_deref_op(0x8b, r, rs, offs)

#define emith_write_r_r_offs(r, rs, offs) \
	emith_deref_op(0x89, r, rs, offs)

// note: don't use prefixes on this
#define emith_read8_r_r_offs(r, rs, offs) do { \
	int r_ = r; \
	if (!is_abcdx(r)) \
		r_ = rcache_get_tmp(); \
	emith_deref_op(0x8a, r_, rs, offs); \
	if ((r) != r_) { \
		emith_move_r_r(r, r_); \
		rcache_free_tmp(r_); \
	} \
} while (0)

#define emith_write8_r_r_offs(r, rs, offs) do {\
	int r_ = r; \
	if (!is_abcdx(r)) { \
		r_ = rcache_get_tmp(); \
		emith_move_r_r(r_, r); \
	} \
	emith_deref_op(0x88, r_, rs, offs); \
	if ((r) != r_) \
		rcache_free_tmp(r_); \
} while (0)

#define emith_read16_r_r_offs(r, rs, offs) do { \
	EMIT(0x66, u8); /* operand override */ \
	emith_read_r_r_offs(r, rs, offs); \
} while (0)

#define emith_write16_r_r_offs(r, rs, offs) do { \
	EMIT(0x66, u8); \
	emith_write_r_r_offs(r, rs, offs); \
} while (0)

#define emith_ctx_read(r, offs) \
	emith_read_r_r_offs(r, CONTEXT_REG, offs)

#define emith_ctx_read_ptr(r, offs) do { \
	EMIT_REX_FOR_PTR(); \
	emith_deref_op(0x8b, r, CONTEXT_REG, offs); \
} while (0)

#define emith_ctx_write(r, offs) \
	emith_write_r_r_offs(r, CONTEXT_REG, offs)

#define emith_ctx_read_multiple(r, offs, cnt, tmpr) do { \
	int r_ = r, offs_ = offs, cnt_ = cnt; \
	for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
		emith_ctx_read(r_, offs_); \
} while (0)

#define emith_ctx_write_multiple(r, offs, cnt, tmpr) do { \
	int r_ = r, offs_ = offs, cnt_ = cnt; \
	for (; cnt_ > 0; r_++, offs_ += 4, cnt_--) \
		emith_ctx_write(r_, offs_); \
} while (0)

// assumes EBX is free
#define emith_ret_to_ctx(offs) { \
	emith_pop(xBX); \
	emith_ctx_write(xBX, offs); \
}

#define emith_jump(ptr) { \
	u32 disp = (u8 *)(ptr) - ((u8 *)tcache_ptr + 5); \
	EMIT_OP(0xe9); \
	EMIT(disp, u32); \
}

#define emith_jump_patchable(target) \
	emith_jump(target)

#define emith_jump_cond(cond, ptr) do { \
	u32 disp = (u8 *)(ptr) - ((u8 *)tcache_ptr + 6); \
	EMIT_OP(0x0f); \
	EMIT_OP(0x80 | (cond)); \
	EMIT(disp, u32); \
} while (0)

#define emith_jump_cond_patchable(cond, target) \
	emith_jump_cond(cond, target)

#define emith_jump_patch(ptr, target) do { \
	u32 disp_ = (u8 *)(target) - ((u8 *)(ptr) + 4); \
	u32 offs_ = (*(u8 *)(ptr) == 0x0f) ? 2 : 1; \
	EMIT_PTR((u8 *)(ptr) + offs_, disp_ - offs_, u32); \
} while (0)
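
/* Worked example: for a "jne" emitted by emith_jump_cond() the insn is
 * 0F 85 disp32, so offs_ = 2 and the stored disp32 is target - (ptr + 6);
 * for a plain E9 jmp, offs_ = 1 and it is target - (ptr + 5). The shared
 * disp_ - offs_ expression folds both cases together. */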

#define emith_jump_at(ptr, target) { \
	u32 disp_ = (u8 *)(target) - ((u8 *)(ptr) + 5); \
	EMIT_PTR(ptr, 0xe9, u8); \
	EMIT_PTR((u8 *)(ptr) + 1, disp_, u32); \
}

#define emith_call(ptr) { \
	u32 disp = (u8 *)(ptr) - ((u8 *)tcache_ptr + 5); \
	EMIT_OP(0xe8); \
	EMIT(disp, u32); \
}

#define emith_call_cond(cond, ptr) \
	emith_call(ptr)

#define emith_call_reg(r) \
	EMIT_OP_MODRM(0xff, 3, 2, r)

#define emith_call_ctx(offs) do { \
	EMIT_OP_MODRM(0xff, 2, 2, CONTEXT_REG); \
	EMIT(offs, u32); \
} while (0)

#define emith_ret() \
	EMIT_OP(0xc3)

#define emith_jump_reg(r) \
	EMIT_OP_MODRM(0xff, 3, 4, r)

#define emith_jump_ctx(offs) do { \
	EMIT_OP_MODRM(0xff, 2, 4, CONTEXT_REG); \
	EMIT(offs, u32); \
} while (0)

#define emith_push_ret()

#define emith_pop_and_ret() \
	emith_ret()

#define EMITH_JMP_START(cond) { \
	u8 *cond_ptr; \
	JMP8_POS(cond_ptr)

#define EMITH_JMP_END(cond) \
	JMP8_EMIT(cond, cond_ptr); \
}

#define EMITH_JMP3_START(cond) { \
	u8 *cond_ptr, *else_ptr; \
	JMP8_POS(cond_ptr)

#define EMITH_JMP3_MID(cond) \
	JMP8_POS(else_ptr); \
	JMP8_EMIT(cond, cond_ptr);

#define EMITH_JMP3_END() \
	JMP8_EMIT_NC(else_ptr); \
}

// "simple" jump (no more than a few insns)
// ARM will use conditional instructions here
#define EMITH_SJMP_DECL_() \
	u8 *cond_ptr

#define EMITH_SJMP_START_(cond) \
	JMP8_POS(cond_ptr)

#define EMITH_SJMP_END_(cond) \
	JMP8_EMIT(cond, cond_ptr)

#define EMITH_SJMP_START EMITH_JMP_START
#define EMITH_SJMP_END EMITH_JMP_END

#define EMITH_SJMP3_START EMITH_JMP3_START
#define EMITH_SJMP3_MID EMITH_JMP3_MID
#define EMITH_SJMP3_END EMITH_JMP3_END
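
#if 0
/* usage sketch (illustrative): the "fake conditional" ops earlier in this
 * file rely on the caller wrapping them in an SJMP on the inverted
 * condition, mimicking ARM's predicated instructions: */
	EMITH_SJMP_START(DCOND_NE);         /* skip body when not equal */
	emith_move_r_imm_c(DCOND_EQ, r, 1); /* executed only on DCOND_EQ */
	EMITH_SJMP_END(DCOND_NE);
#endif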

#define emith_pass_arg_r(arg, reg) do { \
	int rd = 7; \
	host_arg2reg(rd, arg); \
	emith_move_r_r_ptr(rd, reg); \
} while (0)

#define emith_pass_arg_imm(arg, imm) do { \
	int rd = 7; \
	host_arg2reg(rd, arg); \
	emith_move_r_imm(rd, imm); \
} while (0)

#define host_instructions_updated(base, end)

#ifdef __x86_64__

#define PTR_SCALE 3
#define NA_TMP_REG xCX // non-arg tmp from reg_temp[]

#define EMIT_REX_FOR_PTR() \
	EMIT_REX(1,0,0,0)

#define host_arg2reg(rd, arg) \
	switch (arg) { \
	case 0: rd = xDI; break; \
	case 1: rd = xSI; break; \
	case 2: rd = xDX; break; \
	}

#define emith_sh2_drc_entry() { \
	emith_push(xBX); \
	emith_push(xBP); \
	emith_push(xSI); /* to align */ \
}

#define emith_sh2_drc_exit() { \
	emith_pop(xSI); \
	emith_pop(xBP); \
	emith_pop(xBX); \
	emith_ret(); \
}

#else // !__x86_64__

#define PTR_SCALE 2
#define NA_TMP_REG xBX // non-arg tmp from reg_temp[]

#define EMIT_REX_FOR_PTR()

#define host_arg2reg(rd, arg) \
	switch (arg) { \
	case 0: rd = xAX; break; \
	case 1: rd = xDX; break; \
	case 2: rd = xCX; break; \
	}

#define emith_sh2_drc_entry() { \
	emith_push(xBX); \
	emith_push(xBP); \
	emith_push(xSI); \
	emith_push(xDI); \
}

#define emith_sh2_drc_exit() { \
	emith_pop(xDI); \
	emith_pop(xSI); \
	emith_pop(xBP); \
	emith_pop(xBX); \
	emith_ret(); \
}

#endif

#define emith_save_caller_regs(mask) do { \
	if ((mask) & (1 << xAX)) emith_push(xAX); \
	if ((mask) & (1 << xCX)) emith_push(xCX); \
	if ((mask) & (1 << xDX)) emith_push(xDX); \
	if ((mask) & (1 << xSI)) emith_push(xSI); \
	if ((mask) & (1 << xDI)) emith_push(xDI); \
} while (0)

#define emith_restore_caller_regs(mask) do { \
	if ((mask) & (1 << xDI)) emith_pop(xDI); \
	if ((mask) & (1 << xSI)) emith_pop(xSI); \
	if ((mask) & (1 << xDX)) emith_pop(xDX); \
	if ((mask) & (1 << xCX)) emith_pop(xCX); \
	if ((mask) & (1 << xAX)) emith_pop(xAX); \
} while (0)
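
/* e.g. mask = (1 << xAX) | (1 << xCX) around a call: save pushes eax then
 * ecx, restore pops ecx then eax. The restore must mirror the save order,
 * which is why the register list is walked backwards here. */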

#define emith_sh2_wcall(a, tab) { \
	int arg2_; \
	host_arg2reg(arg2_, 2); \
	emith_lsr(NA_TMP_REG, a, SH2_WRITE_SHIFT); \
	EMIT_REX_FOR_PTR(); \
	EMIT_OP_MODRM(0x8b, 0, NA_TMP_REG, 4); \
	EMIT_SIB(PTR_SCALE, NA_TMP_REG, tab); /* mov tmp, [tab + tmp * {4,8}] */ \
	emith_move_r_r_ptr(arg2_, CONTEXT_REG); \
	emith_jump_reg(NA_TMP_REG); \
}

#define emith_sh2_dtbf_loop() { \
	u8 *jmp0; /* negative cycles check */ \
	u8 *jmp1; /* unsigned overflow check */ \
	int cr, rn; \
	int tmp_ = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);\
	emith_sub_r_imm(rn, 1); \
	emith_sub_r_imm(cr, (cycles+1) << 12); \
	cycles = 0; \
	emith_asr(tmp_, cr, 2+12); \
	JMP8_POS(jmp0); /* no negative cycles */ \
	emith_move_r_imm(tmp_, 0); \
	JMP8_EMIT(ICOND_JNS, jmp0); \
	emith_and_r_imm(cr, 0xffe); \
	emith_subf_r_r(rn, tmp_); \
	JMP8_POS(jmp1); /* no overflow */ \
	emith_neg_r(rn); /* count left */ \
	emith_lsl(rn, rn, 2+12); \
	emith_or_r_r(cr, rn); \
	emith_or_r_imm(cr, 1); \
	emith_move_r_imm(rn, 0); \
	JMP8_EMIT(ICOND_JA, jmp1); \
	rcache_free_tmp(tmp_); \
}

#define emith_write_sr(sr, srcr) { \
	int tmp_ = rcache_get_tmp(); \
	emith_clear_msb(tmp_, srcr, 22); \
	emith_bic_r_imm(sr, 0x3ff); \
	emith_or_r_r(sr, tmp_); \
	rcache_free_tmp(tmp_); \
}

#define emith_tpop_carry(sr, is_sub) \
	emith_lsr(sr, sr, 1)

#define emith_tpush_carry(sr, is_sub) \
	emith_adc_r_r(sr, sr)
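
/* The SH2 T bit lives in SR bit 0: "shr sr, 1" pops it into the host carry
 * flag, and "adc sr, sr" shifts SR back left while inserting the updated
 * carry as the new T. The is_sub argument matters only on hosts whose
 * borrow flag is inverted after subtraction (e.g. ARM), so the x86
 * versions can ignore it. */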

/*
 * if Q
 *   t = carry(Rn += Rm)
 * else
 *   t = carry(Rn -= Rm)
 * T ^= t
 */

#define emith_sh2_div1_step(rn, rm, sr) { \
	u8 *jmp0, *jmp1; \
	int tmp_ = rcache_get_tmp(); \
	emith_eor_r_r(tmp_, tmp_); \
	emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \
	JMP8_POS(jmp0); /* je do_sub */ \
	emith_add_r_r(rn, rm); \
	JMP8_POS(jmp1); /* jmp done */ \
	JMP8_EMIT(ICOND_JE, jmp0); /* do_sub: */ \
	emith_sub_r_r(rn, rm); \
	JMP8_EMIT_NC(jmp1); /* done: */ \
	emith_adc_r_r(tmp_, tmp_); \
	emith_eor_r_r(sr, tmp_); \
	rcache_free_tmp(tmp_); \
}