enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };

#define CONTEXT_REG xBP

// rel8 jcc opcodes referenced below (standard x86 encodings)
#define IOP_JE  0x74
#define IOP_JNE 0x75
#define IOP_JA  0x77
#define IOP_JS  0x78
#define IOP_JNS 0x79

// unified conditions (we just use rel8 jump instructions for x86)
#define DCOND_EQ IOP_JE
#define DCOND_NE IOP_JNE
#define DCOND_MI IOP_JS      // MInus
#define DCOND_PL IOP_JNS     // PLus or zero
#define EMIT_PTR(ptr, val, type) \
	*(type *)(ptr) = val

#define EMIT(val, type) { \
	EMIT_PTR(tcache_ptr, val, type); \
	tcache_ptr += sizeof(type); \
}

#define EMIT_OP(op) { \
	EMIT(op, u8); \
}

#define EMIT_MODRM(mod,r,rm) \
	EMIT(((mod)<<6) | ((r)<<3) | (rm), u8)

#define EMIT_OP_MODRM(op,mod,r,rm) { \
	EMIT_OP(op); \
	EMIT_MODRM(mod, r, rm); \
}

// reserve 2 bytes for a jcc rel8, to be patched in later by JMP8_EMIT()
#define JMP8_POS(ptr) \
	ptr = tcache_ptr; \
	tcache_ptr += 2

#define JMP8_EMIT(op, ptr) \
	EMIT_PTR(ptr, op, u8); \
	EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)
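/* Usage sketch (not from this file): emit a forward branch whose rel8
   offset is not known yet - reserve 2 bytes with JMP8_POS(), emit the
   body, then patch the jcc in with JMP8_EMIT(). Registers and immediates
   are made up for illustration; the emith_* helpers are defined below. */
#if 0
{
	u8 *skip;
	emith_tst_r_imm(xAX, 1);        /* test eax, 1 - sets ZF */
	JMP8_POS(skip);                 /* placeholder for the jcc rel8 */
	emith_add_r_imm(xCX, 1);        /* runs only if bit 0 was set */
	JMP8_EMIT(DCOND_EQ, skip);      /* patch: jz over the add (ZF set = bit clear) */
}
#endif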
#define emith_move_r_r(dst, src) \
	EMIT_OP_MODRM(0x8b, 3, dst, src)

#define emith_add_r_r(d, s) \
	EMIT_OP_MODRM(0x01, 3, s, d)

#define emith_sub_r_r(d, s) \
	EMIT_OP_MODRM(0x29, 3, s, d)

#define emith_or_r_r(d, s) \
	EMIT_OP_MODRM(0x09, 3, s, d)

#define emith_eor_r_r(d, s) \
	EMIT_OP_MODRM(0x31, 3, s, d)

// fake teq - test equivalence - get_flags(d ^ s)
// d is saved/restored around the xor; pop does not affect the flags
#define emith_teq_r_r(d, s) { \
	emith_push(d); \
	emith_eor_r_r(d, s); \
	emith_pop(d); \
}
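/* Usage sketch (hypothetical registers): after emith_teq_r_r(), DCOND_EQ
   holds iff the two registers were equal, and the first register is left
   unchanged thanks to the push/pop around the xor. */
#if 0
	emith_teq_r_r(xSI, xDI);
	/* ...then branch or guard code with DCOND_EQ / DCOND_NE... */
#endif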
#define emith_move_r_imm(r, imm) { \
	EMIT_OP(0xb8 + (r)); \
	EMIT(imm, u32); \
}

#define emith_arith_r_imm(op, r, imm) { \
	EMIT_OP_MODRM(0x81, 3, op, r); \
	EMIT(imm, u32); \
}

// unused group-1 /r extensions: 2 - adc, 3 - sbb, 6 - xor, 7 - cmp
#define emith_add_r_imm(r, imm) \
	emith_arith_r_imm(0, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_arith_r_imm(1, r, imm)

#define emith_and_r_imm(r, imm) \
	emith_arith_r_imm(4, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_arith_r_imm(5, r, imm)

#define emith_tst_r_imm(r, imm) { \
	EMIT_OP_MODRM(0xf7, 3, 0, r); \
	EMIT(imm, u32); /* test r, imm32 */ \
}

#define emith_bic_r_imm(r, imm) \
	emith_arith_r_imm(4, r, ~(imm))
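/* The remaining group-1 extensions listed above are not wrapped here; a
   compare-with-immediate, for instance, would use the /7 form. Hypothetical
   helper, not part of this file: */
#if 0
#define emith_cmp_r_imm(r, imm) \
	emith_arith_r_imm(7, r, imm)
#endif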
// fake conditionals (the real condition is handled with EMITH_SJMP_* below)
#define emith_add_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_arith_r_imm(0, r, imm); \
}

#define emith_or_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_arith_r_imm(1, r, imm); \
}

#define emith_sub_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_arith_r_imm(5, r, imm); \
}
// imm8 shifts: 0xc1 /4 = shl, /5 = shr, /7 = sar
#define emith_shift(op, d, s, cnt) { \
	if ((d) != (s)) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xc1, 3, op, d); \
	EMIT(cnt, u8); \
}

#define emith_asr(d, s, cnt) \
	emith_shift(7, d, s, cnt)

#define emith_lsl(d, s, cnt) \
	emith_shift(4, d, s, cnt)
#define emith_push(r) \
	EMIT_OP(0x50 + (r))

#define emith_pop(r) \
	EMIT_OP(0x58 + (r))

#define emith_neg_r(r) \
	EMIT_OP_MODRM(0xf7, 3, 3, r)
// clear count most significant bits of s, result in d
#define emith_clear_msb(d, s, count) { \
	u32 t = (u32)-1; \
	t >>= count; \
	if ((d) != (s)) \
		emith_move_r_r(d, s); \
	emith_and_r_imm(d, t); \
}

// sign-extend the low `bits` bits of s into d
#define emith_sext(d, s, bits) { \
	emith_lsl(d, s, 32 - (bits)); \
	emith_asr(d, d, 32 - (bits)); \
}
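/* Worked example: emith_sext(d, s, 8) emits "shl d, 24" then "sar d, 24",
   replicating bit 7 of s into bits 8..31 of d (8-bit sign extension). */
#if 0
	emith_sext(xAX, xCX, 8);        /* eax = (s32)(s8)ecx */
#endif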
// d = s1 * s2 (low 32 bits); MUL needs one operand in eAX and clobbers eDX
#define emith_mul(d, s1, s2) { \
	int rmr = ((s1) == xAX) ? (s2) : (s1); \
	if ((s1) != xAX && (s2) != xAX) { \
		emith_move_r_r(xAX, s1); rmr = s2; \
	} \
	EMIT_OP_MODRM(0xf7, 3, 4, rmr); /* MUL rmr */ \
	if ((d) != xAX) \
		emith_move_r_r(d, xAX); \
}
180 // "flag" instructions are the same
181 #define emith_subf_r_imm emith_sub_r_imm
182 #define emith_subf_r_r emith_sub_r_r
184 // XXX: offs is 8bit only
185 #define emith_ctx_read(r, offs) { \
186 EMIT_OP_MODRM(0x8b, 1, r, xBP); \
187 EMIT(offs, u8); /* mov tmp, [ebp+#offs] */ \
190 #define emith_ctx_write(r, offs) { \
191 EMIT_OP_MODRM(0x89, 1, r, xBP); \
192 EMIT(offs, u8); /* mov [ebp+#offs], tmp */ \
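/* Usage sketch: the SH2 context pointer lives in CONTEXT_REG (ebp), so any
   context field whose offset fits in a signed 8-bit displacement (see the
   XXX above) can be loaded and stored directly. The 0x10 offset is made up
   for illustration; real callers would use an offset into the SH2 struct. */
#if 0
	emith_ctx_read(xAX, 0x10);      /* mov eax, [ebp+0x10] */
	emith_add_r_imm(xAX, 1);
	emith_ctx_write(xAX, 0x10);     /* mov [ebp+0x10], eax */
#endif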
// rel32 jump/call: disp is relative to the end of the 5-byte instruction
#define emith_jump(ptr) { \
	u32 disp = (u32)ptr - ((u32)tcache_ptr + 5); \
	EMIT_OP(0xe9); \
	EMIT(disp, u32); /* jmp rel32 */ \
}

#define emith_call(ptr) { \
	u32 disp = (u32)ptr - ((u32)tcache_ptr + 5); \
	EMIT_OP(0xe8); \
	EMIT(disp, u32); /* call rel32 */ \
}
207 // "simple" or "short" jump
208 #define EMITH_SJMP_START(cond) { \
212 #define EMITH_SJMP_END(cond) \
213 JMP8_EMIT(cond, cond_ptr); \
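/* Usage sketch: the *_c "fake conditional" ops above become conditional by
   jumping around them with the inverse condition. For example, to add only
   when DCOND_EQ holds (register and immediate are made up): */
#if 0
	EMITH_SJMP_START(DCOND_NE);             /* skip over if not equal */
	emith_add_r_imm_c(DCOND_EQ, xCX, 4);    /* executed only on EQ */
	EMITH_SJMP_END(DCOND_NE);
#endif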
// the first 3 call args are passed in registers (regparm(3)-style: eax, edx, ecx)
#define host_arg2reg(rd, arg) \
	switch (arg) { \
	case 0: rd = xAX; break; \
	case 1: rd = xDX; break; \
	case 2: rd = xCX; break; \
	}

#define emith_pass_arg_r(arg, reg) { \
	int rd = 0; \
	host_arg2reg(rd, arg); \
	emith_move_r_r(rd, reg); \
}

#define emith_pass_arg_imm(arg, imm) { \
	int rd = 0; \
	host_arg2reg(rd, arg); \
	emith_move_r_imm(rd, imm); \
}
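/* Usage sketch: arguments go into eax/edx/ecx, then a relative call is
   emitted. The callee name is hypothetical - any helper built for this
   register-argument convention would do. */
#if 0
	emith_pass_arg_imm(1, 0x1234);          /* arg1 -> edx */
	emith_pass_arg_r(0, xSI);               /* arg0 -> eax */
	emith_call(some_helper_function);
#endif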
/* SH2 drc specific */
#define emith_sh2_test_t() { \
	int t = rcache_get_reg(SHR_SR, RC_GR_READ); \
	EMIT_OP_MODRM(0xf6, 3, 0, t); \
	EMIT(0x01, u8); /* test <reg>, byte 1 - SR.T; only regs 0-3 (eAX..eBX) \
	                   have their low byte addressable this way */ \
}
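/* Usage sketch: after emith_sh2_test_t(), ZF is set iff SR.T is clear, so
   DCOND_EQ means "T was 0" and DCOND_NE means "T was 1". */
#if 0
	emith_sh2_test_t();
	EMITH_SJMP_START(DCOND_EQ);     /* skip if T == 0 */
	/* ...code that should only run when T is set... */
	EMITH_SJMP_END(DCOND_EQ);
#endif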
#define emith_sh2_dtbf_loop() { \
	u8 *jmp0; /* negative cycles check */ \
	u8 *jmp1; /* unsigned overflow check */ \
	int tmp, cr, rn; \
	tmp = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);\
	emith_sub_r_imm(rn, 1); \
	emith_sub_r_imm(cr, (cycles+1) << 12); \
	emith_asr(tmp, cr, 2+12); \
	JMP8_POS(jmp0); /* no negative cycles */ \
	emith_move_r_imm(tmp, 0); \
	JMP8_EMIT(IOP_JNS, jmp0); \
	emith_and_r_imm(cr, 0xffe); \
	emith_subf_r_r(rn, tmp); \
	JMP8_POS(jmp1); /* no overflow */ \
	emith_neg_r(rn); /* count left */ \
	emith_lsl(rn, rn, 2+12); \
	emith_or_r_r(cr, rn); \
	emith_or_r_imm(cr, 1); \
	emith_move_r_imm(rn, 0); \
	JMP8_EMIT(IOP_JA, jmp1); \
	rcache_free_tmp(tmp); \
}
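/* Roughly what the code emitted above computes, in C. This is a sketch and
   assumes the drc's SR layout implied by the shifts above: T in bit 0 and
   the remaining cycle budget in bits 12 and up; the extra shift by 2 treats
   one dt/bf pass as 4 cycles. sr, rn, t and cycles stand for the values in
   the registers used above. */
#if 0
	rn -= 1;                            /* the dt of this pass */
	sr -= (cycles + 1) << 12;           /* charge the cycles used so far */
	t = (s32)sr >> (2 + 12);            /* iterations the budget still allows */
	if (t < 0)
		t = 0;                          /* no negative cycles */
	sr &= 0xffe;                        /* clear cycle field and T */
	if (rn > t)                         /* budget runs out first */
		rn -= t;                        /* iterations still to do, T stays 0 */
	else {                              /* loop completes within the budget */
		sr |= (t - rn) << (2 + 12);     /* return the unused part of the budget */
		sr |= 1;                        /* T set: counter reached zero */
		rn = 0;
	}
#endif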