/*
 * PicoDrive DRC backend: x86 code emitter (cpu/drc/emit_x86.c)
 * 32x drc: all opcodes covered, some TODOs left
 */
/*
 * note about silly things like emith_or_r_r_r_lsl:
 * these are here because the compiler was designed
 * for ARM as its primary target.
 */
6#include <stdarg.h>
7
// x86-32 general purpose register numbers, as encoded in ModRM reg/rm fields
enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };

// register that holds the emulated-CPU context pointer (see emith_ctx_*)
#define CONTEXT_REG xBP
// opcode bytes of the x86 rel8 conditional jumps (0x70..0x7f family)
#define IOP_JO 0x70
#define IOP_JNO 0x71
#define IOP_JB 0x72
#define IOP_JAE 0x73
#define IOP_JE 0x74
#define IOP_JNE 0x75
#define IOP_JBE 0x76
#define IOP_JA 0x77
#define IOP_JS 0x78
#define IOP_JNS 0x79
#define IOP_JL 0x7c
#define IOP_JGE 0x7d
#define IOP_JLE 0x7e
#define IOP_JG 0x7f
26
// unified conditions (we just use rel8 jump instructions for x86)
// ARM-style condition names mapped directly to x86 jcc opcode bytes
#define DCOND_EQ IOP_JE
#define DCOND_NE IOP_JNE
#define DCOND_MI IOP_JS      // MInus
#define DCOND_PL IOP_JNS     // PLus or zero
#define DCOND_HI IOP_JA      // higher (unsigned)
#define DCOND_HS IOP_JAE     // higher || same (unsigned)
#define DCOND_LO IOP_JB      // lower (unsigned)
#define DCOND_LS IOP_JBE     // lower || same (unsigned)
#define DCOND_GE IOP_JGE     // greater || equal (signed)
#define DCOND_GT IOP_JG      // greater (signed)
#define DCOND_LE IOP_JLE     // less || equal (signed)
#define DCOND_LT IOP_JL      // less (signed)
#define DCOND_VS IOP_JO      // oVerflow Set
#define DCOND_VC IOP_JNO     // oVerflow Clear
42
// low-level emitters: write bytes into the translation cache.
// EMIT_PTR stores at an explicit location without advancing; EMIT stores
// at tcache_ptr and advances it.
// (fix: parenthesize `val` so expression arguments bind correctly)
#define EMIT_PTR(ptr, val, type) \
	*(type *)(ptr) = (val)

#define EMIT(val, type) { \
	EMIT_PTR(tcache_ptr, val, type); \
	tcache_ptr += sizeof(type); \
}

// emit one opcode byte; COUNT_OP is an external statistics hook
#define EMIT_OP(op) { \
	COUNT_OP; \
	EMIT(op, u8); \
}

// ModRM byte: mod (2 bits) | reg (3 bits) | r/m (3 bits)
#define EMIT_MODRM(mod,r,rm) \
	EMIT(((mod)<<6) | ((r)<<3) | (rm), u8)

#define EMIT_OP_MODRM(op,mod,r,rm) { \
	EMIT_OP(op); \
	EMIT_MODRM(mod, r, rm); \
}

// reserve 2 bytes for a to-be-patched rel8 jump, remembering the slot in ptr
// NOTE: multi-statement without braces -- do not use as an if/else body
#define JMP8_POS(ptr) \
	ptr = tcache_ptr; \
	tcache_ptr += 2

// patch a slot reserved by JMP8_POS: write the jump opcode and the rel8
// displacement from the slot to the current emit position
#define JMP8_EMIT(op, ptr) \
	EMIT_PTR(ptr, op, u8); \
	EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)
// _r_r: reg-reg ops, mod=3 (register direct).
// Note: the "store" opcode forms are used for the ALU ops, so the source
// goes into the ModRM reg field and the destination into the r/m field.
#define emith_move_r_r(dst, src) \
	EMIT_OP_MODRM(0x8b, 3, dst, src) /* MOV dst, src */

#define emith_add_r_r(d, s) \
	EMIT_OP_MODRM(0x01, 3, s, d) /* ADD d, s */

#define emith_sub_r_r(d, s) \
	EMIT_OP_MODRM(0x29, 3, s, d) /* SUB d, s */

#define emith_adc_r_r(d, s) \
	EMIT_OP_MODRM(0x11, 3, s, d) /* ADC d, s */

#define emith_sbc_r_r(d, s) \
	EMIT_OP_MODRM(0x19, 3, s, d) /* SBB */

#define emith_or_r_r(d, s) \
	EMIT_OP_MODRM(0x09, 3, s, d) /* OR d, s */

#define emith_and_r_r(d, s) \
	EMIT_OP_MODRM(0x21, 3, s, d) /* AND d, s */

#define emith_eor_r_r(d, s) \
	EMIT_OP_MODRM(0x31, 3, s, d) /* XOR */

#define emith_tst_r_r(d, s) \
	EMIT_OP_MODRM(0x85, 3, s, d) /* TEST */

#define emith_cmp_r_r(d, s) \
	EMIT_OP_MODRM(0x39, 3, s, d) /* CMP d, s */
// fake teq - test equivalence - get_flags(d ^ s)
// x86 has no TEQ: XOR into d for the flags, with d saved/restored around it
// (POP does not affect flags, so the XOR's flags survive)
#define emith_teq_r_r(d, s) { \
	emith_push(d); \
	emith_eor_r_r(d, s); \
	emith_pop(d); \
}

// d = ~s
#define emith_mvn_r_r(d, s) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xf7, 3, 2, d); /* NOT d */ \
}

// d = 0 - s - carry (SBB from a zeroed temp, so s is not clobbered)
#define emith_negc_r_r(d, s) { \
	int tmp_ = rcache_get_tmp(); \
	emith_move_r_imm(tmp_, 0); \
	emith_sbc_r_r(tmp_, s); \
	emith_move_r_r(d, tmp_); \
	rcache_free_tmp(tmp_); \
}

// d = -s
#define emith_neg_r_r(d, s) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xf7, 3, 3, d); /* NEG d */ \
}
// _r_r_r
// d = s1 ^ s2; order of sources is free since XOR commutes
#define emith_eor_r_r_r(d, s1, s2) { \
	if (d == s1) { \
		emith_eor_r_r(d, s2); \
	} else if (d == s2) { \
		emith_eor_r_r(d, s1); \
	} else { \
		emith_move_r_r(d, s1); \
		emith_eor_r_r(d, s2); \
	} \
}

// d = s1 | (s2 << lslimm), built in a temp so s1/s2 stay intact
#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) { \
	int tmp_ = rcache_get_tmp(); \
	emith_lsl(tmp_, s2, lslimm); \
	emith_or_r_r(tmp_, s1); \
	emith_move_r_r(d, tmp_); \
	rcache_free_tmp(tmp_); \
}
149
// _r_imm
#define emith_move_r_imm(r, imm) { \
	EMIT_OP(0xb8 + (r)); /* MOV r, imm32 */ \
	EMIT(imm, u32); \
}

// move a sign-extended 8-bit immediate
#define emith_move_r_imm_s8(r, imm) \
	emith_move_r_imm(r, (u32)(signed int)(signed char)(imm))

// group-1 ALU op, imm32 form (0x81); op is the ModRM /digit extension:
// 0=add 1=or 2=adc 3=sbb 4=and 5=sub 6=xor 7=cmp
#define emith_arith_r_imm(op, r, imm) { \
	EMIT_OP_MODRM(0x81, 3, op, r); \
	EMIT(imm, u32); \
}

// 2 - adc, 3 - sbb
#define emith_add_r_imm(r, imm) \
	emith_arith_r_imm(0, r, imm)

#define emith_or_r_imm(r, imm) \
	emith_arith_r_imm(1, r, imm)

#define emith_and_r_imm(r, imm) \
	emith_arith_r_imm(4, r, imm)

#define emith_sub_r_imm(r, imm) \
	emith_arith_r_imm(5, r, imm)

#define emith_eor_r_imm(r, imm) \
	emith_arith_r_imm(6, r, imm)

#define emith_cmp_r_imm(r, imm) \
	emith_arith_r_imm(7, r, imm)

#define emith_tst_r_imm(r, imm) { \
	EMIT_OP_MODRM(0xf7, 3, 0, r); /* TEST r, imm32 */ \
	EMIT(imm, u32); \
}

// fake
// x86 has no BIC: AND with the immediate complemented at compile time
#define emith_bic_r_imm(r, imm) \
	emith_arith_r_imm(4, r, ~(imm))
// fake conditionals (using SJMP instead)
// the cond argument is ignored; the op is emitted unconditionally and the
// caller is expected to wrap it in EMITH_SJMP_START/END for the effect
#define emith_add_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_add_r_imm(r, imm); \
}

#define emith_or_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_or_r_imm(r, imm); \
}

#define emith_sub_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_sub_r_imm(r, imm); \
}

#define emith_bic_r_imm_c(cond, r, imm) { \
	(void)(cond); \
	emith_bic_r_imm(r, imm); \
}
212
// _r_r_imm
// d = s & imm
// (fix: terminate the last statement -- the original relied on
// emith_and_r_imm happening to expand to a brace block; this also matches
// the style of e.g. emith_clear_msb)
#define emith_and_r_r_imm(d, s, imm) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	emith_and_r_imm(d, imm); \
}
219
// shift
// op is the /digit of the 0xc1 imm8 shift group:
// 4=shl 5=shr 7=sar 0=rol 1=ror
#define emith_shift(op, d, s, cnt) { \
	if (d != s) \
		emith_move_r_r(d, s); \
	EMIT_OP_MODRM(0xc1, 3, op, d); \
	EMIT(cnt, u8); \
}

#define emith_lsl(d, s, cnt) \
	emith_shift(4, d, s, cnt)

#define emith_lsr(d, s, cnt) \
	emith_shift(5, d, s, cnt)

#define emith_asr(d, s, cnt) \
	emith_shift(7, d, s, cnt)

#define emith_rol(d, s, cnt) \
	emith_shift(0, d, s, cnt)

#define emith_ror(d, s, cnt) \
	emith_shift(1, d, s, cnt)

// rotate through carry, by 1 (0xd1 group: /2 = RCL, /3 = RCR)
#define emith_rolc(r) \
	EMIT_OP_MODRM(0xd1, 3, 2, r)

#define emith_rorc(r) \
	EMIT_OP_MODRM(0xd1, 3, 3, r)
248
// misc
#define emith_push(r) \
	EMIT_OP(0x50 + (r)) /* PUSH r */

#define emith_pop(r) \
	EMIT_OP(0x58 + (r)) /* POP r */

#define emith_neg_r(r) \
	EMIT_OP_MODRM(0xf7, 3, 3, r) /* NEG r */
258
// d = s with the top `count` bits cleared (mask computed at compile time)
#define emith_clear_msb(d, s, count) { \
	u32 t = (u32)-1; \
	t >>= count; \
	if (d != s) \
		emith_move_r_r(d, s); \
	emith_and_r_imm(d, t); \
}

// d = s sign-extended from the low `bits` bits (shift up, arithmetic
// shift back down)
#define emith_sext(d, s, bits) { \
	emith_lsl(d, s, 32 - (bits)); \
	emith_asr(d, d, 32 - (bits)); \
}
271
// put bit0 of r0 to carry
// TEST always clears CF; STC is then executed only when bit0 was set
// (the SJMP skips it on DCOND_EQ, i.e. when the tested bit was 0)
#define emith_set_carry(r0) { \
	emith_tst_r_imm(r0, 1); /* clears C */ \
	EMITH_SJMP_START(DCOND_EQ); \
	EMIT_OP(0xf9); /* STC */ \
	EMITH_SJMP_END(DCOND_EQ); \
}

// put bit0 of r0 to carry (for subtraction)
#define emith_set_carry_sub emith_set_carry
282
// XXX: stupid mess
// one-operand MUL/IMUL: x86 fixes the implicit operands to EAX (input and
// low result) and EDX (high result), so both are saved/restored around the
// op unless they are also the requested destinations.
// op is the 0xf7 /digit: 4 = MUL, 5 = IMUL; dhi == -1 means the high 32
// bits of the result are not wanted.
#define emith_mul_(op, dlo, dhi, s1, s2) { \
	int rmr; \
	if (dlo != xAX && dhi != xAX) \
		emith_push(xAX); \
	if (dlo != xDX && dhi != xDX) \
		emith_push(xDX); \
	if ((s1) == xAX) \
		rmr = s2; \
	else if ((s2) == xAX) \
		rmr = s1; \
	else { \
		emith_move_r_r(xAX, s1); \
		rmr = s2; \
	} \
	EMIT_OP_MODRM(0xf7, 3, op, rmr); /* xMUL rmr */ \
	/* XXX: using push/pop for the case of edx->eax; eax->edx */ \
	if (dhi != xDX && dhi != -1) \
		emith_push(xDX); \
	if (dlo != xAX) \
		emith_move_r_r(dlo, xAX); \
	if (dhi != xDX && dhi != -1) \
		emith_pop(dhi); \
	if (dlo != xDX && dhi != xDX) \
		emith_pop(xDX); \
	if (dlo != xAX && dhi != xAX) \
		emith_pop(xAX); \
}

// dhi:dlo = s1 * s2 (unsigned 32x32 -> 64)
#define emith_mul_u64(dlo, dhi, s1, s2) \
	emith_mul_(4, dlo, dhi, s1, s2) /* MUL */

// dhi:dlo = s1 * s2 (signed 32x32 -> 64)
#define emith_mul_s64(dlo, dhi, s1, s2) \
	emith_mul_(5, dlo, dhi, s1, s2) /* IMUL */

// d = s1 * s2, low 32 bits only
#define emith_mul(d, s1, s2) \
	emith_mul_(4, d, -1, s1, s2)
320
// "flag" instructions are the same
// (x86 ALU/shift ops always update EFLAGS, so the *f variants that the
// ARM-targeted compiler distinguishes are plain aliases here)
#define emith_subf_r_imm emith_sub_r_imm
#define emith_addf_r_r emith_add_r_r
#define emith_subf_r_r emith_sub_r_r
#define emith_adcf_r_r emith_adc_r_r
#define emith_sbcf_r_r emith_sbc_r_r
#define emith_negcf_r_r emith_negc_r_r

#define emith_lslf emith_lsl
#define emith_lsrf emith_lsr
#define emith_asrf emith_asr
#define emith_rolf emith_rol
#define emith_rorf emith_ror
#define emith_rolcf emith_rolc
#define emith_rorcf emith_rorc
336
// context accessors: mov between a register and [ebp+offs]
// (CONTEXT_REG == xBP holds the emulated-CPU context pointer).
// Resolves the old "offs is 8bit only" TODO: a disp8 form is emitted when
// offs fits in a signed byte (byte-identical to the old encoding), and a
// disp32 form otherwise.
#define emith_ctx_rw_(op_, r, offs) { \
	int offs_ = (int)(offs); \
	if (offs_ >= -128 && offs_ < 128) { \
		EMIT_OP_MODRM(op_, 1, r, xBP); \
		EMIT((u8)offs_, u8); \
	} else { \
		EMIT_OP_MODRM(op_, 2, r, xBP); \
		EMIT((u32)offs_, u32); \
	} \
}

#define emith_ctx_read(r, offs) \
	emith_ctx_rw_(0x8b, r, offs) /* mov r, [ebp+#offs] */

#define emith_ctx_write(r, offs) \
	emith_ctx_rw_(0x89, r, offs) /* mov [ebp+#offs], r */
347
// emit a jmp rel32 to ptr (displacement is relative to the instruction end,
// i.e. 5 bytes past the opcode).
// (fix: parenthesize the macro argument -- `(u32)ptr` would bind only to
// the first operand of an expression argument)
#define emith_jump(ptr) { \
	u32 disp = (u32)(ptr) - ((u32)tcache_ptr + 5); \
	EMIT_OP(0xe9); \
	EMIT(disp, u32); \
}

// emit a call rel32 to ptr; same displacement rule as emith_jump
#define emith_call(ptr) { \
	u32 disp = (u32)(ptr) - ((u32)tcache_ptr + 5); \
	EMIT_OP(0xe8); \
	EMIT(disp, u32); \
}
359
// "simple" or "short" jump
// START opens a scope and reserves a rel8 jump slot; END patches the slot
// with the given jcc opcode so the enclosed code is SKIPPED when `cond`
// holds. Note: only END's cond is encoded; START's is documentation.
#define EMITH_SJMP_START(cond) { \
	u8 *cond_ptr; \
	JMP8_POS(cond_ptr)

#define EMITH_SJMP_END(cond) \
	JMP8_EMIT(cond, cond_ptr); \
}
368
// map a function argument index to the register it is passed in
// (regparm(3)-like order: arg0=eax, arg1=edx, arg2=ecx).
// rd is left untouched for arg > 2; callers preset it to 7 (xDI)
#define host_arg2reg(rd, arg) \
	switch (arg) { \
	case 0: rd = xAX; break; \
	case 1: rd = xDX; break; \
	case 2: rd = xCX; break; \
	}

// move reg into the register for argument #arg
#define emith_pass_arg_r(arg, reg) { \
	int rd = 7; \
	host_arg2reg(rd, arg); \
	emith_move_r_r(rd, reg); \
}

// load imm into the register for argument #arg
#define emith_pass_arg_imm(arg, imm) { \
	int rd = 7; \
	host_arg2reg(rd, arg); \
	emith_move_r_imm(rd, imm); \
}
387
/* SH2 drc specific */
// test the T bit (bit0) of SR, leaving the result in the host Z flag.
// NOTE(review): 0xf6 /0 is the 8-bit TEST form, so the SR cache register
// presumably must be byte-addressable (eax..ebx) -- confirm with rcache
#define emith_sh2_test_t() { \
	int t = rcache_get_reg(SHR_SR, RC_GR_READ); \
	EMIT_OP_MODRM(0xf6, 3, 0, t); \
	EMIT(0x01, u8); /* test <reg>, byte 1 */ \
}
394
// optimized DT+BF loop: consume as many loop iterations at once as the
// remaining cycle budget allows, instead of running the block once per
// iteration. Reads `op` (current opcode) and `cycles` from the calling
// scope and zeroes `cycles`.
// NOTE(review): assumes the cycle counter lives in SR bits 12+ (see the
// <<12 / >>(2+12) arithmetic) -- confirm against the drc's SR layout
#define emith_sh2_dtbf_loop() { \
	u8 *jmp0; /* negative cycles check */ \
	u8 *jmp1; /* unsigned overflow check */ \
	int cr, rn; \
	int tmp_ = rcache_get_tmp(); \
	cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);\
	emith_sub_r_imm(rn, 1); \
	emith_sub_r_imm(cr, (cycles+1) << 12); \
	cycles = 0; \
	emith_asr(tmp_, cr, 2+12); \
	JMP8_POS(jmp0); /* no negative cycles */ \
	emith_move_r_imm(tmp_, 0); \
	JMP8_EMIT(IOP_JNS, jmp0); \
	emith_and_r_imm(cr, 0xffe); \
	emith_subf_r_r(rn, tmp_); \
	JMP8_POS(jmp1); /* no overflow */ \
	emith_neg_r(rn); /* count left */ \
	emith_lsl(rn, rn, 2+12); \
	emith_or_r_r(cr, rn); \
	emith_or_r_imm(cr, 1); \
	emith_move_r_imm(rn, 0); \
	JMP8_EMIT(IOP_JA, jmp1); \
	rcache_free_tmp(tmp_); \
}
420
// replace the low 12 bits of SR with those of srcr, keeping the internal
// fields kept in SR's upper bits (e.g. the cycle counter) intact
#define emith_write_sr(srcr) { \
	int tmp_ = rcache_get_tmp(); \
	int srr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
	emith_clear_msb(tmp_, srcr, 20); /* tmp = srcr & 0xfff */ \
	emith_bic_r_imm(srr, 0xfff); \
	emith_or_r_r(srr, tmp_); \
	rcache_free_tmp(tmp_); \
}
429
// copy the host carry flag into the T bit (bit0) of SR via SETC.
// is_sub is accepted for interface compatibility and ignored here.
// NOTE(review): SETC and the 8-bit OR (0x08) address byte registers, so
// tmp_/srr presumably must map to eax..ebx -- confirm with rcache
#define emith_carry_to_t(srr, is_sub) { \
	int tmp_ = rcache_get_tmp(); \
	EMIT_OP(0x0f); \
	EMIT(0x92, u8); \
	EMIT_MODRM(3, 0, tmp_); /* SETC */ \
	emith_bic_r_imm(srr, 1); \
	EMIT_OP_MODRM(0x08, 3, tmp_, srr); /* OR srrl, tmpl */ \
	rcache_free_tmp(tmp_); \
}
439