enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };
#define CONTEXT_REG xBP
+#define RET_REG xAX
#define ICOND_JO 0x00
#define ICOND_JNO 0x01
#define EMIT_PTR(ptr, val, type) \
*(type *)(ptr) = val
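+/* wrapping multi-statement macros in do { ... } while (0) lets them be used
+   as a single statement (e.g. in an unbraced if/else) and still take a ';' */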
-#define EMIT(val, type) { \
+#define EMIT(val, type) do { \
EMIT_PTR(tcache_ptr, val, type); \
tcache_ptr += sizeof(type); \
-}
+} while (0)
-#define EMIT_OP(op) { \
+#define EMIT_OP(op) do { \
COUNT_OP; \
EMIT(op, u8); \
-}
+} while (0)
#define EMIT_MODRM(mod,r,rm) \
EMIT(((mod)<<6) | ((r)<<3) | (rm), u8)
#define EMIT_SIB(scale,index,base) \
EMIT(((scale)<<6) | ((index)<<3) | (base), u8)
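+/* REX prefix 0100WRXB: W=1 selects 64-bit operand size; R, X and B extend
+   the ModRM reg, SIB index and ModRM rm / SIB base fields respectively */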
+#define EMIT_REX(w,r,x,b) \
+ EMIT(0x40 | ((w)<<3) | ((r)<<2) | ((x)<<1) | (b), u8)
+
#define EMIT_OP_MODRM(op,mod,r,rm) do { \
EMIT_OP(op); \
EMIT_MODRM(mod, r, rm); \
#define emith_move_r_r(dst, src) \
EMIT_OP_MODRM(0x8b, 3, dst, src)
+#define emith_move_r_r_ptr(dst, src) do { \
+ EMIT_REX_FOR_PTR(); \
+ EMIT_OP_MODRM(0x8b, 3, dst, src); \
+} while (0)
+
#define emith_add_r_r(d, s) \
EMIT_OP_MODRM(0x01, 3, s, d)
EMIT_OP_MODRM(0x39, 3, s, d)
// fake teq - test equivalence - get_flags(d ^ s)
-#define emith_teq_r_r(d, s) { \
+#define emith_teq_r_r(d, s) do { \
emith_push(d); \
emith_eor_r_r(d, s); \
emith_pop(d); \
-}
+} while (0)
-#define emith_mvn_r_r(d, s) { \
+#define emith_mvn_r_r(d, s) do { \
if (d != s) \
emith_move_r_r(d, s); \
EMIT_OP_MODRM(0xf7, 3, 2, d); /* NOT d */ \
-}
+} while (0)
-#define emith_negc_r_r(d, s) { \
+#define emith_negc_r_r(d, s) do { \
int tmp_ = rcache_get_tmp(); \
emith_move_r_imm(tmp_, 0); \
emith_sbc_r_r(tmp_, s); \
emith_move_r_r(d, tmp_); \
rcache_free_tmp(tmp_); \
-}
+} while (0)
-#define emith_neg_r_r(d, s) { \
+#define emith_neg_r_r(d, s) do { \
if (d != s) \
emith_move_r_r(d, s); \
EMIT_OP_MODRM(0xf7, 3, 3, d); /* NEG d */ \
-}
+} while (0)
// _r_r_r
-#define emith_add_r_r_r(d, s1, s2) { \
+#define emith_add_r_r_r(d, s1, s2) do { \
if (d == s1) { \
emith_add_r_r(d, s2); \
} else if (d == s2) { \
emith_move_r_r(d, s1); \
emith_add_r_r(d, s2); \
} \
-}
+} while (0)
-#define emith_eor_r_r_r(d, s1, s2) { \
+#define emith_eor_r_r_r(d, s1, s2) do { \
if (d == s1) { \
emith_eor_r_r(d, s2); \
} else if (d == s2) { \
emith_move_r_r(d, s1); \
emith_eor_r_r(d, s2); \
} \
-}
+} while (0)
// _r_r_shift
-#define emith_or_r_r_lsl(d, s, lslimm) { \
+#define emith_or_r_r_lsl(d, s, lslimm) do { \
int tmp_ = rcache_get_tmp(); \
emith_lsl(tmp_, s, lslimm); \
emith_or_r_r(d, tmp_); \
rcache_free_tmp(tmp_); \
-}
+} while (0)
// d != s
-#define emith_eor_r_r_lsr(d, s, lsrimm) { \
+#define emith_eor_r_r_lsr(d, s, lsrimm) do { \
emith_push(s); \
emith_lsr(s, s, lsrimm); \
emith_eor_r_r(d, s); \
emith_pop(s); \
-}
+} while (0)
// _r_imm
-#define emith_move_r_imm(r, imm) { \
+#define emith_move_r_imm(r, imm) do { \
EMIT_OP(0xb8 + (r)); \
EMIT(imm, u32); \
-}
+} while (0)
#define emith_move_r_imm_s8(r, imm) \
emith_move_r_imm(r, (u32)(signed int)(signed char)(imm))
#define emith_and_r_imm(r, imm) \
emith_arith_r_imm(4, r, imm)
-#define emith_sub_r_imm(r, imm) \
+/* used to subtract cycles right after a test, so keep the flags intact by using lea */
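+/* e.g. emith_sub_r_imm(xCX, 12) emits 8d 89 f4 ff ff ff = lea ecx, [ecx-12] */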
+#define emith_sub_r_imm(r, imm) do { \
+ assert(r != xSP); \
+ EMIT_OP_MODRM(0x8d, 2, r, r); \
+ EMIT(-(s32)(imm), s32); \
+} while (0)
+
+#define emith_subf_r_imm(r, imm) \
emith_arith_r_imm(5, r, imm)
#define emith_eor_r_imm(r, imm) \
emith_arith_r_imm(4, r, ~(imm))
// fake conditionals (using SJMP instead)
-#define emith_move_r_imm_c(cond, r, imm) { \
+#define emith_move_r_imm_c(cond, r, imm) do { \
(void)(cond); \
emith_move_r_imm(r, imm); \
-}
+} while (0)
-#define emith_add_r_imm_c(cond, r, imm) { \
+#define emith_add_r_imm_c(cond, r, imm) do { \
(void)(cond); \
emith_add_r_imm(r, imm); \
-}
+} while (0)
-#define emith_sub_r_imm_c(cond, r, imm) { \
+#define emith_sub_r_imm_c(cond, r, imm) do { \
(void)(cond); \
emith_sub_r_imm(r, imm); \
-}
+} while (0)
#define emith_or_r_imm_c(cond, r, imm) \
emith_or_r_imm(r, imm)
#define emith_ret_c(cond) \
emith_ret()
-// _r_r_imm
-#define emith_add_r_r_imm(d, s, imm) { \
- if (d != s) \
- emith_move_r_r(d, s); \
- emith_add_r_imm(d, imm); \
-}
+// _r_r_imm - use lea
+#define emith_add_r_r_imm(d, s, imm) do { \
+ assert(s != xSP); \
+ EMIT_OP_MODRM(0x8d, 2, d, s); /* lea */ \
+ EMIT(imm, s32); \
+} while (0)
+
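+/* rm == xSP in ModRM selects a SIB byte rather than a base register, which
+   the lea form below doesn't emit, hence the mov+add fallback for s == xSP */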
+#define emith_add_r_r_ptr_imm(d, s, imm) do { \
+ if (s != xSP) { \
+ EMIT_REX_FOR_PTR(); \
+ EMIT_OP_MODRM(0x8d, 2, d, s); /* lea */ \
+ } \
+ else { \
+ if (d != s) \
+ emith_move_r_r_ptr(d, s); \
+ EMIT_REX_FOR_PTR(); \
+ EMIT_OP_MODRM(0x81, 3, 0, d); /* add */ \
+ } \
+ EMIT(imm, s32); \
+} while (0)
-#define emith_and_r_r_imm(d, s, imm) { \
+#define emith_and_r_r_imm(d, s, imm) do { \
if (d != s) \
emith_move_r_r(d, s); \
emith_and_r_imm(d, imm); \
-}
+} while (0)
// shift
-#define emith_shift(op, d, s, cnt) { \
+#define emith_shift(op, d, s, cnt) do { \
if (d != s) \
emith_move_r_r(d, s); \
EMIT_OP_MODRM(0xc1, 3, op, d); \
EMIT(cnt, u8); \
-}
+} while (0)
#define emith_lsl(d, s, cnt) \
emith_shift(4, d, s, cnt)
#define emith_push(r) \
EMIT_OP(0x50 + (r))
-#define emith_push_imm(imm) { \
+#define emith_push_imm(imm) do { \
EMIT_OP(0x68); \
EMIT(imm, u32); \
-}
+} while (0)
#define emith_pop(r) \
EMIT_OP(0x58 + (r))
emith_asr(d, d, 32 - (bits)); \
}
-#define emith_setc(r) { \
+#define emith_setc(r) do { \
+ assert(is_abcdx(r)); \
EMIT_OP(0x0f); \
EMIT_OP_MODRM(0x92, 3, 0, r); /* SETC r */ \
-}
+} while (0)
// XXX: stupid mess
-#define emith_mul_(op, dlo, dhi, s1, s2) { \
+#define emith_mul_(op, dlo, dhi, s1, s2) do { \
int rmr; \
if (dlo != xAX && dhi != xAX) \
emith_push(xAX); \
emith_pop(xDX); \
if (dlo != xAX && dhi != xAX) \
emith_pop(xAX); \
-}
+} while (0)
#define emith_mul_u64(dlo, dhi, s1, s2) \
emith_mul_(4, dlo, dhi, s1, s2) /* MUL */
emith_mul_(4, d, -1, s1, s2)
// (dlo,dhi) += signed(s1) * signed(s2)
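+/* push/pop use the native word size, so the saved dlo/dhi sit at [xsp] and
+   [xsp + sizeof(void *)], hence the pointer-sized offsets below */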
-#define emith_mula_s64(dlo, dhi, s1, s2) { \
+#define emith_mula_s64(dlo, dhi, s1, s2) do { \
emith_push(dhi); \
emith_push(dlo); \
emith_mul_(5, dlo, dhi, s1, s2); \
EMIT_OP_MODRM(0x03, 0, dlo, 4); \
- EMIT_SIB(0, 4, 4); /* add dlo, [esp] */ \
+ EMIT_SIB(0, 4, 4); /* add dlo, [xsp] */ \
EMIT_OP_MODRM(0x13, 1, dhi, 4); \
EMIT_SIB(0, 4, 4); \
- EMIT(4, u8); /* adc dhi, [esp+4] */ \
- emith_add_r_imm(xSP, 4*2); \
-}
+ EMIT(sizeof(void *), u8); /* adc dhi, [xsp+{4,8}] */ \
+ emith_add_r_r_ptr_imm(xSP, xSP, sizeof(void *) * 2); \
+} while (0)
// "flag" instructions are the same
-#define emith_subf_r_imm emith_sub_r_imm
#define emith_addf_r_r emith_add_r_r
#define emith_subf_r_r emith_sub_r_r
#define emith_adcf_r_r emith_adc_r_r
rcache_free_tmp(r_); \
} while (0)
-#define emith_read16_r_r_offs(r, rs, offs) { \
+#define emith_read16_r_r_offs(r, rs, offs) do { \
EMIT(0x66, u8); /* operand override */ \
emith_read_r_r_offs(r, rs, offs); \
-}
+} while (0)
-#define emith_write16_r_r_offs(r, rs, offs) { \
+#define emith_write16_r_r_offs(r, rs, offs) do { \
EMIT(0x66, u8); \
emith_write_r_r_offs(r, rs, offs); \
-}
+} while (0)
#define emith_ctx_read(r, offs) \
emith_read_r_r_offs(r, CONTEXT_REG, offs)
+#define emith_ctx_read_ptr(r, offs) do { \
+ EMIT_REX_FOR_PTR(); \
+ emith_deref_op(0x8b, r, CONTEXT_REG, offs); \
+} while (0)
+
#define emith_ctx_write(r, offs) \
emith_write_r_r_offs(r, CONTEXT_REG, offs)
}
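+/* rel32 control transfers: the displacement is relative to the end of the
+   emitted instruction (hence the +5/+6); truncating the pointer difference
+   to u32 is fine as long as code and targets stay within +/-2GB */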
#define emith_jump(ptr) { \
- u32 disp = (u32)(ptr) - ((u32)tcache_ptr + 5); \
+ u32 disp = (u8 *)(ptr) - ((u8 *)tcache_ptr + 5); \
EMIT_OP(0xe9); \
EMIT(disp, u32); \
}
#define emith_jump_patchable(target) \
emith_jump(target)
-#define emith_jump_cond(cond, ptr) { \
- u32 disp = (u32)(ptr) - ((u32)tcache_ptr + 6); \
+#define emith_jump_cond(cond, ptr) do { \
+ u32 disp = (u8 *)(ptr) - ((u8 *)tcache_ptr + 6); \
EMIT(0x0f, u8); \
EMIT_OP(0x80 | (cond)); \
EMIT(disp, u32); \
-}
+} while (0)
#define emith_jump_cond_patchable(cond, target) \
emith_jump_cond(cond, target)
#define emith_jump_patch(ptr, target) do { \
- u32 disp_ = (u32)(target) - ((u32)(ptr) + 4); \
+ u32 disp_ = (u8 *)(target) - ((u8 *)(ptr) + 4); \
u32 offs_ = (*(u8 *)(ptr) == 0x0f) ? 2 : 1; \
EMIT_PTR((u8 *)(ptr) + offs_, disp_ - offs_, u32); \
} while (0)
#define emith_jump_at(ptr, target) { \
- u32 disp_ = (u32)(target) - ((u32)(ptr) + 5); \
+ u32 disp_ = (u8 *)(target) - ((u8 *)(ptr) + 5); \
EMIT_PTR(ptr, 0xe9, u8); \
EMIT_PTR((u8 *)(ptr) + 1, disp_, u32); \
}
#define emith_call(ptr) { \
- u32 disp = (u32)(ptr) - ((u32)tcache_ptr + 5); \
+ u32 disp = (u8 *)(ptr) - ((u8 *)tcache_ptr + 5); \
EMIT_OP(0xe8); \
EMIT(disp, u32); \
}
#define emith_call_reg(r) \
EMIT_OP_MODRM(0xff, 3, 2, r)
-#define emith_call_ctx(offs) { \
+#define emith_call_ctx(offs) do { \
EMIT_OP_MODRM(0xff, 2, 2, CONTEXT_REG); \
EMIT(offs, u32); \
-}
+} while (0)
#define emith_ret() \
EMIT_OP(0xc3)
#define emith_jump_reg(r) \
EMIT_OP_MODRM(0xff, 3, 4, r)
-#define emith_jump_ctx(offs) { \
+#define emith_jump_ctx(offs) do { \
EMIT_OP_MODRM(0xff, 2, 4, CONTEXT_REG); \
EMIT(offs, u32); \
-}
+} while (0)
#define emith_push_ret()
// "simple" jump (no more then a few insns)
// ARM will use conditional instructions here
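+/* the _() variants below take an explicit slot variable: JMP8_POS (see its
+   definition in this file) reserves a 2-byte slot and JMP8_EMIT later patches
+   in a Jcc rel8 that skips over whatever was emitted in between */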
+#define EMITH_SJMP_DECL_() \
+ u8 *cond_ptr
+
+#define EMITH_SJMP_START_(cond) \
+ JMP8_POS(cond_ptr)
+
+#define EMITH_SJMP_END_(cond) \
+ JMP8_EMIT(cond, cond_ptr)
+
#define EMITH_SJMP_START EMITH_JMP_START
#define EMITH_SJMP_END EMITH_JMP_END
#define EMITH_SJMP3_MID EMITH_JMP3_MID
#define EMITH_SJMP3_END EMITH_JMP3_END
-#define emith_pass_arg_r(arg, reg) { \
+#define emith_pass_arg_r(arg, reg) do { \
int rd = 7; \
host_arg2reg(rd, arg); \
- emith_move_r_r(rd, reg); \
-}
+ emith_move_r_r_ptr(rd, reg); \
+} while (0)
-#define emith_pass_arg_imm(arg, imm) { \
+#define emith_pass_arg_imm(arg, imm) do { \
int rd = 7; \
host_arg2reg(rd, arg); \
emith_move_r_imm(rd, imm); \
-}
+} while (0)
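+/* no-op on x86: instruction fetch stays coherent with data writes, so newly
+   emitted tcache code needs no explicit cache flush */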
#define host_instructions_updated(base, end)
+#ifdef __x86_64__
+
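+// SIB scale for pointer-sized table entries: 1 << PTR_SCALE bytes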
+#define PTR_SCALE 3
+#define NA_TMP_REG xCX // non-arg tmp from reg_temp[]
+
+#define EMIT_REX_FOR_PTR() \
+ EMIT_REX(1,0,0,0)
+
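+/* first three integer call args per the SysV AMD64 ABI: rdi, rsi, rdx */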
+#define host_arg2reg(rd, arg) \
+ switch (arg) { \
+ case 0: rd = xDI; break; \
+ case 1: rd = xSI; break; \
+ case 2: rd = xDX; break; \
+ }
+
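+/* rbx/rbp are callee-saved; the third push keeps rsp 16-byte aligned for
+   calls made from generated code */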
+#define emith_sh2_drc_entry() { \
+ emith_push(xBX); \
+ emith_push(xBP); \
+ emith_push(xSI); /* to align */ \
+}
+
+#define emith_sh2_drc_exit() { \
+ emith_pop(xSI); \
+ emith_pop(xBP); \
+ emith_pop(xBX); \
+ emith_ret(); \
+}
+
+#else
+
+#define PTR_SCALE 2
+#define NA_TMP_REG xBX // non-arg tmp from reg_temp[]
+
+#define EMIT_REX_FOR_PTR()
+
#define host_arg2reg(rd, arg) \
switch (arg) { \
case 0: rd = xAX; break; \
case 2: rd = xCX; break; \
}
-/* SH2 drc specific */
#define emith_sh2_drc_entry() { \
emith_push(xBX); \
emith_push(xBP); \
emith_ret(); \
}
-// assumes EBX is free temporary
+#endif
+
+#define emith_save_caller_regs(mask) do { \
+ if ((mask) & (1 << xAX)) emith_push(xAX); \
+ if ((mask) & (1 << xCX)) emith_push(xCX); \
+ if ((mask) & (1 << xDX)) emith_push(xDX); \
+ if ((mask) & (1 << xSI)) emith_push(xSI); \
+ if ((mask) & (1 << xDI)) emith_push(xDI); \
+} while (0)
+
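+/* pops mirror the pushes in reverse order, so the same mask restores correctly */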
+#define emith_restore_caller_regs(mask) do { \
+ if ((mask) & (1 << xDI)) emith_pop(xDI); \
+ if ((mask) & (1 << xSI)) emith_pop(xSI); \
+ if ((mask) & (1 << xDX)) emith_pop(xDX); \
+ if ((mask) & (1 << xCX)) emith_pop(xCX); \
+ if ((mask) & (1 << xAX)) emith_pop(xAX); \
+} while (0)
+
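+/* tail-call the memory write handler: index the handler table with
+   a >> SH2_WRITE_SHIFT (pointer-sized entries), pass the SH2 context
+   in arg2, then jump to the handler */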
#define emith_sh2_wcall(a, tab) { \
int arg2_; \
host_arg2reg(arg2_, 2); \
- emith_lsr(xBX, a, SH2_WRITE_SHIFT); \
- EMIT_OP_MODRM(0x8b, 0, xBX, 4); \
- EMIT_SIB(2, xBX, tab); /* mov ebx, [tab + ebx * 4] */ \
- emith_move_r_r(arg2_, CONTEXT_REG); \
- emith_jump_reg(xBX); \
+ emith_lsr(NA_TMP_REG, a, SH2_WRITE_SHIFT); \
+ EMIT_REX_FOR_PTR(); \
+ EMIT_OP_MODRM(0x8b, 0, NA_TMP_REG, 4); \
+ EMIT_SIB(PTR_SCALE, NA_TMP_REG, tab); /* mov tmp, [tab + tmp * {4,8}] */ \
+ emith_move_r_r_ptr(arg2_, CONTEXT_REG); \
+ emith_jump_reg(NA_TMP_REG); \
}
#define emith_sh2_dtbf_loop() { \
JMP8_EMIT(ICOND_JE, jmp0); /* do_sub: */ \
emith_sub_r_r(rn, rm); \
JMP8_EMIT_NC(jmp1); /* done: */ \
- emith_setc(tmp_); \
- EMIT_OP_MODRM(0x31, 3, tmp_, sr); /* T = Q1 ^ Q2 */ \
+ emith_adc_r_r(tmp_, tmp_); \
+ emith_eor_r_r(sr, tmp_); \
rcache_free_tmp(tmp_); \
}
{ xDX, },
};
+#elif defined(__x86_64__)
+#include "../drc/emit_x86.c"
+
+static const int reg_map_g2h[] = {
+ -1, -1, -1, -1,
+ -1, -1, -1, -1,
+ -1, -1, -1, -1,
+ -1, -1, -1, -1,
+ -1, -1, -1, xBX,
+ -1, -1, -1, -1,
+};
+
+// ax, cx, dx are usually temporaries by convention
+static temp_reg_t reg_temp[] = {
+ { xAX, },
+ { xCX, },
+ { xDX, },
+ { xSI, },
+ { xDI, },
+};
+
#else
#error unsupported arch
#endif
return tr->hreg;
}
-static int rcache_get_arg_id(int arg)
+static int rcache_get_hr_id(int hr)
{
- int i, r = 0;
- host_arg2reg(r, arg);
+ int i;
for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
- if (reg_temp[i].hreg == r)
+ if (reg_temp[i].hreg == hr)
break;
if (i == ARRAY_SIZE(reg_temp)) // can't happen
gconst_check_evict(reg_temp[i].greg);
}
else if (reg_temp[i].type == HR_TEMP) {
- printf("arg %d reg %d already used, aborting\n", arg, r);
+ printf("host reg %d already used, aborting\n", hr);
exit(1);
}
return i;
}
+static int rcache_get_arg_id(int arg)
+{
+ int r = 0;
+ host_arg2reg(r, arg);
+ return rcache_get_hr_id(r);
+}
+
// get a reg to be used as function arg
static int rcache_get_tmp_arg(int arg)
{
return reg_temp[id].hreg;
}
+// ... as return value after a call
+static int rcache_get_tmp_ret(void)
+{
+ int id = rcache_get_hr_id(RET_REG);
+ reg_temp[id].type = HR_TEMP;
+
+ return reg_temp[id].hreg;
+}
+
// same but caches a reg. RC_GR_READ only.
static int rcache_get_reg_arg(int arg, sh2_reg_e r)
{
// XXX: could use some related reg
hr = rcache_get_tmp();
- emith_ctx_read(hr, poffs);
- emith_add_r_imm(hr, a & mask & ~0xff);
+ emith_ctx_read_ptr(hr, poffs);
+ emith_add_r_r_ptr_imm(hr, hr, a & mask & ~0xff);
*offs = a & 0xff; // XXX: ARM oriented..
return hr;
}
emith_ctx_write(reg_map_g2h[SHR_SR], SHR_SR * 4);
arg1 = rcache_get_tmp_arg(1);
- emith_move_r_r(arg1, CONTEXT_REG);
+ emith_move_r_r_ptr(arg1, CONTEXT_REG);
#if 0 // can't do this because of unmapped reads
// ndef PDB_NET
if (reg_map_g2h[SHR_SR] != -1)
emith_ctx_read(reg_map_g2h[SHR_SR], SHR_SR * 4);
- // assuming arg0 and retval reg matches
- return rcache_get_tmp_arg(0);
+ return rcache_get_tmp_ret();
}
static int emit_memhandler_read(int size)
emith_call(sh2_drc_write16);
break;
case 2: // 32
- emith_move_r_r(ctxr, CONTEXT_REG);
+ emith_move_r_r_ptr(ctxr, CONTEXT_REG);
emith_call(sh2_drc_write32);
break;
}
}
}
+/* runs right after the block lookup call: jump to the returned address, or fall through if the lookup returned NULL */
static void emit_block_entry(void)
{
- int arg0;
-
- host_arg2reg(arg0, 0);
-
#if (DRC_DEBUG & 8) || defined(PDB)
int arg1, arg2;
host_arg2reg(arg1, 1);
host_arg2reg(arg2, 2);
emit_do_static_regs(1, arg2);
- emith_move_r_r(arg1, CONTEXT_REG);
+ emith_move_r_r_ptr(arg1, CONTEXT_REG);
emith_move_r_r(arg2, rcache_get_reg(SHR_SR, RC_GR_READ));
emith_call(sh2_drc_log_entry);
rcache_invalidate();
#endif
- emith_tst_r_r(arg0, arg0);
+ emith_tst_r_r(RET_REG, RET_REG);
EMITH_SJMP_START(DCOND_EQ);
- emith_jump_reg_c(DCOND_NE, arg0);
+ emith_jump_reg_c(DCOND_NE, RET_REG);
EMITH_SJMP_END(DCOND_EQ);
}
struct op_data *opd_b =
(op_flags[i] & OF_DELAY_OP) ? &ops[i-1] : opd;
u32 target_pc = opd_b->imm;
- int cond = -1;
+ int cond = -1, ncond = -1;
void *target = NULL;
+ EMITH_SJMP_DECL_();
sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
FLUSH_CYCLES(sr);
+ rcache_clean();
- if (opd_b->op != OP_BRANCH)
+ if (opd_b->op != OP_BRANCH) {
cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
+ ncond = (opd_b->op == OP_BRANCH_CF) ? DCOND_NE : DCOND_EQ;
+ }
if (cond != -1) {
int ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
else
emith_tst_r_imm(sr, T);
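+			/* x86 can't predicate the sub, so the taken-path cycle charge and
+			   the branch emitted below are skipped with a short jump on the
+			   inverted condition; the lea-based emith_sub_r_imm keeps the
+			   tst/cmp flags live for both jumps */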
+ EMITH_SJMP_START_(ncond);
emith_sub_r_imm_c(cond, sr, ctaken<<12);
}
- rcache_clean();
#if LINK_BRANCHES
if (find_in_array(branch_target_pc, branch_target_count, target_pc) >= 0)
return NULL;
}
- if (cond != -1)
+ if (cond != -1) {
emith_jump_cond_patchable(cond, target);
+ EMITH_SJMP_END_(ncond);
+ }
else {
emith_jump_patchable(target);
rcache_invalidate();
rcache_invalidate();
emith_ctx_read(arg0, SHR_PC * 4);
emith_ctx_read(arg1, offsetof(SH2, is_slave));
- emith_add_r_r_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
+ emith_add_r_r_ptr_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
emith_call(dr_lookup_block);
emit_block_entry();
// lookup failed, call sh2_translate()
- emith_move_r_r(arg0, CONTEXT_REG);
+ emith_move_r_r_ptr(arg0, CONTEXT_REG);
emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
emith_call(sh2_translate);
emit_block_entry();
// sh2_translate() failed, flush cache and retry
emith_ctx_read(arg0, offsetof(SH2, drc_tmp));
emith_call(flush_tcache);
- emith_move_r_r(arg0, CONTEXT_REG);
+ emith_move_r_r_ptr(arg0, CONTEXT_REG);
emith_ctx_read(arg1, offsetof(SH2, drc_tmp));
emith_call(sh2_translate);
emit_block_entry();
emith_add_r_imm(tmp, 4);
tmp = rcache_get_reg_arg(1, SHR_SR);
emith_clear_msb(tmp, tmp, 22);
- emith_move_r_r(arg2, CONTEXT_REG);
+ emith_move_r_r_ptr(arg2, CONTEXT_REG);
emith_call(p32x_sh2_write32); // XXX: use sh2_drc_write32?
rcache_invalidate();
// push PC
rcache_get_reg_arg(0, SHR_SP);
emith_ctx_read(arg1, SHR_PC * 4);
- emith_move_r_r(arg2, CONTEXT_REG);
+ emith_move_r_r_ptr(arg2, CONTEXT_REG);
emith_call(p32x_sh2_write32);
rcache_invalidate();
// update I, cycles, do callback
emith_or_r_r_lsl(sr, arg1, I_SHIFT);
emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
rcache_flush();
- emith_move_r_r(arg0, CONTEXT_REG);
+ emith_move_r_r_ptr(arg0, CONTEXT_REG);
emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
// obtain new PC
- emith_lsl(arg0, arg0, 2);
+ emith_lsl(arg0, RET_REG, 2);
emith_ctx_read(arg1, SHR_VBR * 4);
emith_add_r_r(arg0, arg1);
- emit_memhandler_read(2);
- emith_ctx_write(arg0, SHR_PC * 4);
-#ifdef __i386__
- emith_add_r_imm(xSP, 4); // fix stack
+ tmp = emit_memhandler_read(2);
+ emith_ctx_write(tmp, SHR_PC * 4);
+#if defined(__i386__) || defined(__x86_64__)
+ emith_add_r_r_ptr_imm(xSP, xSP, sizeof(void *)); // fix stack
#endif
emith_jump(sh2_drc_dispatcher);
rcache_invalidate();
// sh2_drc_entry(SH2 *sh2)
sh2_drc_entry = (void *)tcache_ptr;
emith_sh2_drc_entry();
- emith_move_r_r(CONTEXT_REG, arg0); // move ctx, arg0
+ emith_move_r_r_ptr(CONTEXT_REG, arg0); // move ctx, arg0
emit_do_static_regs(0, arg2);
emith_call(sh2_drc_test_irq);
emith_jump(sh2_drc_dispatcher);
// sh2_drc_write8(u32 a, u32 d)
sh2_drc_write8 = (void *)tcache_ptr;
- emith_ctx_read(arg2, offsetof(SH2, write8_tab));
+ emith_ctx_read_ptr(arg2, offsetof(SH2, write8_tab));
emith_sh2_wcall(arg0, arg2);
// sh2_drc_write16(u32 a, u32 d)
sh2_drc_write16 = (void *)tcache_ptr;
- emith_ctx_read(arg2, offsetof(SH2, write16_tab));
+ emith_ctx_read_ptr(arg2, offsetof(SH2, write16_tab));
emith_sh2_wcall(arg0, arg2);
#ifdef PDB_NET
emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
emith_adc_r_imm(arg2, 0x01000000); \
emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
- emith_move_r_r(arg2, CONTEXT_REG); \
+ emith_move_r_r_ptr(arg2, CONTEXT_REG); \
emith_jump(func); \
func = tmp; \
}