#define FN_PSUBU FN_SUBU
#define PTR_SCALE 2
#endif
+#define PTR_SIZE (1<<PTR_SCALE)
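// note (sketch): PTR_SIZE is 4 with PTR_SCALE 2 (32 bit pointers); PTR_SCALE 3,
// presumably the setting for a 64 bit host, would give PTR_SIZE 8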
// XXX: tcache_ptr type for SVP and SH2 compilers differs..
#define EMIT_PTR(ptr, x) \
#define emith_add_r_ret(r) \
emith_add_r_r_ptr(r, LR)
-// NB: ABI SP alignment is 8 for compatibility with MIPS IV
+// NB: ABI SP alignment is 8 for 64 bit; O32 has a 16 byte arg save area
#define emith_push_ret(r) do { \
- emith_add_r_r_ptr_imm(SP, SP, -8-16); /* O32: 16 byte arg save area */ \
- emith_write_r_r_offs(LR, SP, 4+16); \
- if ((r) > 0) emith_write_r_r_offs(r, SP, 0+16); \
+ int offs_ = 8+16 - 2*PTR_SIZE; \
+ emith_add_r_r_ptr_imm(SP, SP, -8-16); \
+ emith_write_r_r_offs_ptr(LR, SP, offs_ + PTR_SIZE); \
+ if ((r) > 0) emith_write_r_r_offs(r, SP, offs_); \
} while (0)
#define emith_pop_and_ret(r) do { \
- if ((r) > 0) emith_read_r_r_offs(r, SP, 0+16); \
- emith_read_r_r_offs(LR, SP, 4+16); \
+ int offs_ = 8+16 - 2*PTR_SIZE; \
+ if ((r) > 0) emith_read_r_r_offs(r, SP, offs_); \
+ emith_read_r_r_offs_ptr(LR, SP, offs_ + PTR_SIZE); \
emith_add_r_r_ptr_imm(SP, SP, 8+16); \
emith_ret(); \
} while (0)
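/* frame layout sketch for push_ret/pop_and_ret (24 byte frame in both cases):
 *   o32, PTR_SIZE 4:            offs_ = 16 -> r at SP+16, LR at SP+20, SP+0..15 arg save area
 *   64 bit, PTR_SIZE 8 (assumed): offs_ = 8 -> r at SP+8,  LR at SP+16..23
 */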
// SH2 drc specific
#define emith_sh2_drc_entry() do { \
- int _c; u32 _m = 0xd0ff0000; \
+ int _c, _z = PTR_SIZE; u32 _m = 0xd0ff0000; \
if (__builtin_parity(_m) == 1) _m |= 0x1; /* ABI align for SP is 8 */ \
- int _s = count_bits(_m) * 4 + 16, _o = _s; /* 16 byte arg save area */ \
+	int _s = count_bits(_m) * _z + 16, _o = _s; /* 16 byte O32 arg save area */ \
if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
if (_m & (1 << _c)) \
- { _o -= 4; if (_c) emith_write_r_r_offs(_c, SP, _o); } \
+ { _o -= _z; if (_c) emith_write_r_r_offs_ptr(_c, SP, _o); } \
} while (0)
#define emith_sh2_drc_exit() do { \
- int _c; u32 _m = 0xd0ff0000; \
+ int _c, _z = PTR_SIZE; u32 _m = 0xd0ff0000; \
if (__builtin_parity(_m) == 1) _m |= 0x1; \
- int _s = count_bits(_m) * 4 + 16, _o = 16; \
+ int _s = count_bits(_m) * _z + 16, _o = 16; \
for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
if (_m & (1 << _c)) \
- { if (_c) emith_read_r_r_offs(_c, SP, _o); _o += 4; } \
+ { if (_c) emith_read_r_r_offs_ptr(_c, SP, _o); _o += _z; } \
if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
emith_ret(); \
} while (0)
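/* sketch of the save set for mask 0xd0ff0000: bits 16-23 ($s0-$s7), bit 28 ($gp),
 * bits 30-31 ($fp, $ra) = 11 regs; the parity test pads with bit 0, reserving a
 * slot for $zero (never actually stored, see the `if (_c)`) so the slot count
 * stays even and SP stays aligned.  e.g. PTR_SIZE 4: _s = 12*4+16 = 64;
 * PTR_SIZE 8 (assumed 64 bit case): _s = 12*8+16 = 112.
 */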
} while (0)
#endif
+#define PTR_SIZE (1<<PTR_SCALE)
+
#define R5_ADDW_REG(rd, rs, rt) (R5_ADD_REG(rd, rs, rt)^R5_OP32)
#define R5_SUBW_REG(rd, rs, rt) (R5_SUB_REG(rd, rs, rt)^R5_OP32)
#define R5_LSLW_REG(rd, rs, rt) (R5_LSL_REG(rd, rs, rt)^R5_OP32)
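// the *W forms are the RV64 32 bit result variants; XORing with R5_OP32 presumably
// flips the major opcode from OP (0x33) to OP-32 (0x3b), i.e. R5_OP32 would be 0x8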
// function call handling
#define emith_save_caller_regs(mask) do { \
- int _c; u32 _m = mask & 0x3fce0; /* x5-x7,x10-x17 */ \
+ int _c, _z = PTR_SIZE; u32 _m = mask & 0x3fce0; /* x5-x7,x10-x17 */ \
_c = count_bits(_m)&3; _m |= (1<<((4-_c)&3))-1; /* ABI align */ \
- int _s = count_bits(_m) * 4, _o = _s; \
+ int _s = count_bits(_m) * _z, _o = _s; \
if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
if (_m & (1 << _c)) \
- { _o -= 4; if (_c) emith_write_r_r_offs(_c, SP, _o); } \
+ { _o -= _z; if (_c) emith_write_r_r_offs_ptr(_c, SP, _o); } \
} while (0)
#define emith_restore_caller_regs(mask) do { \
- int _c; u32 _m = mask & 0x3fce0; \
+ int _c, _z = PTR_SIZE; u32 _m = mask & 0x3fce0; \
_c = count_bits(_m)&3; _m |= (1<<((4-_c)&3))-1; /* ABI align */ \
- int _s = count_bits(_m) * 4, _o = 0; \
+ int _s = count_bits(_m) * _z, _o = 0; \
for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
if (_m & (1 << _c)) \
- { if (_c) emith_read_r_r_offs(_c, SP, _o); _o += 4; } \
+ { if (_c) emith_read_r_r_offs_ptr(_c, SP, _o); _o += _z; } \
if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
} while (0)
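/* alignment sketch: the caller-save set (x5-x7, x10-x17) has at most 11 members;
 * the pad expression rounds the slot count up to a multiple of 4, so the frame is
 * a multiple of 16 bytes as the RISC-V psABI requires, for both 4 and 8 byte slots.
 * e.g. 5 regs in mask -> _c = 1 -> pad bits 0-2 -> 8 slots -> _s = 32 or 64.
 */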
// NB: returns position of patch for cache maintenance
#define emith_jump_patch(ptr, target, pos) do { \
u32 *ptr_ = (u32 *)ptr; /* must skip condition check code */ \
+	while ((*ptr_&0x77) != OP_BCOND && (*ptr_&0x77) != OP_LUI) ptr_++; \
if ((*ptr_&0x77) == OP_BCOND) { \
u32 *p_ = ptr_, disp_ = (u8 *)target - (u8 *)ptr_; \
u32 f1_ = _CB(*ptr_,3,12,0); \
#define emith_push_ret(r) do { \
	emith_add_r_r_ptr_imm(SP, SP, -16); /* ABI requires 16 byte alignment */ \
- emith_write_r_r_offs(LR, SP, 4); \
+ emith_write_r_r_offs_ptr(LR, SP, 8); \
if ((r) > 0) emith_write_r_r_offs(r, SP, 0); \
} while (0)
#define emith_pop_and_ret(r) do { \
if ((r) > 0) emith_read_r_r_offs(r, SP, 0); \
- emith_read_r_r_offs(LR, SP, 4); \
+ emith_read_r_r_offs_ptr(LR, SP, 8); \
emith_add_r_r_ptr_imm(SP, SP, 16); \
emith_ret(); \
} while (0)
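// the fixed 16 byte frame keeps the RISC-V mandated 16 byte SP alignment; storing LR
// at offset 8 leaves room for an 8 byte pointer and never overlaps r at offset 0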
// SH2 drc specific
#define emith_sh2_drc_entry() do { \
- int _c; u32 _m = 0x0ffc0202; /* x1,x9,x18-x27 */ \
+ int _c, _z = PTR_SIZE; u32 _m = 0x0ffc0202; /* x1,x9,x18-x27 */ \
_c = count_bits(_m)&3; _m |= (1<<((4-_c)&3))-1; /* ABI align */ \
- int _s = count_bits(_m) * 4, _o = _s; \
+ int _s = count_bits(_m) * _z, _o = _s; \
if (_s) emith_add_r_r_ptr_imm(SP, SP, -_s); \
for (_c = HOST_REGS-1; _m && _c >= 0; _m &= ~(1 << _c), _c--) \
if (_m & (1 << _c)) \
- { _o -= 4; if (_c) emith_write_r_r_offs(_c, SP, _o); } \
+ { _o -= _z; if (_c) emith_write_r_r_offs_ptr(_c, SP, _o); } \
} while (0)
#define emith_sh2_drc_exit() do { \
- int _c; u32 _m = 0x0ffc0202; \
+ int _c, _z = PTR_SIZE; u32 _m = 0x0ffc0202; \
_c = count_bits(_m)&3; _m |= (1<<((4-_c)&3))-1; /* ABI align */ \
- int _s = count_bits(_m) * 4, _o = 0; \
+ int _s = count_bits(_m) * _z, _o = 0; \
for (_c = 0; _m && _c < HOST_REGS; _m &= ~(1 << _c), _c++) \
if (_m & (1 << _c)) \
- { if (_c) emith_read_r_r_offs(_c, SP, _o); _o += 4; } \
+ { if (_c) emith_read_r_r_offs_ptr(_c, SP, _o); _o += _z; } \
if (_s) emith_add_r_r_ptr_imm(SP, SP, _s); \
emith_ret(); \
} while (0)
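/* mask 0x0ffc0202 = x1 (ra), x9 (s1), x18-x27 (s2-s11): 12 saved regs, already a
 * multiple of 4, so no pad slots; the frame is 48 bytes with 4 byte slots, 96 with 8.
 */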
emith_lsr(mask, a, SH2_READ_SHIFT); \
emith_add_r_r_r_lsl_ptr(tab, tab, mask, PTR_SCALE+1); \
emith_read_r_r_offs_ptr(func, tab, 0); \
- emith_read_r_r_offs(mask, tab, 1 << PTR_SCALE); \
+ emith_read_r_r_offs(mask, tab, PTR_SIZE); \
emith_addf_r_r_r_ptr(func, func, func); \
} while (0)
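/* lookup sketch: the table is indexed by (a >> SH2_READ_SHIFT); the PTR_SCALE+1
 * shift implies 2*PTR_SIZE bytes per entry, i.e. a pointer-sized handler/map value
 * at offset 0 and a u32 mask at offset PTR_SIZE.  The addf doubles func, which
 * presumably moves the MSB handler flag into the carry for a branch not shown here.
 */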