#if PROTO
/*
 * Fallback helpers used when the backend has no native atomic
 * compare-and-swap: the cas is emulated under a global pthread mutex
 * (see _fallback_casx below), and these helpers spill/reload registers
 * around the pthread_mutex_lock/unlock call-outs.
 */
/* Spill r0 to its stack slot if it is a caller-saved scratch register. */
#define fallback_save(r0) _fallback_save(_jit, r0)
static void _fallback_save(jit_state_t*, jit_int32_t);
/* Reload r0 from its stack slot if it is a caller-saved scratch register. */
#define fallback_load(r0) _fallback_load(_jit, r0)
static void _fallback_load(jit_state_t*, jit_int32_t);
/* Spill all live caller-saved registers except r0 before a call-out. */
#define fallback_save_regs(r0) _fallback_save_regs(_jit, r0)
static void _fallback_save_regs(jit_state_t*, jit_int32_t);
/* Reload the registers spilled by fallback_save_regs (r0 excluded). */
#define fallback_load_regs(r0) _fallback_load_regs(_jit, r0)
static void _fallback_load_regs(jit_state_t*, jit_int32_t);
/* Emit a call to i0 passing i1 as the single (word) argument. */
#define fallback_calli(i0, i1) _fallback_calli(_jit, i0, i1)
static void _fallback_calli(jit_state_t*, jit_word_t, jit_word_t);
/* Emit a mutex-protected compare-and-swap sequence. */
#define fallback_casx(r0,r1,r2,r3,im) _fallback_casx(_jit,r0,r1,r2,r3,im)
static void _fallback_casx(jit_state_t *, jit_int32_t, jit_int32_t,
			   jit_int32_t, jit_int32_t, jit_word_t);
#endif
| 16 | |
| 17 | #if CODE |
| 18 | static void |
| 19 | _fallback_save(jit_state_t *_jit, jit_int32_t r0) |
| 20 | { |
| 21 | jit_int32_t offset, regno, spec; |
| 22 | for (offset = 0; offset < JIT_R_NUM; offset++) { |
| 23 | spec = _rvs[offset].spec; |
| 24 | regno = jit_regno(spec); |
| 25 | if (regno == r0) { |
| 26 | if (!(spec & jit_class_sav)) |
| 27 | stxi(_jitc->function->regoff[JIT_R(offset)], rn(JIT_FP), regno); |
| 28 | break; |
| 29 | } |
| 30 | } |
| 31 | } |
| 32 | |
| 33 | static void |
| 34 | _fallback_load(jit_state_t *_jit, jit_int32_t r0) |
| 35 | { |
| 36 | jit_int32_t offset, regno, spec; |
| 37 | for (offset = 0; offset < JIT_R_NUM; offset++) { |
| 38 | spec = _rvs[offset].spec; |
| 39 | regno = jit_regno(spec); |
| 40 | if (regno == r0) { |
| 41 | if (!(spec & jit_class_sav)) |
| 42 | ldxi(regno, rn(JIT_FP), _jitc->function->regoff[JIT_R(offset)]); |
| 43 | break; |
| 44 | } |
| 45 | } |
| 46 | } |
| 47 | |
/*
 * Spill every caller-saved register that is currently live (or holds a
 * call argument) to a stack slot, so the pthread_mutex_lock/unlock
 * call-outs made by the cas fallback cannot clobber it.  The gpr
 * register r0 (the cas result register) is not saved: its value is
 * produced by the fallback sequence itself.
 */
static void
_fallback_save_regs(jit_state_t *_jit, jit_int32_t r0)
{
    jit_int32_t regno, spec;
    for (regno = 0; regno < _jitc->reglen; regno++) {
	spec = _rvs[regno].spec;
	if ((jit_regset_tstbit(&_jitc->regarg, regno) ||
	     jit_regset_tstbit(&_jitc->reglive, regno)) &&
	    !(spec & jit_class_sav)) {
	    /* Lazily allocate a spill slot; a float slot is wide enough
	     * for a double.  Allocating new stack space invalidates the
	     * current code, so request another jit pass (again = 1).
	     * Note the slot is allocated even for r0, which is skipped
	     * below. */
	    if (!_jitc->function->regoff[regno]) {
		_jitc->function->regoff[regno] =
		    jit_allocai(spec & jit_class_gpr ?
				sizeof(jit_word_t) : sizeof(jit_float64_t));
		_jitc->again = 1;
	    }
	    /* Do not save the cas result register. */
	    if ((spec & jit_class_gpr) && rn(regno) == r0)
		continue;
	    /* Record the register as spilled, then emit the store. */
	    jit_regset_setbit(&_jitc->regsav, regno);
	    if (spec & jit_class_gpr)
		emit_stxi(_jitc->function->regoff[regno], JIT_FP, regno);
	    else
		emit_stxi_d(_jitc->function->regoff[regno], JIT_FP, regno);
	}
    }
}
| 73 | |
/*
 * Reload the caller-saved registers spilled by _fallback_save_regs,
 * using the same liveness criteria; the gpr register r0 (the cas
 * result) is skipped so the freshly computed result is not overwritten.
 */
static void
_fallback_load_regs(jit_state_t *_jit, jit_int32_t r0)
{
    jit_int32_t regno, spec;
    for (regno = 0; regno < _jitc->reglen; regno++) {
	spec = _rvs[regno].spec;
	if ((jit_regset_tstbit(&_jitc->regarg, regno) ||
	     jit_regset_tstbit(&_jitc->reglive, regno)) &&
	    !(spec & jit_class_sav)) {
	    if ((spec & jit_class_gpr) && rn(regno) == r0)
		continue;
	    /* NOTE(review): setbit here mirrors _fallback_save_regs
	     * rather than clearing regsav after the reload -- confirm
	     * this asymmetry is intentional. */
	    jit_regset_setbit(&_jitc->regsav, regno);
	    if (spec & jit_class_gpr)
		emit_ldxi(regno, JIT_FP, _jitc->function->regoff[regno]);
	    else
		emit_ldxi_d(regno, JIT_FP, _jitc->function->regoff[regno]);
	}
    }
}
| 93 | |
/*
 * Emit a call to the C function at address i0 passing i1 (here, the
 * address of the fallback mutex) as the single word argument.  The
 * argument must be placed directly in the per-architecture first
 * argument register; going through the normal pushargi path is avoided
 * (see the ia64 note below).  Architectures without a case here are
 * presumably handled by calli() alone -- TODO confirm per backend.
 */
static void
_fallback_calli(jit_state_t *_jit, jit_word_t i0, jit_word_t i1)
{
# if defined(__arm__)
    /* AAPCS: first argument in r0. */
    movi(rn(_R0), i1);
# elif defined(__ia64__)
    /* avoid confusion with pushargi patching */
    /* Use the short immediate form when i1 fits in 22 signed bits. */
    if (i1 >= -2097152 && i1 <= 2097151)
	MOVI(_jitc->rout, i1);
    else
	MOVL(_jitc->rout, i1);
# elif defined(__hppa__)
    /* HP-PA calling convention: first argument in r26. */
    movi(_R26_REGNO, i1);
#endif
    calli(i0);
}
| 110 | |
/*
 * Emit a fallback (word sized) compare-and-swap sequence for backends
 * without a native cas instruction, serializing through a single global
 * pthread mutex.  Because only code generated here takes that mutex, it
 * is atomic only with respect to other lightning-generated cas code
 * (see the XXX below).
 *
 *   r0 - result: set to 1 if the swap was performed, 0 otherwise
 *   r1 - register holding the address, or _NOREG for the immediate form
 *   r2 - expected old value
 *   r3 - new value to store
 *   i0 - immediate address, used only when r1 == _NOREG
 */
static void
_fallback_casx(jit_state_t *_jit, jit_int32_t r0, jit_int32_t r1,
	       jit_int32_t r2, jit_int32_t r3, jit_word_t i0)
{
    jit_int32_t r1_reg, iscasi;
    jit_word_t jump, done;
    /* XXX only attempts to fallback cas for lightning jit code */
    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    /* Immediate form: materialize i0 in a callee-saved temporary so the
     * address survives the call-outs below. */
    if ((iscasi = r1 == _NOREG)) {
	r1_reg = jit_get_reg(jit_class_gpr|jit_class_sav);
	r1 = rn(r1_reg);
	movi(r1, i0);
    }
    /* Spill live caller-saved registers; the pthread calls may clobber
     * them. */
    fallback_save_regs(r0);
    fallback_calli((jit_word_t)pthread_mutex_lock, (jit_word_t)&mutex);
    fallback_load(r1);		/* reload address clobbered by the call */
    ldr(r0, r1);		/* r0 = *r1 (current value) */
    fallback_load(r2);
    eqr(r0, r0, r2);		/* r0 = (current value == expected) */
    fallback_save(r0);		/* preserve result across the unlock call */
    /* If the comparison failed (r0 != 1) skip the store and branch to
     * the unlock; patched to "done" below. */
    jump = bnei(_jit->pc.w, r0, 1);
    fallback_load(r3);
# if __WORDSIZE == 32
    str_i(r1, r3);		/* *r1 = new value */
# else
    str_l(r1, r3);
# endif
    /* done: */
# if defined(__ia64__)
    sync();
# endif
    /* Branch target: both paths fall through to the unlock. */
    done = _jit->pc.w;
    fallback_calli((jit_word_t)pthread_mutex_unlock, (jit_word_t)&mutex);
    fallback_load(r0);		/* reload the comparison result */
    /* Patch the forward branch; arm and ia64 need arch-specific patch
     * entry points. */
# if defined(__arm__)
    patch_at(arm_patch_jump, jump, done);
# elif defined(__ia64__)
    patch_at(jit_code_bnei, jump, done);
# else
    patch_at(jump, done);
# endif
    fallback_load_regs(r0);
    if (iscasi)
	jit_unget_reg(r1_reg);
}
| 156 | #endif |