/*
 * Call-site macros and forward declarations for the software fallback
 * helpers.  Each fallback_*() macro forwards to the matching
 * _fallback_*() function, implicitly passing the current jit state
 * (_jit) — the convention used throughout the lightning back ends.
 */
/* Spill / reload the JIT_R scratch registers around a helper call. */
2 #define fallback_save(r0) _fallback_save(_jit, r0)
3 static void _fallback_save(jit_state_t*, jit_int32_t);
4 #define fallback_load(r0) _fallback_load(_jit, r0)
5 static void _fallback_load(jit_state_t*, jit_int32_t);
/* Save / restore all live non-callee-saved registers (GPR and FPR). */
6 #define fallback_save_regs(r0) _fallback_save_regs(_jit, r0)
7 static void _fallback_save_regs(jit_state_t*, jit_int32_t);
8 #define fallback_load_regs(r0) _fallback_load_regs(_jit, r0)
9 static void _fallback_load_regs(jit_state_t*, jit_int32_t);
/* Emit a call to address i0 passing i1 as the single argument. */
10 #define fallback_calli(i0, i1) _fallback_calli(_jit, i0, i1)
11 static void _fallback_calli(jit_state_t*, jit_word_t, jit_word_t);
/* Software compare-and-swap emulation (see _fallback_casx below). */
12 #define fallback_casx(r0,r1,r2,r3,im) _fallback_casx(_jit,r0,r1,r2,r3,im)
13 static void _fallback_casx(jit_state_t *, jit_int32_t, jit_int32_t,
14 jit_int32_t, jit_int32_t, jit_word_t);
/* Bit-count fallbacks: count leading/trailing ones/zeros of r1 into r0. */
15 #define fallback_clo(r0,r1) _fallback_clo(_jit,r0,r1)
16 static void _fallback_clo(jit_state_t*, jit_int32_t, jit_int32_t);
17 #define fallback_clz(r0,r1) _fallback_clz(_jit,r0,r1)
18 static void _fallback_clz(jit_state_t*, jit_int32_t, jit_int32_t);
19 #define fallback_cto(r0,r1) _fallback_cto(_jit,r0,r1)
20 static void _fallback_cto(jit_state_t*, jit_int32_t, jit_int32_t);
21 #define fallback_ctz(r0,r1) _fallback_ctz(_jit,r0,r1)
22 static void _fallback_ctz(jit_state_t*, jit_int32_t, jit_int32_t);
/*
 * Per-architecture glue used by the fallbacks below:
 *   fallback_patch_jmpi / fallback_patch_at — patch a previously
 *     emitted forward jump/branch to its final target;
 *   fallback_jmpi / fallback_bnei / fallback_bmsr — emit an
 *     unconditional jump, a branch-if-not-equal-immediate, and a
 *     branch-if-mask-set, choosing each port's spelling (e.g. the
 *     *_p "patchable" variants on arm/s390).
 * NOTE(review): this listing elides the matching #else/#endif lines
 * (gaps in the original line numbering); only the visible
 * per-architecture cases survive here — consult the full source before
 * editing the conditional structure.
 */
23 # if defined(__ia64__)
24 # define fallback_patch_jmpi(inst,lbl) \
27 patch_at(jit_code_jmpi, inst, lbl); \
30 # define fallback_patch_jmpi(inst,lbl) fallback_patch_at(inst,lbl)
33 # define fallback_patch_at(inst,lbl) patch_at(arm_patch_jump,inst,lbl)
34 # elif defined(__ia64__)
35 # define fallback_patch_at(inst,lbl) \
38 patch_at(jit_code_bnei, inst, lbl); \
41 # define fallback_patch_at(inst,lbl) patch_at(inst,lbl)
/* Unconditional jump: mips/arm/s390 need an extra flag or the _p form. */
43 # if defined(__mips__)
44 # define fallback_jmpi(i0) jmpi(i0,1)
45 # elif defined(__arm__)
46 # define fallback_jmpi(i0) jmpi_p(i0,1)
47 # elif defined(__s390__) || defined(__s390x__)
48 # define fallback_jmpi(i0) jmpi(i0,1)
50 # define fallback_jmpi(i0) jmpi(i0)
/* Branch if register != immediate; s390 uses the patchable variant. */
52 # if defined(__mips__)
53 # define fallback_bnei(i0,r0,i1) bnei(i0,r0,i1)
54 # elif defined(__s390__) || defined(__s390x__)
55 # define fallback_bnei(i0,r0,i1) bnei_p(i0,r0,i1)
57 # define fallback_bnei(i0,r0,i1) bnei(i0,r0,i1)
/* Branch if (r0 & r1) has any bit set; s390 uses the patchable variant. */
59 # if defined(__s390__) || defined(__s390x__)
60 # define fallback_bmsr(i0,r0,r1) bmsr_p(i0,r0,r1)
62 # define fallback_bmsr(i0,r0,r1) bmsr(i0,r0,r1)
/*
 * _fallback_save: spill every JIT_R scratch register that is not
 * callee-saved (no jit_class_sav bit) to its stack slot at
 * _jitc->function->regoff[...] relative to JIT_FP, so state survives a
 * helper call emitted by a fallback.
 * NOTE(review): the body braces and original lines 69, 74 and 77-82
 * are elided from this listing; r0 is not used in the visible lines —
 * presumably the elided line 74 skips regno == r0 so the fallback's
 * result register is not spilled/reloaded; TODO confirm.
 */
68 _fallback_save(jit_state_t *_jit, jit_int32_t r0)
70 jit_int32_t offset, regno, spec;
71 for (offset = 0; offset < JIT_R_NUM; offset++) {
72 spec = _rvs[offset].spec;
73 regno = jit_regno(spec);
/* Only non-callee-saved registers need an explicit spill. */
75 if (!(spec & jit_class_sav))
76 stxi(_jitc->function->regoff[JIT_R(offset)], rn(JIT_FP), regno);
/*
 * _fallback_load: exact mirror of _fallback_save — reload every
 * non-callee-saved JIT_R scratch register from its JIT_FP-relative
 * stack slot after a fallback's helper call returns.
 * NOTE(review): body braces and original lines 84, 89 and 92-97 are
 * elided; as in _fallback_save, r0 is presumably the register to skip
 * (elided line 89) — TODO confirm against the full source.
 */
83 _fallback_load(jit_state_t *_jit, jit_int32_t r0)
85 jit_int32_t offset, regno, spec;
86 for (offset = 0; offset < JIT_R_NUM; offset++) {
87 spec = _rvs[offset].spec;
88 regno = jit_regno(spec);
/* Only non-callee-saved registers were spilled, so only they reload. */
90 if (!(spec & jit_class_sav))
91 ldxi(regno, rn(JIT_FP), _jitc->function->regoff[JIT_R(offset)]);
/*
 * _fallback_save_regs: save every register that is currently live or
 * holds a call argument (regarg/reglive bitsets) and is not
 * callee-saved.  A stack slot is allocated lazily with jit_allocai the
 * first time a register needs one (word-sized for GPRs, double-sized
 * otherwise), the register is recorded in regsav, and stored with
 * emit_stxi (GPR) or emit_stxi_d (FPR, elided #else branch).
 * NOTE(review): braces and original lines 99, 110-111, 113, 117 and
 * 119+ are elided; line 112's comparison against r0 presumably skips
 * saving the fallback's result register (elided line 113) — TODO
 * confirm.
 */
98 _fallback_save_regs(jit_state_t *_jit, jit_int32_t r0)
100 jit_int32_t regno, spec;
101 for (regno = 0; regno < _jitc->reglen; regno++) {
102 spec = _rvs[regno].spec;
/* In use as an argument or live across this point, and caller-saved. */
103 if ((jit_regset_tstbit(&_jitc->regarg, regno) ||
104 jit_regset_tstbit(&_jitc->reglive, regno)) &&
105 !(spec & jit_class_sav)) {
/* Lazily allocate the spill slot on first use. */
106 if (!_jitc->function->regoff[regno]) {
107 _jitc->function->regoff[regno] =
108 jit_allocai(spec & jit_class_gpr ?
109 sizeof(jit_word_t) : sizeof(jit_float64_t));
112 if ((spec & jit_class_gpr) && rn(regno) == r0)
/* Remember which registers were saved so load_regs can restore them. */
114 jit_regset_setbit(&_jitc->regsav, regno);
115 if (spec & jit_class_gpr)
116 emit_stxi(_jitc->function->regoff[regno], JIT_FP, regno);
118 emit_stxi_d(_jitc->function->regoff[regno], JIT_FP, regno);
/*
 * _fallback_load_regs: counterpart of _fallback_save_regs — reload
 * every live/argument, non-callee-saved register from its
 * JIT_FP-relative slot with emit_ldxi (GPR) or emit_ldxi_d (FPR,
 * elided #else branch).
 * NOTE(review): braces and original lines 125, 133, 137 and 139+ are
 * elided; the rn(regno) == r0 test presumably skips reloading the
 * register that now holds the fallback's result (elided line 133) —
 * TODO confirm.  Note it clears no regsav bits in the visible lines;
 * it sets them (line 134), mirroring save_regs — verify intent in the
 * full source.
 */
124 _fallback_load_regs(jit_state_t *_jit, jit_int32_t r0)
126 jit_int32_t regno, spec;
127 for (regno = 0; regno < _jitc->reglen; regno++) {
128 spec = _rvs[regno].spec;
129 if ((jit_regset_tstbit(&_jitc->regarg, regno) ||
130 jit_regset_tstbit(&_jitc->reglive, regno)) &&
131 !(spec & jit_class_sav)) {
132 if ((spec & jit_class_gpr) && rn(regno) == r0)
134 jit_regset_setbit(&_jitc->regsav, regno);
135 if (spec & jit_class_gpr)
136 emit_ldxi(regno, JIT_FP, _jitc->function->regoff[regno]);
138 emit_ldxi_d(regno, JIT_FP, _jitc->function->regoff[regno]);
/*
 * _fallback_calli: emit a call to the absolute address i0 passing i1
 * as the single word argument, per each port's calling convention.
 * NOTE(review): this listing is almost entirely elided (original lines
 * 145, 147, 150, 154, 156 and 158+ missing) — only the architecture
 * dispatch skeleton is visible.  hppa visibly loads the argument into
 * _R26_REGNO, presumably the first argument register of that ABI —
 * TODO confirm; the argument setup for the other ports is in the
 * elided lines.
 */
144 _fallback_calli(jit_state_t *_jit, jit_word_t i0, jit_word_t i1)
146 # if defined(__arm__)
148 # elif defined(__hppa__)
149 movi(_R26_REGNO, i1);
/* arm passes an extra "exchange" flag so the call can switch ISA mode
 * if needed — grounded only in the jit_exchange_p() call below. */
151 # if defined(__arm__)
152 calli(i0, jit_exchange_p());
153 # elif defined(__mips__)
155 # elif defined(__powerpc__) && _CALL_SYSV
157 # elif defined(__s390__) || defined(__s390x__)
/*
 * _fallback_casx: software compare-and-swap, serialized with a single
 * process-wide pthread mutex.  As the XXX note says, this is only
 * atomic with respect to other lightning-generated code that uses the
 * same fallback — not against arbitrary native atomic operations.
 * r0 receives the boolean result; the address is r1, or the immediate
 * i0 when r1 == _NOREG (the "casi" form, loaded into a scratch
 * callee-saved register).  r2/r3 are presumably the expected and new
 * values — the compare/store lines (original 179-183, 185-192) are
 * elided; TODO confirm.  Live caller-saved registers are spilled
 * before the lock call and reloaded after the unlock call so the
 * helper calls cannot clobber them.
 */
165 _fallback_casx(jit_state_t *_jit, jit_int32_t r0, jit_int32_t r1,
166 jit_int32_t r2, jit_int32_t r3, jit_word_t i0)
168 jit_int32_t r1_reg, iscasi;
169 jit_word_t jump, done;
170 /* XXX only attempts to fallback cas for lightning jit code */
171 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Immediate-address variant: materialize i0 in a scratch register. */
172 if ((iscasi = r1 == _NOREG)) {
173 r1_reg = jit_get_reg(jit_class_gpr|jit_class_sav);
177 fallback_save_regs(r0);
178 fallback_calli((jit_word_t)pthread_mutex_lock, (jit_word_t)&mutex);
/* Forward branch taken when the compare failed (r0 != 1); patched to
 * "done" below so the store is skipped — elided lines in between. */
184 jump = fallback_bnei(_jit->pc.w, r0, 1);
186 # if __WORDSIZE == 32
193 fallback_calli((jit_word_t)pthread_mutex_unlock, (jit_word_t)&mutex);
195 fallback_patch_at(jump, done);
196 fallback_load_regs(r0);
198 jit_unget_reg(r1_reg);
/*
 * _fallback_clo: count leading ones of r1 into r0.  The elided line
 * (original 205) presumably complements r1 into r0 so the problem
 * reduces to count-leading-zeros — TODO confirm.  Visible control
 * flow: if the (complemented) value in r0 is zero, every bit of r1 was
 * set, so the answer is __WORDSIZE and we jump straight to the end;
 * otherwise branch to the clz delegation.
 */
202 _fallback_clo(jit_state_t *_jit, jit_int32_t r0, jit_int32_t r1)
204 jit_word_t clz, done;
/* Branch taken when r0 != 0, i.e. some leading bit of r1 is clear. */
206 clz = fallback_bnei(_jit->pc.w, r0, 0);
207 movi(r0, __WORDSIZE);
208 done = fallback_jmpi(_jit->pc.w);
209 fallback_patch_at(clz, _jit->pc.w);
210 fallback_clz(r0, r0);
211 fallback_patch_jmpi(done, _jit->pc.w);
/*
 * _fallback_clz: count leading zeros of r1 into r0 by binary search.
 * If r1 == 0 the answer is __WORDSIZE and we jump to the end;
 * otherwise the value is copied to a scratch register and tested
 * against successively narrower high-bit masks
 * (0xffffffff00000000 on 64-bit, then 0xffff0000, 0xff00, ...),
 * branching with fallback_bmsr (branch-if-mask-set) at each step.
 * NOTE(review): the mask updates, shifts, and the running-count
 * accumulation (original lines 224, 227-228, 232-233, 235-236, 238,
 * 240-241, 243, 245-246, 248, 250-251, 253, 255-256, 258, 260) are
 * elided from this listing — only the test/patch skeleton is visible.
 */
215 _fallback_clz(jit_state_t *_jit, jit_int32_t r0, jit_int32_t r1)
217 jit_int32_t r1_reg, r2, r2_reg;
218 jit_word_t clz, l32, l16, l8, l4, l2, l1;
/* Branch taken when r1 != 0; fall through handles the all-zero case. */
219 l32 = fallback_bnei(_jit->pc.w, r1, 0);
220 movi(r0, __WORDSIZE);
221 clz = fallback_jmpi(_jit->pc.w);
222 fallback_patch_at(l32, _jit->pc.w);
/* Two scratch GPRs: a working copy of r1 and the current mask. */
223 r2_reg = jit_get_reg(jit_class_gpr);
225 r1_reg = jit_get_reg(jit_class_gpr);
226 movr(rn(r1_reg), r1);
/* 64-bit only: first test whether any bit of the high word is set. */
229 # if __WORDSIZE == 64
230 movi(r2, 0xffffffff00000000UL);
231 l32 = fallback_bmsr(_jit->pc.w, r1, r2);
234 fallback_patch_at(l32, _jit->pc.w);
/* Halve the mask width at every step: 16, 8, 4, 2, then 1 bit. */
237 movi(r2, 0xffff0000UL);
239 l16 = fallback_bmsr(_jit->pc.w, r1, r2);
242 fallback_patch_at(l16, _jit->pc.w);
244 l8 = fallback_bmsr(_jit->pc.w, r1, r2);
247 fallback_patch_at(l8, _jit->pc.w);
249 l4 = fallback_bmsr(_jit->pc.w, r1, r2);
252 fallback_patch_at(l4, _jit->pc.w);
254 l2 = fallback_bmsr(_jit->pc.w, r1, r2);
257 fallback_patch_at(l2, _jit->pc.w);
259 l1 = fallback_bmsr(_jit->pc.w, r1, r2);
261 fallback_patch_at(l1, _jit->pc.w);
/* "clz" was the early exit for the r1 == 0 case above. */
262 fallback_patch_jmpi(clz, _jit->pc.w);
263 jit_unget_reg(r2_reg);
264 jit_unget_reg(r1_reg);
/*
 * _fallback_cto: count trailing ones of r1 into r0 — trailing-bit
 * mirror of _fallback_clo.  The elided line (original 271) presumably
 * complements r1 into r0 so the problem reduces to
 * count-trailing-zeros — TODO confirm.  If the complemented value is
 * zero (all bits of r1 set) the answer is __WORDSIZE; otherwise the
 * work is delegated to fallback_ctz on r0 itself.
 */
268 _fallback_cto(jit_state_t *_jit, jit_int32_t r0, jit_int32_t r1)
270 jit_word_t ctz, done;
/* Branch taken when r0 != 0, i.e. some trailing bit of r1 is clear. */
272 ctz = fallback_bnei(_jit->pc.w, r0, 0);
273 movi(r0, __WORDSIZE);
274 done = fallback_jmpi(_jit->pc.w);
275 fallback_patch_at(ctz, _jit->pc.w);
276 fallback_ctz(r0, r0);
277 fallback_patch_jmpi(done, _jit->pc.w);
/*
 * _fallback_ctz: count trailing zeros of r1 into r0 by binary search —
 * trailing-bit mirror of _fallback_clz.  If r1 == 0 the answer is
 * __WORDSIZE; otherwise the value is tested against successively
 * narrower low-bit masks (0xffffffff on 64-bit, then 0xffff, ...)
 * with fallback_bmsr at each step.
 * NOTE(review): the mask updates, shifts and count accumulation
 * (original lines 290, 293-294, 298-299, 301-304, 306-307, 309,
 * 311-312, 314, 316-317, 319, 321-322, 324, 326) are elided, and the
 * function's closing lines (original 331+) run past the end of this
 * listing — treat this block as annotation only.
 */
281 _fallback_ctz(jit_state_t *_jit, jit_int32_t r0, jit_int32_t r1)
283 jit_int32_t r1_reg, r2, r2_reg;
284 jit_word_t ctz, l32, l16, l8, l4, l2, l1;
/* Branch taken when r1 != 0; fall through handles the all-zero case. */
285 l32 = fallback_bnei(_jit->pc.w, r1, 0);
286 movi(r0, __WORDSIZE);
287 ctz = fallback_jmpi(_jit->pc.w);
288 fallback_patch_at(l32, _jit->pc.w);
/* Two scratch GPRs: a working copy of r1 and the current mask. */
289 r2_reg = jit_get_reg(jit_class_gpr);
291 r1_reg = jit_get_reg(jit_class_gpr);
292 movr(rn(r1_reg), r1);
/* 64-bit only: first test whether any bit of the low word is set. */
295 # if __WORDSIZE == 64
296 movi(r2, 0xffffffffUL);
297 l32 = fallback_bmsr(_jit->pc.w, r1, r2);
300 fallback_patch_at(l32, _jit->pc.w);
/* Halve the mask width at every step: 16, 8, 4, 2, then 1 bit. */
305 l16 = fallback_bmsr(_jit->pc.w, r1, r2);
308 fallback_patch_at(l16, _jit->pc.w);
310 l8 = fallback_bmsr(_jit->pc.w, r1, r2);
313 fallback_patch_at(l8, _jit->pc.w);
315 l4 = fallback_bmsr(_jit->pc.w, r1, r2);
318 fallback_patch_at(l4, _jit->pc.w);
320 l2 = fallback_bmsr(_jit->pc.w, r1, r2);
323 fallback_patch_at(l2, _jit->pc.w);
325 l1 = fallback_bmsr(_jit->pc.w, r1, r2);
327 fallback_patch_at(l1, _jit->pc.w);
/* "ctz" was the early exit for the r1 == 0 case above. */
328 fallback_patch_jmpi(ctz, _jit->pc.w);
329 jit_unget_reg(r2_reg);
330 jit_unget_reg(r1_reg);