deps/lightrec/regcache.c
// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "debug.h"
#include "memmanager.h"
#include "lightning-wrapper.h"
#include "regcache.h"

#include <stdbool.h>
#include <stddef.h>
#include <string.h>	/* memcpy() / memset() used below */

#define REG_PC (offsetof(struct lightrec_state, curr_pc) / sizeof(u32))

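/* Register "priorities": the allocators below pick the unused, unlocked
 * entry with the lowest priority value, so contents that are cheap to
 * recreate (plain temporaries, cached constants, the zero register) are
 * recycled before loaded or dirty guest registers. */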
enum reg_priority {
	REG_IS_TEMP,
	REG_IS_TEMP_VALUE,
	REG_IS_ZERO,
	REG_IS_LOADED,
	REG_IS_DIRTY,

	REG_NB_PRIORITIES,
};

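/* Per-native-register bookkeeping.  'emulated_register' identifies the
 * guest register mapped here (only meaningful for entries with
 * prio >= REG_IS_ZERO).  'extend'/'zero_extend' record how an output
 * value must be extended once the register is freed, while
 * 'extended'/'zero_extended' describe the current state of the value.
 * 'value' caches the constant held when prio == REG_IS_TEMP_VALUE, and
 * 'locked' prevents the entry from being reallocated. */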
struct native_register {
	bool used, output, extend, extended,
	     zero_extend, zero_extended, locked;
	s16 emulated_register;
	intptr_t value;
	enum reg_priority prio;
};

struct regcache {
	struct lightrec_state *state;
	struct native_register lightrec_regs[NUM_REGS + NUM_TEMPS];
};

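/* Printable names for the 32 MIPS general-purpose registers, followed by
 * the LO and HI registers, indexed by register number. */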
static const char * mips_regs[] = {
	"zero",
	"at",
	"v0", "v1",
	"a0", "a1", "a2", "a3",
	"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
	"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
	"t8", "t9",
	"k0", "k1",
	"gp", "sp", "fp", "ra",
	"lo", "hi",
};

/* Forward declaration(s) */
static void clean_reg(jit_state_t *_jit,
		      struct native_register *nreg, u8 jit_reg, bool clean);

const char * lightrec_reg_name(u8 reg)
{
	return mips_regs[reg];
}

static inline bool lightrec_reg_is_zero(u8 jit_reg)
{
#if defined(__mips__) || defined(__alpha__) || defined(__riscv)
	if (jit_reg == _ZERO)
		return true;
#endif
	return false;
}

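/* On hosts that have a hardwired zero register (MIPS, Alpha, RISC-V), the
 * emulated $zero can be mapped straight onto it instead of consuming a
 * register cache slot. */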
static inline s8 lightrec_get_hardwired_reg(u16 reg)
{
#if defined(__mips__) || defined(__alpha__) || defined(__riscv)
	if (reg == 0)
		return _ZERO;
#endif
	return -1;
}

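/* The first NUM_REGS cache entries map to callee-saved JIT_V registers,
 * the remaining NUM_TEMPS entries to caller-saved JIT_R registers. */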
static inline u8 lightrec_reg_number(const struct regcache *cache,
				     const struct native_register *nreg)
{
	return (u8) (((uintptr_t) nreg - (uintptr_t) cache->lightrec_regs)
		     / sizeof(*nreg));
}

static inline u8 lightrec_reg_to_lightning(const struct regcache *cache,
					   const struct native_register *nreg)
{
	u8 offset = lightrec_reg_number(cache, nreg);

	if (offset < NUM_REGS)
		return JIT_V(FIRST_REG + offset);
	else
		return JIT_R(FIRST_TEMP + offset - NUM_REGS);
}

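/* Reverse mapping: find the cache entry behind a lightning register.  The
 * comparisons cope with lightning ports where the JIT_V / JIT_R macros
 * number their registers upwards or downwards. */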
static inline struct native_register * lightning_reg_to_lightrec(
		struct regcache *cache, u8 reg)
{
	if ((JIT_V0 > JIT_R0 && reg >= JIT_V0) ||
	    (JIT_V0 < JIT_R0 && reg < JIT_R0)) {
		if (JIT_V1 > JIT_V0)
			return &cache->lightrec_regs[reg - JIT_V(FIRST_REG)];
		else
			return &cache->lightrec_regs[JIT_V(FIRST_REG) - reg];
	} else {
		if (JIT_R1 > JIT_R0)
			return &cache->lightrec_regs[NUM_REGS + reg - JIT_R(FIRST_TEMP)];
		else
			return &cache->lightrec_regs[NUM_REGS + JIT_R(FIRST_TEMP) - reg];
	}
}

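/* Report whether the value held in 'jit_reg' is known to be sign- and/or
 * zero-extended.  The hardwired zero register trivially is both. */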
u8 lightrec_get_reg_in_flags(struct regcache *cache, u8 jit_reg)
{
	struct native_register *reg;
	u8 flags = 0;

	if (lightrec_reg_is_zero(jit_reg))
		return REG_EXT | REG_ZEXT;

	reg = lightning_reg_to_lightrec(cache, jit_reg);
	if (reg->extended)
		flags |= REG_EXT;
	if (reg->zero_extended)
		flags |= REG_ZEXT;

	return flags;
}

void lightrec_set_reg_out_flags(struct regcache *cache, u8 jit_reg, u8 flags)
{
	struct native_register *reg;

	if (!lightrec_reg_is_zero(jit_reg)) {
		reg = lightning_reg_to_lightrec(cache, jit_reg);
		reg->extend = flags & REG_EXT;
		reg->zero_extend = flags & REG_ZEXT;
	}
}

static struct native_register * alloc_temp(struct regcache *cache)
{
	struct native_register *elm, *nreg = NULL;
	enum reg_priority best = REG_NB_PRIORITIES;
	unsigned int i;

	/* We search the register list in reverse order. As temporaries are
	 * meant to be used only in the emitter functions, they can be mapped to
	 * caller-saved registers, as they won't have to be saved back to
	 * memory. */
	for (i = ARRAY_SIZE(cache->lightrec_regs); i; i--) {
		elm = &cache->lightrec_regs[i - 1];

		if (!elm->used && !elm->locked && elm->prio < best) {
			nreg = elm;
			best = elm->prio;

			if (best == REG_IS_TEMP)
				break;
		}
	}

	return nreg;
}

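/* Return the cache entry that currently holds guest register 'reg', or
 * NULL if it is not mapped.  When looking for an output register, locked
 * entries are skipped. */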
static struct native_register * find_mapped_reg(struct regcache *cache,
						u16 reg, bool out)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cache->lightrec_regs); i++) {
		struct native_register *nreg = &cache->lightrec_regs[i];
		if ((nreg->prio >= REG_IS_ZERO) &&
		    nreg->emulated_register == reg &&
		    (!out || !nreg->locked))
			return nreg;
	}

	return NULL;
}

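/* Pick a native register for guest register 'reg': reuse an existing
 * mapping if there is one, otherwise select the unused, unlocked entry
 * with the lowest priority. */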
static struct native_register * alloc_in_out(struct regcache *cache,
					     u16 reg, bool out)
{
	struct native_register *elm, *nreg = NULL;
	enum reg_priority best = REG_NB_PRIORITIES;
	unsigned int i;

	/* Try to find if the register is already mapped somewhere */
	nreg = find_mapped_reg(cache, reg, out);
	if (nreg)
		return nreg;

	nreg = NULL;

	for (i = 0; i < ARRAY_SIZE(cache->lightrec_regs); i++) {
		elm = &cache->lightrec_regs[i];

		if (!elm->used && !elm->locked && elm->prio < best) {
			nreg = elm;
			best = elm->prio;

			if (best == REG_IS_TEMP)
				break;
		}
	}

	return nreg;
}

static void lightrec_discard_nreg(struct native_register *nreg)
{
	nreg->extended = false;
	nreg->zero_extended = false;
	nreg->output = false;
	nreg->used = false;
	nreg->locked = false;
	nreg->emulated_register = -1;
	nreg->prio = REG_IS_TEMP;
}

static void lightrec_unload_nreg(struct regcache *cache, jit_state_t *_jit,
				 struct native_register *nreg, u8 jit_reg)
{
	clean_reg(_jit, nreg, jit_reg, false);
	lightrec_discard_nreg(nreg);
}

void lightrec_unload_reg(struct regcache *cache, jit_state_t *_jit, u8 jit_reg)
{
	if (lightrec_reg_is_zero(jit_reg))
		return;

	lightrec_unload_nreg(cache, _jit,
			     lightning_reg_to_lightrec(cache, jit_reg), jit_reg);
}

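/* Claim a specific lightning register for direct use by the emitter,
 * storing back and dropping whatever guest register was mapped to it. */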
u8 lightrec_alloc_reg(struct regcache *cache, jit_state_t *_jit, u8 jit_reg)
{
	struct native_register *reg;

	if (lightrec_reg_is_zero(jit_reg))
		return jit_reg;

	reg = lightning_reg_to_lightrec(cache, jit_reg);
	lightrec_unload_nreg(cache, _jit, reg, jit_reg);

	reg->used = true;
	reg->prio = REG_IS_LOADED;
	return jit_reg;
}

u8 lightrec_alloc_reg_temp(struct regcache *cache, jit_state_t *_jit)
{
	u8 jit_reg;
	struct native_register *nreg = alloc_temp(cache);
	if (!nreg) {
		/* No free register, no dirty register to free. */
		pr_err("No more registers! Abandon ship!\n");
		return 0;
	}

	jit_reg = lightrec_reg_to_lightning(cache, nreg);
	lightrec_unload_nreg(cache, _jit, nreg, jit_reg);

	nreg->prio = REG_IS_TEMP;
	nreg->used = true;
	return jit_reg;
}

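/* Return the lightning register that already caches the constant 'value',
 * or -1 if none does.  This lets the emitters reuse a constant loaded
 * earlier instead of emitting another movi. */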
s8 lightrec_get_reg_with_value(struct regcache *cache, intptr_t value)
{
	struct native_register *nreg;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cache->lightrec_regs); i++) {
		nreg = &cache->lightrec_regs[i];

		if (nreg->prio == REG_IS_TEMP_VALUE && nreg->value == value) {
			nreg->used = true;
			return lightrec_reg_to_lightning(cache, nreg);
		}
	}

	return -1;
}

void lightrec_temp_set_value(struct regcache *cache, u8 jit_reg, intptr_t value)
{
	struct native_register *nreg;

	nreg = lightning_reg_to_lightrec(cache, jit_reg);

	nreg->prio = REG_IS_TEMP_VALUE;
	nreg->value = value;
}

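/* Allocate a temporary pre-loaded with the constant 'value', reusing a
 * register that already holds it when possible. */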
u8 lightrec_alloc_reg_temp_with_value(struct regcache *cache,
				      jit_state_t *_jit, intptr_t value)
{
	s8 reg;

	reg = lightrec_get_reg_with_value(cache, value);
	if (reg < 0) {
		reg = lightrec_alloc_reg_temp(cache, _jit);
		jit_movi((u8)reg, value);
		lightrec_temp_set_value(cache, (u8)reg, value);
	}

	return (u8)reg;
}

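/* Map guest register 'reg' for writing.  The REG_EXT / REG_ZEXT flags
 * record how the new value will be extended; they take effect when the
 * register is freed. */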
u8 lightrec_alloc_reg_out(struct regcache *cache, jit_state_t *_jit,
			  u16 reg, u8 flags)
{
	struct native_register *nreg;
	u8 jit_reg;
	s8 hw_reg;

	hw_reg = lightrec_get_hardwired_reg(reg);
	if (hw_reg >= 0)
		return (u8) hw_reg;

	nreg = alloc_in_out(cache, reg, true);
	if (!nreg) {
		/* No free register, no dirty register to free. */
		pr_err("No more registers! Abandon ship!\n");
		return 0;
	}

	jit_reg = lightrec_reg_to_lightning(cache, nreg);

	/* If we get a dirty register that doesn't correspond to the one
	 * we're requesting, store back the old value */
	if (nreg->emulated_register != reg)
		lightrec_unload_nreg(cache, _jit, nreg, jit_reg);

	nreg->used = true;
	nreg->output = true;
	nreg->emulated_register = reg;
	nreg->extend = flags & REG_EXT;
	nreg->zero_extend = flags & REG_ZEXT;
	nreg->prio = reg ? REG_IS_LOADED : REG_IS_ZERO;
	return jit_reg;
}

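/* Map guest register 'reg' for reading.  If the value is not already
 * cached it is loaded from state->regs.gpr, then sign- or zero-extended
 * as requested by the REG_EXT / REG_ZEXT flags. */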
u8 lightrec_alloc_reg_in(struct regcache *cache, jit_state_t *_jit,
			 u16 reg, u8 flags)
{
	struct native_register *nreg;
	u8 jit_reg;
	bool reg_changed;
	s8 hw_reg;

	hw_reg = lightrec_get_hardwired_reg(reg);
	if (hw_reg >= 0)
		return (u8) hw_reg;

	nreg = alloc_in_out(cache, reg, false);
	if (!nreg) {
		/* No free register, no dirty register to free. */
		pr_err("No more registers! Abandon ship!\n");
		return 0;
	}

	jit_reg = lightrec_reg_to_lightning(cache, nreg);

	/* If we get a dirty register that doesn't correspond to the one
	 * we're requesting, store back the old value */
	reg_changed = nreg->emulated_register != reg;
	if (reg_changed)
		lightrec_unload_nreg(cache, _jit, nreg, jit_reg);

	if (nreg->prio < REG_IS_LOADED && reg != 0) {
		s16 offset = offsetof(struct lightrec_state, regs.gpr)
			     + (reg << 2);

		nreg->zero_extended = flags & REG_ZEXT;
		nreg->extended = !nreg->zero_extended;

		/* Load previous value from register cache */
		if (nreg->zero_extended)
			jit_ldxi_ui(jit_reg, LIGHTREC_REG_STATE, offset);
		else
			jit_ldxi_i(jit_reg, LIGHTREC_REG_STATE, offset);

		nreg->prio = REG_IS_LOADED;
	}

	/* Clear register r0 before use */
	if (reg == 0 && nreg->prio != REG_IS_ZERO) {
		jit_movi(jit_reg, 0);
		nreg->extended = true;
		nreg->zero_extended = true;
		nreg->prio = REG_IS_ZERO;
	}

	nreg->used = true;
	nreg->output = false;
	nreg->emulated_register = reg;

	if ((flags & REG_EXT) && !nreg->extended &&
	    (!nreg->zero_extended || !(flags & REG_ZEXT))) {
		nreg->extended = true;
		nreg->zero_extended = false;
		jit_extr_i(jit_reg, jit_reg);
	} else if (!(flags & REG_EXT) && (flags & REG_ZEXT) &&
		   !nreg->zero_extended) {
		nreg->zero_extended = true;
		nreg->extended = false;
		jit_extr_ui(jit_reg, jit_reg);
	}

	return jit_reg;
}

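/* Re-tag an already-mapped lightning register as the output for guest
 * register 'reg_out', discarding any previous mapping of 'reg_out'.  The
 * value currently held in 'jit_reg' is stored back first if it was dirty. */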
void lightrec_remap_reg(struct regcache *cache, jit_state_t *_jit,
			u8 jit_reg, u16 reg_out, bool discard)
{
	struct native_register *nreg;

	lightrec_discard_reg_if_loaded(cache, reg_out);

	nreg = lightning_reg_to_lightrec(cache, jit_reg);
	clean_reg(_jit, nreg, jit_reg, !discard);

	nreg->output = true;
	nreg->emulated_register = reg_out;
	nreg->extend = nreg->extended;
	nreg->zero_extend = nreg->zero_extended;
}

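/* When JIT_V0 already holds the PC, a constant that lies within a signed
 * 16-bit offset of the PC can be materialized with a single add-immediate
 * instead of a full movi. */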
static bool reg_pc_is_mapped(struct regcache *cache)
{
	struct native_register *nreg = lightning_reg_to_lightrec(cache, JIT_V0);

	return nreg->prio == REG_IS_LOADED && nreg->emulated_register == REG_PC;
}

void lightrec_load_imm(struct regcache *cache,
		       jit_state_t *_jit, u8 jit_reg, u32 pc, u32 imm)
{
	s32 delta = imm - pc;

	if (!reg_pc_is_mapped(cache) || !can_sign_extend(delta, 16))
		jit_movi(jit_reg, imm);
	else if (jit_reg != JIT_V0 || delta)
		jit_addi(jit_reg, JIT_V0, delta);
}

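/* Load the constant next PC 'imm': when lightrec_store_next_pc() is set,
 * it is written to state->next_pc through a temporary; otherwise it is
 * left in JIT_V0 and that mapping is locked. */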
void lightrec_load_next_pc_imm(struct regcache *cache,
			       jit_state_t *_jit, u32 pc, u32 imm)
{
	struct native_register *nreg = lightning_reg_to_lightrec(cache, JIT_V0);
	u8 reg = JIT_V0;

	if (lightrec_store_next_pc())
		reg = lightrec_alloc_reg_temp(cache, _jit);

	if (reg_pc_is_mapped(cache)) {
		/* JIT_V0 contains next PC - so we can overwrite it */
		lightrec_load_imm(cache, _jit, reg, pc, imm);
	} else {
		/* JIT_V0 contains something else - invalidate it */
		if (reg == JIT_V0)
			lightrec_unload_reg(cache, _jit, JIT_V0);

		jit_movi(reg, imm);
	}

	if (lightrec_store_next_pc()) {
		jit_stxi_i(offsetof(struct lightrec_state, next_pc),
			   LIGHTREC_REG_STATE, reg);
		lightrec_free_reg(cache, reg);
	} else {
		nreg->prio = REG_IS_LOADED;
		nreg->emulated_register = -1;
		nreg->locked = true;
	}
}

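/* Set the next PC from guest register 'reg': either store its value to
 * state->next_pc, or move it (zero-extended) into JIT_V0 and lock that
 * mapping. */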
void lightrec_load_next_pc(struct regcache *cache, jit_state_t *_jit, u8 reg)
{
	struct native_register *nreg_v0, *nreg;
	u16 offset;
	u8 jit_reg;

	if (lightrec_store_next_pc()) {
		jit_reg = lightrec_alloc_reg_in(cache, _jit, reg, 0);
		offset = offsetof(struct lightrec_state, next_pc);
		jit_stxi_i(offset, LIGHTREC_REG_STATE, jit_reg);
		lightrec_free_reg(cache, jit_reg);

		return;
	}

	/* Invalidate JIT_V0 if it is not mapped to 'reg' */
	nreg_v0 = lightning_reg_to_lightrec(cache, JIT_V0);
	if (nreg_v0->prio >= REG_IS_LOADED && nreg_v0->emulated_register != reg)
		lightrec_unload_nreg(cache, _jit, nreg_v0, JIT_V0);

	nreg = find_mapped_reg(cache, reg, false);
	if (!nreg) {
		/* Not mapped - load the value from the register cache */

		offset = offsetof(struct lightrec_state, regs.gpr) + (reg << 2);
		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE, offset);

		nreg_v0->prio = REG_IS_LOADED;
		nreg_v0->emulated_register = reg;

	} else if (nreg == nreg_v0) {
		/* The target register 'reg' is mapped to JIT_V0 */

		if (!nreg->zero_extended)
			jit_extr_ui(JIT_V0, JIT_V0);

	} else {
		/* The target register 'reg' is mapped elsewhere. In that case,
		 * move the register's value to JIT_V0 and re-map it in the
		 * register cache. We can then safely discard the original
		 * mapped register (even if it was dirty). */

		jit_reg = lightrec_reg_to_lightning(cache, nreg);
		if (nreg->zero_extended)
			jit_movr(JIT_V0, jit_reg);
		else
			jit_extr_ui(JIT_V0, jit_reg);

		*nreg_v0 = *nreg;
		lightrec_discard_nreg(nreg);
	}

	if (lightrec_store_next_pc()) {
		jit_stxi_i(offsetof(struct lightrec_state, next_pc),
			   LIGHTREC_REG_STATE, JIT_V0);
	} else {
		lightrec_clean_reg(cache, _jit, JIT_V0);

		nreg_v0->zero_extended = true;
		nreg_v0->locked = true;
	}
}

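/* Register lifecycle helpers: "freeing" a register only marks it as no
 * longer in use by the current emitter (outputs become dirty), while
 * "cleaning" stores a dirty value back to the state, and "unloading"
 * additionally drops the mapping. */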
static void free_reg(struct native_register *nreg)
{
	/* Set output registers as dirty */
	if (nreg->used && nreg->output && nreg->emulated_register > 0)
		nreg->prio = REG_IS_DIRTY;
	if (nreg->output) {
		nreg->extended = nreg->extend;
		nreg->zero_extended = nreg->zero_extend;
	}
	nreg->used = false;
}

void lightrec_free_reg(struct regcache *cache, u8 jit_reg)
{
	if (!lightrec_reg_is_zero(jit_reg))
		free_reg(lightning_reg_to_lightrec(cache, jit_reg));
}

void lightrec_free_regs(struct regcache *cache)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cache->lightrec_regs); i++)
		free_reg(&cache->lightrec_regs[i]);
}

static void clean_reg(jit_state_t *_jit,
		      struct native_register *nreg, u8 jit_reg, bool clean)
{
	/* If we get a dirty register, store back the old value */
	if (nreg->prio == REG_IS_DIRTY) {
		s16 offset = offsetof(struct lightrec_state, regs.gpr)
			     + (nreg->emulated_register << 2);

		jit_stxi_i(offset, LIGHTREC_REG_STATE, jit_reg);

		if (clean) {
			if (nreg->emulated_register == 0)
				nreg->prio = REG_IS_ZERO;
			else
				nreg->prio = REG_IS_LOADED;
		}
	}
}

static void clean_regs(struct regcache *cache, jit_state_t *_jit, bool clean)
{
	unsigned int i;

	for (i = 0; i < NUM_REGS; i++) {
		clean_reg(_jit, &cache->lightrec_regs[i],
			  JIT_V(FIRST_REG + i), clean);
	}
	for (i = 0; i < NUM_TEMPS; i++) {
		clean_reg(_jit, &cache->lightrec_regs[i + NUM_REGS],
			  JIT_R(FIRST_TEMP + i), clean);
	}
}

void lightrec_storeback_regs(struct regcache *cache, jit_state_t *_jit)
{
	clean_regs(cache, _jit, false);
}

void lightrec_clean_regs(struct regcache *cache, jit_state_t *_jit)
{
	clean_regs(cache, _jit, true);
}

bool lightrec_has_dirty_regs(struct regcache *cache)
{
	unsigned int i;

	for (i = 0; i < NUM_REGS + NUM_TEMPS; i++)
		if (cache->lightrec_regs[i].prio == REG_IS_DIRTY)
			return true;

	return false;
}

void lightrec_clean_reg(struct regcache *cache, jit_state_t *_jit, u8 jit_reg)
{
	struct native_register *reg;

	if (!lightrec_reg_is_zero(jit_reg)) {
		reg = lightning_reg_to_lightrec(cache, jit_reg);
		clean_reg(_jit, reg, jit_reg, true);
	}
}

bool lightrec_reg_is_loaded(struct regcache *cache, u16 reg)
{
	return !!find_mapped_reg(cache, reg, false);
}

void lightrec_clean_reg_if_loaded(struct regcache *cache, jit_state_t *_jit,
				  u16 reg, bool unload)
{
	struct native_register *nreg;
	u8 jit_reg;

	nreg = find_mapped_reg(cache, reg, false);
	if (nreg) {
		jit_reg = lightrec_reg_to_lightning(cache, nreg);

		if (unload)
			lightrec_unload_nreg(cache, _jit, nreg, jit_reg);
		else
			clean_reg(_jit, nreg, jit_reg, true);
	}
}

void lightrec_discard_reg_if_loaded(struct regcache *cache, u16 reg)
{
	struct native_register *nreg;

	nreg = find_mapped_reg(cache, reg, false);
	if (nreg)
		lightrec_discard_nreg(nreg);
}

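/* Snapshot and later restore the whole register cache state; the emitter
 * uses these when compiling the two possible paths of a branch, so both
 * start from the same register mappings. */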
struct native_register * lightrec_regcache_enter_branch(struct regcache *cache)
{
	struct native_register *backup;

	backup = lightrec_malloc(cache->state, MEM_FOR_LIGHTREC,
				 sizeof(cache->lightrec_regs));
	memcpy(backup, &cache->lightrec_regs, sizeof(cache->lightrec_regs));

	return backup;
}

void lightrec_regcache_leave_branch(struct regcache *cache,
				    struct native_register *regs)
{
	memcpy(&cache->lightrec_regs, regs, sizeof(cache->lightrec_regs));
	lightrec_free(cache->state, MEM_FOR_LIGHTREC,
		      sizeof(cache->lightrec_regs), regs);
}

void lightrec_regcache_reset(struct regcache *cache)
{
	memset(&cache->lightrec_regs, 0, sizeof(cache->lightrec_regs));
}

void lightrec_preload_pc(struct regcache *cache, jit_state_t *_jit)
{
	struct native_register *nreg;

	/* The block's PC is loaded in JIT_V0 at the start of the block */
	nreg = lightning_reg_to_lightrec(cache, JIT_V0);
	nreg->emulated_register = REG_PC;
	nreg->prio = REG_IS_LOADED;
	nreg->zero_extended = true;

	jit_live(JIT_V0);
}

struct regcache * lightrec_regcache_init(struct lightrec_state *state)
{
	struct regcache *cache;

	cache = lightrec_calloc(state, MEM_FOR_LIGHTREC, sizeof(*cache));
	if (!cache)
		return NULL;

	cache->state = state;

	return cache;
}

void lightrec_free_regcache(struct regcache *cache)
{
	lightrec_free(cache->state, MEM_FOR_LIGHTREC,
		      sizeof(*cache), cache);
}

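/* Emit jit_live() for the registers whose contents must be preserved at
 * this point, so that GNU Lightning's own liveness analysis does not
 * consider them dead in the code that follows. */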
void lightrec_regcache_mark_live(struct regcache *cache, jit_state_t *_jit)
{
	struct native_register *nreg;
	unsigned int i;

#ifdef _WIN32
	/* FIXME: GNU Lightning on Windows seems to use our mapped registers as
	 * temporaries. Until the actual bug is found and fixed, unconditionally
	 * mark our registers as live here. */
	for (i = 0; i < NUM_REGS; i++) {
		nreg = &cache->lightrec_regs[i];

		if (nreg->used || nreg->prio > REG_IS_TEMP)
			jit_live(JIT_V(FIRST_REG + i));
	}
#endif

	for (i = 0; i < NUM_TEMPS; i++) {
		nreg = &cache->lightrec_regs[NUM_REGS + i];

		if (nreg->used || nreg->prio > REG_IS_TEMP)
			jit_live(JIT_R(FIRST_TEMP + i));
	}

	jit_live(LIGHTREC_REG_STATE);
	jit_live(LIGHTREC_REG_CYCLE);
}