add a thp-based huge page alloc fallback
[pcsx_rearmed.git] / deps / lightrec / lightrec.c
// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "blockcache.h"
#include "debug.h"
#include "disassembler.h"
#include "emitter.h"
#include "interpreter.h"
#include "lightrec-config.h"
#include "lightning-wrapper.h"
#include "lightrec.h"
#include "memmanager.h"
#include "reaper.h"
#include "recompiler.h"
#include "regcache.h"
#include "optimizer.h"
#include "tlsf/tlsf.h"

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#if ENABLE_THREADED_COMPILER
#include <stdatomic.h>
#endif
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static struct block * lightrec_precompile_block(struct lightrec_state *state,
						u32 pc);
static bool lightrec_block_is_fully_tagged(const struct block *block);

static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);

static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u8 data)
{
	*(u8 *)host = data;

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 1);
}

static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u16 data)
{
	*(u16 *)host = HTOLE16(data);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 2);
}

static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u32 data)
{
	*(u32 *)host = HTOLE32(data);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 4);
}

static u8 lightrec_default_lb(struct lightrec_state *state,
			      u32 opcode, void *host, u32 addr)
{
	return *(u8 *)host;
}

static u16 lightrec_default_lh(struct lightrec_state *state,
			       u32 opcode, void *host, u32 addr)
{
	return LE16TOH(*(u16 *)host);
}

static u32 lightrec_default_lw(struct lightrec_state *state,
			       u32 opcode, void *host, u32 addr)
{
	return LE32TOH(*(u32 *)host);
}

static const struct lightrec_mem_map_ops lightrec_default_ops = {
	.sb = lightrec_default_sb,
	.sh = lightrec_default_sh,
	.sw = lightrec_default_sw,
	.lb = lightrec_default_lb,
	.lh = lightrec_default_lh,
	.lw = lightrec_default_lw,
};

static void __segfault_cb(struct lightrec_state *state, u32 addr,
			  const struct block *block)
{
	lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
	pr_err("Segmentation fault in recompiled code: invalid "
	       "load/store at address "PC_FMT"\n", addr);
	if (block)
		pr_err("Was executing block "PC_FMT"\n", block->pc);
}

static void lightrec_swl(struct lightrec_state *state,
			 const struct lightrec_mem_map_ops *ops,
			 u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = shift < 3 ? GENMASK(31, (shift + 1) * 8) : 0;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	data = (data >> ((3 - shift) * 8)) | (old_data & mask);

	ops->sw(state, opcode, host, addr, data);
}

static void lightrec_swr(struct lightrec_state *state,
			 const struct lightrec_mem_map_ops *ops,
			 u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = (1 << (shift * 8)) - 1;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	data = (data << (shift * 8)) | (old_data & mask);

	ops->sw(state, opcode, host, addr, data);
}
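
/* Worked example of the unaligned-store merge above (little-endian,
 * illustrative values only): with the register holding 0xAABBCCDD and the
 * memory word holding 0x11223344, SWL at (addr & 3) == 1 computes shift = 1,
 * mask = 0xffff0000 and stores
 * (0xAABBCCDD >> 16) | (0x11223344 & 0xffff0000) = 0x1122AABB; SWR at the
 * same address computes mask = 0xff and stores
 * (0xAABBCCDD << 8) | (0x11223344 & 0xff) = 0xBBCCDD44. */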

static void lightrec_swc2(struct lightrec_state *state, union code op,
			  const struct lightrec_mem_map_ops *ops,
			  void *host, u32 addr)
{
	u32 data = lightrec_mfc2(state, op.i.rt);

	ops->sw(state, op.opcode, host, addr, data);
}

static u32 lightrec_lwl(struct lightrec_state *state,
			const struct lightrec_mem_map_ops *ops,
			u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = (1 << (24 - shift * 8)) - 1;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	return (data & mask) | (old_data << (24 - shift * 8));
}

static u32 lightrec_lwr(struct lightrec_state *state,
			const struct lightrec_mem_map_ops *ops,
			u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = shift ? GENMASK(31, 32 - shift * 8) : 0;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	return (data & mask) | (old_data >> (shift * 8));
}
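
/* Mirror example for the unaligned loads (same illustrative values): with the
 * register holding 0xAABBCCDD and the memory word holding 0x11223344, LWL at
 * (addr & 3) == 1 computes mask = 0xffff and returns
 * (0xAABBCCDD & 0xffff) | (0x11223344 << 16) = 0x3344CCDD; LWR at the same
 * address returns (0xAABBCCDD & 0xff000000) | (0x11223344 >> 8) = 0xAA112233. */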

static void lightrec_lwc2(struct lightrec_state *state, union code op,
			  const struct lightrec_mem_map_ops *ops,
			  void *host, u32 addr)
{
	u32 data = ops->lw(state, op.opcode, host, addr);

	lightrec_mtc2(state, op.i.rt, data);
}

static void lightrec_invalidate_map(struct lightrec_state *state,
		const struct lightrec_mem_map *map, u32 addr, u32 len)
{
	if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
		memset(lut_address(state, lut_offset(addr)), 0,
		       ((len + 3) / 4) * lut_elm_size(state));
	}
}

static enum psx_map
lightrec_get_map_idx(struct lightrec_state *state, u32 kaddr)
{
	const struct lightrec_mem_map *map;
	unsigned int i;

	for (i = 0; i < state->nb_maps; i++) {
		map = &state->maps[i];

		if (kaddr >= map->pc && kaddr < map->pc + map->length)
			return (enum psx_map) i;
	}

	return PSX_MAP_UNKNOWN;
}

const struct lightrec_mem_map *
lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
{
	const struct lightrec_mem_map *map;
	enum psx_map idx;
	u32 addr;

	idx = lightrec_get_map_idx(state, kaddr);
	if (idx == PSX_MAP_UNKNOWN)
		return NULL;

	map = &state->maps[idx];
	addr = kaddr - map->pc;

	while (map->mirror_of)
		map = map->mirror_of;

	if (host)
		*host = map->address + addr;

	return map;
}

u32 lightrec_rw(struct lightrec_state *state, union code op, u32 base,
		u32 data, u32 *flags, struct block *block, u16 offset)
{
	const struct lightrec_mem_map *map;
	const struct lightrec_mem_map_ops *ops;
	u32 opcode = op.opcode;
	bool was_tagged = true;
	u16 old_flags;
	u32 addr;
	void *host;

	addr = kunseg(base + (s16) op.i.imm);

	map = lightrec_get_map(state, &host, addr);
	if (!map) {
		__segfault_cb(state, addr, block);
		return 0;
	}

	if (flags)
		was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(*flags);

	if (likely(!map->ops)) {
		if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags)) {
			/* Force parallel port accesses as HW accesses, because
			 * the direct-I/O emitters can't differentiate them. */
			if (unlikely(map == &state->maps[PSX_MAP_PARALLEL_PORT]))
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
			/* If the base register holds 0x0, be extra suspicious.
			 * Some games (e.g. Sled Storm) actually trigger
			 * segmentation faults by using uninitialized pointers,
			 * which are later initialized to point to hardware
			 * registers. */
			else if (op.i.rs && base == 0x0)
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
			else
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
		}

		ops = &lightrec_default_ops;
	} else if (flags &&
		   LIGHTREC_FLAGS_GET_IO_MODE(*flags) == LIGHTREC_IO_DIRECT_HW) {
		ops = &lightrec_default_ops;
	} else {
		if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
			*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);

		ops = map->ops;
	}

	if (!was_tagged) {
		old_flags = block_set_flags(block, BLOCK_SHOULD_RECOMPILE);

		if (!(old_flags & BLOCK_SHOULD_RECOMPILE)) {
			pr_debug("Opcode of block at "PC_FMT" has been tagged"
				 " - flag for recompilation\n", block->pc);

			lut_write(state, lut_offset(block->pc), NULL);
		}
	}

	switch (op.i.op) {
	case OP_SB:
		ops->sb(state, opcode, host, addr, (u8) data);
		return 0;
	case OP_SH:
		ops->sh(state, opcode, host, addr, (u16) data);
		return 0;
	case OP_SWL:
		lightrec_swl(state, ops, opcode, host, addr, data);
		return 0;
	case OP_SWR:
		lightrec_swr(state, ops, opcode, host, addr, data);
		return 0;
	case OP_SW:
		ops->sw(state, opcode, host, addr, data);
		return 0;
	case OP_SWC2:
		lightrec_swc2(state, op, ops, host, addr);
		return 0;
	case OP_LB:
		return (s32) (s8) ops->lb(state, opcode, host, addr);
	case OP_LBU:
		return ops->lb(state, opcode, host, addr);
	case OP_LH:
		return (s32) (s16) ops->lh(state, opcode, host, addr);
	case OP_LHU:
		return ops->lh(state, opcode, host, addr);
	case OP_LWC2:
		lightrec_lwc2(state, op, ops, host, addr);
		return 0;
	case OP_LWL:
		return lightrec_lwl(state, ops, opcode, host, addr, data);
	case OP_LWR:
		return lightrec_lwr(state, ops, opcode, host, addr, data);
	case OP_LW:
	default:
		return ops->lw(state, opcode, host, addr);
	}
}

static void lightrec_rw_helper(struct lightrec_state *state,
			       union code op, u32 *flags,
			       struct block *block, u16 offset)
{
	u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
			      state->regs.gpr[op.i.rt], flags, block, offset);

	switch (op.i.op) {
	case OP_LB:
	case OP_LBU:
	case OP_LH:
	case OP_LHU:
	case OP_LWL:
	case OP_LWR:
	case OP_LW:
		if (OPT_HANDLE_LOAD_DELAYS && unlikely(!state->in_delay_slot_n)) {
			state->temp_reg = ret;
			state->in_delay_slot_n = 0xff;
		} else if (op.i.rt) {
			state->regs.gpr[op.i.rt] = ret;
		}
		fallthrough;
	default:
		break;
	}
}

static void lightrec_rw_cb(struct lightrec_state *state, u32 arg)
{
	lightrec_rw_helper(state, (union code) arg, NULL, NULL, 0);
}

static void lightrec_rw_generic_cb(struct lightrec_state *state, u32 arg)
{
	struct block *block;
	struct opcode *op;
	u16 offset = (u16)arg;

	block = lightrec_find_block_from_lut(state->block_cache,
					     arg >> 16, state->curr_pc);
	if (unlikely(!block)) {
		pr_err("rw_generic: No block found in LUT for "PC_FMT" offset 0x%"PRIx16"\n",
		       state->curr_pc, offset);
		lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
		return;
	}

	op = &block->opcode_list[offset];
	lightrec_rw_helper(state, op->c, &op->flags, block, offset);
}

static u32 clamp_s32(s32 val, s32 min, s32 max)
{
	return val < min ? min : val > max ? max : val;
}

static u16 load_u16(u32 *ptr)
{
	return ((struct u16x2 *) ptr)->l;
}

static void store_u16(u32 *ptr, u16 value)
{
	((struct u16x2 *) ptr)->l = value;
}

static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
{
	s16 gteir1, gteir2, gteir3;

	switch (reg) {
	case 1:
	case 3:
	case 5:
	case 8:
	case 9:
	case 10:
	case 11:
		return (s32)(s16) load_u16(&state->regs.cp2d[reg]);
	case 7:
	case 16:
	case 17:
	case 18:
	case 19:
		return load_u16(&state->regs.cp2d[reg]);
	case 28:
	case 29:
		gteir1 = (s16) load_u16(&state->regs.cp2d[9]);
		gteir2 = (s16) load_u16(&state->regs.cp2d[10]);
		gteir3 = (s16) load_u16(&state->regs.cp2d[11]);

		return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
		       clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
		       clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
	case 15:
		reg = 14;
		fallthrough;
	default:
		return state->regs.cp2d[reg];
	}
}

u32 lightrec_mfc(struct lightrec_state *state, union code op)
{
	u32 val;

	if (op.i.op == OP_CP0)
		return state->regs.cp0[op.r.rd];

	if (op.i.op == OP_SWC2) {
		val = lightrec_mfc2(state, op.i.rt);
	} else if (op.r.rs == OP_CP2_BASIC_MFC2) {
		val = lightrec_mfc2(state, op.r.rd);
	} else {
		val = state->regs.cp2c[op.r.rd];

		switch (op.r.rd) {
		case 4:
		case 12:
		case 20:
		case 26:
		case 27:
		case 29:
		case 30:
			val = (u32)(s16)val;
			fallthrough;
		default:
			break;
		}
	}

	if (state->ops.cop2_notify)
		(*state->ops.cop2_notify)(state, op.opcode, val);

	return val;
}

static void lightrec_mfc_cb(struct lightrec_state *state, union code op)
{
	u32 rt = lightrec_mfc(state, op);

	if (op.i.op == OP_SWC2)
		state->temp_reg = rt;
	else if (op.r.rt)
		state->regs.gpr[op.r.rt] = rt;
}

static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
{
	u32 status, oldstatus, cause;

	switch (reg) {
	case 1:
	case 4:
	case 8:
	case 14:
	case 15:
		/* Those registers are read-only */
		return;
	default:
		break;
	}

	if (reg == 12) {
		status = state->regs.cp0[12];
		oldstatus = status;

		if (status & ~data & BIT(16)) {
			state->ops.enable_ram(state, true);
			lightrec_invalidate_all(state);
		} else if (~status & data & BIT(16)) {
			state->ops.enable_ram(state, false);
		}
	}

	if (reg == 13) {
		state->regs.cp0[13] &= ~0x300;
		state->regs.cp0[13] |= data & 0x300;
	} else {
		state->regs.cp0[reg] = data;
	}

	if (reg == 12 || reg == 13) {
		cause = state->regs.cp0[13];
		status = state->regs.cp0[12];

		/* Handle software interrupts */
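		/* (the trailing "& status" below additionally requires bit 0
		 * of the status register - IEc, the current interrupt enable
		 * bit - to be set) */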
		if ((!!(status & cause & 0x300)) & status)
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);

		/* Handle hardware interrupts */
		if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
	}
}

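/* Count the number of leading bits equal to the sign bit, as needed for the
 * GTE's LZCR result register (see lightrec_mtc2() below). For example:
 * count_leading_bits(0) == 32, count_leading_bits(-1) == 32,
 * count_leading_bits(1) == 31. */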
static u32 count_leading_bits(s32 data)
{
	u32 cnt = 33;

#ifdef __has_builtin
#if __has_builtin(__builtin_clrsb)
	return 1 + __builtin_clrsb(data);
#endif
#endif

	data = (data ^ (data >> 31)) << 1;

	do {
		cnt -= 1;
		data >>= 1;
	} while (data);

	return cnt;
}

static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
{
	switch (reg) {
	case 15:
		state->regs.cp2d[12] = state->regs.cp2d[13];
		state->regs.cp2d[13] = state->regs.cp2d[14];
		state->regs.cp2d[14] = data;
		break;
	case 28:
		state->regs.cp2d[9] = (data << 7) & 0xf80;
		state->regs.cp2d[10] = (data << 2) & 0xf80;
		state->regs.cp2d[11] = (data >> 3) & 0xf80;
		break;
	case 31:
		return;
	case 30:
		state->regs.cp2d[31] = count_leading_bits((s32) data);
		fallthrough;
	default:
		state->regs.cp2d[reg] = data;
		break;
	}
}

static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
{
	switch (reg) {
	case 4:
	case 12:
	case 20:
	case 26:
	case 27:
	case 29:
	case 30:
		store_u16(&state->regs.cp2c[reg], data);
		break;
	case 31:
		data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
		fallthrough;
	default:
		state->regs.cp2c[reg] = data;
		break;
	}
}

void lightrec_mtc(struct lightrec_state *state, union code op, u8 reg, u32 data)
{
	if (op.i.op == OP_CP0) {
		lightrec_mtc0(state, reg, data);
	} else {
		if (op.i.op == OP_LWC2 || op.r.rs != OP_CP2_BASIC_CTC2)
			lightrec_mtc2(state, reg, data);
		else
			lightrec_ctc2(state, reg, data);

		if (state->ops.cop2_notify)
			(*state->ops.cop2_notify)(state, op.opcode, data);
	}
}

static void lightrec_mtc_cb(struct lightrec_state *state, u32 arg)
{
	union code op = (union code) arg;
	u32 data;
	u8 reg;

	if (op.i.op == OP_LWC2) {
		data = state->temp_reg;
		reg = op.i.rt;
	} else {
		data = state->regs.gpr[op.r.rt];
		reg = op.r.rd;
	}

	lightrec_mtc(state, op, reg, data);
}

void lightrec_rfe(struct lightrec_state *state)
{
	u32 status;

	/* Read CP0 Status register (r12) */
	status = state->regs.cp0[12];

	/* Switch the bits */
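	/* (this pops the CP0 mode stack: the "previous" KU/IE bits [5:2]
	 * slide into the "current"/"previous" slots [3:0], while the "old"
	 * bits [5:4] are left unchanged) */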
	status = ((status & 0x3c) >> 2) | (status & ~0xf);

	/* Write it back */
	lightrec_mtc0(state, 12, status);
}

void lightrec_cp(struct lightrec_state *state, union code op)
{
	if (op.i.op == OP_CP0) {
		pr_err("Invalid CP opcode to coprocessor #0\n");
		return;
	}

	(*state->ops.cop2_op)(state, op.opcode);
}

static void lightrec_cp_cb(struct lightrec_state *state, u32 arg)
{
	lightrec_cp(state, (union code) arg);
}

static struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
{
	struct block *block = lightrec_find_block(state->block_cache, pc);
	u8 old_flags;

	if (block && lightrec_block_is_outdated(state, block)) {
		pr_debug("Block at "PC_FMT" is outdated!\n", block->pc);

		old_flags = block_set_flags(block, BLOCK_IS_DEAD);
		if (!(old_flags & BLOCK_IS_DEAD)) {
			/* Make sure the recompiler isn't processing the block
			 * we'll destroy */
			if (ENABLE_THREADED_COMPILER)
				lightrec_recompiler_remove(state->rec, block);

			lightrec_unregister_block(state->block_cache, block);
			remove_from_code_lut(state->block_cache, block);
			lightrec_free_block(state, block);
		}

		block = NULL;
	}

	if (!block) {
		block = lightrec_precompile_block(state, pc);
		if (!block) {
			pr_err("Unable to recompile block at "PC_FMT"\n", pc);
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
			return NULL;
		}

		lightrec_register_block(state->block_cache, block);
	}

	return block;
}

static void * get_next_block_func(struct lightrec_state *state, u32 pc)
{
	struct block *block;
	bool should_recompile;
	void *func;
	int err;

	do {
		func = lut_read(state, lut_offset(pc));
		if (func && func != state->get_next_block)
			break;

		block = lightrec_get_block(state, pc);

		if (unlikely(!block))
			break;

		if (OPT_REPLACE_MEMSET &&
		    block_has_flag(block, BLOCK_IS_MEMSET)) {
			func = state->memset_func;
			break;
		}

		should_recompile = block_has_flag(block, BLOCK_SHOULD_RECOMPILE) &&
				   !block_has_flag(block, BLOCK_NEVER_COMPILE) &&
				   !block_has_flag(block, BLOCK_IS_DEAD);

		if (unlikely(should_recompile)) {
			pr_debug("Block at "PC_FMT" should recompile\n", pc);

			if (ENABLE_THREADED_COMPILER) {
				lightrec_recompiler_add(state->rec, block);
			} else {
				err = lightrec_compile_block(state->cstate, block);
				if (err) {
					state->exit_flags = LIGHTREC_EXIT_NOMEM;
					return NULL;
				}
			}
		}

		if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
			func = lightrec_recompiler_run_first_pass(state, block, &pc);
		else
			func = block->function;

		if (likely(func))
			break;

		if (unlikely(block_has_flag(block, BLOCK_NEVER_COMPILE))) {
			pc = lightrec_emulate_block(state, block, pc);

		} else if (!ENABLE_THREADED_COMPILER) {
			/* Block wasn't compiled yet - run the interpreter */
			if (block_has_flag(block, BLOCK_FULLY_TAGGED))
				pr_debug("Block fully tagged, skipping first pass\n");
			else if (ENABLE_FIRST_PASS && likely(!should_recompile))
				pc = lightrec_emulate_block(state, block, pc);

			/* Then compile it using the profiled data */
			err = lightrec_compile_block(state->cstate, block);
			if (err) {
				state->exit_flags = LIGHTREC_EXIT_NOMEM;
				return NULL;
			}
		} else if (unlikely(block_has_flag(block, BLOCK_IS_DEAD))) {
			/*
			 * If the block is dead but has never been compiled,
			 * then its function pointer is NULL and we cannot
			 * execute the block. In that case, reap all the dead
			 * blocks now, and in the next loop we will create a
			 * new block.
			 */
			lightrec_reaper_reap(state->reaper);
		} else {
			lightrec_recompiler_add(state->rec, block);
		}
	} while (state->exit_flags == LIGHTREC_EXIT_NORMAL
		 && state->current_cycle < state->target_cycle);

	state->curr_pc = pc;
	return func;
}

static void * lightrec_alloc_code(struct lightrec_state *state, size_t size)
{
	void *code;

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	code = tlsf_malloc(state->tlsf, size);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);

	return code;
}

static void lightrec_realloc_code(struct lightrec_state *state,
				  void *ptr, size_t size)
{
	/* NOTE: 'size' MUST be smaller than the size specified during
	 * the allocation. */

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	tlsf_realloc(state->tlsf, ptr, size);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);
}

static void lightrec_free_code(struct lightrec_state *state, void *ptr)
{
	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	tlsf_free(state->tlsf, ptr);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);
}

static char lightning_code_data[0x80000];

static void * lightrec_emit_code(struct lightrec_state *state,
				 const struct block *block,
				 jit_state_t *_jit, unsigned int *size)
{
	bool has_code_buffer = ENABLE_CODE_BUFFER && state->tlsf;
	jit_word_t code_size, new_code_size;
	void *code;

	jit_realize();

	if (ENABLE_DISASSEMBLER)
		jit_set_data(lightning_code_data, sizeof(lightning_code_data), 0);
	else
		jit_set_data(NULL, 0, JIT_DISABLE_DATA | JIT_DISABLE_NOTE);

	if (has_code_buffer) {
		jit_get_code(&code_size);
		code = lightrec_alloc_code(state, (size_t) code_size);

		if (!code) {
			if (ENABLE_THREADED_COMPILER) {
				/* If we're using the threaded compiler, return
				 * an allocation error here. The threaded
				 * compiler will then empty its job queue and
				 * request a code flush using the reaper. */
				return NULL;
			}

			/* Remove outdated blocks, and try again */
			lightrec_remove_outdated_blocks(state->block_cache, block);

			pr_debug("Retrying to allocate %zu bytes...\n",
				 (size_t) code_size);

			code = lightrec_alloc_code(state, code_size);
			if (!code) {
				pr_err("Could not alloc even after removing old blocks!\n");
				return NULL;
			}
		}

		jit_set_code(code, code_size);
	}

	code = jit_emit();

	jit_get_code(&new_code_size);
	lightrec_register(MEM_FOR_CODE, new_code_size);

	if (has_code_buffer) {
		lightrec_realloc_code(state, code, (size_t) new_code_size);

		pr_debug("Creating code block at address 0x%" PRIxPTR ", "
			 "code size: %" PRIuPTR " new: %" PRIuPTR "\n",
			 (uintptr_t) code, code_size, new_code_size);
	}

	*size = (unsigned int) new_code_size;

	if (state->ops.code_inv)
		state->ops.code_inv(code, new_code_size);

	return code;
}

static struct block * generate_wrapper(struct lightrec_state *state)
{
	struct block *block;
	jit_state_t *_jit;
	unsigned int i;
	jit_node_t *addr[C_WRAPPERS_COUNT - 1];
	jit_node_t *to_end[C_WRAPPERS_COUNT - 1];
	u8 tmp = JIT_R1;

#ifdef __sh__
	/* On SH, GBR-relative loads target the r0 register.
	 * Use it as the temporary register to factorize the move to
	 * JIT_R1. */
	if (LIGHTREC_REG_STATE == _GBR)
		tmp = _R0;
#endif

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block)
		goto err_no_mem;

	_jit = jit_new_state();
	if (!_jit)
		goto err_free_block;

	jit_name("RW wrapper");
	jit_note(__FILE__, __LINE__);

	/* Wrapper entry point */
	jit_prolog();
	jit_tramp(256);

	/* Add entry points */
	for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
		jit_ldxi(tmp, LIGHTREC_REG_STATE,
			 offsetof(struct lightrec_state, c_wrappers[i]));
		to_end[i - 1] = jit_b();
		addr[i - 1] = jit_indirect();
	}

	jit_ldxi(tmp, LIGHTREC_REG_STATE,
		 offsetof(struct lightrec_state, c_wrappers[0]));

	for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
		jit_patch(to_end[i]);
	jit_movr(JIT_R1, tmp);

	jit_epilog();
	jit_prolog();

	/* Save all temporaries on stack */
	for (i = 0; i < NUM_TEMPS; i++) {
		if (i + FIRST_TEMP != 1) {
			jit_stxi(offsetof(struct lightrec_state, wrapper_regs[i]),
				 LIGHTREC_REG_STATE, JIT_R(i + FIRST_TEMP));
		}
	}

	jit_getarg(JIT_R2, jit_arg());

	jit_prepare();
	jit_pushargr(LIGHTREC_REG_STATE);
	jit_pushargr(JIT_R2);

	jit_ldxi_ui(JIT_R2, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, target_cycle));

	/* state->current_cycle = state->target_cycle - delta; */
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, LIGHTREC_REG_CYCLE);
	jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
		   LIGHTREC_REG_STATE, LIGHTREC_REG_CYCLE);

	/* Call the wrapper function */
	jit_finishr(JIT_R1);
	/* delta = state->target_cycle - state->current_cycle */
	jit_ldxi_ui(LIGHTREC_REG_CYCLE, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, current_cycle));
	jit_ldxi_ui(JIT_R1, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, target_cycle));
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R1, LIGHTREC_REG_CYCLE);

	/* Restore temporaries from stack */
	for (i = 0; i < NUM_TEMPS; i++) {
		if (i + FIRST_TEMP != 1) {
			jit_ldxi(JIT_R(i + FIRST_TEMP), LIGHTREC_REG_STATE,
				 offsetof(struct lightrec_state, wrapper_regs[i]));
		}
	}

	jit_ret();
	jit_epilog();

	block->_jit = _jit;
	block->opcode_list = NULL;
	block->flags = BLOCK_NO_OPCODE_LIST;
	block->nb_ops = 0;

	block->function = lightrec_emit_code(state, block, _jit,
					     &block->code_size);
	if (!block->function)
		goto err_free_block;

	state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;

	for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
		state->wrappers_eps[i] = jit_address(addr[i]);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Wrapper block:\n");
		jit_disassemble();
	}

	jit_clear_state();
	return block;

err_free_block:
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
	pr_err("Unable to compile wrapper: Out of memory\n");
	return NULL;
}

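/* Handler for blocks flagged BLOCK_IS_MEMSET (detected memset routines):
 * following the MIPS calling convention, $a0 (gpr[4]) holds the target
 * pointer and $a1 (gpr[5]) the number of 32-bit words to clear. Returns a
 * rough estimate of the cycles consumed. */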
static u32 lightrec_memset(struct lightrec_state *state)
{
	u32 kunseg_pc = kunseg(state->regs.gpr[4]);
	void *host;
	const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
	u32 length = state->regs.gpr[5] * 4;

	if (!map) {
		pr_err("Unable to find memory map for memset target address "PC_FMT"\n",
		       kunseg_pc);
		return 0;
	}

	pr_debug("Calling host memset, "PC_FMT" (host address 0x%"PRIxPTR") for %u bytes\n",
		 kunseg_pc, (uintptr_t)host, length);
	memset(host, 0, length);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate_map(state, map, kunseg_pc, length);

	/* Rough estimation of the number of cycles consumed */
	return 8 + 5 * ((length + 3) / 4);
}

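/* MIPS I has a one-instruction load delay: a value loaded in a branch's delay
 * slot must not be visible to the first opcode at the branch target if that
 * opcode reads the same register. Handle that case here; the loaded value was
 * stashed in state->temp_reg. */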
static u32 lightrec_check_load_delay(struct lightrec_state *state, u32 pc, u8 reg)
{
	struct block *block;
	union code first_op;

	first_op = lightrec_read_opcode(state, pc);

	if (likely(!opcode_reads_register(first_op, reg))) {
		state->regs.gpr[reg] = state->temp_reg;
	} else {
		block = lightrec_get_block(state, pc);
		if (unlikely(!block)) {
			pr_err("Unable to get block at "PC_FMT"\n", pc);
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
			pc = 0;
		} else {
			pc = lightrec_handle_load_delay(state, block, pc, reg);
		}
	}

	return pc;
}

static void update_cycle_counter_before_c(jit_state_t *_jit)
{
	/* update state->current_cycle */
	jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, target_cycle));
	jit_subr(JIT_R1, JIT_R2, LIGHTREC_REG_CYCLE);
	jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
		   LIGHTREC_REG_STATE, JIT_R1);
}

static void update_cycle_counter_after_c(jit_state_t *_jit)
{
	/* Recalc the delta */
	jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, current_cycle));
	jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, target_cycle));
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
}

static void sync_next_pc(jit_state_t *_jit)
{
	if (lightrec_store_next_pc()) {
		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
			    offsetof(struct lightrec_state, next_pc));
	}
}

static struct block * generate_dispatcher(struct lightrec_state *state)
{
	struct block *block;
	jit_state_t *_jit;
	jit_node_t *to_end, *loop, *addr, *addr2, *addr3, *addr4, *addr5, *jmp, *jmp2;
	unsigned int i;
	u32 offset;

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block)
		goto err_no_mem;

	_jit = jit_new_state();
	if (!_jit)
		goto err_free_block;

	jit_name("dispatcher");
	jit_note(__FILE__, __LINE__);

	jit_prolog();
	jit_frame(256);

	jit_getarg(LIGHTREC_REG_STATE, jit_arg());
	jit_getarg(JIT_V0, jit_arg());
	jit_getarg(JIT_V1, jit_arg());
	jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());

	/* Force all callee-saved registers to be pushed on the stack */
	for (i = 0; i < NUM_REGS; i++)
		jit_movr(JIT_V(i + FIRST_REG), JIT_V(i + FIRST_REG));

	loop = jit_label();

	/* Call the block's code */
	jit_jmpr(JIT_V1);

	if (OPT_REPLACE_MEMSET) {
		/* Blocks will jump here when they need to call
		 * lightrec_memset() */
		addr3 = jit_indirect();

		jit_movr(JIT_V1, LIGHTREC_REG_CYCLE);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);

		jit_finishi(lightrec_memset);
		jit_retval(LIGHTREC_REG_CYCLE);

		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
			    offsetof(struct lightrec_state, regs.gpr[31]));
		jit_subr(LIGHTREC_REG_CYCLE, JIT_V1, LIGHTREC_REG_CYCLE);

		if (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)
			jmp = jit_b();
	}

	if (OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* Blocks will jump here when they reach a branch that should
		 * be executed with the interpreter, passing the branch's PC
		 * in JIT_V0 and the address of the block in JIT_V1. */
		addr4 = jit_indirect();

		sync_next_pc(_jit);
		update_cycle_counter_before_c(_jit);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);
		jit_pushargr(JIT_V1);
		jit_pushargr(JIT_V0);
		jit_finishi(lightrec_emulate_block);

		jit_retval(JIT_V0);

		update_cycle_counter_after_c(_jit);

		if (OPT_HANDLE_LOAD_DELAYS)
			jmp2 = jit_b();
	}

	if (OPT_HANDLE_LOAD_DELAYS) {
		/* Blocks will jump here when they reach a branch with a load
		 * opcode in its delay slot. The delay slot has already been
		 * executed; the load value is in (state->temp_reg), and the
		 * register number is in JIT_V1.
		 * Jump to a C function which will evaluate the branch target's
		 * first opcode, to make sure that it does not read the register
		 * in question; and if it does, handle it accordingly. */
		addr5 = jit_indirect();

		sync_next_pc(_jit);
		update_cycle_counter_before_c(_jit);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);
		jit_pushargr(JIT_V0);
		jit_pushargr(JIT_V1);
		jit_finishi(lightrec_check_load_delay);

		jit_retval(JIT_V0);

		update_cycle_counter_after_c(_jit);
	}

	/* The block will jump here, with the number of cycles remaining in
	 * LIGHTREC_REG_CYCLE */
	addr2 = jit_indirect();

	sync_next_pc(_jit);

	if (OPT_HANDLE_LOAD_DELAYS && OPT_DETECT_IMPOSSIBLE_BRANCHES)
		jit_patch(jmp2);

	if (OPT_REPLACE_MEMSET
	    && (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)) {
		jit_patch(jmp);
	}

	/* Store back the next PC to the lightrec_state structure */
	offset = offsetof(struct lightrec_state, curr_pc);
	jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);

	/* Jump to end if state->target_cycle < state->current_cycle */
	to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);

	/* Convert next PC to KUNSEG and avoid mirrors */
	jit_andi(JIT_V1, JIT_V0, 0x10000000 | (RAM_SIZE - 1));
	jit_rshi_u(JIT_R1, JIT_V1, 28);
	jit_andi(JIT_R2, JIT_V0, BIOS_SIZE - 1);
	jit_addi(JIT_R2, JIT_R2, RAM_SIZE);
	jit_movnr(JIT_V1, JIT_R2, JIT_R1);
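
	/* (JIT_V1 now holds the LUT index: pc & (RAM_SIZE - 1) for RAM, or
	 * RAM_SIZE + (pc & (BIOS_SIZE - 1)) for the BIOS, which is selected
	 * when bit 28 survived the masking above) */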

	/* If possible, use the code LUT */
	if (!lut_is_32bit(state))
		jit_lshi(JIT_V1, JIT_V1, 1);
	jit_add_state(JIT_V1, JIT_V1);

	offset = offsetof(struct lightrec_state, code_lut);
	if (lut_is_32bit(state))
		jit_ldxi_ui(JIT_V1, JIT_V1, offset);
	else
		jit_ldxi(JIT_V1, JIT_V1, offset);

	/* If we get non-NULL, loop */
	jit_patch_at(jit_bnei(JIT_V1, 0), loop);

	/* The code LUT will be set to this address when the block at the target
	 * PC has been preprocessed but not yet compiled by the threaded
	 * recompiler */
	addr = jit_indirect();

	/* Slow path: call C function get_next_block_func() */

	if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* We may call the interpreter - update state->current_cycle */
		update_cycle_counter_before_c(_jit);
	}

	jit_prepare();
	jit_pushargr(LIGHTREC_REG_STATE);
	jit_pushargr(JIT_V0);

	/* Save the cycles register if needed */
	if (!(ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES))
		jit_movr(JIT_V0, LIGHTREC_REG_CYCLE);

	/* Get the next block */
	jit_finishi(&get_next_block_func);
	jit_retval(JIT_V1);

	if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* The interpreter may have updated state->current_cycle and
		 * state->target_cycle - recalc the delta */
		update_cycle_counter_after_c(_jit);
	} else {
		jit_movr(LIGHTREC_REG_CYCLE, JIT_V0);
	}

	/* Reset JIT_V0 to the next PC */
	jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, curr_pc));

	/* If we get non-NULL, loop */
	jit_patch_at(jit_bnei(JIT_V1, 0), loop);

	/* When exiting, the recompiled code will jump to that address */
	jit_note(__FILE__, __LINE__);
	jit_patch(to_end);

	jit_retr(LIGHTREC_REG_CYCLE);
	jit_epilog();

	block->_jit = _jit;
	block->opcode_list = NULL;
	block->flags = BLOCK_NO_OPCODE_LIST;
	block->nb_ops = 0;

	block->function = lightrec_emit_code(state, block, _jit,
					     &block->code_size);
	if (!block->function)
		goto err_free_block;

	state->eob_wrapper_func = jit_address(addr2);
	if (OPT_DETECT_IMPOSSIBLE_BRANCHES)
		state->interpreter_func = jit_address(addr4);
	if (OPT_HANDLE_LOAD_DELAYS)
		state->ds_check_func = jit_address(addr5);
	if (OPT_REPLACE_MEMSET)
		state->memset_func = jit_address(addr3);
	state->get_next_block = jit_address(addr);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Dispatcher block:\n");
		jit_disassemble();
	}

	/* We're done! */
	jit_clear_state();
	return block;

err_free_block:
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
	pr_err("Unable to compile dispatcher: Out of memory\n");
	return NULL;
}

union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
{
	void *host = NULL;

	lightrec_get_map(state, &host, kunseg(pc));

	const u32 *code = (u32 *)host;
	return (union code) LE32TOH(*code);
}

unsigned int lightrec_cycles_of_opcode(const struct lightrec_state *state,
				       union code code)
{
	return state->cycles_per_op;
}

void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
{
	struct opcode_list *list = container_of(ops, struct opcode_list, ops);

	lightrec_free(state, MEM_FOR_IR,
		      sizeof(*list) + list->nb_ops * sizeof(struct opcode),
		      list);
}

static unsigned int lightrec_get_mips_block_len(const u32 *src)
{
	unsigned int i;
	union code c;

	for (i = 1; ; i++) {
		c.opcode = LE32TOH(*src++);

		if (is_syscall(c))
			return i;

		if (is_unconditional_jump(c))
			return i + 1;
	}
}
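
/* (a MIPS block therefore ends at the first syscall opcode, or one opcode
 * past the first unconditional jump so that its delay slot is included) */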

static struct opcode * lightrec_disassemble(struct lightrec_state *state,
					    const u32 *src, unsigned int *len)
{
	struct opcode_list *list;
	unsigned int i, length;

	length = lightrec_get_mips_block_len(src);

	list = lightrec_malloc(state, MEM_FOR_IR,
			       sizeof(*list) + sizeof(struct opcode) * length);
	if (!list) {
		pr_err("Unable to allocate memory\n");
		return NULL;
	}

	list->nb_ops = (u16) length;

	for (i = 0; i < length; i++) {
		list->ops[i].opcode = LE32TOH(src[i]);
		list->ops[i].flags = 0;
	}

	*len = length * sizeof(u32);

	return list->ops;
}

static struct block * lightrec_precompile_block(struct lightrec_state *state,
						u32 pc)
{
	struct opcode *list;
	struct block *block;
	void *host, *addr;
	const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
	const u32 *code = (u32 *) host;
	unsigned int length;
	bool fully_tagged;
	u8 block_flags = 0;

	if (!map)
		return NULL;

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block) {
		pr_err("Unable to recompile block: Out of memory\n");
		return NULL;
	}

	list = lightrec_disassemble(state, code, &length);
	if (!list) {
		lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
		return NULL;
	}

	block->pc = pc;
	block->_jit = NULL;
	block->function = NULL;
	block->opcode_list = list;
	block->code = code;
	block->next = NULL;
	block->flags = 0;
	block->code_size = 0;
	block->precompile_date = state->current_cycle;
	block->nb_ops = length / sizeof(u32);

	lightrec_optimize(state, block);

	length = block->nb_ops * sizeof(u32);

	lightrec_register(MEM_FOR_MIPS_CODE, length);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
		lightrec_print_disassembly(block, code);
	}

	pr_debug("Block size: %hu opcodes\n", block->nb_ops);

	fully_tagged = lightrec_block_is_fully_tagged(block);
	if (fully_tagged)
		block_flags |= BLOCK_FULLY_TAGGED;

	if (block_flags)
		block_set_flags(block, block_flags);

	block->hash = lightrec_calculate_block_hash(block);

	if (OPT_REPLACE_MEMSET && block_has_flag(block, BLOCK_IS_MEMSET))
		addr = state->memset_func;
	else
		addr = state->get_next_block;
	lut_write(state, lut_offset(pc), addr);

	pr_debug("Blocks created: %u\n", ++state->nb_precompile);

	return block;
}

static bool lightrec_block_is_fully_tagged(const struct block *block)
{
	const struct opcode *op;
	unsigned int i;

	for (i = 0; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		/* If we have one branch that must be emulated, we cannot trash
		 * the opcode list. */
		if (should_emulate(op))
			return false;

		/* Check all loads/stores of the opcode list and mark the
		 * block as fully compiled if they all have been tagged. */
		switch (op->c.i.op) {
		case OP_LB:
		case OP_LH:
		case OP_LWL:
		case OP_LW:
		case OP_LBU:
		case OP_LHU:
		case OP_LWR:
		case OP_SB:
		case OP_SH:
		case OP_SWL:
		case OP_SW:
		case OP_SWR:
		case OP_LWC2:
		case OP_SWC2:
			if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
				return false;
			fallthrough;
		default:
			continue;
		}
	}

	return true;
}

static void lightrec_reap_block(struct lightrec_state *state, void *data)
{
	struct block *block = data;

	pr_debug("Reap dead block at "PC_FMT"\n", block->pc);
	lightrec_unregister_block(state->block_cache, block);
	lightrec_free_block(state, block);
}

static void lightrec_reap_jit(struct lightrec_state *state, void *data)
{
	_jit_destroy_state(data);
}

static void lightrec_free_function(struct lightrec_state *state, void *fn)
{
	if (ENABLE_CODE_BUFFER && state->tlsf) {
		pr_debug("Freeing code block at 0x%" PRIxPTR "\n", (uintptr_t) fn);
		lightrec_free_code(state, fn);
	}
}

static void lightrec_reap_function(struct lightrec_state *state, void *data)
{
	lightrec_free_function(state, data);
}

static void lightrec_reap_opcode_list(struct lightrec_state *state, void *data)
{
	lightrec_free_opcode_list(state, data);
}

int lightrec_compile_block(struct lightrec_cstate *cstate,
			   struct block *block)
{
	struct lightrec_state *state = cstate->state;
	struct lightrec_branch_target *target;
	bool fully_tagged = false;
	struct block *block2;
	struct opcode *elm;
	jit_state_t *_jit, *oldjit;
	jit_node_t *start_of_block;
	bool skip_next = false;
	void *old_fn, *new_fn;
	size_t old_code_size;
	unsigned int i, j;
	u8 old_flags;
	u32 offset;

	fully_tagged = lightrec_block_is_fully_tagged(block);
	if (fully_tagged)
		block_set_flags(block, BLOCK_FULLY_TAGGED);

	_jit = jit_new_state();
	if (!_jit)
		return -ENOMEM;

	oldjit = block->_jit;
	old_fn = block->function;
	old_code_size = block->code_size;
	block->_jit = _jit;

	lightrec_regcache_reset(cstate->reg_cache);

	if (OPT_PRELOAD_PC && (block->flags & BLOCK_PRELOAD_PC))
		lightrec_preload_pc(cstate->reg_cache, _jit);

	cstate->cycles = 0;
	cstate->nb_local_branches = 0;
	cstate->nb_targets = 0;
	cstate->no_load_delay = false;

	jit_prolog();
	jit_tramp(256);

	start_of_block = jit_label();

	for (i = 0; i < block->nb_ops; i++) {
		elm = &block->opcode_list[i];

		if (skip_next) {
			skip_next = false;
			continue;
		}

		if (should_emulate(elm)) {
			pr_debug("Branch at offset 0x%x will be emulated\n",
				 i << 2);

			lightrec_emit_jump_to_interpreter(cstate, block, i);
			skip_next = !op_flag_no_ds(elm->flags);
		} else {
			lightrec_rec_opcode(cstate, block, i);
			skip_next = !op_flag_no_ds(elm->flags) && has_delay_slot(elm->c);
#if _WIN32
			/* FIXME: GNU Lightning on Windows seems to use our
			 * mapped registers as temporaries. Until the actual bug
			 * is found and fixed, unconditionally mark our
			 * registers as live here. */
			lightrec_regcache_mark_live(cstate->reg_cache, _jit);
#endif
		}

		cstate->cycles += lightrec_cycles_of_opcode(state, elm->c);
	}

	for (i = 0; i < cstate->nb_local_branches; i++) {
		struct lightrec_branch *branch = &cstate->local_branches[i];

		pr_debug("Patch local branch to offset 0x%x\n",
			 branch->target << 2);

		if (branch->target == 0) {
			jit_patch_at(branch->branch, start_of_block);
			continue;
		}

		for (j = 0; j < cstate->nb_targets; j++) {
			if (cstate->targets[j].offset == branch->target) {
				jit_patch_at(branch->branch,
					     cstate->targets[j].label);
				break;
			}
		}

		if (j == cstate->nb_targets)
			pr_err("Unable to find branch target\n");
	}

	jit_ret();
	jit_epilog();

	new_fn = lightrec_emit_code(state, block, _jit, &block->code_size);
	if (!new_fn) {
		if (!ENABLE_THREADED_COMPILER)
			pr_err("Unable to compile block!\n");
		block->_jit = oldjit;
		jit_clear_state();
		_jit_destroy_state(_jit);
		return -ENOMEM;
	}

	/* Pause the reaper, because lightrec_reset_lut_offset() may try to set
	 * the old block->function pointer to the code LUT. */
	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_pause(state->reaper);

	block->function = new_fn;
	block_clear_flags(block, BLOCK_SHOULD_RECOMPILE);

	/* Add compiled function to the LUT */
	lut_write(state, lut_offset(block->pc), block->function);

	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_continue(state->reaper);

	/* Detect old blocks that have been covered by the new one */
	for (i = 0; i < cstate->nb_targets; i++) {
		target = &cstate->targets[i];

		if (!target->offset)
			continue;

		offset = block->pc + target->offset * sizeof(u32);

		/* Pause the reaper while we search for the block until we set
		 * the BLOCK_IS_DEAD flag, otherwise the block may be removed
		 * under our feet. */
		if (ENABLE_THREADED_COMPILER)
			lightrec_reaper_pause(state->reaper);

		block2 = lightrec_find_block(state->block_cache, offset);
		if (block2) {
			/* No need to check if block2 is compilable - it must
			 * be, otherwise block wouldn't be compilable either */

			/* Set the "block dead" flag to prevent the dynarec from
			 * recompiling this block */
			old_flags = block_set_flags(block2, BLOCK_IS_DEAD);
		}

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_continue(state->reaper);

			/* If block2 was pending for compilation, cancel it.
			 * If it's being compiled right now, wait until it
			 * finishes. */
			if (block2)
				lightrec_recompiler_remove(state->rec, block2);
		}

		/* We know from now on that block2 (if present) isn't going to
		 * be compiled. We can override the LUT entry with our new
		 * block's entry point. */
		offset = lut_offset(block->pc) + target->offset;
		lut_write(state, offset, jit_address(target->label));

		if (block2) {
			pr_debug("Reap block 0x%08x as it's covered by block "
				 "0x%08x\n", block2->pc, block->pc);

			/* Finally, reap the block. */
			if (!ENABLE_THREADED_COMPILER) {
				lightrec_unregister_block(state->block_cache, block2);
				lightrec_free_block(state, block2);
			} else if (!(old_flags & BLOCK_IS_DEAD)) {
				lightrec_reaper_add(state->reaper,
						    lightrec_reap_block,
						    block2);
			}
		}
	}

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
		jit_disassemble();
	}

	jit_clear_state();

	if (fully_tagged)
		old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

	if (fully_tagged && !(old_flags & BLOCK_NO_OPCODE_LIST)) {
		pr_debug("Block "PC_FMT" is fully tagged"
			 " - free opcode list\n", block->pc);

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_opcode_list,
					    block->opcode_list);
		} else {
			lightrec_free_opcode_list(state, block->opcode_list);
		}
	}

	if (oldjit) {
		pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
			 block->pc);

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_jit, oldjit);
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_function, old_fn);
		} else {
			_jit_destroy_state(oldjit);
			lightrec_free_function(state, old_fn);
		}

		lightrec_unregister(MEM_FOR_CODE, old_code_size);
	}

	pr_debug("Blocks compiled: %u\n", ++state->nb_compile);

	return 0;
}

static void lightrec_print_info(struct lightrec_state *state)
{
	if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
		pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
			"MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
			lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
			lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
			lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
			lightrec_get_total_mem_usage() / 1024,
			lightrec_get_average_ipi());
		state->old_cycle_counter = state->current_cycle & ~0xfffffff;
	}
}

u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
{
	s32 (*func)(struct lightrec_state *, u32, void *, s32) = (void *)state->dispatcher->function;
	void *block_trace;
	s32 cycles_delta;

	state->exit_flags = LIGHTREC_EXIT_NORMAL;

	/* Handle the cycle counter overflowing */
	if (unlikely(target_cycle < state->current_cycle))
		target_cycle = UINT_MAX;

	state->target_cycle = target_cycle;
	state->curr_pc = pc;

	block_trace = get_next_block_func(state, pc);
	if (block_trace) {
		cycles_delta = state->target_cycle - state->current_cycle;

		cycles_delta = (*func)(state, state->curr_pc,
				       block_trace, cycles_delta);

		state->current_cycle = state->target_cycle - cycles_delta;
	}

	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_reap(state->reaper);

	if (LOG_LEVEL >= INFO_L)
		lightrec_print_info(state);

	return state->curr_pc;
}

u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc,
			     u32 target_cycle)
{
	struct block *block;

	state->exit_flags = LIGHTREC_EXIT_NORMAL;
	state->target_cycle = target_cycle;

	do {
		block = lightrec_get_block(state, pc);
		if (!block)
			break;

		pc = lightrec_emulate_block(state, block, pc);

		if (ENABLE_THREADED_COMPILER)
			lightrec_reaper_reap(state->reaper);
	} while (state->current_cycle < state->target_cycle);

	if (LOG_LEVEL >= INFO_L)
		lightrec_print_info(state);

	return pc;
}

void lightrec_free_block(struct lightrec_state *state, struct block *block)
{
	u8 old_flags;

	lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
	old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

	if (!(old_flags & BLOCK_NO_OPCODE_LIST))
		lightrec_free_opcode_list(state, block->opcode_list);
	if (block->_jit)
		_jit_destroy_state(block->_jit);
	if (block->function) {
		lightrec_free_function(state, block->function);
		lightrec_unregister(MEM_FOR_CODE, block->code_size);
	}
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
}

struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
{
	struct lightrec_cstate *cstate;

	cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
	if (!cstate)
		return NULL;

	cstate->reg_cache = lightrec_regcache_init(state);
	if (!cstate->reg_cache) {
		lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
		return NULL;
	}

	cstate->state = state;

	return cstate;
}

void lightrec_free_cstate(struct lightrec_cstate *cstate)
{
	lightrec_free_regcache(cstate->reg_cache);
	lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
}

struct lightrec_state * lightrec_init(char *argv0,
				      const struct lightrec_mem_map *map,
				      size_t nb,
				      const struct lightrec_ops *ops)
{
	const struct lightrec_mem_map *codebuf_map = &map[PSX_MAP_CODE_BUFFER];
	struct lightrec_state *state;
	uintptr_t addr;
	void *tlsf = NULL;
	bool with_32bit_lut = false;
	size_t lut_size;

	/* Sanity-check ops */
	if (!ops || !ops->cop2_op || !ops->enable_ram) {
		pr_err("Missing callbacks in lightrec_ops structure\n");
		return NULL;
	}

	if (ops->cop2_notify)
		pr_debug("Optional cop2_notify callback in lightrec_ops\n");
	else
		pr_debug("No optional cop2_notify callback in lightrec_ops\n");

	if (ENABLE_CODE_BUFFER && nb > PSX_MAP_CODE_BUFFER
	    && codebuf_map->address) {
		tlsf = tlsf_create_with_pool(codebuf_map->address,
					     codebuf_map->length);
		if (!tlsf) {
			pr_err("Unable to initialize code buffer\n");
			return NULL;
		}

		if (__WORDSIZE == 64) {
			addr = (uintptr_t) codebuf_map->address + codebuf_map->length - 1;
			with_32bit_lut = addr == (u32) addr;
		}
	}

	if (with_32bit_lut)
		lut_size = CODE_LUT_SIZE * 4;
	else
		lut_size = CODE_LUT_SIZE * sizeof(void *);

	init_jit(argv0);

	state = calloc(1, sizeof(*state) + lut_size);
	if (!state)
		goto err_finish_jit;

	lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) + lut_size);

	state->tlsf = tlsf;
	state->with_32bit_lut = with_32bit_lut;
	state->in_delay_slot_n = 0xff;
	state->cycles_per_op = 2;

	state->block_cache = lightrec_blockcache_init(state);
	if (!state->block_cache)
		goto err_free_state;

	if (ENABLE_THREADED_COMPILER) {
		state->rec = lightrec_recompiler_init(state);
		if (!state->rec)
			goto err_free_block_cache;

		state->reaper = lightrec_reaper_init(state);
		if (!state->reaper)
			goto err_free_recompiler;
	} else {
		state->cstate = lightrec_create_cstate(state);
		if (!state->cstate)
			goto err_free_block_cache;
	}

	state->nb_maps = nb;
	state->maps = map;

	memcpy(&state->ops, ops, sizeof(*ops));

	state->dispatcher = generate_dispatcher(state);
	if (!state->dispatcher)
		goto err_free_reaper;

	state->c_wrapper_block = generate_wrapper(state);
	if (!state->c_wrapper_block)
		goto err_free_dispatcher;

	state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
	state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
	state->c_wrappers[C_WRAPPER_MFC] = lightrec_mfc_cb;
	state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
	state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;

	map = &state->maps[PSX_MAP_BIOS];
	state->offset_bios = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_SCRATCH_PAD];
	state->offset_scratch = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_HW_REGISTERS];
	state->offset_io = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
	state->offset_ram = (uintptr_t)map->address - map->pc;

	if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
	    state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
	    state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
		state->mirrors_mapped = true;

	if (state->offset_bios == 0 &&
	    state->offset_scratch == 0 &&
	    state->offset_ram == 0 &&
	    state->offset_io == 0 &&
	    state->mirrors_mapped) {
		pr_info("Memory map is perfect. Emitted code will be best.\n");
	} else {
		pr_info("Memory map is sub-par. Emitted code will be slow.\n");
	}

	if (state->with_32bit_lut)
		pr_info("Using 32-bit LUT\n");

	return state;

err_free_dispatcher:
	lightrec_free_block(state, state->dispatcher);
err_free_reaper:
	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_destroy(state->reaper);
err_free_recompiler:
	if (ENABLE_THREADED_COMPILER)
		lightrec_free_recompiler(state->rec);
	else
		lightrec_free_cstate(state->cstate);
err_free_block_cache:
	lightrec_free_block_cache(state->block_cache);
err_free_state:
	lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
			    lut_elm_size(state) * CODE_LUT_SIZE);
	free(state);
err_finish_jit:
	finish_jit();
	if (ENABLE_CODE_BUFFER && tlsf)
		tlsf_destroy(tlsf);
	return NULL;
}

void lightrec_destroy(struct lightrec_state *state)
{
	/* Force an info print on destroy */
	state->current_cycle = ~state->current_cycle;
	lightrec_print_info(state);

	lightrec_free_block_cache(state->block_cache);
	lightrec_free_block(state, state->dispatcher);
	lightrec_free_block(state, state->c_wrapper_block);

	if (ENABLE_THREADED_COMPILER) {
		lightrec_free_recompiler(state->rec);
		lightrec_reaper_destroy(state->reaper);
	} else {
		lightrec_free_cstate(state->cstate);
	}

	finish_jit();
	if (ENABLE_CODE_BUFFER && state->tlsf)
		tlsf_destroy(state->tlsf);

	lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
			    lut_elm_size(state) * CODE_LUT_SIZE);
	free(state);
}

void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
{
	u32 kaddr = kunseg(addr & ~0x3);
	enum psx_map idx = lightrec_get_map_idx(state, kaddr);

	switch (idx) {
	case PSX_MAP_MIRROR1:
	case PSX_MAP_MIRROR2:
	case PSX_MAP_MIRROR3:
		/* Handle mirrors */
		kaddr &= RAM_SIZE - 1;
		fallthrough;
	case PSX_MAP_KERNEL_USER_RAM:
		break;
	default:
		return;
	}

	memset(lut_address(state, lut_offset(kaddr)), 0,
	       ((len + 3) / 4) * lut_elm_size(state));
}
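
/* (each LUT entry covers one 32-bit word of RAM, so invalidating len bytes
 * clears ((len + 3) / 4) entries starting at the word containing addr) */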

void lightrec_invalidate_all(struct lightrec_state *state)
{
	memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
}

void lightrec_set_unsafe_opt_flags(struct lightrec_state *state, u32 flags)
{
	if ((flags ^ state->opt_flags) & LIGHTREC_OPT_INV_DMA_ONLY)
		lightrec_invalidate_all(state);

	state->opt_flags = flags;
}

void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
	if (flags != LIGHTREC_EXIT_NORMAL) {
		state->exit_flags |= flags;
		state->target_cycle = state->current_cycle;
	}
}

u32 lightrec_exit_flags(struct lightrec_state *state)
{
	return state->exit_flags;
}

u32 lightrec_current_cycle_count(const struct lightrec_state *state)
{
	return state->current_cycle;
}

void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
{
	state->current_cycle = cycles;

	if (state->target_cycle < cycles)
		state->target_cycle = cycles;
}

void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
{
	if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
		if (cycles < state->current_cycle)
			cycles = state->current_cycle;

		state->target_cycle = cycles;
	}
}

struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
{
	return &state->regs;
}

void lightrec_set_cycles_per_opcode(struct lightrec_state *state, u32 cycles)
{
	state->cycles_per_op = cycles;
}