// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "blockcache.h"
#include "debug.h"
#include "disassembler.h"
#include "emitter.h"
#include "interpreter.h"
#include "lightrec-config.h"
#include "lightning-wrapper.h"
#include "lightrec.h"
#include "memmanager.h"
#include "reaper.h"
#include "recompiler.h"
#include "regcache.h"
#include "optimizer.h"
#include "tlsf/tlsf.h"

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#if ENABLE_THREADED_COMPILER
#include <stdatomic.h>
#endif
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static struct block * lightrec_precompile_block(struct lightrec_state *state,
						u32 pc);
static bool lightrec_block_is_fully_tagged(const struct block *block);

static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);

static void lightrec_reap_block(struct lightrec_state *state, void *data);

static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u32 data)
{
	*(u8 *)host = (u8)data;

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 1);
}

static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u32 data)
{
	*(u16 *)host = HTOLE16((u16)data);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 2);
}

static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u32 data)
{
	*(u32 *)host = HTOLE32(data);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 4);
}

static u8 lightrec_default_lb(struct lightrec_state *state,
			      u32 opcode, void *host, u32 addr)
{
	return *(u8 *)host;
}

static u16 lightrec_default_lh(struct lightrec_state *state,
			       u32 opcode, void *host, u32 addr)
{
	return LE16TOH(*(u16 *)host);
}

static u32 lightrec_default_lw(struct lightrec_state *state,
			       u32 opcode, void *host, u32 addr)
{
	return LE32TOH(*(u32 *)host);
}

static u32 lightrec_default_lwu(struct lightrec_state *state,
				u32 opcode, void *host, u32 addr)
{
	u32 val;

	memcpy(&val, host, 4);

	return LE32TOH(val);
}

static void lightrec_default_swu(struct lightrec_state *state, u32 opcode,
				 void *host, u32 addr, u32 data)
{
	data = HTOLE32(data);

	memcpy(host, &data, 4);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr & ~0x3, 8);
}

static const struct lightrec_mem_map_ops lightrec_default_ops = {
	.sb = lightrec_default_sb,
	.sh = lightrec_default_sh,
	.sw = lightrec_default_sw,
	.lb = lightrec_default_lb,
	.lh = lightrec_default_lh,
	.lw = lightrec_default_lw,
	.lwu = lightrec_default_lwu,
	.swu = lightrec_default_swu,
};
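
/* Memory maps that cover memory-mapped hardware can provide their own ops
 * table instead of relying on lightrec_default_ops. A minimal sketch of such
 * a table (illustrative only - hw_read32/hw_write32 are hypothetical
 * host-side handlers, not part of this file):
 *
 *   static u32 hw_lw(struct lightrec_state *state, u32 opcode,
 *                    void *host, u32 addr)
 *   {
 *           return hw_read32(addr);
 *   }
 *
 *   static void hw_sw(struct lightrec_state *state, u32 opcode,
 *                     void *host, u32 addr, u32 data)
 *   {
 *           hw_write32(addr, data);
 *   }
 *
 *   static const struct lightrec_mem_map_ops hw_ops = {
 *           .lw = hw_lw,
 *           .sw = hw_sw,
 *   };
 *
 * lightrec_rw() below dispatches to map->ops when the map defines one, and
 * falls back to lightrec_default_ops for plain memory. */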

static void __segfault_cb(struct lightrec_state *state, u32 addr,
			  const struct block *block)
{
	lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
	pr_err("Segmentation fault in recompiled code: invalid "
	       "load/store at address "PC_FMT"\n", addr);
	if (block)
		pr_err("Was executing block "PC_FMT"\n", block->pc);
}

static void lightrec_swl(struct lightrec_state *state,
			 const struct lightrec_mem_map_ops *ops,
			 u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = shift < 3 ? GENMASK(31, (shift + 1) * 8) : 0;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	data = (data >> ((3 - shift) * 8)) | (old_data & mask);

	ops->sw(state, opcode, host, addr, data);
}

static void lightrec_swr(struct lightrec_state *state,
			 const struct lightrec_mem_map_ops *ops,
			 u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = (1 << (shift * 8)) - 1;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	data = (data << (shift * 8)) | (old_data & mask);

	ops->sw(state, opcode, host, addr, data);
}
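
/* Worked example of the read-modify-write above: with the old word in
 * memory being 0x11223344, rt = 0xAABBCCDD and addr & 3 == 1:
 *
 *   SWL: shift = 1, mask = GENMASK(31, 16) = 0xffff0000
 *        data = (0xAABBCCDD >> 16) | (0x11223344 & 0xffff0000)
 *             = 0x1122AABB
 *
 *   SWR: shift = 1, mask = 0x000000ff
 *        data = (0xAABBCCDD << 8) | (0x11223344 & 0x000000ff)
 *             = 0xBBCCDD44
 */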

static void lightrec_swc2(struct lightrec_state *state, union code op,
			  const struct lightrec_mem_map_ops *ops,
			  void *host, u32 addr)
{
	u32 data = lightrec_mfc2(state, op.i.rt);

	ops->sw(state, op.opcode, host, addr, data);
}

static u32 lightrec_lwl(struct lightrec_state *state,
			const struct lightrec_mem_map_ops *ops,
			u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = (1 << (24 - shift * 8)) - 1;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	return (data & mask) | (old_data << (24 - shift * 8));
}

static u32 lightrec_lwr(struct lightrec_state *state,
			const struct lightrec_mem_map_ops *ops,
			u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = shift ? GENMASK(31, 32 - shift * 8) : 0;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	return (data & mask) | (old_data >> (shift * 8));
}
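
/* Mirror example for the unaligned loads: with the word in memory being
 * 0x11223344, the current register value rt = 0xAABBCCDD and addr & 3 == 1:
 *
 *   LWL: mask = 0x0000ffff
 *        result = (0xAABBCCDD & 0x0000ffff) | (0x11223344 << 16)
 *               = 0x3344CCDD
 *
 *   LWR: mask = GENMASK(31, 24) = 0xff000000
 *        result = (0xAABBCCDD & 0xff000000) | (0x11223344 >> 8)
 *               = 0xAA112233
 */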

static void lightrec_lwc2(struct lightrec_state *state, union code op,
			  const struct lightrec_mem_map_ops *ops,
			  void *host, u32 addr)
{
	u32 data = ops->lw(state, op.opcode, host, addr);

	lightrec_mtc2(state, op.i.rt, data);
}

static void lightrec_invalidate_map(struct lightrec_state *state,
		const struct lightrec_mem_map *map, u32 addr, u32 len)
{
	if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
		memset(lut_address(state, lut_offset(addr)), 0,
		       ((len + 3) / 4) * lut_elm_size(state));
	}
}

static enum psx_map
lightrec_get_map_idx(struct lightrec_state *state, u32 kaddr)
{
	const struct lightrec_mem_map *map;
	unsigned int i;

	for (i = 0; i < state->nb_maps; i++) {
		map = &state->maps[i];

		if (kaddr >= map->pc && kaddr < map->pc + map->length)
			return (enum psx_map) i;
	}

	return PSX_MAP_UNKNOWN;
}

const struct lightrec_mem_map *
lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
{
	const struct lightrec_mem_map *map;
	enum psx_map idx;
	u32 addr;

	idx = lightrec_get_map_idx(state, kaddr);
	if (idx == PSX_MAP_UNKNOWN)
		return NULL;

	map = &state->maps[idx];
	addr = kaddr - map->pc;

	while (map->mirror_of)
		map = map->mirror_of;

	if (host)
		*host = map->address + addr;

	return map;
}
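
/* Usage note: lightrec_get_map() expects an address that has already been
 * passed through kunseg(). Mirrored maps are resolved by walking mirror_of,
 * so with the standard PSX layout a KSEG0 RAM address resolves to a host
 * pointer into the one underlying RAM buffer:
 *
 *   void *host;
 *   const struct lightrec_mem_map *map =
 *           lightrec_get_map(state, &host, kunseg(0x80100000));
 *   // host now points 0x100000 bytes into the emulated RAM buffer
 */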

u32 lightrec_rw(struct lightrec_state *state, union code op, u32 base,
		u32 data, u32 *flags, struct block *block, u16 offset)
{
	const struct lightrec_mem_map *map;
	const struct lightrec_mem_map_ops *ops;
	u32 opcode = op.opcode;
	bool was_tagged = true;
	u16 old_flags;
	u32 addr;
	void *host;

	addr = kunseg(base + (s16) op.i.imm);

	map = lightrec_get_map(state, &host, addr);
	if (!map) {
		__segfault_cb(state, addr, block);
		return 0;
	}

	if (flags)
		was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(*flags);

	if (likely(!map->ops)) {
		if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags)) {
			/* Force parallel port accesses as HW accesses, because
			 * the direct-I/O emitters can't differentiate them. */
			if (unlikely(map == &state->maps[PSX_MAP_PARALLEL_PORT]))
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
			/* If the base register is 0x0, be extra suspicious.
			 * Some games (e.g. Sled Storm) actually do segmentation
			 * faults by using uninitialized pointers, which are
			 * later initialized to point to hardware registers. */
			else if (op.i.rs && base == 0x0)
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
			else
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
		}

		ops = &lightrec_default_ops;
	} else if (flags &&
		   LIGHTREC_FLAGS_GET_IO_MODE(*flags) == LIGHTREC_IO_DIRECT_HW) {
		ops = &lightrec_default_ops;
	} else {
		if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
			*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);

		ops = map->ops;
	}

	if (!was_tagged) {
		old_flags = block_set_flags(block, BLOCK_SHOULD_RECOMPILE);

		if (!(old_flags & BLOCK_SHOULD_RECOMPILE)) {
			pr_debug("Opcode of block at "PC_FMT" has been tagged"
				 " - flag for recompilation\n", block->pc);

			lut_write(state, lut_offset(block->pc), NULL);
		}
	}

	switch (op.i.op) {
	case OP_SB:
		ops->sb(state, opcode, host, addr, data);
		return 0;
	case OP_SH:
		ops->sh(state, opcode, host, addr, data);
		return 0;
	case OP_SWL:
		lightrec_swl(state, ops, opcode, host, addr, data);
		return 0;
	case OP_SWR:
		lightrec_swr(state, ops, opcode, host, addr, data);
		return 0;
	case OP_SW:
		ops->sw(state, opcode, host, addr, data);
		return 0;
	case OP_SWC2:
		lightrec_swc2(state, op, ops, host, addr);
		return 0;
	case OP_LB:
		return (s32) (s8) ops->lb(state, opcode, host, addr);
	case OP_LBU:
		return ops->lb(state, opcode, host, addr);
	case OP_LH:
		return (s32) (s16) ops->lh(state, opcode, host, addr);
	case OP_LHU:
		return ops->lh(state, opcode, host, addr);
	case OP_LWC2:
		lightrec_lwc2(state, op, ops, host, addr);
		return 0;
	case OP_LWL:
		return lightrec_lwl(state, ops, opcode, host, addr, data);
	case OP_LWR:
		return lightrec_lwr(state, ops, opcode, host, addr, data);
	case OP_META_LWU:
		return ops->lwu(state, opcode, host, addr);
	case OP_META_SWU:
		ops->swu(state, opcode, host, addr, data);
		return 0;
	case OP_LW:
	default:
		return ops->lw(state, opcode, host, addr);
	}
}

static void lightrec_rw_helper(struct lightrec_state *state,
			       union code op, u32 *flags,
			       struct block *block, u16 offset)
{
	u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
			      state->regs.gpr[op.i.rt], flags, block, offset);

	switch (op.i.op) {
	case OP_LB:
	case OP_LBU:
	case OP_LH:
	case OP_LHU:
	case OP_LWL:
	case OP_LWR:
	case OP_LW:
	case OP_META_LWU:
		if (OPT_HANDLE_LOAD_DELAYS && unlikely(!state->in_delay_slot_n)) {
			state->temp_reg = ret;
			state->in_delay_slot_n = 0xff;
		} else if (op.i.rt) {
			state->regs.gpr[op.i.rt] = ret;
		}
		fallthrough;
	default:
		break;
	}
}

static void lightrec_rw_cb(struct lightrec_state *state, u32 arg)
{
	lightrec_rw_helper(state, (union code) arg, NULL, NULL, 0);
}

static void lightrec_rw_generic_cb(struct lightrec_state *state, u32 arg)
{
	struct block *block;
	struct opcode *op;
	u16 offset = (u16)arg;

	block = lightrec_find_block_from_lut(state->block_cache,
					     arg >> 16, state->curr_pc);
	if (unlikely(!block)) {
		pr_err("rw_generic: No block found in LUT for "PC_FMT" offset 0x%"PRIx16"\n",
		       state->curr_pc, offset);
		lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
		return;
	}

	op = &block->opcode_list[offset];
	lightrec_rw_helper(state, op->c, &op->flags, block, offset);
}

static u32 clamp_s32(s32 val, s32 min, s32 max)
{
	return val < min ? min : val > max ? max : val;
}

static u16 load_u16(u32 *ptr)
{
	return ((struct u16x2 *) ptr)->l;
}

static void store_u16(u32 *ptr, u16 value)
{
	((struct u16x2 *) ptr)->l = value;
}

static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
{
	s16 gteir1, gteir2, gteir3;

	switch (reg) {
	case 1:
	case 3:
	case 5:
	case 8:
	case 9:
	case 10:
	case 11:
		return (s32)(s16) load_u16(&state->regs.cp2d[reg]);
	case 7:
	case 16:
	case 17:
	case 18:
	case 19:
		return load_u16(&state->regs.cp2d[reg]);
	case 28:
	case 29:
		gteir1 = (s16) load_u16(&state->regs.cp2d[9]);
		gteir2 = (s16) load_u16(&state->regs.cp2d[10]);
		gteir3 = (s16) load_u16(&state->regs.cp2d[11]);

		return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
		       clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
		       clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
	case 15:
		reg = 14;
		fallthrough;
	default:
		return state->regs.cp2d[reg];
	}
}
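
/* The case 28/29 path above rebuilds the IRGB/ORGB value on the fly: each
 * GTE IR register is scaled down by 2^7 and saturated to 5 bits, then the
 * three fields are packed into a 15-bit color. For example, IR1 = 0x0f80,
 * IR2 = 0x0100 and IR3 = -1 yield:
 *
 *   clamp(0x0f80 >> 7) = 0x1f, clamp(0x0100 >> 7) = 0x02, clamp(-1 >> 7) = 0
 *   result = 0x1f | (0x02 << 5) | (0 << 10) = 0x005f
 */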

u32 lightrec_mfc(struct lightrec_state *state, union code op)
{
	u32 val;

	if (op.i.op == OP_CP0)
		return state->regs.cp0[op.r.rd];

	if (op.i.op == OP_SWC2) {
		val = lightrec_mfc2(state, op.i.rt);
	} else if (op.r.rs == OP_CP2_BASIC_MFC2)
		val = lightrec_mfc2(state, op.r.rd);
	else {
		val = state->regs.cp2c[op.r.rd];

		switch (op.r.rd) {
		case 4:
		case 12:
		case 20:
		case 26:
		case 27:
		case 29:
		case 30:
			val = (u32)(s16)val;
			fallthrough;
		default:
			break;
		}
	}

	if (state->ops.cop2_notify)
		(*state->ops.cop2_notify)(state, op.opcode, val);

	return val;
}

static void lightrec_mfc_cb(struct lightrec_state *state, union code op)
{
	u32 rt = lightrec_mfc(state, op);

	if (op.i.op == OP_SWC2)
		state->temp_reg = rt;
	else if (op.r.rt)
		state->regs.gpr[op.r.rt] = rt;
}

static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
{
	u32 status, oldstatus, cause;

	switch (reg) {
	case 1:
	case 4:
	case 8:
	case 14:
	case 15:
		/* Those registers are read-only */
		return;
	default:
		break;
	}

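	/* Bit 16 of the Status register is IsC (Isolate Cache). While it is
	 * set, stores only reach the cache, so writes to the emulated RAM
	 * must be disabled; when the game clears it again, RAM is re-enabled
	 * and all compiled blocks are invalidated, since the RAM contents
	 * can no longer be trusted. */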
	if (reg == 12) {
		status = state->regs.cp0[12];
		oldstatus = status;

		if (status & ~data & BIT(16)) {
			state->ops.enable_ram(state, true);
			lightrec_invalidate_all(state);
		} else if (~status & data & BIT(16)) {
			state->ops.enable_ram(state, false);
		}
	}

	if (reg == 13) {
		state->regs.cp0[13] &= ~0x300;
		state->regs.cp0[13] |= data & 0x300;
	} else {
		state->regs.cp0[reg] = data;
	}

	if (reg == 12 || reg == 13) {
		cause = state->regs.cp0[13];
		status = state->regs.cp0[12];

		/* Handle software interrupts: bits 8-9 are the SW interrupt
		 * pending bits of CAUSE, masked against the matching IM bits
		 * of the Status register; the final "& status" then tests
		 * bit 0 (IEc, current interrupt enable). */
		if ((!!(status & cause & 0x300)) & status)
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);

		/* Handle hardware interrupts */
		if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
	}
}

static u32 count_leading_bits(s32 data)
{
	u32 cnt = 33;

#ifdef __has_builtin
#if __has_builtin(__builtin_clrsb)
	return 1 + __builtin_clrsb(data);
#endif
#endif

	data = (data ^ (data >> 31)) << 1;

	do {
		cnt -= 1;
		data >>= 1;
	} while (data);

	return cnt;
}
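
/* count_leading_bits() implements the GTE LZCR semantics: it returns the
 * number of leading bits that are equal to the sign bit. Sample values:
 *
 *   count_leading_bits(0)          == 32
 *   count_leading_bits(1)          == 31
 *   count_leading_bits(-1)         == 32
 *   count_leading_bits(0xffff0000) == 16
 */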

static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
{
	switch (reg) {
	case 15:
		state->regs.cp2d[12] = state->regs.cp2d[13];
		state->regs.cp2d[13] = state->regs.cp2d[14];
		state->regs.cp2d[14] = data;
		break;
	case 28:
		state->regs.cp2d[9] = (data << 7) & 0xf80;
		state->regs.cp2d[10] = (data << 2) & 0xf80;
		state->regs.cp2d[11] = (data >> 3) & 0xf80;
		break;
	case 31:
		return;
	case 30:
		state->regs.cp2d[31] = count_leading_bits((s32) data);
		fallthrough;
	default:
		state->regs.cp2d[reg] = data;
		break;
	}
}

static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
{
	switch (reg) {
	case 4:
	case 12:
	case 20:
	case 26:
	case 27:
	case 29:
	case 30:
		store_u16(&state->regs.cp2c[reg], data);
		break;
	case 31:
		data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
		fallthrough;
	default:
		state->regs.cp2c[reg] = data;
		break;
	}
}

void lightrec_mtc(struct lightrec_state *state, union code op, u8 reg, u32 data)
{
	if (op.i.op == OP_CP0) {
		lightrec_mtc0(state, reg, data);
	} else {
		if (op.i.op == OP_LWC2 || op.r.rs != OP_CP2_BASIC_CTC2)
			lightrec_mtc2(state, reg, data);
		else
			lightrec_ctc2(state, reg, data);

		if (state->ops.cop2_notify)
			(*state->ops.cop2_notify)(state, op.opcode, data);
	}
}

static void lightrec_mtc_cb(struct lightrec_state *state, u32 arg)
{
	union code op = (union code) arg;
	u32 data;
	u8 reg;

	if (op.i.op == OP_LWC2) {
		data = state->temp_reg;
		reg = op.i.rt;
	} else {
		data = state->regs.gpr[op.r.rt];
		reg = op.r.rd;
	}

	lightrec_mtc(state, op, reg, data);
}

void lightrec_rfe(struct lightrec_state *state)
{
	u32 status;

	/* Read CP0 Status register (r12) */
	status = state->regs.cp0[12];

	/* Switch the bits */
	status = ((status & 0x3c) >> 2) | (status & ~0xf);

	/* Write it back */
	lightrec_mtc0(state, 12, status);
}
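
/* The bit shuffle above pops the three-level KU/IE mode stack of the Status
 * register: the "previous" pair (bits 2-3) moves into the "current" pair
 * (bits 0-1), and the "old" pair (bits 4-5) moves into "previous" while
 * staying in place, as MIPS-I RFE requires. E.g. a status of 0x3c becomes
 * (0x3c >> 2) | 0x30 = 0x3f. */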

void lightrec_cp(struct lightrec_state *state, union code op)
{
	if (op.i.op == OP_CP0) {
		pr_err("Invalid CP opcode to coprocessor #0\n");
		return;
	}

	(*state->ops.cop2_op)(state, op.opcode);
}

static void lightrec_cp_cb(struct lightrec_state *state, u32 arg)
{
	lightrec_cp(state, (union code) arg);
}

static struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
{
	struct block *block = lightrec_find_block(state->block_cache, pc);
	u8 old_flags;

	if (block && lightrec_block_is_outdated(state, block)) {
		pr_debug("Block at "PC_FMT" is outdated!\n", block->pc);

		old_flags = block_set_flags(block, BLOCK_IS_DEAD);
		if (!(old_flags & BLOCK_IS_DEAD)) {
			/* Make sure the recompiler isn't processing the block
			 * we'll destroy */
			if (ENABLE_THREADED_COMPILER)
				lightrec_recompiler_remove(state->rec, block);

			remove_from_code_lut(state->block_cache, block);

			if (ENABLE_THREADED_COMPILER) {
				lightrec_reaper_add(state->reaper,
						    lightrec_reap_block, block);
			} else {
				lightrec_unregister_block(state->block_cache, block);
				lightrec_free_block(state, block);
			}
		}

		block = NULL;
	}

	if (!block) {
		block = lightrec_precompile_block(state, pc);
		if (!block) {
			pr_err("Unable to recompile block at "PC_FMT"\n", pc);
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
			return NULL;
		}

		lightrec_register_block(state->block_cache, block);
	}

	return block;
}

static void * get_next_block_func(struct lightrec_state *state, u32 pc)
{
	struct block *block;
	bool should_recompile;
	void *func;
	int err;

	do {
		func = lut_read(state, lut_offset(pc));
		if (func && func != state->get_next_block)
			break;

		block = lightrec_get_block(state, pc);

		if (unlikely(!block))
			break;

		if (OPT_REPLACE_MEMSET &&
		    block_has_flag(block, BLOCK_IS_MEMSET)) {
			func = state->memset_func;
			break;
		}

		should_recompile = block_has_flag(block, BLOCK_SHOULD_RECOMPILE) &&
				   !block_has_flag(block, BLOCK_NEVER_COMPILE) &&
				   !block_has_flag(block, BLOCK_IS_DEAD);

		if (unlikely(should_recompile)) {
			pr_debug("Block at "PC_FMT" should recompile\n", pc);

			if (ENABLE_THREADED_COMPILER) {
				lightrec_recompiler_add(state->rec, block);
			} else {
				err = lightrec_compile_block(state->cstate, block);
				if (err) {
					state->exit_flags = LIGHTREC_EXIT_NOMEM;
					return NULL;
				}
			}
		}

		if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
			func = lightrec_recompiler_run_first_pass(state, block, &pc);
		else
			func = block->function;

		if (likely(func))
			break;

		if (unlikely(block_has_flag(block, BLOCK_NEVER_COMPILE))) {
			pc = lightrec_emulate_block(state, block, pc);

		} else if (!ENABLE_THREADED_COMPILER) {
			/* Block wasn't compiled yet - run the interpreter */
			if (block_has_flag(block, BLOCK_FULLY_TAGGED))
				pr_debug("Block fully tagged, skipping first pass\n");
			else if (ENABLE_FIRST_PASS && likely(!should_recompile))
				pc = lightrec_emulate_block(state, block, pc);

			/* Then compile it using the profiled data */
			err = lightrec_compile_block(state->cstate, block);
			if (err) {
				state->exit_flags = LIGHTREC_EXIT_NOMEM;
				return NULL;
			}
		} else if (unlikely(block_has_flag(block, BLOCK_IS_DEAD))) {
			/*
			 * If the block is dead but has never been compiled,
			 * then its function pointer is NULL and we cannot
			 * execute the block. In that case, reap all the dead
			 * blocks now, and in the next loop we will create a
			 * new block.
			 */
			lightrec_reaper_reap(state->reaper);
		} else {
			lightrec_recompiler_add(state->rec, block);
		}
	} while (state->exit_flags == LIGHTREC_EXIT_NORMAL
		 && state->current_cycle < state->target_cycle);

	state->curr_pc = pc;
	return func;
}

static void * lightrec_alloc_code(struct lightrec_state *state, size_t size)
{
	void *code;

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	code = tlsf_malloc(state->tlsf, size);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);

	return code;
}

static void lightrec_realloc_code(struct lightrec_state *state,
				  void *ptr, size_t size)
{
	/* NOTE: 'size' MUST be smaller than the size specified during
	 * the allocation. */

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	tlsf_realloc(state->tlsf, ptr, size);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);
}

static void lightrec_free_code(struct lightrec_state *state, void *ptr)
{
	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	tlsf_free(state->tlsf, ptr);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);
}

static char lightning_code_data[0x80000];

static void * lightrec_emit_code(struct lightrec_state *state,
				 const struct block *block,
				 jit_state_t *_jit, unsigned int *size)
{
	bool has_code_buffer = ENABLE_CODE_BUFFER && state->tlsf;
	jit_word_t code_size, new_code_size;
	void *code;

	jit_realize();

	if (ENABLE_DISASSEMBLER)
		jit_set_data(lightning_code_data, sizeof(lightning_code_data), 0);
	else
		jit_set_data(NULL, 0, JIT_DISABLE_DATA | JIT_DISABLE_NOTE);

	if (has_code_buffer) {
		jit_get_code(&code_size);
		code = lightrec_alloc_code(state, (size_t) code_size);

		if (!code) {
			if (ENABLE_THREADED_COMPILER) {
				/* If we're using the threaded compiler, return
				 * an allocation error here. The threaded
				 * compiler will then empty its job queue and
				 * request a code flush using the reaper. */
				return NULL;
			}

			/* Remove outdated blocks, and try again */
			lightrec_remove_outdated_blocks(state->block_cache, block);

			pr_debug("Re-try to alloc %zu bytes...\n",
				 (size_t) code_size);

			code = lightrec_alloc_code(state, code_size);
			if (!code) {
				pr_err("Could not alloc even after removing old blocks!\n");
				return NULL;
			}
		}

		jit_set_code(code, code_size);
	}

	code = jit_emit();

	jit_get_code(&new_code_size);
	lightrec_register(MEM_FOR_CODE, new_code_size);

	if (has_code_buffer) {
		lightrec_realloc_code(state, code, (size_t) new_code_size);

		pr_debug("Creating code block at address 0x%" PRIxPTR ", "
			 "code size: %" PRIuPTR " new: %" PRIuPTR "\n",
			 (uintptr_t) code, code_size, new_code_size);
	}

	*size = (unsigned int) new_code_size;

	if (state->ops.code_inv)
		state->ops.code_inv(code, new_code_size);

	return code;
}

static struct block * generate_wrapper(struct lightrec_state *state)
{
	struct block *block;
	jit_state_t *_jit;
	unsigned int i;
	jit_node_t *addr[C_WRAPPERS_COUNT - 1];
	jit_node_t *to_end[C_WRAPPERS_COUNT - 1];
	u8 tmp = JIT_R1;

#ifdef __sh__
	/* On SH, GBR-relative loads target the r0 register.
	 * Use it as the temporary register to factorize the move to
	 * JIT_R1. */
	if (LIGHTREC_REG_STATE == _GBR)
		tmp = _R0;
#endif

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block)
		goto err_no_mem;

	_jit = jit_new_state();
	if (!_jit)
		goto err_free_block;

	jit_name("RW wrapper");
	jit_note(__FILE__, __LINE__);

	/* Wrapper entry point */
	jit_prolog();
	jit_tramp(256);

	/* Add entry points */
	for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
		jit_ldxi(tmp, LIGHTREC_REG_STATE,
			 offsetof(struct lightrec_state, c_wrappers[i]));
		to_end[i - 1] = jit_b();
		addr[i - 1] = jit_indirect();
	}

	jit_ldxi(tmp, LIGHTREC_REG_STATE,
		 offsetof(struct lightrec_state, c_wrappers[0]));

	for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
		jit_patch(to_end[i]);
	jit_movr(JIT_R1, tmp);

	jit_epilog();
	jit_prolog();

	/* Save all temporaries on stack */
	for (i = 0; i < NUM_TEMPS; i++) {
		if (i + FIRST_TEMP != 1) {
			jit_stxi(offsetof(struct lightrec_state, wrapper_regs[i]),
				 LIGHTREC_REG_STATE, JIT_R(i + FIRST_TEMP));
		}
	}

	jit_getarg(JIT_R2, jit_arg());

	jit_prepare();
	jit_pushargr(LIGHTREC_REG_STATE);
	jit_pushargr(JIT_R2);

	jit_ldxi_ui(JIT_R2, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, target_cycle));

	/* state->current_cycle = state->target_cycle - delta; */
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, LIGHTREC_REG_CYCLE);
	jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
		   LIGHTREC_REG_STATE, LIGHTREC_REG_CYCLE);

	/* Call the wrapper function */
	jit_finishr(JIT_R1);

	/* delta = state->target_cycle - state->current_cycle */
	jit_ldxi_ui(LIGHTREC_REG_CYCLE, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, current_cycle));
	jit_ldxi_ui(JIT_R1, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, target_cycle));
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R1, LIGHTREC_REG_CYCLE);

	/* Restore temporaries from stack */
	for (i = 0; i < NUM_TEMPS; i++) {
		if (i + FIRST_TEMP != 1) {
			jit_ldxi(JIT_R(i + FIRST_TEMP), LIGHTREC_REG_STATE,
				 offsetof(struct lightrec_state, wrapper_regs[i]));
		}
	}

	jit_ret();
	jit_epilog();

	block->_jit = _jit;
	block->opcode_list = NULL;
	block->flags = BLOCK_NO_OPCODE_LIST;
	block->nb_ops = 0;

	block->function = lightrec_emit_code(state, block, _jit,
					     &block->code_size);
	if (!block->function)
		goto err_free_block;

	state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;

	for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
		state->wrappers_eps[i] = jit_address(addr[i]);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Wrapper block:\n");
		jit_disassemble();
	}

	jit_clear_state();
	return block;

err_free_block:
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
	pr_err("Unable to compile wrapper: Out of memory\n");
	return NULL;
}

static u32 lightrec_memset(struct lightrec_state *state)
{
	u32 kunseg_pc = kunseg(state->regs.gpr[4]);
	void *host;
	const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
	u32 length = state->regs.gpr[5] * 4;

	if (!map) {
		pr_err("Unable to find memory map for memset target address "PC_FMT"\n",
		       kunseg_pc);
		return 0;
	}

	pr_debug("Calling host memset, "PC_FMT" (host address 0x%"PRIxPTR") for %u bytes\n",
		 kunseg_pc, (uintptr_t)host, length);
	memset(host, 0, length);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate_map(state, map, kunseg_pc, length);

	/* Rough estimation of the number of cycles consumed */
	return 8 + 5 * ((length + 3) / 4);
}
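
/* Note on the calling convention above: this helper replaces a recognized
 * memset-like loop, so it pulls its arguments straight from the emulated
 * registers - $a0 (gpr[4]) holds the target address and $a1 (gpr[5]) the
 * number of 32-bit words to clear. The return value is the rough cycle
 * count billed to the guest: 8 cycles of overhead plus 5 per word. */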

static u32 lightrec_check_load_delay(struct lightrec_state *state, u32 pc, u8 reg)
{
	struct block *block;
	union code first_op;

	first_op = lightrec_read_opcode(state, pc);

	if (likely(!opcode_reads_register(first_op, reg))) {
		state->regs.gpr[reg] = state->temp_reg;
	} else {
		block = lightrec_get_block(state, pc);
		if (unlikely(!block)) {
			pr_err("Unable to get block at "PC_FMT"\n", pc);
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
			pc = 0;
		} else {
			pc = lightrec_handle_load_delay(state, block, pc, reg);
		}
	}

	return pc;
}

static void update_cycle_counter_before_c(jit_state_t *_jit)
{
	/* update state->current_cycle */
	jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, target_cycle));
	jit_subr(JIT_R1, JIT_R2, LIGHTREC_REG_CYCLE);
	jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
		   LIGHTREC_REG_STATE, JIT_R1);
}

static void update_cycle_counter_after_c(jit_state_t *_jit)
{
	/* Recalc the delta */
	jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, current_cycle));
	jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, target_cycle));
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
}

static void sync_next_pc(jit_state_t *_jit)
{
	if (lightrec_store_next_pc()) {
		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
			    offsetof(struct lightrec_state, next_pc));
	}
}

static struct block * generate_dispatcher(struct lightrec_state *state)
{
	struct block *block;
	jit_state_t *_jit;
	jit_node_t *to_end, *loop, *addr, *addr2, *addr3, *addr4, *addr5, *jmp, *jmp2;
	unsigned int i;
	u32 offset;

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block)
		goto err_no_mem;

	_jit = jit_new_state();
	if (!_jit)
		goto err_free_block;

	jit_name("dispatcher");
	jit_note(__FILE__, __LINE__);

	jit_prolog();
	jit_frame(256);

	jit_getarg(LIGHTREC_REG_STATE, jit_arg());
	jit_getarg(JIT_V0, jit_arg());
	jit_getarg(JIT_V1, jit_arg());
	jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());

	/* Force all callee-saved registers to be pushed on the stack */
	for (i = 0; i < NUM_REGS; i++)
		jit_movr(JIT_V(i + FIRST_REG), JIT_V(i + FIRST_REG));

	loop = jit_label();

	/* Call the block's code */
	jit_jmpr(JIT_V1);

	if (OPT_REPLACE_MEMSET) {
		/* Blocks will jump here when they need to call
		 * lightrec_memset() */
		addr3 = jit_indirect();

		jit_movr(JIT_V1, LIGHTREC_REG_CYCLE);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);

		jit_finishi(lightrec_memset);
		jit_retval(LIGHTREC_REG_CYCLE);

		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
			    offsetof(struct lightrec_state, regs.gpr[31]));
		jit_subr(LIGHTREC_REG_CYCLE, JIT_V1, LIGHTREC_REG_CYCLE);

		if (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)
			jmp = jit_b();
	}

	if (OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* Blocks will jump here when they reach a branch that should
		 * be executed with the interpreter, passing the branch's PC
		 * in JIT_V0 and the address of the block in JIT_V1. */
		addr4 = jit_indirect();

		sync_next_pc(_jit);
		update_cycle_counter_before_c(_jit);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);
		jit_pushargr(JIT_V1);
		jit_pushargr(JIT_V0);
		jit_finishi(lightrec_emulate_block);

		jit_retval(JIT_V0);

		update_cycle_counter_after_c(_jit);

		if (OPT_HANDLE_LOAD_DELAYS)
			jmp2 = jit_b();

	}

	if (OPT_HANDLE_LOAD_DELAYS) {
		/* Blocks will jump here when they reach a branch with a load
		 * opcode in its delay slot. The delay slot has already been
		 * executed; the load value is in (state->temp_reg), and the
		 * register number is in JIT_V1.
		 * Jump to a C function which will evaluate the branch target's
		 * first opcode, to make sure that it does not read the register
		 * in question; and if it does, handle it accordingly. */
		addr5 = jit_indirect();

		sync_next_pc(_jit);
		update_cycle_counter_before_c(_jit);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);
		jit_pushargr(JIT_V0);
		jit_pushargr(JIT_V1);
		jit_finishi(lightrec_check_load_delay);

		jit_retval(JIT_V0);

		update_cycle_counter_after_c(_jit);
	}

	/* The block will jump here, with the number of cycles remaining in
	 * LIGHTREC_REG_CYCLE */
	addr2 = jit_indirect();

	sync_next_pc(_jit);

	if (OPT_HANDLE_LOAD_DELAYS && OPT_DETECT_IMPOSSIBLE_BRANCHES)
		jit_patch(jmp2);

	if (OPT_REPLACE_MEMSET
	    && (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)) {
		jit_patch(jmp);
	}

	/* Store back the next PC to the lightrec_state structure */
	offset = offsetof(struct lightrec_state, curr_pc);
	jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);

	/* Jump to end if state->target_cycle < state->current_cycle */
	to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);

	/* Convert next PC to KUNSEG and avoid mirrors */
	jit_andi(JIT_V1, JIT_V0, 0x10000000 | (RAM_SIZE - 1));
	jit_rshi_u(JIT_R1, JIT_V1, 28);
	jit_andi(JIT_R2, JIT_V0, BIOS_SIZE - 1);
	jit_addi(JIT_R2, JIT_R2, RAM_SIZE);
	jit_movnr(JIT_V1, JIT_R2, JIT_R1);

	/* If possible, use the code LUT */
	if (!lut_is_32bit(state))
		jit_lshi(JIT_V1, JIT_V1, 1);
	jit_add_state(JIT_V1, JIT_V1);

	offset = offsetof(struct lightrec_state, code_lut);
	if (lut_is_32bit(state))
		jit_ldxi_ui(JIT_V1, JIT_V1, offset);
	else
		jit_ldxi(JIT_V1, JIT_V1, offset);

	/* If we get non-NULL, loop */
	jit_patch_at(jit_bnei(JIT_V1, 0), loop);

	/* The code LUT will be set to this address when the block at the target
	 * PC has been preprocessed but not yet compiled by the threaded
	 * recompiler */
	addr = jit_indirect();

	/* Slow path: call C function get_next_block_func() */

	if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* We may call the interpreter - update state->current_cycle */
		update_cycle_counter_before_c(_jit);
	}

	jit_prepare();
	jit_pushargr(LIGHTREC_REG_STATE);
	jit_pushargr(JIT_V0);

	/* Save the cycles register if needed */
	if (!(ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES))
		jit_movr(JIT_V0, LIGHTREC_REG_CYCLE);

	/* Get the next block */
	jit_finishi(&get_next_block_func);
	jit_retval(JIT_V1);

	if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* The interpreter may have updated state->current_cycle and
		 * state->target_cycle - recalc the delta */
		update_cycle_counter_after_c(_jit);
	} else {
		jit_movr(LIGHTREC_REG_CYCLE, JIT_V0);
	}

	/* Reset JIT_V0 to the next PC */
	jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, curr_pc));

	/* If we get non-NULL, loop */
	jit_patch_at(jit_bnei(JIT_V1, 0), loop);

	/* When exiting, the recompiled code will jump to that address */
	jit_note(__FILE__, __LINE__);
	jit_patch(to_end);

	jit_retr(LIGHTREC_REG_CYCLE);
	jit_epilog();

	block->_jit = _jit;
	block->opcode_list = NULL;
	block->flags = BLOCK_NO_OPCODE_LIST;
	block->nb_ops = 0;

	block->function = lightrec_emit_code(state, block, _jit,
					     &block->code_size);
	if (!block->function)
		goto err_free_block;

	state->eob_wrapper_func = jit_address(addr2);
	if (OPT_DETECT_IMPOSSIBLE_BRANCHES)
		state->interpreter_func = jit_address(addr4);
	if (OPT_HANDLE_LOAD_DELAYS)
		state->ds_check_func = jit_address(addr5);
	if (OPT_REPLACE_MEMSET)
		state->memset_func = jit_address(addr3);
	state->get_next_block = jit_address(addr);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Dispatcher block:\n");
		jit_disassemble();
	}

	/* We're done! */
	jit_clear_state();
	return block;

err_free_block:
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
	pr_err("Unable to compile dispatcher: Out of memory\n");
	return NULL;
}

union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
{
	void *host = NULL;

	lightrec_get_map(state, &host, kunseg(pc));

	const u32 *code = (u32 *)host;
	return (union code) LE32TOH(*code);
}

unsigned int lightrec_cycles_of_opcode(const struct lightrec_state *state,
				       union code code)
{
	return state->cycles_per_op;
}

void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
{
	struct opcode_list *list = container_of(ops, struct opcode_list, ops);

	lightrec_free(state, MEM_FOR_IR,
		      sizeof(*list) + list->nb_ops * sizeof(struct opcode),
		      list);
}

static unsigned int lightrec_get_mips_block_len(const u32 *src)
{
	unsigned int i;
	union code c;

	for (i = 1; ; i++) {
		c.opcode = LE32TOH(*src++);

		if (is_syscall(c))
			return i;

		if (is_unconditional_jump(c))
			return i + 1;
	}
}
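
/* In other words, a block is delimited by the first opcode that ends the
 * flow of execution: it stops at a syscall/break (which is included in the
 * block), or one opcode past an unconditional jump, so that the delay slot
 * is part of the block. */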

static struct opcode * lightrec_disassemble(struct lightrec_state *state,
					    const u32 *src, unsigned int *len)
{
	struct opcode_list *list;
	unsigned int i, length;

	length = lightrec_get_mips_block_len(src);

	list = lightrec_malloc(state, MEM_FOR_IR,
			       sizeof(*list) + sizeof(struct opcode) * length);
	if (!list) {
		pr_err("Unable to allocate memory\n");
		return NULL;
	}

	list->nb_ops = (u16) length;

	for (i = 0; i < length; i++) {
		list->ops[i].opcode = LE32TOH(src[i]);
		list->ops[i].flags = 0;
	}

	*len = length * sizeof(u32);

	return list->ops;
}

static struct block * lightrec_precompile_block(struct lightrec_state *state,
						u32 pc)
{
	struct opcode *list;
	struct block *block;
	void *host, *addr;
	const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
	const u32 *code = (u32 *) host;
	unsigned int length;
	bool fully_tagged;
	u8 block_flags = 0;

	if (!map)
		return NULL;

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block) {
		pr_err("Unable to recompile block: Out of memory\n");
		return NULL;
	}

	list = lightrec_disassemble(state, code, &length);
	if (!list) {
		lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
		return NULL;
	}

	block->pc = pc;
	block->_jit = NULL;
	block->function = NULL;
	block->opcode_list = list;
	block->code = code;
	block->next = NULL;
	block->flags = 0;
	block->code_size = 0;
	block->precompile_date = state->current_cycle;
	block->nb_ops = length / sizeof(u32);

	lightrec_optimize(state, block);

	length = block->nb_ops * sizeof(u32);

	lightrec_register(MEM_FOR_MIPS_CODE, length);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
		lightrec_print_disassembly(block, code);
	}

	pr_debug("Block size: %hu opcodes\n", block->nb_ops);

	fully_tagged = lightrec_block_is_fully_tagged(block);
	if (fully_tagged)
		block_flags |= BLOCK_FULLY_TAGGED;

	if (block_flags)
		block_set_flags(block, block_flags);

	block->hash = lightrec_calculate_block_hash(block);

	if (OPT_REPLACE_MEMSET && block_has_flag(block, BLOCK_IS_MEMSET))
		addr = state->memset_func;
	else
		addr = state->get_next_block;
	lut_write(state, lut_offset(pc), addr);

	pr_debug("Blocks created: %u\n", ++state->nb_precompile);

	return block;
}

static bool lightrec_block_is_fully_tagged(const struct block *block)
{
	const struct opcode *op;
	unsigned int i;

	for (i = 0; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		/* If we have one branch that must be emulated, we cannot trash
		 * the opcode list. */
		if (should_emulate(op))
			return false;

		/* Check all loads/stores of the opcode list and mark the
		 * block as fully compiled if they all have been tagged. */
		switch (op->c.i.op) {
		case OP_LB:
		case OP_LH:
		case OP_LWL:
		case OP_LW:
		case OP_LBU:
		case OP_LHU:
		case OP_LWR:
		case OP_SB:
		case OP_SH:
		case OP_SWL:
		case OP_SW:
		case OP_SWR:
		case OP_LWC2:
		case OP_SWC2:
		case OP_META_LWU:
		case OP_META_SWU:
			if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
				return false;
			fallthrough;
		default:
			continue;
		}
	}

	return true;
}

static void lightrec_reap_block(struct lightrec_state *state, void *data)
{
	struct block *block = data;

	pr_debug("Reap dead block at "PC_FMT"\n", block->pc);
	lightrec_unregister_block(state->block_cache, block);
	lightrec_free_block(state, block);
}

static void lightrec_reap_jit(struct lightrec_state *state, void *data)
{
	_jit_destroy_state(data);
}

static void lightrec_free_function(struct lightrec_state *state, void *fn)
{
	if (ENABLE_CODE_BUFFER && state->tlsf) {
		pr_debug("Freeing code block at 0x%" PRIxPTR "\n", (uintptr_t) fn);
		lightrec_free_code(state, fn);
	}
}

static void lightrec_reap_function(struct lightrec_state *state, void *data)
{
	lightrec_free_function(state, data);
}

static void lightrec_reap_opcode_list(struct lightrec_state *state, void *data)
{
	lightrec_free_opcode_list(state, data);
}

int lightrec_compile_block(struct lightrec_cstate *cstate,
			   struct block *block)
{
	struct block *dead_blocks[ARRAY_SIZE(cstate->targets)];
	u32 was_dead[ARRAY_SIZE(cstate->targets) / 8];
	struct lightrec_state *state = cstate->state;
	struct lightrec_branch_target *target;
	bool fully_tagged = false;
	struct block *block2;
	struct opcode *elm;
	jit_state_t *_jit, *oldjit;
	jit_node_t *start_of_block;
	bool skip_next = false;
	void *old_fn, *new_fn;
	size_t old_code_size;
	unsigned int i, j;
	u8 old_flags;
	u32 offset;

	fully_tagged = lightrec_block_is_fully_tagged(block);
	if (fully_tagged)
		block_set_flags(block, BLOCK_FULLY_TAGGED);

	_jit = jit_new_state();
	if (!_jit)
		return -ENOMEM;

	oldjit = block->_jit;
	old_fn = block->function;
	old_code_size = block->code_size;
	block->_jit = _jit;

	lightrec_regcache_reset(cstate->reg_cache);

	if (OPT_PRELOAD_PC && (block->flags & BLOCK_PRELOAD_PC))
		lightrec_preload_pc(cstate->reg_cache, _jit);

	cstate->cycles = 0;
	cstate->nb_local_branches = 0;
	cstate->nb_targets = 0;
	cstate->no_load_delay = false;

	jit_prolog();
	jit_tramp(256);

	start_of_block = jit_label();

	for (i = 0; i < block->nb_ops; i++) {
		elm = &block->opcode_list[i];

		if (skip_next) {
			skip_next = false;
			continue;
		}

		if (should_emulate(elm)) {
			pr_debug("Branch at offset 0x%x will be emulated\n",
				 i << 2);

			lightrec_emit_jump_to_interpreter(cstate, block, i);
			skip_next = !op_flag_no_ds(elm->flags);
		} else {
			lightrec_rec_opcode(cstate, block, i);
			skip_next = !op_flag_no_ds(elm->flags) && has_delay_slot(elm->c);
#ifdef _WIN32
			/* FIXME: GNU Lightning on Windows seems to use our
			 * mapped registers as temporaries. Until the actual bug
			 * is found and fixed, unconditionally mark our
			 * registers as live here. */
			lightrec_regcache_mark_live(cstate->reg_cache, _jit);
#endif
		}

		cstate->cycles += lightrec_cycles_of_opcode(state, elm->c);
	}

	for (i = 0; i < cstate->nb_local_branches; i++) {
		struct lightrec_branch *branch = &cstate->local_branches[i];

		pr_debug("Patch local branch to offset 0x%x\n",
			 branch->target << 2);

		if (branch->target == 0) {
			jit_patch_at(branch->branch, start_of_block);
			continue;
		}

		for (j = 0; j < cstate->nb_targets; j++) {
			if (cstate->targets[j].offset == branch->target) {
				jit_patch_at(branch->branch,
					     cstate->targets[j].label);
				break;
			}
		}

		if (j == cstate->nb_targets)
			pr_err("Unable to find branch target\n");
	}

	jit_ret();
	jit_epilog();

	new_fn = lightrec_emit_code(state, block, _jit, &block->code_size);
	if (!new_fn) {
		if (!ENABLE_THREADED_COMPILER)
			pr_err("Unable to compile block!\n");
		block->_jit = oldjit;
		jit_clear_state();
		_jit_destroy_state(_jit);
		return -ENOMEM;
	}

	/* Pause the reaper, because lightrec_reset_lut_offset() may try to set
	 * the old block->function pointer to the code LUT. */
	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_pause(state->reaper);

	block->function = new_fn;
	block_clear_flags(block, BLOCK_SHOULD_RECOMPILE);

	/* Add compiled function to the LUT */
	lut_write(state, lut_offset(block->pc), block->function);

	/* Detect old blocks that have been covered by the new one */
	for (i = 0; ENABLE_THREADED_COMPILER && i < cstate->nb_targets; i++) {
		target = &cstate->targets[i];

		if (!target->offset)
			continue;

		offset = block->pc + target->offset * sizeof(u32);

		block2 = lightrec_find_block(state->block_cache, offset);
		if (block2) {
			/* No need to check if block2 is compilable - it must
			 * be, otherwise block wouldn't be compilable either */

			/* Set the "block dead" flag to prevent the dynarec from
			 * recompiling this block */
			old_flags = block_set_flags(block2, BLOCK_IS_DEAD);

			if (old_flags & BLOCK_IS_DEAD)
				was_dead[i / 32] |= BIT(i % 32);
			else
				was_dead[i / 32] &= ~BIT(i % 32);
		}

		dead_blocks[i] = block2;

		/* If block2 was pending for compilation, cancel it.
		 * If it's being compiled right now, wait until it finishes. */
		if (block2)
			lightrec_recompiler_remove(state->rec, block2);
	}

	for (i = 0; i < cstate->nb_targets; i++) {
		target = &cstate->targets[i];

		if (!target->offset)
			continue;

		/* We know from now on that block2 (if present) isn't going to
		 * be compiled. We can override the LUT entry with our new
		 * block's entry point. */
		offset = lut_offset(block->pc) + target->offset;
		lut_write(state, offset, jit_address(target->label));

		if (ENABLE_THREADED_COMPILER) {
			block2 = dead_blocks[i];
		} else {
			offset = block->pc + target->offset * sizeof(u32);
			block2 = lightrec_find_block(state->block_cache, offset);
		}
		if (block2) {
			pr_debug("Reap block 0x%08x as it's covered by block "
				 "0x%08x\n", block2->pc, block->pc);

			/* Finally, reap the block. */
			if (!ENABLE_THREADED_COMPILER) {
				lightrec_unregister_block(state->block_cache, block2);
				lightrec_free_block(state, block2);
			} else if (!(was_dead[i / 32] & BIT(i % 32))) {
				lightrec_reaper_add(state->reaper,
						    lightrec_reap_block,
						    block2);
			}
		}
	}

	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_continue(state->reaper);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
		jit_disassemble();
	}

	jit_clear_state();

	if (fully_tagged)
		old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

	if (fully_tagged && !(old_flags & BLOCK_NO_OPCODE_LIST)) {
		pr_debug("Block "PC_FMT" is fully tagged"
			 " - free opcode list\n", block->pc);

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_opcode_list,
					    block->opcode_list);
		} else {
			lightrec_free_opcode_list(state, block->opcode_list);
		}
	}

	if (oldjit) {
		pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
			 block->pc);

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_jit, oldjit);
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_function, old_fn);
		} else {
			_jit_destroy_state(oldjit);
			lightrec_free_function(state, old_fn);
		}

		lightrec_unregister(MEM_FOR_CODE, old_code_size);
	}

	pr_debug("Blocks compiled: %u\n", ++state->nb_compile);

	return 0;
}

static void lightrec_print_info(struct lightrec_state *state)
{
	if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
		pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
			"MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
			lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
			lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
			lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
			lightrec_get_total_mem_usage() / 1024,
			lightrec_get_average_ipi());
		state->old_cycle_counter = state->current_cycle & ~0xfffffff;
	}
}

u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
{
	s32 (*func)(struct lightrec_state *, u32, void *, s32) = (void *)state->dispatcher->function;
	void *block_trace;
	s32 cycles_delta;

	state->exit_flags = LIGHTREC_EXIT_NORMAL;

	/* Handle the cycle counter overflowing */
	if (unlikely(target_cycle < state->current_cycle))
		target_cycle = UINT_MAX;

	state->target_cycle = target_cycle;
	state->curr_pc = pc;

	block_trace = get_next_block_func(state, pc);
	if (block_trace) {
		cycles_delta = state->target_cycle - state->current_cycle;

		cycles_delta = (*func)(state, state->curr_pc,
				       block_trace, cycles_delta);

		state->current_cycle = state->target_cycle - cycles_delta;
	}

	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_reap(state->reaper);

	if (LOG_LEVEL >= INFO_L)
		lightrec_print_info(state);

	return state->curr_pc;
}
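
/* A minimal sketch of the embedder-side main loop, assuming run_events() and
 * next_event_cycle() are hypothetical host helpers that drive the rest of
 * the emulated machine:
 *
 *   u32 pc = 0xbfc00000;    // BIOS entry point
 *
 *   while (running) {
 *           pc = lightrec_execute(state, pc, next_event_cycle());
 *
 *           if (lightrec_exit_flags(state) & LIGHTREC_EXIT_SEGFAULT)
 *                   break;
 *
 *           run_events(state);    // timers, DMA, interrupts, ...
 *   }
 */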

u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc,
			     u32 target_cycle)
{
	struct block *block;

	state->exit_flags = LIGHTREC_EXIT_NORMAL;
	state->target_cycle = target_cycle;

	do {
		block = lightrec_get_block(state, pc);
		if (!block)
			break;

		pc = lightrec_emulate_block(state, block, pc);

		if (ENABLE_THREADED_COMPILER)
			lightrec_reaper_reap(state->reaper);
	} while (state->current_cycle < state->target_cycle);

	if (LOG_LEVEL >= INFO_L)
		lightrec_print_info(state);

	return pc;
}

void lightrec_free_block(struct lightrec_state *state, struct block *block)
{
	u8 old_flags;

	lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
	old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

	if (!(old_flags & BLOCK_NO_OPCODE_LIST))
		lightrec_free_opcode_list(state, block->opcode_list);
	if (block->_jit)
		_jit_destroy_state(block->_jit);
	if (block->function) {
		lightrec_free_function(state, block->function);
		lightrec_unregister(MEM_FOR_CODE, block->code_size);
	}
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
}

struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
{
	struct lightrec_cstate *cstate;

	cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
	if (!cstate)
		return NULL;

	cstate->reg_cache = lightrec_regcache_init(state);
	if (!cstate->reg_cache) {
		lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
		return NULL;
	}

	cstate->state = state;

	return cstate;
}

void lightrec_free_cstate(struct lightrec_cstate *cstate)
{
	lightrec_free_regcache(cstate->reg_cache);
	lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
}

struct lightrec_state * lightrec_init(char *argv0,
				      const struct lightrec_mem_map *map,
				      size_t nb,
				      const struct lightrec_ops *ops)
{
	const struct lightrec_mem_map *codebuf_map = &map[PSX_MAP_CODE_BUFFER];
	struct lightrec_state *state;
	uintptr_t addr;
	void *tlsf = NULL;
	bool with_32bit_lut = false;
	size_t lut_size;

	/* Sanity-check ops */
	if (!ops || !ops->cop2_op || !ops->enable_ram) {
		pr_err("Missing callbacks in lightrec_ops structure\n");
		return NULL;
	}

	if (ops->cop2_notify)
		pr_debug("Optional cop2_notify callback in lightrec_ops\n");
	else
		pr_debug("No optional cop2_notify callback in lightrec_ops\n");

	if (ENABLE_CODE_BUFFER && nb > PSX_MAP_CODE_BUFFER
	    && codebuf_map->address) {
		tlsf = tlsf_create_with_pool(codebuf_map->address,
					     codebuf_map->length);
		if (!tlsf) {
			pr_err("Unable to initialize code buffer\n");
			return NULL;
		}

		if (__WORDSIZE == 64) {
			addr = (uintptr_t) codebuf_map->address + codebuf_map->length - 1;
			with_32bit_lut = addr == (u32) addr;
		}
	}

	if (with_32bit_lut)
		lut_size = CODE_LUT_SIZE * 4;
	else
		lut_size = CODE_LUT_SIZE * sizeof(void *);

	init_jit_with_debug(argv0, stdout);

	state = calloc(1, sizeof(*state) + lut_size);
	if (!state)
		goto err_finish_jit;

	lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) + lut_size);

	state->tlsf = tlsf;
	state->with_32bit_lut = with_32bit_lut;
	state->in_delay_slot_n = 0xff;
	state->cycles_per_op = 2;

	state->block_cache = lightrec_blockcache_init(state);
	if (!state->block_cache)
		goto err_free_state;

	if (ENABLE_THREADED_COMPILER) {
		state->rec = lightrec_recompiler_init(state);
		if (!state->rec)
			goto err_free_block_cache;

		state->reaper = lightrec_reaper_init(state);
		if (!state->reaper)
			goto err_free_recompiler;
	} else {
		state->cstate = lightrec_create_cstate(state);
		if (!state->cstate)
			goto err_free_block_cache;
	}

	state->nb_maps = nb;
	state->maps = map;

	memcpy(&state->ops, ops, sizeof(*ops));

	state->dispatcher = generate_dispatcher(state);
	if (!state->dispatcher)
		goto err_free_reaper;

	state->c_wrapper_block = generate_wrapper(state);
	if (!state->c_wrapper_block)
		goto err_free_dispatcher;

	state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
	state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
	state->c_wrappers[C_WRAPPER_MFC] = lightrec_mfc_cb;
	state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
	state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;

	map = &state->maps[PSX_MAP_BIOS];
	state->offset_bios = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_SCRATCH_PAD];
	state->offset_scratch = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_HW_REGISTERS];
	state->offset_io = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
	state->offset_ram = (uintptr_t)map->address - map->pc;

	if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
	    state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
	    state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
		state->mirrors_mapped = true;

	if (state->offset_bios == 0 &&
	    state->offset_scratch == 0 &&
	    state->offset_ram == 0 &&
	    state->offset_io == 0 &&
	    state->mirrors_mapped) {
		pr_info("Memory map is perfect. Emitted code will be best.\n");
	} else {
		pr_info("Memory map is sub-par. Emitted code will be slow.\n");
	}

	if (state->with_32bit_lut)
		pr_info("Using 32-bit LUT\n");

	return state;

err_free_dispatcher:
	lightrec_free_block(state, state->dispatcher);
err_free_reaper:
	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_destroy(state->reaper);
err_free_recompiler:
	if (ENABLE_THREADED_COMPILER)
		lightrec_free_recompiler(state->rec);
	else
		lightrec_free_cstate(state->cstate);
err_free_block_cache:
	lightrec_free_block_cache(state->block_cache);
err_free_state:
	lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
			    lut_elm_size(state) * CODE_LUT_SIZE);
	free(state);
err_finish_jit:
	finish_jit();
	if (ENABLE_CODE_BUFFER && tlsf)
		tlsf_destroy(tlsf);
	return NULL;
}

void lightrec_destroy(struct lightrec_state *state)
{
	/* Force a print info on destroy */
	state->current_cycle = ~state->current_cycle;
	lightrec_print_info(state);

	lightrec_free_block_cache(state->block_cache);
	lightrec_free_block(state, state->dispatcher);
	lightrec_free_block(state, state->c_wrapper_block);

	if (ENABLE_THREADED_COMPILER) {
		lightrec_free_recompiler(state->rec);
		lightrec_reaper_destroy(state->reaper);
	} else {
		lightrec_free_cstate(state->cstate);
	}

	finish_jit();
	if (ENABLE_CODE_BUFFER && state->tlsf)
		tlsf_destroy(state->tlsf);

	lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
			    lut_elm_size(state) * CODE_LUT_SIZE);
	free(state);
}

void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
{
	u32 kaddr = kunseg(addr & ~0x3);
	enum psx_map idx = lightrec_get_map_idx(state, kaddr);

	switch (idx) {
	case PSX_MAP_MIRROR1:
	case PSX_MAP_MIRROR2:
	case PSX_MAP_MIRROR3:
		/* Handle mirrors */
		kaddr &= RAM_SIZE - 1;
		fallthrough;
	case PSX_MAP_KERNEL_USER_RAM:
		break;
	default:
		return;
	}

	memset(lut_address(state, lut_offset(kaddr)), 0,
	       ((len + 3) / 4) * lut_elm_size(state));
}

void lightrec_invalidate_all(struct lightrec_state *state)
{
	memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
}

void lightrec_set_unsafe_opt_flags(struct lightrec_state *state, u32 flags)
{
	if ((flags ^ state->opt_flags) & LIGHTREC_OPT_INV_DMA_ONLY)
		lightrec_invalidate_all(state);

	state->opt_flags = flags;
}

void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
	if (flags != LIGHTREC_EXIT_NORMAL) {
		state->exit_flags |= flags;
		state->target_cycle = state->current_cycle;
	}
}

u32 lightrec_exit_flags(struct lightrec_state *state)
{
	return state->exit_flags;
}

u32 lightrec_current_cycle_count(const struct lightrec_state *state)
{
	return state->current_cycle;
}

void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
{
	state->current_cycle = cycles;

	if (state->target_cycle < cycles)
		state->target_cycle = cycles;
}

void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
{
	if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
		if (cycles < state->current_cycle)
			cycles = state->current_cycle;

		state->target_cycle = cycles;
	}
}

struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
{
	return &state->regs;
}

void lightrec_set_cycles_per_opcode(struct lightrec_state *state, u32 cycles)
{
	state->cycles_per_op = cycles;
}