[pcsx_rearmed.git] / deps / lightrec / lightrec.c
// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "blockcache.h"
#include "debug.h"
#include "disassembler.h"
#include "emitter.h"
#include "interpreter.h"
#include "lightrec-config.h"
#include "lightning-wrapper.h"
#include "lightrec.h"
#include "memmanager.h"
#include "reaper.h"
#include "recompiler.h"
#include "regcache.h"
#include "optimizer.h"
#include "tlsf/tlsf.h"

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#if ENABLE_THREADED_COMPILER
#include <stdatomic.h>
#endif
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static struct block * lightrec_precompile_block(struct lightrec_state *state,
						u32 pc);
static bool lightrec_block_is_fully_tagged(const struct block *block);

static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);

static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u8 data)
{
	*(u8 *)host = data;

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 1);
}

static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u16 data)
{
	*(u16 *)host = HTOLE16(data);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 2);
}

static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u32 data)
{
	*(u32 *)host = HTOLE32(data);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 4);
}

static u8 lightrec_default_lb(struct lightrec_state *state,
			      u32 opcode, void *host, u32 addr)
{
	return *(u8 *)host;
}

static u16 lightrec_default_lh(struct lightrec_state *state,
			       u32 opcode, void *host, u32 addr)
{
	return LE16TOH(*(u16 *)host);
}

static u32 lightrec_default_lw(struct lightrec_state *state,
			       u32 opcode, void *host, u32 addr)
{
	return LE32TOH(*(u32 *)host);
}

static u32 lightrec_default_lwu(struct lightrec_state *state,
				u32 opcode, void *host, u32 addr)
{
	u32 val;

	memcpy(&val, host, 4);

	return LE32TOH(val);
}

static void lightrec_default_swu(struct lightrec_state *state, u32 opcode,
				 void *host, u32 addr, u32 data)
{
	data = HTOLE32(data);

	memcpy(host, &data, 4);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr & ~0x3, 8);
}

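/* Default accessors used by memory maps that have no custom ops: the
 * address has already been translated to a host pointer, so loads and
 * stores hit guest memory directly (with little-endian conversion).
 * Stores also invalidate the code LUT entries of the words they touch,
 * unless the unsafe "invalidate on DMA only" optimization is enabled.
 * The lwu/swu variants go through memcpy since their address may be
 * misaligned; swu invalidates 8 bytes as the store may straddle two
 * aligned words. */
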
static const struct lightrec_mem_map_ops lightrec_default_ops = {
	.sb = lightrec_default_sb,
	.sh = lightrec_default_sh,
	.sw = lightrec_default_sw,
	.lb = lightrec_default_lb,
	.lh = lightrec_default_lh,
	.lw = lightrec_default_lw,
	.lwu = lightrec_default_lwu,
	.swu = lightrec_default_swu,
};

static void __segfault_cb(struct lightrec_state *state, u32 addr,
			  const struct block *block)
{
	lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
	pr_err("Segmentation fault in recompiled code: invalid "
	       "load/store at address "PC_FMT"\n", addr);
	if (block)
		pr_err("Was executing block "PC_FMT"\n", block->pc);
}

static void lightrec_swl(struct lightrec_state *state,
			 const struct lightrec_mem_map_ops *ops,
			 u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = shift < 3 ? GENMASK(31, (shift + 1) * 8) : 0;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	data = (data >> ((3 - shift) * 8)) | (old_data & mask);

	ops->sw(state, opcode, host, addr, data);
}
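
/* Worked example for the little-endian SWL above, assuming addr & 3 == 1:
 * mask = GENMASK(31, 16) = 0xffff0000, so the two most-significant bytes
 * of the source register (data >> 16) are stored into the two lowest
 * bytes of the aligned word, while the upper half of the old word is
 * preserved. With addr & 3 == 3 the mask is 0 and the full register is
 * stored. */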

static void lightrec_swr(struct lightrec_state *state,
			 const struct lightrec_mem_map_ops *ops,
			 u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = (1 << (shift * 8)) - 1;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	data = (data << (shift * 8)) | (old_data & mask);

	ops->sw(state, opcode, host, addr, data);
}

static void lightrec_swc2(struct lightrec_state *state, union code op,
			  const struct lightrec_mem_map_ops *ops,
			  void *host, u32 addr)
{
	u32 data = lightrec_mfc2(state, op.i.rt);

	ops->sw(state, op.opcode, host, addr, data);
}

static u32 lightrec_lwl(struct lightrec_state *state,
			const struct lightrec_mem_map_ops *ops,
			u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = (1 << (24 - shift * 8)) - 1;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	return (data & mask) | (old_data << (24 - shift * 8));
}

static u32 lightrec_lwr(struct lightrec_state *state,
			const struct lightrec_mem_map_ops *ops,
			u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = shift ? GENMASK(31, 32 - shift * 8) : 0;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	return (data & mask) | (old_data >> (shift * 8));
}
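
/* Worked example for LWL/LWR above, assuming addr & 3 == 2:
 * LWL: mask = (1 << 8) - 1 = 0xff, result = (rt & 0xff) | (mem << 8),
 * i.e. the three low bytes of the word fill the three high bytes of rt.
 * LWR: mask = GENMASK(31, 16) = 0xffff0000,
 * result = (rt & 0xffff0000) | (mem >> 16), i.e. the two high bytes of
 * the word fill the two low bytes of rt. An LWL/LWR pair at addr+3/addr
 * thus reassembles a full unaligned word. */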

static void lightrec_lwc2(struct lightrec_state *state, union code op,
			  const struct lightrec_mem_map_ops *ops,
			  void *host, u32 addr)
{
	u32 data = ops->lw(state, op.opcode, host, addr);

	lightrec_mtc2(state, op.i.rt, data);
}

static void lightrec_invalidate_map(struct lightrec_state *state,
		const struct lightrec_mem_map *map, u32 addr, u32 len)
{
	if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
		memset(lut_address(state, lut_offset(addr)), 0,
		       ((len + 3) / 4) * lut_elm_size(state));
	}
}

static enum psx_map
lightrec_get_map_idx(struct lightrec_state *state, u32 kaddr)
{
	const struct lightrec_mem_map *map;
	unsigned int i;

	for (i = 0; i < state->nb_maps; i++) {
		map = &state->maps[i];

		if (kaddr >= map->pc && kaddr < map->pc + map->length)
			return (enum psx_map) i;
	}

	return PSX_MAP_UNKNOWN;
}

const struct lightrec_mem_map *
lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
{
	const struct lightrec_mem_map *map;
	enum psx_map idx;
	u32 addr;

	idx = lightrec_get_map_idx(state, kaddr);
	if (idx == PSX_MAP_UNKNOWN)
		return NULL;

	map = &state->maps[idx];
	addr = kaddr - map->pc;

	while (map->mirror_of)
		map = map->mirror_of;

	if (host)
		*host = map->address + addr;

	return map;
}

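/* lightrec_rw() is the slow path for all memory accesses. On top of
 * performing the access itself, it tags the opcode with the I/O mode
 * that was detected (direct memory vs. hardware I/O); once an untagged
 * opcode gets tagged, the block is flagged for recompilation, so that
 * the next compilation pass can inline a specialized fast path instead
 * of calling back into C. */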
u32 lightrec_rw(struct lightrec_state *state, union code op, u32 base,
		u32 data, u32 *flags, struct block *block, u16 offset)
{
	const struct lightrec_mem_map *map;
	const struct lightrec_mem_map_ops *ops;
	u32 opcode = op.opcode;
	bool was_tagged = true;
	u16 old_flags;
	u32 addr;
	void *host;

	addr = kunseg(base + (s16) op.i.imm);

	map = lightrec_get_map(state, &host, addr);
	if (!map) {
		__segfault_cb(state, addr, block);
		return 0;
	}

	if (flags)
		was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(*flags);

	if (likely(!map->ops)) {
		if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags)) {
			/* Force parallel port accesses to be tagged as HW
			 * accesses, because the direct-I/O emitters cannot
			 * differentiate them. */
			if (unlikely(map == &state->maps[PSX_MAP_PARALLEL_PORT]))
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
			/* If the base register is not $zero yet holds 0x0, be
			 * extra suspicious: some games (e.g. Sled Storm)
			 * actually trigger segmentation faults through
			 * uninitialized pointers, which are only later
			 * initialized to point to hardware registers. */
			else if (op.i.rs && base == 0x0)
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
			else
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
		}

		ops = &lightrec_default_ops;
	} else if (flags &&
		   LIGHTREC_FLAGS_GET_IO_MODE(*flags) == LIGHTREC_IO_DIRECT_HW) {
		ops = &lightrec_default_ops;
	} else {
		if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
			*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);

		ops = map->ops;
	}

	if (!was_tagged) {
		old_flags = block_set_flags(block, BLOCK_SHOULD_RECOMPILE);

		if (!(old_flags & BLOCK_SHOULD_RECOMPILE)) {
			pr_debug("Opcode of block at "PC_FMT" has been tagged"
				 " - flag for recompilation\n", block->pc);

			lut_write(state, lut_offset(block->pc), NULL);
		}
	}

	switch (op.i.op) {
	case OP_SB:
		ops->sb(state, opcode, host, addr, (u8) data);
		return 0;
	case OP_SH:
		ops->sh(state, opcode, host, addr, (u16) data);
		return 0;
	case OP_SWL:
		lightrec_swl(state, ops, opcode, host, addr, data);
		return 0;
	case OP_SWR:
		lightrec_swr(state, ops, opcode, host, addr, data);
		return 0;
	case OP_SW:
		ops->sw(state, opcode, host, addr, data);
		return 0;
	case OP_SWC2:
		lightrec_swc2(state, op, ops, host, addr);
		return 0;
	case OP_LB:
		return (s32) (s8) ops->lb(state, opcode, host, addr);
	case OP_LBU:
		return ops->lb(state, opcode, host, addr);
	case OP_LH:
		return (s32) (s16) ops->lh(state, opcode, host, addr);
	case OP_LHU:
		return ops->lh(state, opcode, host, addr);
	case OP_LWC2:
		lightrec_lwc2(state, op, ops, host, addr);
		return 0;
	case OP_LWL:
		return lightrec_lwl(state, ops, opcode, host, addr, data);
	case OP_LWR:
		return lightrec_lwr(state, ops, opcode, host, addr, data);
	case OP_META_LWU:
		return ops->lwu(state, opcode, host, addr);
	case OP_META_SWU:
		ops->swu(state, opcode, host, addr, data);
		return 0;
	case OP_LW:
	default:
		return ops->lw(state, opcode, host, addr);
	}
}

static void lightrec_rw_helper(struct lightrec_state *state,
			       union code op, u32 *flags,
			       struct block *block, u16 offset)
{
	u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
			      state->regs.gpr[op.i.rt], flags, block, offset);

	switch (op.i.op) {
	case OP_LB:
	case OP_LBU:
	case OP_LH:
	case OP_LHU:
	case OP_LWL:
	case OP_LWR:
	case OP_LW:
	case OP_META_LWU:
		if (OPT_HANDLE_LOAD_DELAYS && unlikely(!state->in_delay_slot_n)) {
			state->temp_reg = ret;
			state->in_delay_slot_n = 0xff;
		} else if (op.i.rt) {
			state->regs.gpr[op.i.rt] = ret;
		}
		fallthrough;
	default:
		break;
	}
}

static void lightrec_rw_cb(struct lightrec_state *state, u32 arg)
{
	lightrec_rw_helper(state, (union code) arg, NULL, NULL, 0);
}

static void lightrec_rw_generic_cb(struct lightrec_state *state, u32 arg)
{
	struct block *block;
	struct opcode *op;
	u16 offset = (u16)arg;

	block = lightrec_find_block_from_lut(state->block_cache,
					     arg >> 16, state->curr_pc);
	if (unlikely(!block)) {
		pr_err("rw_generic: No block found in LUT for "PC_FMT" offset 0x%"PRIx16"\n",
		       state->curr_pc, offset);
		lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
		return;
	}

	op = &block->opcode_list[offset];
	lightrec_rw_helper(state, op->c, &op->flags, block, offset);
}

static u32 clamp_s32(s32 val, s32 min, s32 max)
{
	return val < min ? min : val > max ? max : val;
}

static u16 load_u16(u32 *ptr)
{
	return ((struct u16x2 *) ptr)->l;
}

static void store_u16(u32 *ptr, u16 value)
{
	((struct u16x2 *) ptr)->l = value;
}

static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
{
	s16 gteir1, gteir2, gteir3;

	switch (reg) {
	case 1:
	case 3:
	case 5:
	case 8:
	case 9:
	case 10:
	case 11:
		return (s32)(s16) load_u16(&state->regs.cp2d[reg]);
	case 7:
	case 16:
	case 17:
	case 18:
	case 19:
		return load_u16(&state->regs.cp2d[reg]);
	case 28:
	case 29:
		gteir1 = (s16) load_u16(&state->regs.cp2d[9]);
		gteir2 = (s16) load_u16(&state->regs.cp2d[10]);
		gteir3 = (s16) load_u16(&state->regs.cp2d[11]);

		return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
		       clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
		       clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
	case 15:
		reg = 14;
		fallthrough;
	default:
		return state->regs.cp2d[reg];
	}
}
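
/* Note on registers 28/29 (ORGB) above: each of IR1-IR3 is scaled down
 * by 128 (>> 7), clamped to the 5-bit range 0..0x1f, and the three
 * fields are packed as 5:5:5. For example, IR1 = 0x0f80 gives
 * (0x0f80 >> 7) = 0x1f, i.e. a saturated low field. */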

u32 lightrec_mfc(struct lightrec_state *state, union code op)
{
	u32 val;

	if (op.i.op == OP_CP0)
		return state->regs.cp0[op.r.rd];

	if (op.i.op == OP_SWC2) {
		val = lightrec_mfc2(state, op.i.rt);
	} else if (op.r.rs == OP_CP2_BASIC_MFC2) {
		val = lightrec_mfc2(state, op.r.rd);
	} else {
		val = state->regs.cp2c[op.r.rd];

		switch (op.r.rd) {
		case 4:
		case 12:
		case 20:
		case 26:
		case 27:
		case 29:
		case 30:
			val = (u32)(s16)val;
			fallthrough;
		default:
			break;
		}
	}

	if (state->ops.cop2_notify)
		(*state->ops.cop2_notify)(state, op.opcode, val);

	return val;
}

static void lightrec_mfc_cb(struct lightrec_state *state, union code op)
{
	u32 rt = lightrec_mfc(state, op);

	if (op.i.op == OP_SWC2)
		state->temp_reg = rt;
	else if (op.r.rt)
		state->regs.gpr[op.r.rt] = rt;
}

static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
{
	u32 status, oldstatus, cause;

	switch (reg) {
	case 1:
	case 4:
	case 8:
	case 14:
	case 15:
		/* Those registers are read-only */
		return;
	default:
		break;
	}

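	/* Status bit 16 (IsC, "isolate cache"): while it is set, loads and
	 * stores are redirected to the cache, so main RAM is effectively
	 * unreachable. Track the 1->0 and 0->1 transitions to re-enable or
	 * disable RAM accordingly. */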
	if (reg == 12) {
		status = state->regs.cp0[12];
		oldstatus = status;

		if (status & ~data & BIT(16)) {
			state->ops.enable_ram(state, true);
			lightrec_invalidate_all(state);
		} else if (~status & data & BIT(16)) {
			state->ops.enable_ram(state, false);
		}
	}

	if (reg == 13) {
		state->regs.cp0[13] &= ~0x300;
		state->regs.cp0[13] |= data & 0x300;
	} else {
		state->regs.cp0[reg] = data;
	}

	if (reg == 12 || reg == 13) {
		cause = state->regs.cp0[13];
		status = state->regs.cp0[12];

		/* Handle software interrupts: an exception is only raised if
		 * one of the IP bits (8-9) is set in both Cause and Status,
		 * and the current interrupt enable bit (IEc) is set */
		if ((status & cause & 0x300) && (status & BIT(0)))
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);

		/* Handle hardware interrupts */
		if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
	}
}

static u32 count_leading_bits(s32 data)
{
	u32 cnt = 33;

#ifdef __has_builtin
#if __has_builtin(__builtin_clrsb)
	return 1 + __builtin_clrsb(data);
#endif
#endif

	data = (data ^ (data >> 31)) << 1;

	do {
		cnt -= 1;
		data >>= 1;
	} while (data);

	return cnt;
}
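
/* The helper above returns the number of leading bits equal to the sign
 * bit (GTE LZCR semantics): e.g. 31 for data = 1 (31 leading zeroes),
 * and 32 for data = 0 or data = -1. The fallback loop folds negative
 * inputs (XOR with the replicated sign bit) so that it only ever has to
 * count leading zeroes. */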

static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
{
	switch (reg) {
	case 15:
		state->regs.cp2d[12] = state->regs.cp2d[13];
		state->regs.cp2d[13] = state->regs.cp2d[14];
		state->regs.cp2d[14] = data;
		break;
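	/* IRGB write: expand each 5-bit colour field back into the
	 * corresponding IR register, scaled up by 128 (the inverse of the
	 * ORGB read-out): e.g. a low field of 0x1f becomes
	 * IR1 = 0x1f << 7 = 0x0f80. */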
	case 28:
		state->regs.cp2d[9] = (data << 7) & 0xf80;
		state->regs.cp2d[10] = (data << 2) & 0xf80;
		state->regs.cp2d[11] = (data >> 3) & 0xf80;
		break;
	case 31:
		return;
	case 30:
		state->regs.cp2d[31] = count_leading_bits((s32) data);
		fallthrough;
	default:
		state->regs.cp2d[reg] = data;
		break;
	}
}

static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
{
	switch (reg) {
	case 4:
	case 12:
	case 20:
	case 26:
	case 27:
	case 29:
	case 30:
		store_u16(&state->regs.cp2c[reg], data);
		break;
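	/* FLAG register: bits 11:0 always read as zero, and bit 31 is the
	 * logical OR of the error bits 30-23 and 18-13 (mask 0x7f87e000). */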
	case 31:
		data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
		fallthrough;
	default:
		state->regs.cp2c[reg] = data;
		break;
	}
}

void lightrec_mtc(struct lightrec_state *state, union code op, u8 reg, u32 data)
{
	if (op.i.op == OP_CP0) {
		lightrec_mtc0(state, reg, data);
	} else {
		if (op.i.op == OP_LWC2 || op.r.rs != OP_CP2_BASIC_CTC2)
			lightrec_mtc2(state, reg, data);
		else
			lightrec_ctc2(state, reg, data);

		if (state->ops.cop2_notify)
			(*state->ops.cop2_notify)(state, op.opcode, data);
	}
}

static void lightrec_mtc_cb(struct lightrec_state *state, u32 arg)
{
	union code op = (union code) arg;
	u32 data;
	u8 reg;

	if (op.i.op == OP_LWC2) {
		data = state->temp_reg;
		reg = op.i.rt;
	} else {
		data = state->regs.gpr[op.r.rt];
		reg = op.r.rd;
	}

	lightrec_mtc(state, op, reg, data);
}

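/* The six low bits of the CP0 Status register hold a three-level stack
 * of (KU, IE) mode/interrupt-enable pairs: current in bits 1:0,
 * previous in bits 3:2, old in bits 5:4. RFE pops that stack:
 * previous -> current and old -> previous, with the old pair left
 * unchanged. */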
void lightrec_rfe(struct lightrec_state *state)
{
	u32 status;

	/* Read CP0 Status register (r12) */
	status = state->regs.cp0[12];

	/* Switch the bits */
	status = ((status & 0x3c) >> 2) | (status & ~0xf);

	/* Write it back */
	lightrec_mtc0(state, 12, status);
}

void lightrec_cp(struct lightrec_state *state, union code op)
{
	if (op.i.op == OP_CP0) {
		pr_err("Invalid CP opcode to coprocessor #0\n");
		return;
	}

	(*state->ops.cop2_op)(state, op.opcode);
}

static void lightrec_cp_cb(struct lightrec_state *state, u32 arg)
{
	lightrec_cp(state, (union code) arg);
}

static struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
{
	struct block *block = lightrec_find_block(state->block_cache, pc);
	u8 old_flags;

	if (block && lightrec_block_is_outdated(state, block)) {
		pr_debug("Block at "PC_FMT" is outdated!\n", block->pc);

		old_flags = block_set_flags(block, BLOCK_IS_DEAD);
		if (!(old_flags & BLOCK_IS_DEAD)) {
			/* Make sure the recompiler isn't processing the block
			 * we'll destroy */
			if (ENABLE_THREADED_COMPILER)
				lightrec_recompiler_remove(state->rec, block);

			lightrec_unregister_block(state->block_cache, block);
			remove_from_code_lut(state->block_cache, block);
			lightrec_free_block(state, block);
		}

		block = NULL;
	}

	if (!block) {
		block = lightrec_precompile_block(state, pc);
		if (!block) {
			pr_err("Unable to recompile block at "PC_FMT"\n", pc);
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
			return NULL;
		}

		lightrec_register_block(state->block_cache, block);
	}

	return block;
}

static void * get_next_block_func(struct lightrec_state *state, u32 pc)
{
	struct block *block;
	bool should_recompile;
	void *func;
	int err;

	do {
		func = lut_read(state, lut_offset(pc));
		if (func && func != state->get_next_block)
			break;

		block = lightrec_get_block(state, pc);

		if (unlikely(!block))
			break;

		if (OPT_REPLACE_MEMSET &&
		    block_has_flag(block, BLOCK_IS_MEMSET)) {
			func = state->memset_func;
			break;
		}

		should_recompile = block_has_flag(block, BLOCK_SHOULD_RECOMPILE) &&
				   !block_has_flag(block, BLOCK_NEVER_COMPILE) &&
				   !block_has_flag(block, BLOCK_IS_DEAD);

		if (unlikely(should_recompile)) {
			pr_debug("Block at "PC_FMT" should recompile\n", pc);

			if (ENABLE_THREADED_COMPILER) {
				lightrec_recompiler_add(state->rec, block);
			} else {
				err = lightrec_compile_block(state->cstate, block);
				if (err) {
					state->exit_flags = LIGHTREC_EXIT_NOMEM;
					return NULL;
				}
			}
		}

		if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
			func = lightrec_recompiler_run_first_pass(state, block, &pc);
		else
			func = block->function;

		if (likely(func))
			break;

		if (unlikely(block_has_flag(block, BLOCK_NEVER_COMPILE))) {
			pc = lightrec_emulate_block(state, block, pc);

		} else if (!ENABLE_THREADED_COMPILER) {
			/* Block wasn't compiled yet - run the interpreter */
			if (block_has_flag(block, BLOCK_FULLY_TAGGED))
				pr_debug("Block fully tagged, skipping first pass\n");
			else if (ENABLE_FIRST_PASS && likely(!should_recompile))
				pc = lightrec_emulate_block(state, block, pc);

			/* Then compile it using the profiled data */
			err = lightrec_compile_block(state->cstate, block);
			if (err) {
				state->exit_flags = LIGHTREC_EXIT_NOMEM;
				return NULL;
			}
		} else if (unlikely(block_has_flag(block, BLOCK_IS_DEAD))) {
			/*
			 * If the block is dead but has never been compiled,
			 * then its function pointer is NULL and we cannot
			 * execute the block. In that case, reap all the dead
			 * blocks now, and in the next loop we will create a
			 * new block.
			 */
			lightrec_reaper_reap(state->reaper);
		} else {
			lightrec_recompiler_add(state->rec, block);
		}
	} while (state->exit_flags == LIGHTREC_EXIT_NORMAL
		 && state->current_cycle < state->target_cycle);

	state->curr_pc = pc;
	return func;
}

static void * lightrec_alloc_code(struct lightrec_state *state, size_t size)
{
	void *code;

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	code = tlsf_malloc(state->tlsf, size);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);

	return code;
}

static void lightrec_realloc_code(struct lightrec_state *state,
				  void *ptr, size_t size)
{
	/* NOTE: 'size' MUST be smaller than the size specified during
	 * the allocation. */

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	tlsf_realloc(state->tlsf, ptr, size);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);
}

static void lightrec_free_code(struct lightrec_state *state, void *ptr)
{
	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	tlsf_free(state->tlsf, ptr);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);
}

static char lightning_code_data[0x80000];

static void * lightrec_emit_code(struct lightrec_state *state,
				 const struct block *block,
				 jit_state_t *_jit, unsigned int *size)
{
	bool has_code_buffer = ENABLE_CODE_BUFFER && state->tlsf;
	jit_word_t code_size, new_code_size;
	void *code;

	jit_realize();

	if (ENABLE_DISASSEMBLER)
		jit_set_data(lightning_code_data, sizeof(lightning_code_data), 0);
	else
		jit_set_data(NULL, 0, JIT_DISABLE_DATA | JIT_DISABLE_NOTE);

	if (has_code_buffer) {
		jit_get_code(&code_size);
		code = lightrec_alloc_code(state, (size_t) code_size);

		if (!code) {
			if (ENABLE_THREADED_COMPILER) {
				/* If we're using the threaded compiler, return
				 * an allocation error here. The threaded
				 * compiler will then empty its job queue and
				 * request a code flush using the reaper. */
				return NULL;
			}

			/* Remove outdated blocks, and try again */
			lightrec_remove_outdated_blocks(state->block_cache, block);

			pr_debug("Retrying to allocate %zu bytes...\n",
				 (size_t) code_size);

			code = lightrec_alloc_code(state, code_size);
			if (!code) {
				pr_err("Could not alloc even after removing old blocks!\n");
				return NULL;
			}
		}

		jit_set_code(code, code_size);
	}

	code = jit_emit();

	jit_get_code(&new_code_size);
	lightrec_register(MEM_FOR_CODE, new_code_size);

	if (has_code_buffer) {
		lightrec_realloc_code(state, code, (size_t) new_code_size);

		pr_debug("Creating code block at address 0x%" PRIxPTR ", "
			 "code size: %" PRIuPTR " new: %" PRIuPTR "\n",
			 (uintptr_t) code, (uintptr_t) code_size,
			 (uintptr_t) new_code_size);
	}

	*size = (unsigned int) new_code_size;

	if (state->ops.code_inv)
		state->ops.code_inv(code, new_code_size);

	return code;
}

static struct block * generate_wrapper(struct lightrec_state *state)
{
	struct block *block;
	jit_state_t *_jit;
	unsigned int i;
	jit_node_t *addr[C_WRAPPERS_COUNT - 1];
	jit_node_t *to_end[C_WRAPPERS_COUNT - 1];
	u8 tmp = JIT_R1;

#ifdef __sh__
	/* On SH, GBR-relative loads target the r0 register.
	 * Use it as the temporary register, so that the move to JIT_R1
	 * can be shared by all entry points. */
	if (LIGHTREC_REG_STATE == _GBR)
		tmp = _R0;
#endif

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block)
		goto err_no_mem;

	_jit = jit_new_state();
	if (!_jit)
		goto err_free_block;

	jit_name("RW wrapper");
	jit_note(__FILE__, __LINE__);

	/* Wrapper entry point */
	jit_prolog();
	jit_tramp(256);

	/* Add entry points: each one loads the C function pointer from
	 * state->c_wrappers[i] into the temporary register, then branches
	 * to the common code below */
	for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
		jit_ldxi(tmp, LIGHTREC_REG_STATE,
			 offsetof(struct lightrec_state, c_wrappers[i]));
		to_end[i - 1] = jit_b();
		addr[i - 1] = jit_indirect();
	}

	jit_ldxi(tmp, LIGHTREC_REG_STATE,
		 offsetof(struct lightrec_state, c_wrappers[0]));

	for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
		jit_patch(to_end[i]);
	jit_movr(JIT_R1, tmp);

	jit_epilog();
	jit_prolog();

	/* Save all temporaries on stack */
	for (i = 0; i < NUM_TEMPS; i++) {
		if (i + FIRST_TEMP != 1) {
			jit_stxi(offsetof(struct lightrec_state, wrapper_regs[i]),
				 LIGHTREC_REG_STATE, JIT_R(i + FIRST_TEMP));
		}
	}

	jit_getarg(JIT_R2, jit_arg());

	jit_prepare();
	jit_pushargr(LIGHTREC_REG_STATE);
	jit_pushargr(JIT_R2);

	jit_ldxi_ui(JIT_R2, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, target_cycle));

	/* state->current_cycle = state->target_cycle - delta; */
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, LIGHTREC_REG_CYCLE);
	jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
		   LIGHTREC_REG_STATE, LIGHTREC_REG_CYCLE);

	/* Call the wrapper function */
	jit_finishr(JIT_R1);

	/* delta = state->target_cycle - state->current_cycle */
	jit_ldxi_ui(LIGHTREC_REG_CYCLE, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, current_cycle));
	jit_ldxi_ui(JIT_R1, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, target_cycle));
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R1, LIGHTREC_REG_CYCLE);

	/* Restore temporaries from stack */
	for (i = 0; i < NUM_TEMPS; i++) {
		if (i + FIRST_TEMP != 1) {
			jit_ldxi(JIT_R(i + FIRST_TEMP), LIGHTREC_REG_STATE,
				 offsetof(struct lightrec_state, wrapper_regs[i]));
		}
	}

	jit_ret();
	jit_epilog();

	block->_jit = _jit;
	block->opcode_list = NULL;
	block->flags = BLOCK_NO_OPCODE_LIST;
	block->nb_ops = 0;

	block->function = lightrec_emit_code(state, block, _jit,
					     &block->code_size);
	if (!block->function)
		goto err_free_block;

	state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;

	for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
		state->wrappers_eps[i] = jit_address(addr[i]);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Wrapper block:\n");
		jit_disassemble();
	}

	jit_clear_state();
	return block;

err_free_block:
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
	pr_err("Unable to compile wrapper: Out of memory\n");
	return NULL;
}

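/* Host-side replacement for the PSX kernel memset routine. Following
 * the MIPS calling convention, $a0 (gpr[4]) holds the target address
 * and $a1 (gpr[5]) presumably holds the number of 32-bit words to
 * clear, hence the multiplication by 4 below. */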
static u32 lightrec_memset(struct lightrec_state *state)
{
	u32 kunseg_pc = kunseg(state->regs.gpr[4]);
	void *host;
	const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
	u32 length = state->regs.gpr[5] * 4;

	if (!map) {
		pr_err("Unable to find memory map for memset target address "PC_FMT"\n",
		       kunseg_pc);
		return 0;
	}

	pr_debug("Calling host memset, "PC_FMT" (host address 0x%"PRIxPTR") for %u bytes\n",
		 kunseg_pc, (uintptr_t)host, length);
	memset(host, 0, length);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate_map(state, map, kunseg_pc, length);

	/* Rough estimation of the number of cycles consumed */
	return 8 + 5 * ((length + 3) / 4);
}

static u32 lightrec_check_load_delay(struct lightrec_state *state, u32 pc, u8 reg)
{
	struct block *block;
	union code first_op;

	first_op = lightrec_read_opcode(state, pc);

	if (likely(!opcode_reads_register(first_op, reg))) {
		state->regs.gpr[reg] = state->temp_reg;
	} else {
		block = lightrec_get_block(state, pc);
		if (unlikely(!block)) {
			pr_err("Unable to get block at "PC_FMT"\n", pc);
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
			pc = 0;
		} else {
			pc = lightrec_handle_load_delay(state, block, pc, reg);
		}
	}

	return pc;
}

static void update_cycle_counter_before_c(jit_state_t *_jit)
{
	/* update state->current_cycle */
	jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, target_cycle));
	jit_subr(JIT_R1, JIT_R2, LIGHTREC_REG_CYCLE);
	jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
		   LIGHTREC_REG_STATE, JIT_R1);
}

static void update_cycle_counter_after_c(jit_state_t *_jit)
{
	/* Recalc the delta */
	jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, current_cycle));
	jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, target_cycle));
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
}

static void sync_next_pc(jit_state_t *_jit)
{
	if (lightrec_store_next_pc()) {
		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
			    offsetof(struct lightrec_state, next_pc));
	}
}

static struct block * generate_dispatcher(struct lightrec_state *state)
{
	struct block *block;
	jit_state_t *_jit;
	jit_node_t *to_end, *loop, *addr, *addr2, *addr3, *addr4, *addr5, *jmp, *jmp2;
	unsigned int i;
	u32 offset;

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block)
		goto err_no_mem;

	_jit = jit_new_state();
	if (!_jit)
		goto err_free_block;

	jit_name("dispatcher");
	jit_note(__FILE__, __LINE__);

	jit_prolog();
	jit_frame(256);

	jit_getarg(LIGHTREC_REG_STATE, jit_arg());
	jit_getarg(JIT_V0, jit_arg());
	jit_getarg(JIT_V1, jit_arg());
	jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());

	/* Force all callee-saved registers to be pushed on the stack */
	for (i = 0; i < NUM_REGS; i++)
		jit_movr(JIT_V(i + FIRST_REG), JIT_V(i + FIRST_REG));

	loop = jit_label();

	/* Call the block's code */
	jit_jmpr(JIT_V1);

	if (OPT_REPLACE_MEMSET) {
		/* Blocks will jump here when they need to call
		 * lightrec_memset() */
		addr3 = jit_indirect();

		jit_movr(JIT_V1, LIGHTREC_REG_CYCLE);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);

		jit_finishi(lightrec_memset);
		jit_retval(LIGHTREC_REG_CYCLE);

		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
			    offsetof(struct lightrec_state, regs.gpr[31]));
		jit_subr(LIGHTREC_REG_CYCLE, JIT_V1, LIGHTREC_REG_CYCLE);

		if (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)
			jmp = jit_b();
	}

	if (OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* Blocks will jump here when they reach a branch that should
		 * be executed with the interpreter, passing the branch's PC
		 * in JIT_V0 and the address of the block in JIT_V1. */
		addr4 = jit_indirect();

		sync_next_pc(_jit);
		update_cycle_counter_before_c(_jit);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);
		jit_pushargr(JIT_V1);
		jit_pushargr(JIT_V0);
		jit_finishi(lightrec_emulate_block);

		jit_retval(JIT_V0);

		update_cycle_counter_after_c(_jit);

		if (OPT_HANDLE_LOAD_DELAYS)
			jmp2 = jit_b();
	}

	if (OPT_HANDLE_LOAD_DELAYS) {
		/* Blocks will jump here when they reach a branch with a load
		 * opcode in its delay slot. The delay slot has already been
		 * executed; the load value is in (state->temp_reg), and the
		 * register number is in JIT_V1.
		 * Jump to a C function which will evaluate the branch target's
		 * first opcode, to make sure that it does not read the register
		 * in question; and if it does, handle it accordingly. */
		addr5 = jit_indirect();

		sync_next_pc(_jit);
		update_cycle_counter_before_c(_jit);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);
		jit_pushargr(JIT_V0);
		jit_pushargr(JIT_V1);
		jit_finishi(lightrec_check_load_delay);

		jit_retval(JIT_V0);

		update_cycle_counter_after_c(_jit);
	}

	/* The block will jump here, with the number of cycles remaining in
	 * LIGHTREC_REG_CYCLE */
	addr2 = jit_indirect();

	sync_next_pc(_jit);

	if (OPT_HANDLE_LOAD_DELAYS && OPT_DETECT_IMPOSSIBLE_BRANCHES)
		jit_patch(jmp2);

	if (OPT_REPLACE_MEMSET
	    && (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)) {
		jit_patch(jmp);
	}

	/* Store back the next PC to the lightrec_state structure */
	offset = offsetof(struct lightrec_state, curr_pc);
	jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);

	/* Jump to end if state->target_cycle < state->current_cycle */
	to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);

	/* Convert next PC to KUNSEG and avoid mirrors */
	jit_andi(JIT_V1, JIT_V0, 0x10000000 | (RAM_SIZE - 1));
	jit_rshi_u(JIT_R1, JIT_V1, 28);
	jit_andi(JIT_R2, JIT_V0, BIOS_SIZE - 1);
	jit_addi(JIT_R2, JIT_R2, RAM_SIZE);
	jit_movnr(JIT_V1, JIT_R2, JIT_R1);
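	/* After this sequence, JIT_V1 holds the PC reduced to a LUT index:
	 * RAM addresses (and their mirrors) are reduced modulo RAM_SIZE,
	 * while BIOS addresses (detected via bit 28 of the masked value,
	 * which is set for 0x1fc00000 and its KSEG aliases) are mapped to
	 * their BIOS offset plus RAM_SIZE, i.e. the LUT entries for the
	 * BIOS follow the entries for RAM. */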

	/* If possible, use the code LUT */
	if (!lut_is_32bit(state))
		jit_lshi(JIT_V1, JIT_V1, 1);
	jit_add_state(JIT_V1, JIT_V1);

	offset = offsetof(struct lightrec_state, code_lut);
	if (lut_is_32bit(state))
		jit_ldxi_ui(JIT_V1, JIT_V1, offset);
	else
		jit_ldxi(JIT_V1, JIT_V1, offset);

	/* If we get non-NULL, loop */
	jit_patch_at(jit_bnei(JIT_V1, 0), loop);

	/* The code LUT will be set to this address when the block at the target
	 * PC has been preprocessed but not yet compiled by the threaded
	 * recompiler */
	addr = jit_indirect();

	/* Slow path: call C function get_next_block_func() */

	if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* We may call the interpreter - update state->current_cycle */
		update_cycle_counter_before_c(_jit);
	}

	jit_prepare();
	jit_pushargr(LIGHTREC_REG_STATE);
	jit_pushargr(JIT_V0);

	/* Save the cycles register if needed */
	if (!(ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES))
		jit_movr(JIT_V0, LIGHTREC_REG_CYCLE);

	/* Get the next block */
	jit_finishi(&get_next_block_func);
	jit_retval(JIT_V1);

	if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* The interpreter may have updated state->current_cycle and
		 * state->target_cycle - recalc the delta */
		update_cycle_counter_after_c(_jit);
	} else {
		jit_movr(LIGHTREC_REG_CYCLE, JIT_V0);
	}

	/* Reset JIT_V0 to the next PC */
	jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, curr_pc));

	/* If we get non-NULL, loop */
	jit_patch_at(jit_bnei(JIT_V1, 0), loop);

	/* When exiting, the recompiled code will jump to that address */
	jit_note(__FILE__, __LINE__);
	jit_patch(to_end);

	jit_retr(LIGHTREC_REG_CYCLE);
	jit_epilog();

	block->_jit = _jit;
	block->opcode_list = NULL;
	block->flags = BLOCK_NO_OPCODE_LIST;
	block->nb_ops = 0;

	block->function = lightrec_emit_code(state, block, _jit,
					     &block->code_size);
	if (!block->function)
		goto err_free_block;

	state->eob_wrapper_func = jit_address(addr2);
	if (OPT_DETECT_IMPOSSIBLE_BRANCHES)
		state->interpreter_func = jit_address(addr4);
	if (OPT_HANDLE_LOAD_DELAYS)
		state->ds_check_func = jit_address(addr5);
	if (OPT_REPLACE_MEMSET)
		state->memset_func = jit_address(addr3);
	state->get_next_block = jit_address(addr);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Dispatcher block:\n");
		jit_disassemble();
	}

	/* We're done! */
	jit_clear_state();
	return block;

err_free_block:
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
	pr_err("Unable to compile dispatcher: Out of memory\n");
	return NULL;
}

union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
{
	void *host = NULL;

	lightrec_get_map(state, &host, kunseg(pc));

	const u32 *code = (u32 *)host;
	return (union code) LE32TOH(*code);
}

unsigned int lightrec_cycles_of_opcode(const struct lightrec_state *state,
				       union code code)
{
	return state->cycles_per_op;
}

void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
{
	struct opcode_list *list = container_of(ops, struct opcode_list, ops);

	lightrec_free(state, MEM_FOR_IR,
		      sizeof(*list) + list->nb_ops * sizeof(struct opcode),
		      list);
}

static unsigned int lightrec_get_mips_block_len(const u32 *src)
{
	unsigned int i;
	union code c;

	for (i = 1; ; i++) {
		c.opcode = LE32TOH(*src++);

		if (is_syscall(c))
			return i;

		/* Include the unconditional jump's delay slot in the block */
		if (is_unconditional_jump(c))
			return i + 1;
	}
}

static struct opcode * lightrec_disassemble(struct lightrec_state *state,
					    const u32 *src, unsigned int *len)
{
	struct opcode_list *list;
	unsigned int i, length;

	length = lightrec_get_mips_block_len(src);

	list = lightrec_malloc(state, MEM_FOR_IR,
			       sizeof(*list) + sizeof(struct opcode) * length);
	if (!list) {
		pr_err("Unable to allocate memory\n");
		return NULL;
	}

	list->nb_ops = (u16) length;

	for (i = 0; i < length; i++) {
		list->ops[i].opcode = LE32TOH(src[i]);
		list->ops[i].flags = 0;
	}

	*len = length * sizeof(u32);

	return list->ops;
}

static struct block * lightrec_precompile_block(struct lightrec_state *state,
						u32 pc)
{
	struct opcode *list;
	struct block *block;
	void *host, *addr;
	const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
	const u32 *code = (u32 *) host;
	unsigned int length;
	bool fully_tagged;
	u8 block_flags = 0;

	if (!map)
		return NULL;

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block) {
		pr_err("Unable to recompile block: Out of memory\n");
		return NULL;
	}

	list = lightrec_disassemble(state, code, &length);
	if (!list) {
		lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
		return NULL;
	}

	block->pc = pc;
	block->_jit = NULL;
	block->function = NULL;
	block->opcode_list = list;
	block->code = code;
	block->next = NULL;
	block->flags = 0;
	block->code_size = 0;
	block->precompile_date = state->current_cycle;
	block->nb_ops = length / sizeof(u32);

	lightrec_optimize(state, block);

	length = block->nb_ops * sizeof(u32);

	lightrec_register(MEM_FOR_MIPS_CODE, length);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
		lightrec_print_disassembly(block, code);
	}

	pr_debug("Block size: %hu opcodes\n", block->nb_ops);

	fully_tagged = lightrec_block_is_fully_tagged(block);
	if (fully_tagged)
		block_flags |= BLOCK_FULLY_TAGGED;

	if (block_flags)
		block_set_flags(block, block_flags);

	block->hash = lightrec_calculate_block_hash(block);

	if (OPT_REPLACE_MEMSET && block_has_flag(block, BLOCK_IS_MEMSET))
		addr = state->memset_func;
	else
		addr = state->get_next_block;
	lut_write(state, lut_offset(pc), addr);

	pr_debug("Blocks created: %u\n", ++state->nb_precompile);

	return block;
}

static bool lightrec_block_is_fully_tagged(const struct block *block)
{
	const struct opcode *op;
	unsigned int i;

	for (i = 0; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		/* If we have one branch that must be emulated, we cannot trash
		 * the opcode list. */
		if (should_emulate(op))
			return false;

		/* Check all loads/stores of the opcode list and mark the
		 * block as fully compiled if they all have been tagged. */
		switch (op->c.i.op) {
		case OP_LB:
		case OP_LH:
		case OP_LWL:
		case OP_LW:
		case OP_LBU:
		case OP_LHU:
		case OP_LWR:
		case OP_SB:
		case OP_SH:
		case OP_SWL:
		case OP_SW:
		case OP_SWR:
		case OP_LWC2:
		case OP_SWC2:
		case OP_META_LWU:
		case OP_META_SWU:
			if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
				return false;
			fallthrough;
		default:
			continue;
		}
	}

	return true;
}

static void lightrec_reap_block(struct lightrec_state *state, void *data)
{
	struct block *block = data;

	pr_debug("Reap dead block at "PC_FMT"\n", block->pc);
	lightrec_unregister_block(state->block_cache, block);
	lightrec_free_block(state, block);
}

static void lightrec_reap_jit(struct lightrec_state *state, void *data)
{
	_jit_destroy_state(data);
}

static void lightrec_free_function(struct lightrec_state *state, void *fn)
{
	if (ENABLE_CODE_BUFFER && state->tlsf) {
		pr_debug("Freeing code block at 0x%" PRIxPTR "\n", (uintptr_t) fn);
		lightrec_free_code(state, fn);
	}
}

static void lightrec_reap_function(struct lightrec_state *state, void *data)
{
	lightrec_free_function(state, data);
}

static void lightrec_reap_opcode_list(struct lightrec_state *state, void *data)
{
	lightrec_free_opcode_list(state, data);
}

int lightrec_compile_block(struct lightrec_cstate *cstate,
			   struct block *block)
{
	struct lightrec_state *state = cstate->state;
	struct lightrec_branch_target *target;
	bool fully_tagged = false;
	struct block *block2;
	struct opcode *elm;
	jit_state_t *_jit, *oldjit;
	jit_node_t *start_of_block;
	bool skip_next = false;
	void *old_fn, *new_fn;
	size_t old_code_size;
	unsigned int i, j;
	u8 old_flags;
	u32 offset;

	fully_tagged = lightrec_block_is_fully_tagged(block);
	if (fully_tagged)
		block_set_flags(block, BLOCK_FULLY_TAGGED);

	_jit = jit_new_state();
	if (!_jit)
		return -ENOMEM;

	oldjit = block->_jit;
	old_fn = block->function;
	old_code_size = block->code_size;
	block->_jit = _jit;

	lightrec_regcache_reset(cstate->reg_cache);

	if (OPT_PRELOAD_PC && (block->flags & BLOCK_PRELOAD_PC))
		lightrec_preload_pc(cstate->reg_cache, _jit);

	cstate->cycles = 0;
	cstate->nb_local_branches = 0;
	cstate->nb_targets = 0;
	cstate->no_load_delay = false;

	jit_prolog();
	jit_tramp(256);

	start_of_block = jit_label();

	for (i = 0; i < block->nb_ops; i++) {
		elm = &block->opcode_list[i];

		if (skip_next) {
			skip_next = false;
			continue;
		}

		if (should_emulate(elm)) {
			pr_debug("Branch at offset 0x%x will be emulated\n",
				 i << 2);

			lightrec_emit_jump_to_interpreter(cstate, block, i);
			skip_next = !op_flag_no_ds(elm->flags);
		} else {
			lightrec_rec_opcode(cstate, block, i);
			skip_next = !op_flag_no_ds(elm->flags) && has_delay_slot(elm->c);
#ifdef _WIN32
			/* FIXME: GNU Lightning on Windows seems to use our
			 * mapped registers as temporaries. Until the actual bug
			 * is found and fixed, unconditionally mark our
			 * registers as live here. */
			lightrec_regcache_mark_live(cstate->reg_cache, _jit);
#endif
		}

		cstate->cycles += lightrec_cycles_of_opcode(state, elm->c);
	}

	for (i = 0; i < cstate->nb_local_branches; i++) {
		struct lightrec_branch *branch = &cstate->local_branches[i];

		pr_debug("Patch local branch to offset 0x%x\n",
			 branch->target << 2);

		if (branch->target == 0) {
			jit_patch_at(branch->branch, start_of_block);
			continue;
		}

		for (j = 0; j < cstate->nb_targets; j++) {
			if (cstate->targets[j].offset == branch->target) {
				jit_patch_at(branch->branch,
					     cstate->targets[j].label);
				break;
			}
		}

		if (j == cstate->nb_targets)
			pr_err("Unable to find branch target\n");
	}

	jit_ret();
	jit_epilog();

	new_fn = lightrec_emit_code(state, block, _jit, &block->code_size);
	if (!new_fn) {
		if (!ENABLE_THREADED_COMPILER)
			pr_err("Unable to compile block!\n");
		block->_jit = oldjit;
		jit_clear_state();
		_jit_destroy_state(_jit);
		return -ENOMEM;
	}

	/* Pause the reaper, because lightrec_reset_lut_offset() may try to set
	 * the old block->function pointer to the code LUT. */
	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_pause(state->reaper);

	block->function = new_fn;
	block_clear_flags(block, BLOCK_SHOULD_RECOMPILE);

	/* Add compiled function to the LUT */
	lut_write(state, lut_offset(block->pc), block->function);

	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_continue(state->reaper);

	/* Detect old blocks that have been covered by the new one */
	for (i = 0; i < cstate->nb_targets; i++) {
		target = &cstate->targets[i];

		if (!target->offset)
			continue;

		offset = block->pc + target->offset * sizeof(u32);

		/* Pause the reaper while we search for the block until we set
		 * the BLOCK_IS_DEAD flag, otherwise the block may be removed
		 * under our feet. */
		if (ENABLE_THREADED_COMPILER)
			lightrec_reaper_pause(state->reaper);

		block2 = lightrec_find_block(state->block_cache, offset);
		if (block2) {
			/* No need to check if block2 is compilable - it must
			 * be, otherwise block wouldn't be compilable either */

			/* Set the "block dead" flag to prevent the dynarec from
			 * recompiling this block */
			old_flags = block_set_flags(block2, BLOCK_IS_DEAD);
		}

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_continue(state->reaper);

			/* If block2 was pending for compilation, cancel it.
			 * If it's being compiled right now, wait until it
			 * finishes. */
			if (block2)
				lightrec_recompiler_remove(state->rec, block2);
		}

		/* We know from now on that block2 (if present) isn't going to
		 * be compiled. We can override the LUT entry with our new
		 * block's entry point. */
		offset = lut_offset(block->pc) + target->offset;
		lut_write(state, offset, jit_address(target->label));

		if (block2) {
			pr_debug("Reap block 0x%08x as it's covered by block "
				 "0x%08x\n", block2->pc, block->pc);

			/* Finally, reap the block. */
			if (!ENABLE_THREADED_COMPILER) {
				lightrec_unregister_block(state->block_cache, block2);
				lightrec_free_block(state, block2);
			} else if (!(old_flags & BLOCK_IS_DEAD)) {
				lightrec_reaper_add(state->reaper,
						    lightrec_reap_block,
						    block2);
			}
		}
	}

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
		jit_disassemble();
	}

	jit_clear_state();

	if (fully_tagged)
		old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

	if (fully_tagged && !(old_flags & BLOCK_NO_OPCODE_LIST)) {
		pr_debug("Block "PC_FMT" is fully tagged"
			 " - free opcode list\n", block->pc);

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_opcode_list,
					    block->opcode_list);
		} else {
			lightrec_free_opcode_list(state, block->opcode_list);
		}
	}

	if (oldjit) {
		pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
			 block->pc);

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_jit, oldjit);
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_function, old_fn);
		} else {
			_jit_destroy_state(oldjit);
			lightrec_free_function(state, old_fn);
		}

		lightrec_unregister(MEM_FOR_CODE, old_code_size);
	}

	pr_debug("Blocks compiled: %u\n", ++state->nb_compile);

	return 0;
}

static void lightrec_print_info(struct lightrec_state *state)
{
	if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
		pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
			"MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
			lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
			lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
			lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
			lightrec_get_total_mem_usage() / 1024,
			lightrec_get_average_ipi());
		state->old_cycle_counter = state->current_cycle & ~0xfffffff;
	}
}

u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
{
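	/* The dispatcher generated above takes (state, pc, block function,
	 * cycles delta) and returns the number of cycles still to run when
	 * execution stops. */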
	s32 (*func)(struct lightrec_state *, u32, void *, s32) = (void *)state->dispatcher->function;
	void *block_trace;
	s32 cycles_delta;

	state->exit_flags = LIGHTREC_EXIT_NORMAL;

	/* Handle the cycle counter overflowing */
	if (unlikely(target_cycle < state->current_cycle))
		target_cycle = UINT_MAX;

	state->target_cycle = target_cycle;
	state->curr_pc = pc;

	block_trace = get_next_block_func(state, pc);
	if (block_trace) {
		cycles_delta = state->target_cycle - state->current_cycle;

		cycles_delta = (*func)(state, state->curr_pc,
				       block_trace, cycles_delta);

		state->current_cycle = state->target_cycle - cycles_delta;
	}

	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_reap(state->reaper);

	if (LOG_LEVEL >= INFO_L)
		lightrec_print_info(state);

	return state->curr_pc;
}

u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc,
			     u32 target_cycle)
{
	struct block *block;

	state->exit_flags = LIGHTREC_EXIT_NORMAL;
	state->target_cycle = target_cycle;

	do {
		block = lightrec_get_block(state, pc);
		if (!block)
			break;

		pc = lightrec_emulate_block(state, block, pc);

		if (ENABLE_THREADED_COMPILER)
			lightrec_reaper_reap(state->reaper);
	} while (state->current_cycle < state->target_cycle);

	if (LOG_LEVEL >= INFO_L)
		lightrec_print_info(state);

	return pc;
}

void lightrec_free_block(struct lightrec_state *state, struct block *block)
{
	u8 old_flags;

	lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
	old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

	if (!(old_flags & BLOCK_NO_OPCODE_LIST))
		lightrec_free_opcode_list(state, block->opcode_list);
	if (block->_jit)
		_jit_destroy_state(block->_jit);
	if (block->function) {
		lightrec_free_function(state, block->function);
		lightrec_unregister(MEM_FOR_CODE, block->code_size);
	}
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
}

struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
{
	struct lightrec_cstate *cstate;

	cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
	if (!cstate)
		return NULL;

	cstate->reg_cache = lightrec_regcache_init(state);
	if (!cstate->reg_cache) {
		lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
		return NULL;
	}

	cstate->state = state;

	return cstate;
}

void lightrec_free_cstate(struct lightrec_cstate *cstate)
{
	lightrec_free_regcache(cstate->reg_cache);
	lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
}

struct lightrec_state * lightrec_init(char *argv0,
				      const struct lightrec_mem_map *map,
				      size_t nb,
				      const struct lightrec_ops *ops)
{
	const struct lightrec_mem_map *codebuf_map = &map[PSX_MAP_CODE_BUFFER];
	struct lightrec_state *state;
	uintptr_t addr;
	void *tlsf = NULL;
	bool with_32bit_lut = false;
	size_t lut_size;

	/* Sanity-check ops */
	if (!ops || !ops->cop2_op || !ops->enable_ram) {
		pr_err("Missing callbacks in lightrec_ops structure\n");
		return NULL;
	}

	if (ops->cop2_notify)
		pr_debug("Optional cop2_notify callback in lightrec_ops\n");
	else
		pr_debug("No optional cop2_notify callback in lightrec_ops\n");

	if (ENABLE_CODE_BUFFER && nb > PSX_MAP_CODE_BUFFER
	    && codebuf_map->address) {
		tlsf = tlsf_create_with_pool(codebuf_map->address,
					     codebuf_map->length);
		if (!tlsf) {
			pr_err("Unable to initialize code buffer\n");
			return NULL;
		}

		if (__WORDSIZE == 64) {
			addr = (uintptr_t) codebuf_map->address + codebuf_map->length - 1;
			with_32bit_lut = addr == (u32) addr;
		}
	}

	if (with_32bit_lut)
		lut_size = CODE_LUT_SIZE * 4;
	else
		lut_size = CODE_LUT_SIZE * sizeof(void *);

	init_jit(argv0);

	state = calloc(1, sizeof(*state) + lut_size);
	if (!state)
		goto err_finish_jit;

	lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) + lut_size);

	state->tlsf = tlsf;
	state->with_32bit_lut = with_32bit_lut;
	state->in_delay_slot_n = 0xff;
	state->cycles_per_op = 2;

	state->block_cache = lightrec_blockcache_init(state);
	if (!state->block_cache)
		goto err_free_state;

	if (ENABLE_THREADED_COMPILER) {
		state->rec = lightrec_recompiler_init(state);
		if (!state->rec)
			goto err_free_block_cache;

		state->reaper = lightrec_reaper_init(state);
		if (!state->reaper)
			goto err_free_recompiler;
	} else {
		state->cstate = lightrec_create_cstate(state);
		if (!state->cstate)
			goto err_free_block_cache;
	}

	state->nb_maps = nb;
	state->maps = map;

	memcpy(&state->ops, ops, sizeof(*ops));

	state->dispatcher = generate_dispatcher(state);
	if (!state->dispatcher)
		goto err_free_reaper;

	state->c_wrapper_block = generate_wrapper(state);
	if (!state->c_wrapper_block)
		goto err_free_dispatcher;

	state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
	state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
	state->c_wrappers[C_WRAPPER_MFC] = lightrec_mfc_cb;
	state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
	state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;

	map = &state->maps[PSX_MAP_BIOS];
	state->offset_bios = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_SCRATCH_PAD];
	state->offset_scratch = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_HW_REGISTERS];
	state->offset_io = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
	state->offset_ram = (uintptr_t)map->address - map->pc;

	if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
	    state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
	    state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
		state->mirrors_mapped = true;

	if (state->offset_bios == 0 &&
	    state->offset_scratch == 0 &&
	    state->offset_ram == 0 &&
	    state->offset_io == 0 &&
	    state->mirrors_mapped) {
		pr_info("Memory map is perfect. Emitted code will be best.\n");
	} else {
		pr_info("Memory map is sub-par. Emitted code will be slow.\n");
	}

	if (state->with_32bit_lut)
		pr_info("Using 32-bit LUT\n");

	return state;

err_free_dispatcher:
	lightrec_free_block(state, state->dispatcher);
err_free_reaper:
	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_destroy(state->reaper);
err_free_recompiler:
	if (ENABLE_THREADED_COMPILER)
		lightrec_free_recompiler(state->rec);
	else
		lightrec_free_cstate(state->cstate);
err_free_block_cache:
	lightrec_free_block_cache(state->block_cache);
err_free_state:
	lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
			    lut_elm_size(state) * CODE_LUT_SIZE);
	free(state);
err_finish_jit:
	finish_jit();
	if (ENABLE_CODE_BUFFER && tlsf)
		tlsf_destroy(tlsf);
	return NULL;
}

void lightrec_destroy(struct lightrec_state *state)
{
	/* Force an info print on destroy */
	state->current_cycle = ~state->current_cycle;
	lightrec_print_info(state);

	lightrec_free_block_cache(state->block_cache);
	lightrec_free_block(state, state->dispatcher);
	lightrec_free_block(state, state->c_wrapper_block);

	if (ENABLE_THREADED_COMPILER) {
		lightrec_free_recompiler(state->rec);
		lightrec_reaper_destroy(state->reaper);
	} else {
		lightrec_free_cstate(state->cstate);
	}

	finish_jit();
	if (ENABLE_CODE_BUFFER && state->tlsf)
		tlsf_destroy(state->tlsf);

	lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
			    lut_elm_size(state) * CODE_LUT_SIZE);
	free(state);
}

void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
{
	u32 kaddr = kunseg(addr & ~0x3);
	enum psx_map idx = lightrec_get_map_idx(state, kaddr);

	switch (idx) {
	case PSX_MAP_MIRROR1:
	case PSX_MAP_MIRROR2:
	case PSX_MAP_MIRROR3:
		/* Handle mirrors */
		kaddr &= RAM_SIZE - 1;
		fallthrough;
	case PSX_MAP_KERNEL_USER_RAM:
		break;
	default:
		return;
	}

	memset(lut_address(state, lut_offset(kaddr)), 0,
	       ((len + 3) / 4) * lut_elm_size(state));
}

void lightrec_invalidate_all(struct lightrec_state *state)
{
	memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
}

void lightrec_set_unsafe_opt_flags(struct lightrec_state *state, u32 flags)
{
	if ((flags ^ state->opt_flags) & LIGHTREC_OPT_INV_DMA_ONLY)
		lightrec_invalidate_all(state);

	state->opt_flags = flags;
}

void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
	if (flags != LIGHTREC_EXIT_NORMAL) {
		state->exit_flags |= flags;
		state->target_cycle = state->current_cycle;
	}
}

u32 lightrec_exit_flags(struct lightrec_state *state)
{
	return state->exit_flags;
}

u32 lightrec_current_cycle_count(const struct lightrec_state *state)
{
	return state->current_cycle;
}

void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
{
	state->current_cycle = cycles;

	if (state->target_cycle < cycles)
		state->target_cycle = cycles;
}

void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
{
	if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
		if (cycles < state->current_cycle)
			cycles = state->current_cycle;

		state->target_cycle = cycles;
	}
}

struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
{
	return &state->regs;
}

void lightrec_set_cycles_per_opcode(struct lightrec_state *state, u32 cycles)
{
	state->cycles_per_op = cycles;
}