// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */
#include "blockcache.h"
#include "debug.h"
#include "disassembler.h"
#include "emitter.h"
#include "interpreter.h"
#include "lightrec-config.h"
#include "lightning-wrapper.h"
#include "lightrec.h"
#include "memmanager.h"
#include "reaper.h"
#include "recompiler.h"
#include "regcache.h"
#include "optimizer.h"

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#if ENABLE_THREADED_COMPILER
#include <stdatomic.h>
#endif
#define GENMASK(h, l) \
        (((uintptr_t)-1 << (l)) & ((uintptr_t)-1 >> (__WORDSIZE - 1 - (h))))
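/*
 * Example: with __WORDSIZE == 64, GENMASK(31, 8) evaluates to
 * ((uintptr_t)-1 << 8) & ((uintptr_t)-1 >> 32) == 0x00000000ffffff00,
 * i.e. a mask covering bits 8..31 inclusive.
 */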
static struct block * lightrec_precompile_block(struct lightrec_state *state,
                                                u32 pc);
static bool lightrec_block_is_fully_tagged(const struct block *block);

static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);
static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
                                void *host, u32 addr, u8 data)
{
        *(u8 *)host = data;

        if (!state->invalidate_from_dma_only)
                lightrec_invalidate(state, addr, 1);
}

static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
                                void *host, u32 addr, u16 data)
{
        *(u16 *)host = HTOLE16(data);

        if (!state->invalidate_from_dma_only)
                lightrec_invalidate(state, addr, 2);
}

static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
                                void *host, u32 addr, u32 data)
{
        *(u32 *)host = HTOLE32(data);

        if (!state->invalidate_from_dma_only)
                lightrec_invalidate(state, addr, 4);
}
static u8 lightrec_default_lb(struct lightrec_state *state,
                              u32 opcode, void *host, u32 addr)
{
        return *(u8 *)host;
}

static u16 lightrec_default_lh(struct lightrec_state *state,
                               u32 opcode, void *host, u32 addr)
{
        return LE16TOH(*(u16 *)host);
}

static u32 lightrec_default_lw(struct lightrec_state *state,
                               u32 opcode, void *host, u32 addr)
{
        return LE32TOH(*(u32 *)host);
}
static const struct lightrec_mem_map_ops lightrec_default_ops = {
        .sb = lightrec_default_sb,
        .sh = lightrec_default_sh,
        .sw = lightrec_default_sw,
        .lb = lightrec_default_lb,
        .lh = lightrec_default_lh,
        .lw = lightrec_default_lw,
};
static void __segfault_cb(struct lightrec_state *state, u32 addr,
                          const struct block *block)
{
        lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
        pr_err("Segmentation fault in recompiled code: invalid "
               "load/store at address 0x%08x\n", addr);
        if (block)
                pr_err("Was executing block PC 0x%08x\n", block->pc);
}
static void lightrec_swl(struct lightrec_state *state,
                         const struct lightrec_mem_map_ops *ops,
                         u32 opcode, void *host, u32 addr, u32 data)
{
        unsigned int shift = addr & 0x3;
        unsigned int mask = GENMASK(31, (shift + 1) * 8);
        u32 old_data;

        /* Align to 32 bits */
        addr &= ~3;
        host = (void *)((uintptr_t)host & ~3);

        old_data = ops->lw(state, opcode, host, addr);

        data = (data >> ((3 - shift) * 8)) | (old_data & mask);

        ops->sw(state, opcode, host, addr, data);
}
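/*
 * Worked example for the SWL helper above: with addr & 3 == 1, shift is 1
 * and mask is GENMASK(31, 16) == 0xffff0000, so the two most-significant
 * bytes of the register end up in the two lowest bytes of the word:
 * new = (data >> 16) | (old_data & 0xffff0000).
 */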
static void lightrec_swr(struct lightrec_state *state,
                         const struct lightrec_mem_map_ops *ops,
                         u32 opcode, void *host, u32 addr, u32 data)
{
        unsigned int shift = addr & 0x3;
        unsigned int mask = (1 << (shift * 8)) - 1;
        u32 old_data;

        /* Align to 32 bits */
        addr &= ~3;
        host = (void *)((uintptr_t)host & ~3);

        old_data = ops->lw(state, opcode, host, addr);

        data = (data << (shift * 8)) | (old_data & mask);

        ops->sw(state, opcode, host, addr, data);
}
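/*
 * Worked example for the SWR helper above: with addr & 3 == 2, shift is 2
 * and mask is 0xffff, so the two least-significant bytes of the register
 * end up in the two highest bytes of the word:
 * new = (data << 16) | (old_data & 0xffff).
 */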
static void lightrec_swc2(struct lightrec_state *state, union code op,
                          const struct lightrec_mem_map_ops *ops,
                          void *host, u32 addr)
{
        u32 data = lightrec_mfc2(state, op.i.rt);

        ops->sw(state, op.opcode, host, addr, data);
}
static u32 lightrec_lwl(struct lightrec_state *state,
                        const struct lightrec_mem_map_ops *ops,
                        u32 opcode, void *host, u32 addr, u32 data)
{
        unsigned int shift = addr & 0x3;
        unsigned int mask = (1 << (24 - shift * 8)) - 1;
        u32 old_data;

        /* Align to 32 bits */
        addr &= ~3;
        host = (void *)((uintptr_t)host & ~3);

        old_data = ops->lw(state, opcode, host, addr);

        return (data & mask) | (old_data << (24 - shift * 8));
}
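/*
 * Worked example for the LWL helper above: with addr & 3 == 0, mask is
 * 0x00ffffff, so only the most-significant byte of the register is
 * replaced, by byte 0 of the word:
 * result = (data & 0x00ffffff) | (old_data << 24).
 */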
static u32 lightrec_lwr(struct lightrec_state *state,
                        const struct lightrec_mem_map_ops *ops,
                        u32 opcode, void *host, u32 addr, u32 data)
{
        unsigned int shift = addr & 0x3;
        unsigned int mask = GENMASK(31, 32 - shift * 8);
        u32 old_data;

        /* Align to 32 bits */
        addr &= ~3;
        host = (void *)((uintptr_t)host & ~3);

        old_data = ops->lw(state, opcode, host, addr);

        return (data & mask) | (old_data >> (shift * 8));
}
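/*
 * Worked example for the LWR helper above: with addr & 3 == 1, mask is
 * GENMASK(31, 24) == 0xff000000, so the register keeps its top byte and
 * loads bytes 1..3 of the word into its low 24 bits:
 * result = (data & 0xff000000) | (old_data >> 8).
 */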
static void lightrec_lwc2(struct lightrec_state *state, union code op,
                          const struct lightrec_mem_map_ops *ops,
                          void *host, u32 addr)
{
        u32 data = ops->lw(state, op.opcode, host, addr);

        lightrec_mtc2(state, op.i.rt, data);
}
static void lightrec_invalidate_map(struct lightrec_state *state,
                const struct lightrec_mem_map *map, u32 addr, u32 len)
{
        if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
                memset(&state->code_lut[lut_offset(addr)], 0,
                       ((len + 3) / 4) * sizeof(void *));
        }
}
const struct lightrec_mem_map *
lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
{
        const struct lightrec_mem_map *map;
        unsigned int i;
        u32 addr;

        for (i = 0; i < state->nb_maps; i++) {
                const struct lightrec_mem_map *mapi = &state->maps[i];

                if (kaddr >= mapi->pc && kaddr < mapi->pc + mapi->length) {
                        map = mapi;
                        break;
                }
        }

        if (i == state->nb_maps)
                return NULL;

        addr = kaddr - map->pc;

        while (map->mirror_of)
                map = map->mirror_of;

        if (host)
                *host = map->address + addr;

        return map;
}
u32 lightrec_rw(struct lightrec_state *state, union code op,
                u32 addr, u32 data, u16 *flags, struct block *block)
{
        const struct lightrec_mem_map *map;
        const struct lightrec_mem_map_ops *ops;
        u32 opcode = op.opcode;
        void *host;

        addr += (s16) op.i.imm;

        map = lightrec_get_map(state, &host, kunseg(addr));
        if (!map) {
                __segfault_cb(state, addr, block);
                return 0;
        }

        if (unlikely(map->ops)) {
                if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
                        *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);

                ops = map->ops;
        } else {
                if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
                        *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);

                ops = &lightrec_default_ops;
        }

        switch (op.i.op) {
        case OP_SB:
                ops->sb(state, opcode, host, addr, (u8) data);
                return 0;
        case OP_SH:
                ops->sh(state, opcode, host, addr, (u16) data);
                return 0;
        case OP_SWL:
                lightrec_swl(state, ops, opcode, host, addr, data);
                return 0;
        case OP_SWR:
                lightrec_swr(state, ops, opcode, host, addr, data);
                return 0;
        case OP_SW:
                ops->sw(state, opcode, host, addr, data);
                return 0;
        case OP_SWC2:
                lightrec_swc2(state, op, ops, host, addr);
                return 0;
        case OP_LB:
                return (s32) (s8) ops->lb(state, opcode, host, addr);
        case OP_LBU:
                return ops->lb(state, opcode, host, addr);
        case OP_LH:
                return (s32) (s16) ops->lh(state, opcode, host, addr);
        case OP_LHU:
                return ops->lh(state, opcode, host, addr);
        case OP_LWC2:
                lightrec_lwc2(state, op, ops, host, addr);
                return 0;
        case OP_LWL:
                return lightrec_lwl(state, ops, opcode, host, addr, data);
        case OP_LWR:
                return lightrec_lwr(state, ops, opcode, host, addr, data);
        case OP_LW:
        default: /* fall-through */
                return ops->lw(state, opcode, host, addr);
        }
}
static void lightrec_rw_helper(struct lightrec_state *state,
                               union code op, u16 *flags,
                               struct block *block)
{
        u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
                              state->regs.gpr[op.i.rt], flags, block);

        switch (op.i.op) {
        case OP_LB:
        case OP_LBU:
        case OP_LH:
        case OP_LHU:
        case OP_LWL:
        case OP_LWR:
        case OP_LW:
                if (op.i.rt)
                        state->regs.gpr[op.i.rt] = ret;
        default: /* fall-through */
                break;
        }
}
static void lightrec_rw_cb(struct lightrec_state *state)
{
        lightrec_rw_helper(state, (union code)state->c_wrapper_arg, NULL, NULL);
}
static void lightrec_rw_generic_cb(struct lightrec_state *state)
{
        struct block *block;
        struct opcode *op;
        bool was_tagged;
        u32 arg = state->c_wrapper_arg;
        u16 offset = (u16)arg;

        block = lightrec_find_block_from_lut(state->block_cache,
                                             arg >> 16, state->next_pc);
        if (unlikely(!block)) {
                pr_err("rw_generic: No block found in LUT for PC 0x%x offset 0x%x\n",
                       state->next_pc, offset);
                return;
        }

        op = &block->opcode_list[offset];
        was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(op->flags);

        lightrec_rw_helper(state, op->c, &op->flags, block);

        if (!was_tagged) {
                pr_debug("Opcode of block at PC 0x%08x has been tagged - flag "
                         "for recompilation\n", block->pc);

                block->flags |= BLOCK_SHOULD_RECOMPILE;
        }
}
static u32 clamp_s32(s32 val, s32 min, s32 max)
{
        return val < min ? min : val > max ? max : val;
}
static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
{
        s16 gteir1, gteir2, gteir3;

        switch (reg) {
        case 1:
        case 3:
        case 5:
        case 8:
        case 9:
        case 10:
        case 11:
                return (s32)(s16) state->regs.cp2d[reg];
        case 7:
        case 16:
        case 17:
        case 18:
        case 19:
                return (u16) state->regs.cp2d[reg];
        case 28:
        case 29:
                gteir1 = (s16) state->regs.cp2d[9];
                gteir2 = (s16) state->regs.cp2d[10];
                gteir3 = (s16) state->regs.cp2d[11];

                /* IRGB/ORGB: pack IR1-IR3 into three 5-bit colour fields,
                 * each saturated to the 0x00..0x1f range */
                return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
                       clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
                       clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
        default: /* fall-through */
                return state->regs.cp2d[reg];
        }
}
u32 lightrec_mfc(struct lightrec_state *state, union code op)
{
        if (op.i.op == OP_CP0)
                return state->regs.cp0[op.r.rd];
        else if (op.r.rs == OP_CP2_BASIC_MFC2)
                return lightrec_mfc2(state, op.r.rd);
        else
                return state->regs.cp2c[op.r.rd];
}
static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
{
        u32 status, oldstatus, cause;

        switch (reg) {
        case 1:
        case 4:
        case 8:
        case 14:
        case 15:
                /* Those registers are read-only */
                return;
        default: /* fall-through */
                break;
        }

        if (reg == 12) {
                status = state->regs.cp0[12];
                oldstatus = status;

                if (status & ~data & BIT(16)) {
                        state->ops.enable_ram(state, true);
                        lightrec_invalidate_all(state);
                } else if (~status & data & BIT(16)) {
                        state->ops.enable_ram(state, false);
                }
        }

        if (reg == 13) {
                /* Only the software interrupt bits of CAUSE are writable */
                state->regs.cp0[13] &= ~0x300;
                state->regs.cp0[13] |= data & 0x300;
        } else {
                state->regs.cp0[reg] = data;
        }

        if (reg == 12 || reg == 13) {
                cause = state->regs.cp0[13];
                status = state->regs.cp0[12];

                /* Handle software interrupts; the trailing "& status" tests
                 * bit 0 (IEc), so a check is only triggered when a raised
                 * software interrupt is unmasked and interrupts are enabled */
                if (!!(status & cause & 0x300) & status)
                        lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);

                /* Handle hardware interrupts: trigger a check when IM2 and
                 * IEc (mask 0x401) have just both become set */
                if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
                        lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
        }
}
static u32 count_leading_bits(s32 data)
{
        u32 cnt = 32;
        u32 val;

#ifdef __has_builtin
#if __has_builtin(__builtin_clrsb)
        return 1 + __builtin_clrsb(data);
#endif
#endif

        /* Portable fallback, a sketch equivalent to the builtin above:
         * XOR-ing with the replicated sign bit (data >> 31 is 0 or -1)
         * turns leading sign bits into leading zeros, which we count. */
        val = (u32)(data ^ (data >> 31)) << 1;

        while (val >>= 1)
                cnt--;

        return cnt;
}
static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
{
        switch (reg) {
        case 15:
                /* Writing SXYP pushes the screen XY coordinate FIFO */
                state->regs.cp2d[12] = state->regs.cp2d[13];
                state->regs.cp2d[13] = state->regs.cp2d[14];
                state->regs.cp2d[14] = data;
                break;
        case 28:
                /* IRGB write: unpack the three 5-bit colour fields
                 * into IR1-IR3 */
                state->regs.cp2d[9] = (data << 7) & 0xf80;
                state->regs.cp2d[10] = (data << 2) & 0xf80;
                state->regs.cp2d[11] = (data >> 3) & 0xf80;
                break;
        case 31:
                /* LZCR is read-only */
                return;
        case 30:
                state->regs.cp2d[31] = count_leading_bits((s32) data);
        default: /* fall-through */
                state->regs.cp2d[reg] = data;
                break;
        }
}
static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
{
        switch (reg) {
        case 4:
        case 12:
        case 20:
        case 26:
        case 27:
        case 29:
        case 30:
                data = (s32)(s16) data;
                break;
        case 31:
                /* FLAG: bit 31 is the logical OR of the error bits */
                data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
        default: /* fall-through */
                break;
        }

        state->regs.cp2c[reg] = data;
}
void lightrec_mtc(struct lightrec_state *state, union code op, u32 data)
{
        if (op.i.op == OP_CP0)
                lightrec_mtc0(state, op.r.rd, data);
        else if (op.r.rs == OP_CP2_BASIC_CTC2)
                lightrec_ctc2(state, op.r.rd, data);
        else
                lightrec_mtc2(state, op.r.rd, data);
}
static void lightrec_mtc_cb(struct lightrec_state *state)
{
        union code op = (union code) state->c_wrapper_arg;

        lightrec_mtc(state, op, state->regs.gpr[op.r.rt]);
}
void lightrec_rfe(struct lightrec_state *state)
{
        u32 status;

        /* Read CP0 Status register (r12) */
        status = state->regs.cp0[12];

        /* Switch the bits */
        status = ((status & 0x3c) >> 2) | (status & ~0xf);
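        /*
         * Example: the (KUo,IEo) <- (KUp,IEp) <- (KUc,IEc) mode stack pops,
         * so a status of 0x0c (KUp,IEp set) becomes 0x03 (KUc,IEc set);
         * bits 4-5 are kept in place by the "status & ~0xf" half.
         */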

        lightrec_mtc0(state, 12, status);
}
void lightrec_cp(struct lightrec_state *state, union code op)
{
        if (op.i.op == OP_CP0) {
                pr_err("Invalid CP opcode to coprocessor #0\n");
                return;
        }

        (*state->ops.cop2_op)(state, op.opcode);
}

static void lightrec_cp_cb(struct lightrec_state *state)
{
        lightrec_cp(state, (union code) state->c_wrapper_arg);
}
static void lightrec_syscall_cb(struct lightrec_state *state)
{
        lightrec_set_exit_flags(state, LIGHTREC_EXIT_SYSCALL);
}

static void lightrec_break_cb(struct lightrec_state *state)
{
        lightrec_set_exit_flags(state, LIGHTREC_EXIT_BREAK);
}
struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
{
        struct block *block = lightrec_find_block(state->block_cache, pc);

        if (block && lightrec_block_is_outdated(state, block)) {
                pr_debug("Block at PC 0x%08x is outdated!\n", block->pc);

                /* Make sure the recompiler isn't processing the block
                 * we'll destroy */
                if (ENABLE_THREADED_COMPILER)
                        lightrec_recompiler_remove(state->rec, block);

                lightrec_unregister_block(state->block_cache, block);
                remove_from_code_lut(state->block_cache, block);
                lightrec_free_block(state, block);
                block = NULL;
        }

        if (!block) {
                block = lightrec_precompile_block(state, pc);
                if (!block) {
                        pr_err("Unable to recompile block at PC 0x%x\n", pc);
                        lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
                        return NULL;
                }

                lightrec_register_block(state->block_cache, block);
        }

        return block;
}
static void * get_next_block_func(struct lightrec_state *state, u32 pc)
{
        struct block *block;
        bool should_recompile;
        void *func;

        for (;;) {
                func = state->code_lut[lut_offset(pc)];
                if (func && func != state->get_next_block)
                        break;

                block = lightrec_get_block(state, pc);

                if (unlikely(!block))
                        break;

                if (OPT_REPLACE_MEMSET && (block->flags & BLOCK_IS_MEMSET)) {
                        func = state->memset_func;
                        break;
                }

                should_recompile = block->flags & BLOCK_SHOULD_RECOMPILE &&
                        !(block->flags & BLOCK_IS_DEAD);

                if (unlikely(should_recompile)) {
                        pr_debug("Block at PC 0x%08x should recompile\n", pc);

                        lightrec_unregister(MEM_FOR_CODE, block->code_size);

                        if (ENABLE_THREADED_COMPILER)
                                lightrec_recompiler_add(state->rec, block);
                        else
                                lightrec_compile_block(state->cstate, block);
                }

                if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
                        func = lightrec_recompiler_run_first_pass(state, block, &pc);
                else
                        func = block->function;

                if (likely(func))
                        break;

                if (unlikely(block->flags & BLOCK_NEVER_COMPILE)) {
                        pc = lightrec_emulate_block(state, block, pc);
                } else if (!ENABLE_THREADED_COMPILER) {
                        /* Block wasn't compiled yet - run the interpreter */
                        if (block->flags & BLOCK_FULLY_TAGGED)
                                pr_debug("Block fully tagged, skipping first pass\n");
                        else if (ENABLE_FIRST_PASS && likely(!should_recompile))
                                pc = lightrec_emulate_block(state, block, pc);

                        /* Then compile it using the profiled data */
                        lightrec_compile_block(state->cstate, block);
                } else {
                        lightrec_recompiler_add(state->rec, block);
                }

                if (state->exit_flags != LIGHTREC_EXIT_NORMAL ||
                    state->current_cycle >= state->target_cycle)
                        break;
        }

        state->next_pc = pc;
        return func;
}
static s32 c_function_wrapper(struct lightrec_state *state, s32 cycles_delta,
                              void (*f)(struct lightrec_state *))
{
        state->current_cycle = state->target_cycle - cycles_delta;

        (*f)(state);

        return state->target_cycle - state->current_cycle;
}
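/*
 * Note: LIGHTREC_REG_CYCLE holds the number of cycles left to run
 * (target_cycle - current_cycle). The wrapper converts it to an absolute
 * cycle count on entry and back to a delta on exit, so the C callback can
 * freely read or modify current_cycle and target_cycle.
 */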
static struct block * generate_wrapper(struct lightrec_state *state)
{
        struct block *block;
        jit_state_t *_jit;
        unsigned int i;
        int stack_ptr;
        jit_word_t code_size;
        jit_node_t *to_tramp, *to_fn_epilog;
        jit_node_t *addr[C_WRAPPERS_COUNT - 1];

        block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
        if (!block)
                goto err_no_mem;

        _jit = jit_new_state();
        if (!_jit)
                goto err_free_block;

        jit_name("RW wrapper");
        jit_note(__FILE__, __LINE__);

        /* Wrapper entry point */
        jit_prolog();

        /* Add entry points; separate them by opcodes that increment
         * LIGHTREC_REG_STATE (since we cannot touch other registers).
         * The difference will then tell us which C function to call. */
        for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
                jit_addi(LIGHTREC_REG_STATE, LIGHTREC_REG_STATE, __WORDSIZE / 8);
                addr[i - 1] = jit_indirect();
        }
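        /*
         * Entering through addr[i] executes the remaining jit_addi
         * instructions, offsetting LIGHTREC_REG_STATE by one word per
         * wrapper index; the jit_ldxi of state->c_wrappers further down
         * therefore fetches c_wrappers[i] without any extra bookkeeping.
         */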
        stack_ptr = jit_allocai(sizeof(uintptr_t) * NUM_TEMPS);

        /* Save all temporaries on stack */
        for (i = 0; i < NUM_TEMPS; i++)
                jit_stxi(stack_ptr + i * sizeof(uintptr_t), JIT_FP, JIT_R(i));

        /* Jump to the trampoline */
        to_tramp = jit_jmpi();

        /* The trampoline will jump back here */
        to_fn_epilog = jit_label();

        /* Restore temporaries from stack */
        for (i = 0; i < NUM_TEMPS; i++)
                jit_ldxi(JIT_R(i), JIT_FP, stack_ptr + i * sizeof(uintptr_t));

        jit_ret();
        jit_epilog();

        /* Trampoline entry point.
         * The sole purpose of the trampoline is to trick Lightning into not
         * saving/restoring the callee-saved register LIGHTREC_REG_CYCLE,
         * since we do want to return to the caller with this register
         * modified. */
        jit_prolog();
        jit_tramp(256);
        jit_patch(to_tramp);

        /* Retrieve the wrapper function */
        jit_ldxi(JIT_R0, LIGHTREC_REG_STATE,
                 offsetof(struct lightrec_state, c_wrappers));

        /* Restore LIGHTREC_REG_STATE to its correct value */
        jit_movi(LIGHTREC_REG_STATE, (uintptr_t) state);

        jit_prepare();
        jit_pushargr(LIGHTREC_REG_STATE);
        jit_pushargr(LIGHTREC_REG_CYCLE);
        jit_pushargr(JIT_R0);
        jit_finishi(c_function_wrapper);
        jit_retval_i(LIGHTREC_REG_CYCLE);

        jit_patch_at(jit_jmpi(), to_fn_epilog);
        jit_epilog();

        block->_jit = _jit;
        block->function = jit_emit();
        block->opcode_list = NULL;

        state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;

        for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
                state->wrappers_eps[i] = jit_address(addr[i]);

        jit_get_code(&code_size);
        lightrec_register(MEM_FOR_CODE, code_size);

        block->code_size = code_size;

        if (ENABLE_DISASSEMBLER) {
                pr_debug("Wrapper block:\n");
                jit_disassemble();
        }

        jit_clear_state();

        return block;

err_free_block:
        lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
        pr_err("Unable to compile wrapper: Out of memory\n");
        return NULL;
}
static u32 lightrec_memset(struct lightrec_state *state)
{
        u32 kunseg_pc = kunseg(state->regs.gpr[4]);
        void *host;
        const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
        u32 length = state->regs.gpr[5] * 4;

        if (!map) {
                pr_err("Unable to find memory map for memset target address "
                       "0x%x\n", kunseg_pc);
                return 0;
        }

        pr_debug("Calling host memset, PC 0x%x (host address 0x%" PRIxPTR ") for %u bytes\n",
                 kunseg_pc, (uintptr_t)host, length);
        memset(host, 0, length);

        if (!state->invalidate_from_dma_only)
                lightrec_invalidate_map(state, map, kunseg_pc, length);

        /* Rough estimate of the number of cycles consumed: a fixed setup
         * cost plus about five cycles per 32-bit word written, rounding
         * the length up to a whole number of words */
        return 8 + 5 * ((length + 3) / 4);
}
static struct block * generate_dispatcher(struct lightrec_state *state)
{
        struct block *block;
        jit_state_t *_jit;
        jit_node_t *to_end, *to_c, *loop, *addr, *addr2, *addr3;
        unsigned int i;
        u32 offset, ram_len;
        jit_word_t code_size;

        block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
        if (!block)
                goto err_no_mem;

        _jit = jit_new_state();
        if (!_jit)
                goto err_free_block;

        jit_name("dispatcher");
        jit_note(__FILE__, __LINE__);

        jit_prolog();
        jit_frame(256);

        jit_getarg(JIT_R0, jit_arg());
        jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());

        /* Force all callee-saved registers to be pushed on the stack */
        for (i = 0; i < NUM_REGS; i++)
                jit_movr(JIT_V(i), JIT_V(i));

        /* Pass lightrec_state structure to blocks, using the last callee-saved
         * register that Lightning provides */
        jit_movi(LIGHTREC_REG_STATE, (intptr_t) state);

        loop = jit_label();

        /* Call the block's code */
        jit_jmpr(JIT_R0);

        if (OPT_REPLACE_MEMSET) {
                /* Blocks will jump here when they need to call
                 * lightrec_memset() */
                addr3 = jit_indirect();

                jit_prepare();
                jit_pushargr(LIGHTREC_REG_STATE);
                jit_finishi(lightrec_memset);

                /* Resume at the return address the block left in GPR 31,
                 * and account for the cycles the memset consumed */
                jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
                            offsetof(struct lightrec_state, regs.gpr[31]));

                jit_retval(JIT_R0);
                jit_subr(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, JIT_R0);
        }

        /* The block will jump here, with the number of cycles remaining in
         * LIGHTREC_REG_CYCLE */
        addr2 = jit_indirect();

        /* Store back the next_pc to the lightrec_state structure */
        offset = offsetof(struct lightrec_state, next_pc);
        jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);

        /* Jump to end if state->target_cycle < state->current_cycle */
        to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);

        /* Convert next PC to KUNSEG and avoid mirrors */
        ram_len = state->maps[PSX_MAP_KERNEL_USER_RAM].length;
        jit_andi(JIT_R0, JIT_V0, 0x10000000 | (ram_len - 1));
        to_c = jit_bgei(JIT_R0, ram_len);
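        /*
         * The single AND above both strips the KSEG0/KSEG1 segment bits and
         * folds the RAM mirrors: bit 28 survives for BIOS/IO addresses, so
         * those compare >= ram_len and take the slow path below.
         */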
        /* Fast path: code is running from RAM, use the code LUT */
        if (__WORDSIZE == 64)
                jit_lshi(JIT_R0, JIT_R0, 1);
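        /*
         * code_lut[] is indexed by (address / 4) and stores pointers, so the
         * byte offset into the table is the address itself on 32-bit hosts
         * ((addr / 4) * 4) and twice the address on 64-bit hosts
         * ((addr / 4) * 8) - hence the extra shift.
         */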
        jit_addr(JIT_R0, JIT_R0, LIGHTREC_REG_STATE);
        jit_ldxi(JIT_R0, JIT_R0, offsetof(struct lightrec_state, code_lut));

        /* If we get non-NULL, loop */
        jit_patch_at(jit_bnei(JIT_R0, 0), loop);

        /* Slow path: call C function get_next_block_func() */
        jit_patch(to_c);

        if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
                /* We may call the interpreter - update state->current_cycle */
                jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
                           offsetof(struct lightrec_state, target_cycle));
                jit_subr(JIT_R1, JIT_R2, LIGHTREC_REG_CYCLE);
                jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
                           LIGHTREC_REG_STATE, JIT_R1);
        }

        /* The code LUT will be set to this address when the block at the target
         * PC has been preprocessed but not yet compiled by the threaded
         * recompiler */
        addr = jit_indirect();

        /* Get the next block */
        jit_prepare();
        jit_pushargr(LIGHTREC_REG_STATE);
        jit_pushargr(JIT_V0);
        jit_finishi(&get_next_block_func);
        jit_retval(JIT_R0);

        if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
                /* The interpreter may have updated state->current_cycle and
                 * state->target_cycle - recalc the delta */
                jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
                           offsetof(struct lightrec_state, current_cycle));
                jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
                           offsetof(struct lightrec_state, target_cycle));
                jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
        }

        /* If we get non-NULL, loop */
        jit_patch_at(jit_bnei(JIT_R0, 0), loop);

        /* When exiting, the recompiled code will jump to that address */
        jit_note(__FILE__, __LINE__);
        jit_patch(to_end);

        jit_retr(LIGHTREC_REG_CYCLE);
        jit_epilog();

        block->_jit = _jit;
        block->function = jit_emit();
        block->opcode_list = NULL;

        jit_get_code(&code_size);
        lightrec_register(MEM_FOR_CODE, code_size);

        block->code_size = code_size;

        state->eob_wrapper_func = jit_address(addr2);
        if (OPT_REPLACE_MEMSET)
                state->memset_func = jit_address(addr3);
        state->get_next_block = jit_address(addr);

        if (ENABLE_DISASSEMBLER) {
                pr_debug("Dispatcher block:\n");
                jit_disassemble();
        }

        jit_clear_state();

        return block;

err_free_block:
        lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
        pr_err("Unable to compile dispatcher: Out of memory\n");
        return NULL;
}
union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
{
        void *host;

        lightrec_get_map(state, &host, kunseg(pc));

        const u32 *code = (u32 *)host;
        return (union code) *code;
}
unsigned int lightrec_cycles_of_opcode(union code code)
{
        return 2;
}
void lightrec_free_opcode_list(struct lightrec_state *state, struct block *block)
{
        lightrec_free(state, MEM_FOR_IR,
                      sizeof(*block->opcode_list) * block->nb_ops,
                      block->opcode_list);
}
static unsigned int lightrec_get_mips_block_len(const u32 *src)
{
        unsigned int i;
        union code c;

        for (i = 1; ; i++) {
                c.opcode = LE32TOH(*src++);

                if (is_syscall(c))
                        return i;

                if (is_unconditional_jump(c))
                        return i + 1;
        }
}
static struct opcode * lightrec_disassemble(struct lightrec_state *state,
                                            const u32 *src, unsigned int *len)
{
        struct opcode *list;
        unsigned int i, length;

        length = lightrec_get_mips_block_len(src);

        list = lightrec_malloc(state, MEM_FOR_IR, sizeof(*list) * length);
        if (!list) {
                pr_err("Unable to allocate memory\n");
                return NULL;
        }

        for (i = 0; i < length; i++) {
                list[i].opcode = LE32TOH(src[i]);
                list[i].flags = 0;
        }

        *len = length * sizeof(u32);

        return list;
}
static struct block * lightrec_precompile_block(struct lightrec_state *state,
                                                u32 pc)
{
        struct opcode *list;
        struct block *block;
        void *host;
        const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
        const u32 *code = (u32 *) host;
        unsigned int length;
        bool fully_tagged;

        if (!map)
                return NULL;

        block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
        if (!block) {
                pr_err("Unable to recompile block: Out of memory\n");
                return NULL;
        }

        list = lightrec_disassemble(state, code, &length);
        if (!list) {
                lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
                return NULL;
        }

        block->pc = pc;
        block->_jit = NULL;
        block->function = NULL;
        block->opcode_list = list;
        block->code = code;
        block->next = NULL;
        block->flags = 0;
        block->code_size = 0;
#if ENABLE_THREADED_COMPILER
        block->op_list_freed = (atomic_flag)ATOMIC_FLAG_INIT;
#endif
        block->nb_ops = length / sizeof(u32);

        lightrec_optimize(state, block);

        length = block->nb_ops * sizeof(u32);

        lightrec_register(MEM_FOR_MIPS_CODE, length);

        if (ENABLE_DISASSEMBLER) {
                pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
                lightrec_print_disassembly(block, code);
        }

        pr_debug("Block size: %hu opcodes\n", block->nb_ops);

        /* If the first opcode is an 'impossible' branch, never compile the
         * block */
        if (should_emulate(block->opcode_list))
                block->flags |= BLOCK_NEVER_COMPILE;

        fully_tagged = lightrec_block_is_fully_tagged(block);
        if (fully_tagged)
                block->flags |= BLOCK_FULLY_TAGGED;

        if (OPT_REPLACE_MEMSET && (block->flags & BLOCK_IS_MEMSET))
                state->code_lut[lut_offset(pc)] = state->memset_func;

        block->hash = lightrec_calculate_block_hash(block);

        pr_debug("Recompile count: %u\n", state->nb_precompile++);

        return block;
}
static bool lightrec_block_is_fully_tagged(const struct block *block)
{
        const struct opcode *op;
        unsigned int i;

        for (i = 0; i < block->nb_ops; i++) {
                op = &block->opcode_list[i];

                /* Check all loads/stores of the opcode list and mark the
                 * block as fully compiled if they all have been tagged. */
                switch (op->c.i.op) {
                case OP_LB:
                case OP_LBU:
                case OP_LH:
                case OP_LHU:
                case OP_LWL:
                case OP_LWR:
                case OP_LW:
                case OP_SB:
                case OP_SH:
                case OP_SWL:
                case OP_SWR:
                case OP_SW:
                case OP_LWC2:
                case OP_SWC2:
                        if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
                                return false;
                default: /* fall-through */
                        continue;
                }
        }

        return true;
}
static void lightrec_reap_block(struct lightrec_state *state, void *data)
{
        struct block *block = data;

        pr_debug("Reap dead block at PC 0x%08x\n", block->pc);
        lightrec_unregister_block(state->block_cache, block);
        lightrec_free_block(state, block);
}

static void lightrec_reap_jit(struct lightrec_state *state, void *data)
{
        _jit_destroy_state(data);
}
int lightrec_compile_block(struct lightrec_cstate *cstate,
                           struct block *block)
{
        struct lightrec_state *state = cstate->state;
        struct lightrec_branch_target *target;
        bool op_list_freed = false, fully_tagged = false;
        struct block *block2;
        struct opcode *elm;
        jit_state_t *_jit, *oldjit;
        jit_node_t *start_of_block;
        bool skip_next = false;
        jit_word_t code_size;
        unsigned int i, j;
        u32 offset;

        fully_tagged = lightrec_block_is_fully_tagged(block);
        if (fully_tagged)
                block->flags |= BLOCK_FULLY_TAGGED;

        _jit = jit_new_state();
        if (!_jit)
                return -ENOMEM;

        oldjit = block->_jit;
        block->_jit = _jit;

        lightrec_regcache_reset(cstate->reg_cache);
        cstate->cycles = 0;
        cstate->nb_branches = 0;
        cstate->nb_local_branches = 0;
        cstate->nb_targets = 0;

        jit_prolog();
        jit_tramp(256);

        start_of_block = jit_label();

        for (i = 0; i < block->nb_ops; i++) {
                elm = &block->opcode_list[i];

                if (skip_next) {
                        skip_next = false;
                        continue;
                }

                cstate->cycles += lightrec_cycles_of_opcode(elm->c);

                if (should_emulate(elm)) {
                        pr_debug("Branch at offset 0x%x will be emulated\n",
                                 i << 2);

                        lightrec_emit_eob(cstate, block, i, false);
                        skip_next = !(elm->flags & LIGHTREC_NO_DS);
                } else {
                        lightrec_rec_opcode(cstate, block, i);
                        skip_next = has_delay_slot(elm->c) &&
                                    !(elm->flags & LIGHTREC_NO_DS);

                        /* FIXME: GNU Lightning on Windows seems to use our
                         * mapped registers as temporaries. Until the actual bug
                         * is found and fixed, unconditionally mark our
                         * registers as live here. */
                        lightrec_regcache_mark_live(cstate->reg_cache, _jit);
                }
        }
        for (i = 0; i < cstate->nb_branches; i++)
                jit_patch(cstate->branches[i]);

        for (i = 0; i < cstate->nb_local_branches; i++) {
                struct lightrec_branch *branch = &cstate->local_branches[i];

                pr_debug("Patch local branch to offset 0x%x\n",
                         branch->target << 2);

                if (branch->target == 0) {
                        jit_patch_at(branch->branch, start_of_block);
                        continue;
                }

                for (j = 0; j < cstate->nb_targets; j++) {
                        if (cstate->targets[j].offset == branch->target) {
                                jit_patch_at(branch->branch,
                                             cstate->targets[j].label);
                                break;
                        }
                }

                if (j == cstate->nb_targets)
                        pr_err("Unable to find branch target\n");
        }

        jit_ldxi(JIT_R0, LIGHTREC_REG_STATE,
                 offsetof(struct lightrec_state, eob_wrapper_func));
        jit_jmpr(JIT_R0);

        jit_epilog();
        block->function = jit_emit();
        block->flags &= ~BLOCK_SHOULD_RECOMPILE;

        /* Add compiled function to the LUT */
        state->code_lut[lut_offset(block->pc)] = block->function;

        if (ENABLE_THREADED_COMPILER) {
                /* Since we might try to reap the same block multiple times,
                 * we need the reaper to wait until everything has been
                 * submitted, so that the duplicate entries can be dropped. */
                lightrec_reaper_pause(state->reaper);
        }

        /* Detect old blocks that have been covered by the new one */
        for (i = 0; i < cstate->nb_targets; i++) {
                target = &cstate->targets[i];

                if (!target->offset)
                        continue;

                offset = block->pc + target->offset * sizeof(u32);
                block2 = lightrec_find_block(state->block_cache, offset);
                if (block2) {
                        /* No need to check if block2 is compilable - it must
                         * be, otherwise block wouldn't be compilable either */

                        /* Set the "block dead" flag to prevent the dynarec from
                         * recompiling this block */
                        block2->flags |= BLOCK_IS_DEAD;

                        /* If block2 was pending for compilation, cancel it.
                         * If it's being compiled right now, wait until it
                         * finishes. */
                        if (ENABLE_THREADED_COMPILER)
                                lightrec_recompiler_remove(state->rec, block2);
                }

                /* We know from now on that block2 (if present) isn't going to
                 * be compiled. We can override the LUT entry with our new
                 * block's entry point. */
                offset = lut_offset(block->pc) + target->offset;
                state->code_lut[offset] = jit_address(target->label);

                if (block2) {
                        pr_debug("Reap block 0x%08x as it's covered by block "
                                 "0x%08x\n", block2->pc, block->pc);

                        /* Finally, reap the block. */
                        if (ENABLE_THREADED_COMPILER) {
                                lightrec_reaper_add(state->reaper,
                                                    lightrec_reap_block,
                                                    block2);
                        } else {
                                lightrec_unregister_block(state->block_cache, block2);
                                lightrec_free_block(state, block2);
                        }
                }
        }

        if (ENABLE_THREADED_COMPILER)
                lightrec_reaper_continue(state->reaper);

        jit_get_code(&code_size);
        lightrec_register(MEM_FOR_CODE, code_size);

        block->code_size = code_size;

        if (ENABLE_DISASSEMBLER) {
                pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
                jit_disassemble();
        }

        jit_clear_state();

#if ENABLE_THREADED_COMPILER
        if (fully_tagged)
                op_list_freed = atomic_flag_test_and_set(&block->op_list_freed);
#endif
        if (fully_tagged && !op_list_freed) {
                pr_debug("Block PC 0x%08x is fully tagged"
                         " - free opcode list\n", block->pc);
                lightrec_free_opcode_list(state, block);
                block->opcode_list = NULL;
        }

        if (oldjit) {
                pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
                         block->pc);

                if (ENABLE_THREADED_COMPILER)
                        lightrec_reaper_add(state->reaper,
                                            lightrec_reap_jit, oldjit);
                else
                        _jit_destroy_state(oldjit);
        }

        return 0;
}
static void lightrec_print_info(struct lightrec_state *state)
{
        if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
                pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
                        "MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
                        lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
                        lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
                        lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
                        lightrec_get_total_mem_usage() / 1024,
                        lightrec_get_average_ipi());

                state->old_cycle_counter = state->current_cycle & ~0xfffffff;
        }
}
u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
{
        s32 (*func)(void *, s32) = (void *)state->dispatcher->function;
        void *block_trace;
        s32 cycles_delta;

        state->exit_flags = LIGHTREC_EXIT_NORMAL;

        /* Handle the cycle counter overflowing */
        if (unlikely(target_cycle < state->current_cycle))
                target_cycle = UINT_MAX;

        state->target_cycle = target_cycle;
        state->next_pc = pc;

        block_trace = get_next_block_func(state, pc);
        if (block_trace) {
                cycles_delta = state->target_cycle - state->current_cycle;

                cycles_delta = (*func)(block_trace, cycles_delta);

                state->current_cycle = state->target_cycle - cycles_delta;
        }

        if (ENABLE_THREADED_COMPILER)
                lightrec_reaper_reap(state->reaper);

        if (LOG_LEVEL >= INFO_L)
                lightrec_print_info(state);

        return state->next_pc;
}
u32 lightrec_execute_one(struct lightrec_state *state, u32 pc)
{
        return lightrec_execute(state, pc, state->current_cycle);
}

u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc)
{
        struct block *block = lightrec_get_block(state, pc);
        if (!block)
                return 0;

        state->exit_flags = LIGHTREC_EXIT_NORMAL;

        pc = lightrec_emulate_block(state, block, pc);

        if (LOG_LEVEL >= INFO_L)
                lightrec_print_info(state);

        return pc;
}
void lightrec_free_block(struct lightrec_state *state, struct block *block)
{
        lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
        if (block->opcode_list)
                lightrec_free_opcode_list(state, block);
        if (block->_jit)
                _jit_destroy_state(block->_jit);
        lightrec_unregister(MEM_FOR_CODE, block->code_size);
        lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
}
struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
{
        struct lightrec_cstate *cstate;

        cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
        if (!cstate)
                return NULL;

        cstate->reg_cache = lightrec_regcache_init(state);
        if (!cstate->reg_cache) {
                lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
                return NULL;
        }

        cstate->state = state;

        return cstate;
}

void lightrec_free_cstate(struct lightrec_cstate *cstate)
{
        lightrec_free_regcache(cstate->reg_cache);
        lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
}
struct lightrec_state * lightrec_init(char *argv0,
                                      const struct lightrec_mem_map *map,
                                      size_t nb,
                                      const struct lightrec_ops *ops)
{
        struct lightrec_state *state;

        /* Sanity-check ops */
        if (!ops || !ops->cop2_op || !ops->enable_ram) {
                pr_err("Missing callbacks in lightrec_ops structure\n");
                return NULL;
        }

        init_jit(argv0);

        state = calloc(1, sizeof(*state) +
                       sizeof(*state->code_lut) * CODE_LUT_SIZE);
        if (!state)
                goto err_finish_jit;

        lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) +
                          sizeof(*state->code_lut) * CODE_LUT_SIZE);

#if ENABLE_TINYMM
        state->tinymm = tinymm_init(malloc, free, 4096);
        if (!state->tinymm)
                goto err_free_state;
#endif

        state->block_cache = lightrec_blockcache_init(state);
        if (!state->block_cache)
                goto err_free_tinymm;

        if (ENABLE_THREADED_COMPILER) {
                state->rec = lightrec_recompiler_init(state);
                if (!state->rec)
                        goto err_free_block_cache;

                state->reaper = lightrec_reaper_init(state);
                if (!state->reaper)
                        goto err_free_recompiler;
        } else {
                state->cstate = lightrec_create_cstate(state);
                if (!state->cstate)
                        goto err_free_block_cache;
        }

        state->nb_maps = nb;
        state->maps = map;

        memcpy(&state->ops, ops, sizeof(*ops));

        state->dispatcher = generate_dispatcher(state);
        if (!state->dispatcher)
                goto err_free_reaper;

        state->c_wrapper_block = generate_wrapper(state);
        if (!state->c_wrapper_block)
                goto err_free_dispatcher;

        state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
        state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
        state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
        state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;
        state->c_wrappers[C_WRAPPER_SYSCALL] = lightrec_syscall_cb;
        state->c_wrappers[C_WRAPPER_BREAK] = lightrec_break_cb;

        map = &state->maps[PSX_MAP_BIOS];
        state->offset_bios = (uintptr_t)map->address - map->pc;

        map = &state->maps[PSX_MAP_SCRATCH_PAD];
        state->offset_scratch = (uintptr_t)map->address - map->pc;

        map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
        state->offset_ram = (uintptr_t)map->address - map->pc;

        if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
            state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
            state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
                state->mirrors_mapped = true;

        if (state->offset_bios == 0 &&
            state->offset_scratch == 0 &&
            state->offset_ram == 0 &&
            state->mirrors_mapped) {
                pr_info("Memory map is perfect. Emitted code will be best.\n");
        } else {
                pr_info("Memory map is sub-par. Emitted code will be slow.\n");
        }

        return state;
err_free_dispatcher:
        lightrec_free_block(state, state->dispatcher);
err_free_reaper:
        if (ENABLE_THREADED_COMPILER)
                lightrec_reaper_destroy(state->reaper);
err_free_recompiler:
        if (ENABLE_THREADED_COMPILER)
                lightrec_free_recompiler(state->rec);
        else
                lightrec_free_cstate(state->cstate);
err_free_block_cache:
        lightrec_free_block_cache(state->block_cache);
err_free_tinymm:
#if ENABLE_TINYMM
        tinymm_shutdown(state->tinymm);
err_free_state:
#endif
        lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
                            sizeof(*state->code_lut) * CODE_LUT_SIZE);
        free(state);
err_finish_jit:
        finish_jit();
        return NULL;
}
void lightrec_destroy(struct lightrec_state *state)
{
        /* Force a print info on destroy */
        state->current_cycle = ~state->current_cycle;
        lightrec_print_info(state);

        if (ENABLE_THREADED_COMPILER) {
                lightrec_free_recompiler(state->rec);
                lightrec_reaper_destroy(state->reaper);
        } else {
                lightrec_free_cstate(state->cstate);
        }

        lightrec_free_block_cache(state->block_cache);
        lightrec_free_block(state, state->dispatcher);
        lightrec_free_block(state, state->c_wrapper_block);
        finish_jit();

#if ENABLE_TINYMM
        tinymm_shutdown(state->tinymm);
#endif
        lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
                            sizeof(*state->code_lut) * CODE_LUT_SIZE);
        free(state);
}
void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
{
        u32 kaddr = kunseg(addr & ~0x3);
        const struct lightrec_mem_map *map = lightrec_get_map(state, NULL, kaddr);

        if (map) {
                if (map != &state->maps[PSX_MAP_KERNEL_USER_RAM])
                        return;

                /* Handle mirrors */
                kaddr &= (state->maps[PSX_MAP_KERNEL_USER_RAM].length - 1);
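                /*
                 * Example: a write to 0x80600123 (RAM seen through the third
                 * mirror in KSEG0) has kaddr 0x00600123, which the mask above
                 * folds back to 0x00000123 inside the 2 MiB of physical RAM.
                 */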

                lightrec_invalidate_map(state, map, kaddr, len);
        }
}
void lightrec_invalidate_all(struct lightrec_state *state)
{
        memset(state->code_lut, 0, sizeof(*state->code_lut) * CODE_LUT_SIZE);
}

void lightrec_set_invalidate_mode(struct lightrec_state *state, bool dma_only)
{
        if (state->invalidate_from_dma_only != dma_only)
                lightrec_invalidate_all(state);

        state->invalidate_from_dma_only = dma_only;
}

void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
        if (flags != LIGHTREC_EXIT_NORMAL) {
                state->exit_flags |= flags;
                state->target_cycle = state->current_cycle;
        }
}

u32 lightrec_exit_flags(struct lightrec_state *state)
{
        return state->exit_flags;
}

u32 lightrec_current_cycle_count(const struct lightrec_state *state)
{
        return state->current_cycle;
}

void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
{
        state->current_cycle = cycles;

        if (state->target_cycle < cycles)
                state->target_cycle = cycles;
}

void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
{
        if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
                if (cycles < state->current_cycle)
                        cycles = state->current_cycle;

                state->target_cycle = cycles;
        }
}

struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
{
        return &state->regs;
}