// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "blockcache.h"
#include "disassembler.h"
#include "interpreter.h"
#include "lightrec-config.h"
#include "lightning-wrapper.h"
#include "memmanager.h"
#include "recompiler.h"
#include "optimizer.h"
#include "tlsf/tlsf.h"

#if ENABLE_THREADED_COMPILER
#include <stdatomic.h>
#endif
static struct block * lightrec_precompile_block(struct lightrec_state *state,
						u32 pc);
static bool lightrec_block_is_fully_tagged(const struct block *block);

static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);
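
/* Default memory accessors, used when a map has no custom ops: they access
 * the host memory directly. Stores also invalidate the code LUT entries for
 * the written addresses, unless the (unsafe) "invalidate DMA only" flag is
 * set, in which case the frontend is expected to invalidate explicitly. */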
static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u8 data)
{
	*(u8 *)host = data;

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 1);
}
static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u16 data)
{
	*(u16 *)host = HTOLE16(data);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 2);
}
static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
				void *host, u32 addr, u32 data)
{
	*(u32 *)host = HTOLE32(data);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr, 4);
}
static u8 lightrec_default_lb(struct lightrec_state *state,
			      u32 opcode, void *host, u32 addr)
{
	return *(u8 *)host;
}
static u16 lightrec_default_lh(struct lightrec_state *state,
			       u32 opcode, void *host, u32 addr)
{
	return LE16TOH(*(u16 *)host);
}
static u32 lightrec_default_lw(struct lightrec_state *state,
			       u32 opcode, void *host, u32 addr)
{
	return LE32TOH(*(u32 *)host);
}
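
/* The two accessors below handle the meta load/store opcodes emitted by the
 * optimizer when a LWL/LWR (resp. SWL/SWR) pair accesses a full but
 * misaligned 32-bit word; memcpy() is used since the host address may not
 * be 4-byte aligned. */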
static u32 lightrec_default_lwu(struct lightrec_state *state,
				u32 opcode, void *host, u32 addr)
{
	u32 val;

	memcpy(&val, host, 4);

	return LE32TOH(val);
}
static void lightrec_default_swu(struct lightrec_state *state, u32 opcode,
				 void *host, u32 addr, u32 data)
{
	data = HTOLE32(data);

	memcpy(host, &data, 4);

	/* The misaligned word may span two 32-bit words, so invalidate
	 * 8 bytes starting from the aligned base address. */
	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate(state, addr & ~0x3, 8);
}
static const struct lightrec_mem_map_ops lightrec_default_ops = {
	.sb = lightrec_default_sb,
	.sh = lightrec_default_sh,
	.sw = lightrec_default_sw,
	.lb = lightrec_default_lb,
	.lh = lightrec_default_lh,
	.lw = lightrec_default_lw,
	.lwu = lightrec_default_lwu,
	.swu = lightrec_default_swu,
};
static void __segfault_cb(struct lightrec_state *state, u32 addr,
			  const struct block *block)
{
	lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
	pr_err("Segmentation fault in recompiled code: invalid "
	       "load/store at address "PC_FMT"\n", addr);
	if (block)
		pr_err("Was executing block "PC_FMT"\n", block->pc);
}
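
/* SWL/SWR (and LWL/LWR) access the unaligned word containing the effective
 * address: SWL stores the most significant bytes of the register into the
 * lower bytes of the word, SWR the least significant bytes into the upper
 * bytes. The helpers below emulate them with an aligned load, a merge and an
 * aligned store. Worked example for SWL with shift = addr & 0x3 == 1: the
 * word keeps its top 16 bits (mask 0xffff0000) and receives the register's
 * top 16 bits (data >> 16). */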
static void lightrec_swl(struct lightrec_state *state,
			 const struct lightrec_mem_map_ops *ops,
			 u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = shift < 3 ? GENMASK(31, (shift + 1) * 8) : 0;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	data = (data >> ((3 - shift) * 8)) | (old_data & mask);

	ops->sw(state, opcode, host, addr, data);
}
static void lightrec_swr(struct lightrec_state *state,
			 const struct lightrec_mem_map_ops *ops,
			 u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = (1 << (shift * 8)) - 1;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	data = (data << (shift * 8)) | (old_data & mask);

	ops->sw(state, opcode, host, addr, data);
}
static void lightrec_swc2(struct lightrec_state *state, union code op,
			  const struct lightrec_mem_map_ops *ops,
			  void *host, u32 addr)
{
	u32 data = lightrec_mfc2(state, op.i.rt);

	ops->sw(state, op.opcode, host, addr, data);
}
static u32 lightrec_lwl(struct lightrec_state *state,
			const struct lightrec_mem_map_ops *ops,
			u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = (1 << (24 - shift * 8)) - 1;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	return (data & mask) | (old_data << (24 - shift * 8));
}
static u32 lightrec_lwr(struct lightrec_state *state,
			const struct lightrec_mem_map_ops *ops,
			u32 opcode, void *host, u32 addr, u32 data)
{
	unsigned int shift = addr & 0x3;
	unsigned int mask = shift ? GENMASK(31, 32 - shift * 8) : 0;
	u32 old_data;

	/* Align to 32 bits */
	addr &= ~3;
	host = (void *)((uintptr_t)host & ~3);

	old_data = ops->lw(state, opcode, host, addr);

	return (data & mask) | (old_data >> (shift * 8));
}
static void lightrec_lwc2(struct lightrec_state *state, union code op,
			  const struct lightrec_mem_map_ops *ops,
			  void *host, u32 addr)
{
	u32 data = ops->lw(state, op.opcode, host, addr);

	lightrec_mtc2(state, op.i.rt, data);
}
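
/* The code LUT associates each 32-bit word of RAM with the compiled code of
 * the block starting at that address, so invalidating a range means clearing
 * one LUT entry per word, with the length rounded up. */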
static void lightrec_invalidate_map(struct lightrec_state *state,
		const struct lightrec_mem_map *map, u32 addr, u32 len)
{
	if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
		memset(lut_address(state, lut_offset(addr)), 0,
		       ((len + 3) / 4) * lut_elm_size(state));
	}
}
static enum psx_map
lightrec_get_map_idx(struct lightrec_state *state, u32 kaddr)
{
	const struct lightrec_mem_map *map;
	unsigned int i;

	for (i = 0; i < state->nb_maps; i++) {
		map = &state->maps[i];

		if (kaddr >= map->pc && kaddr < map->pc + map->length)
			return (enum psx_map) i;
	}

	return PSX_MAP_UNKNOWN;
}
const struct lightrec_mem_map *
lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
{
	const struct lightrec_mem_map *map;
	enum psx_map idx;
	u32 addr;

	idx = lightrec_get_map_idx(state, kaddr);
	if (idx == PSX_MAP_UNKNOWN)
		return NULL;

	map = &state->maps[idx];
	addr = kaddr - map->pc;

	while (map->mirror_of)
		map = map->mirror_of;

	if (host)
		*host = map->address + addr;

	return map;
}
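
/* lightrec_rw() is the C fallback for every load/store opcode. It resolves
 * the guest address to a host pointer, dispatches to the right accessor, and
 * tags the opcode with the I/O mode it detected (direct RAM access vs.
 * hardware I/O), so that the next recompilation of the block can emit a
 * faster, direct memory access. */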
u32 lightrec_rw(struct lightrec_state *state, union code op, u32 base,
		u32 data, u32 *flags, struct block *block, u16 offset)
{
	const struct lightrec_mem_map *map;
	const struct lightrec_mem_map_ops *ops;
	u32 opcode = op.opcode;
	bool was_tagged = true;
	u32 old_flags;
	u32 addr;
	void *host;

	addr = kunseg(base + (s16) op.i.imm);

	map = lightrec_get_map(state, &host, addr);
	if (!map) {
		__segfault_cb(state, addr, block);
		return 0;
	}

	if (flags)
		was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(*flags);

	if (likely(!map->ops)) {
		if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags)) {
			/* Force parallel port accesses as HW accesses, because
			 * the direct-I/O emitters can't differentiate them. */
			if (unlikely(map == &state->maps[PSX_MAP_PARALLEL_PORT]))
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
			/* If the base register is 0x0, be extra suspicious.
			 * Some games (e.g. Sled Storm) actually do segmentation
			 * faults by using uninitialized pointers, which are
			 * later initialized to point to hardware registers. */
			else if (op.i.rs && base == 0x0)
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
			else
				*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
		}

		ops = &lightrec_default_ops;
	} else if (flags &&
		   LIGHTREC_FLAGS_GET_IO_MODE(*flags) == LIGHTREC_IO_DIRECT_HW) {
		ops = &lightrec_default_ops;
	} else {
		if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
			*flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);

		ops = map->ops;
	}

	if (!was_tagged) {
		old_flags = block_set_flags(block, BLOCK_SHOULD_RECOMPILE);

		if (!(old_flags & BLOCK_SHOULD_RECOMPILE)) {
			pr_debug("Opcode of block at "PC_FMT" has been tagged"
				 " - flag for recompilation\n", block->pc);

			lut_write(state, lut_offset(block->pc), NULL);
		}
	}

	switch (op.i.op) {
	case OP_SB:
		ops->sb(state, opcode, host, addr, (u8) data);
		return 0;
	case OP_SH:
		ops->sh(state, opcode, host, addr, (u16) data);
		return 0;
	case OP_SWL:
		lightrec_swl(state, ops, opcode, host, addr, data);
		return 0;
	case OP_SWR:
		lightrec_swr(state, ops, opcode, host, addr, data);
		return 0;
	case OP_SW:
		ops->sw(state, opcode, host, addr, data);
		return 0;
	case OP_SWC2:
		lightrec_swc2(state, op, ops, host, addr);
		return 0;
	case OP_LB:
		return (s32) (s8) ops->lb(state, opcode, host, addr);
	case OP_LBU:
		return ops->lb(state, opcode, host, addr);
	case OP_LH:
		return (s32) (s16) ops->lh(state, opcode, host, addr);
	case OP_LHU:
		return ops->lh(state, opcode, host, addr);
	case OP_LWC2:
		lightrec_lwc2(state, op, ops, host, addr);
		return 0;
	case OP_LWL:
		return lightrec_lwl(state, ops, opcode, host, addr, data);
	case OP_LWR:
		return lightrec_lwr(state, ops, opcode, host, addr, data);
	case OP_META_LWU:
		return ops->lwu(state, opcode, host, addr);
	case OP_META_SWU:
		ops->swu(state, opcode, host, addr, data);
		return 0;
	case OP_LW:
	default:
		return ops->lw(state, opcode, host, addr);
	}
}
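
/* When OPT_HANDLE_LOAD_DELAYS is set and a load is executed in a branch
 * delay slot, the loaded value must not be written to the target register
 * right away: it is parked in state->temp_reg, and only committed once the
 * branch target's first opcode is known not to read the old value (see
 * lightrec_check_load_delay() below). */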
static void lightrec_rw_helper(struct lightrec_state *state,
			       union code op, u32 *flags,
			       struct block *block, u16 offset)
{
	u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
			      state->regs.gpr[op.i.rt], flags, block, offset);

	switch (op.i.op) {
	case OP_LB: case OP_LBU: case OP_LH: case OP_LHU:
	case OP_LWL: case OP_LWR: case OP_LW: case OP_META_LWU:
		if (OPT_HANDLE_LOAD_DELAYS && unlikely(!state->in_delay_slot_n)) {
			state->temp_reg = ret;
			state->in_delay_slot_n = 0xff;
		} else if (op.i.rt) {
			state->regs.gpr[op.i.rt] = ret;
		}
		break;
	default:
		break;
	}
}
static void lightrec_rw_cb(struct lightrec_state *state, u32 arg)
{
	lightrec_rw_helper(state, (union code) arg, NULL, NULL, 0);
}
static void lightrec_rw_generic_cb(struct lightrec_state *state, u32 arg)
{
	struct block *block;
	struct opcode *op;
	u16 offset = (u16)arg;

	block = lightrec_find_block_from_lut(state->block_cache,
					     arg >> 16, state->curr_pc);
	if (unlikely(!block)) {
		pr_err("rw_generic: No block found in LUT for "PC_FMT" offset 0x%"PRIx16"\n",
		       state->curr_pc, offset);
		lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
		return;
	}

	op = &block->opcode_list[offset];
	lightrec_rw_helper(state, op->c, &op->flags, block, offset);
}
static u32 clamp_s32(s32 val, s32 min, s32 max)
{
	return val < min ? min : val > max ? max : val;
}
static u16 load_u16(u32 *ptr)
{
	return ((struct u16x2 *) ptr)->l;
}

static void store_u16(u32 *ptr, u16 value)
{
	((struct u16x2 *) ptr)->l = value;
}
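
/* Reading GTE registers through MFC2 is not always a plain 32-bit read:
 * several data registers are 16-bit and read back sign- or zero-extended,
 * and ORGB (cp2d[28/29]) is computed on the fly by clamping IR1/IR2/IR3 >> 7
 * into three 5-bit colour fields. */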
static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
{
	s16 gteir1, gteir2, gteir3;

	switch (reg) {
	case 1: case 3: case 5:
	case 8: case 9: case 10: case 11:
		return (s32)(s16) load_u16(&state->regs.cp2d[reg]);
	case 7: case 16: case 17: case 18: case 19:
		return load_u16(&state->regs.cp2d[reg]);
	case 28:
	case 29:
		gteir1 = (s16) load_u16(&state->regs.cp2d[9]);
		gteir2 = (s16) load_u16(&state->regs.cp2d[10]);
		gteir3 = (s16) load_u16(&state->regs.cp2d[11]);

		return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
		       clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
		       clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
	default:
		return state->regs.cp2d[reg];
	}
}
u32 lightrec_mfc(struct lightrec_state *state, union code op)
{
	u32 val;

	if (op.i.op == OP_CP0)
		return state->regs.cp0[op.r.rd];

	if (op.i.op == OP_SWC2) {
		val = lightrec_mfc2(state, op.i.rt);
	} else if (op.r.rs == OP_CP2_BASIC_MFC2) {
		val = lightrec_mfc2(state, op.r.rd);
	} else {
		val = state->regs.cp2c[op.r.rd];

		switch (op.r.rd) {
		case 4: case 12: case 20:
		case 26: case 27: case 29: case 30:
			val = (u32)(s16)val;
			break;
		default:
			break;
		}
	}

	if (state->ops.cop2_notify)
		(*state->ops.cop2_notify)(state, op.opcode, val);

	return val;
}
static void lightrec_mfc_cb(struct lightrec_state *state, union code op)
{
	u32 rt = lightrec_mfc(state, op);

	if (op.i.op == OP_SWC2)
		state->temp_reg = rt;
	else if (op.r.rt)
		state->regs.gpr[op.r.rt] = rt;
}
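
/* CP0 writes have side effects: toggling bit 16 of the status register
 * (used by the PSX kernel to isolate the cache) remaps RAM through the
 * enable_ram() callback, and writes to the status/cause registers may
 * unmask a pending interrupt, which is signalled to the caller with
 * LIGHTREC_EXIT_CHECK_INTERRUPT. */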
static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
{
	u32 status, oldstatus, cause;

	switch (reg) {
	case 1: case 4: case 8: case 14: case 15:
		/* Those registers are read-only */
		return;
	default:
		break;
	}

	if (reg == 12) {
		status = state->regs.cp0[12];
		oldstatus = status;

		if (status & ~data & BIT(16)) {
			state->ops.enable_ram(state, true);
			lightrec_invalidate_all(state);
		} else if (~status & data & BIT(16)) {
			state->ops.enable_ram(state, false);
		}
	}

	if (reg == 13) {
		state->regs.cp0[13] &= ~0x300;
		state->regs.cp0[13] |= data & 0x300;
	} else {
		state->regs.cp0[reg] = data;
	}

	if (reg == 12 || reg == 13) {
		cause = state->regs.cp0[13];
		status = state->regs.cp0[12];

		/* Handle software interrupts: one of the SW interrupt bits is
		 * pending and enabled, and interrupts are globally enabled
		 * (IEc, bit 0 of the status register) */
		if ((status & cause & 0x300) && (status & BIT(0)))
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);

		/* Handle hardware interrupts */
		if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
	}
}
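
/* Emulates the GTE LZCR register: the number of leading bits of LZCS that
 * are equal to its sign bit, i.e. leading zeroes for positive values and
 * leading ones for negative values. */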
static u32 count_leading_bits(s32 data)
{
	u32 cnt = 32;

#ifdef __has_builtin
#if __has_builtin(__builtin_clrsb)
	return 1 + __builtin_clrsb(data);
#endif
#endif

	data = (data ^ (data >> 31)) << 1;

	while (data) {
		cnt -= 1;
		data <<= 1;
	}

	return cnt;
}
static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
{
	switch (reg) {
	case 15:
		state->regs.cp2d[12] = state->regs.cp2d[13];
		state->regs.cp2d[13] = state->regs.cp2d[14];
		state->regs.cp2d[14] = data;
		break;
	case 28:
		state->regs.cp2d[9] = (data << 7) & 0xf80;
		state->regs.cp2d[10] = (data << 2) & 0xf80;
		state->regs.cp2d[11] = (data >> 3) & 0xf80;
		break;
	case 31:
		return;
	case 30:
		state->regs.cp2d[31] = count_leading_bits((s32) data);
		fallthrough;
	default:
		state->regs.cp2d[reg] = data;
		break;
	}
}
static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
{
	switch (reg) {
	case 4: case 12: case 20:
	case 26: case 27: case 29: case 30:
		store_u16(&state->regs.cp2c[reg], data);
		break;
	case 31:
		data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
		fallthrough;
	default:
		state->regs.cp2c[reg] = data;
	}
}
void lightrec_mtc(struct lightrec_state *state, union code op, u8 reg, u32 data)
{
	if (op.i.op == OP_CP0) {
		lightrec_mtc0(state, reg, data);
	} else {
		if (op.i.op == OP_LWC2 || op.r.rs != OP_CP2_BASIC_CTC2)
			lightrec_mtc2(state, reg, data);
		else
			lightrec_ctc2(state, reg, data);

		if (state->ops.cop2_notify)
			(*state->ops.cop2_notify)(state, op.opcode, data);
	}
}
static void lightrec_mtc_cb(struct lightrec_state *state, u32 arg)
{
	union code op = (union code) arg;
	u32 data;
	u8 reg;

	if (op.i.op == OP_LWC2) {
		data = state->temp_reg;
		reg = op.i.rt;
	} else {
		data = state->regs.gpr[op.r.rt];
		reg = op.r.rd;
	}

	lightrec_mtc(state, op, reg, data);
}
void lightrec_rfe(struct lightrec_state *state)
{
	u32 status;

	/* Read CP0 Status register (r12) */
	status = state->regs.cp0[12];

	/* Switch the bits */
	status = ((status & 0x3c) >> 2) | (status & ~0xf);

	lightrec_mtc0(state, 12, status);
}
void lightrec_cp(struct lightrec_state *state, union code op)
{
	if (op.i.op == OP_CP0) {
		pr_err("Invalid CP opcode to coprocessor #0\n");
		return;
	}

	(*state->ops.cop2_op)(state, op.opcode);
}

static void lightrec_cp_cb(struct lightrec_state *state, u32 arg)
{
	lightrec_cp(state, (union code) arg);
}
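
/* Look up a block in the block cache, and drop it if the PSX code it was
 * compiled from has since been modified (detected by comparing the block's
 * hash against a freshly computed one); outdated or missing blocks are
 * then (re)created by lightrec_precompile_block(). */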
static struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
{
	struct block *block = lightrec_find_block(state->block_cache, pc);
	u32 old_flags;

	if (block && lightrec_block_is_outdated(state, block)) {
		pr_debug("Block at "PC_FMT" is outdated!\n", block->pc);

		old_flags = block_set_flags(block, BLOCK_IS_DEAD);
		if (!(old_flags & BLOCK_IS_DEAD)) {
			/* Make sure the recompiler isn't processing the block
			 * we'll destroy */
			if (ENABLE_THREADED_COMPILER)
				lightrec_recompiler_remove(state->rec, block);

			lightrec_unregister_block(state->block_cache, block);
			remove_from_code_lut(state->block_cache, block);
			lightrec_free_block(state, block);
		}

		block = NULL;
	}

	if (!block) {
		block = lightrec_precompile_block(state, pc);
		if (!block) {
			pr_err("Unable to recompile block at "PC_FMT"\n", pc);
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
			return NULL;
		}

		lightrec_register_block(state->block_cache, block);
	}

	return block;
}
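
/* Slow path of the dispatcher: resolve the next PC to a block of compiled
 * code, precompiling / interpreting / (re)compiling as needed. The resolved
 * PC is stored back into state->curr_pc, and the returned host address is
 * the code the dispatcher should jump to next. */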
static void * get_next_block_func(struct lightrec_state *state, u32 pc)
{
	struct block *block;
	bool should_recompile;
	void *func;
	int err;

	do {
		func = lut_read(state, lut_offset(pc));
		if (func && func != state->get_next_block)
			break;

		block = lightrec_get_block(state, pc);

		if (unlikely(!block))
			break;

		if (OPT_REPLACE_MEMSET &&
		    block_has_flag(block, BLOCK_IS_MEMSET)) {
			func = state->memset_func;
			break;
		}

		should_recompile = block_has_flag(block, BLOCK_SHOULD_RECOMPILE) &&
			!block_has_flag(block, BLOCK_NEVER_COMPILE) &&
			!block_has_flag(block, BLOCK_IS_DEAD);

		if (unlikely(should_recompile)) {
			pr_debug("Block at "PC_FMT" should recompile\n", pc);

			if (ENABLE_THREADED_COMPILER) {
				lightrec_recompiler_add(state->rec, block);
			} else {
				err = lightrec_compile_block(state->cstate, block);
				if (err) {
					state->exit_flags = LIGHTREC_EXIT_NOMEM;
					return NULL;
				}
			}
		}

		if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
			func = lightrec_recompiler_run_first_pass(state, block, &pc);
		else
			func = block->function;

		if (likely(func))
			break;

		if (unlikely(block_has_flag(block, BLOCK_NEVER_COMPILE))) {
			pc = lightrec_emulate_block(state, block, pc);
		} else if (!ENABLE_THREADED_COMPILER) {
			/* Block wasn't compiled yet - run the interpreter */
			if (block_has_flag(block, BLOCK_FULLY_TAGGED))
				pr_debug("Block fully tagged, skipping first pass\n");
			else if (ENABLE_FIRST_PASS && likely(!should_recompile))
				pc = lightrec_emulate_block(state, block, pc);

			/* Then compile it using the profiled data */
			err = lightrec_compile_block(state->cstate, block);
			if (err) {
				state->exit_flags = LIGHTREC_EXIT_NOMEM;
				return NULL;
			}
		} else if (unlikely(block_has_flag(block, BLOCK_IS_DEAD))) {
			/*
			 * If the block is dead but has never been compiled,
			 * then its function pointer is NULL and we cannot
			 * execute the block. In that case, reap all the dead
			 * blocks now, and in the next loop we will create a
			 * new block.
			 */
			lightrec_reaper_reap(state->reaper);
		} else {
			lightrec_recompiler_add(state->rec, block);
		}
	} while (state->exit_flags == LIGHTREC_EXIT_NORMAL
		 && state->current_cycle < state->target_cycle);

	state->curr_pc = pc;
	return func;
}
static void * lightrec_alloc_code(struct lightrec_state *state, size_t size)
{
	void *code;

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	code = tlsf_malloc(state->tlsf, size);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);

	return code;
}
static void lightrec_realloc_code(struct lightrec_state *state,
				  void *ptr, size_t size)
{
	/* NOTE: 'size' MUST be smaller than the size specified during
	 * the allocation. */

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	tlsf_realloc(state->tlsf, ptr, size);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);
}
static void lightrec_free_code(struct lightrec_state *state, void *ptr)
{
	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_lock(state);

	tlsf_free(state->tlsf, ptr);

	if (ENABLE_THREADED_COMPILER)
		lightrec_code_alloc_unlock(state);
}
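
/* When a code buffer was provided at init time, the emitted code lives in
 * TLSF-managed memory inside that buffer rather than in pages allocated by
 * GNU Lightning itself; the lock above serializes allocations between the
 * compiler thread and the reaper when the threaded compiler is used. */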
static char lightning_code_data[0x80000];

static void * lightrec_emit_code(struct lightrec_state *state,
				 const struct block *block,
				 jit_state_t *_jit, unsigned int *size)
{
	bool has_code_buffer = ENABLE_CODE_BUFFER && state->tlsf;
	jit_word_t code_size, new_code_size;
	void *code;

	jit_realize();

	if (ENABLE_DISASSEMBLER)
		jit_set_data(lightning_code_data, sizeof(lightning_code_data), 0);
	else
		jit_set_data(NULL, 0, JIT_DISABLE_DATA | JIT_DISABLE_NOTE);

	if (has_code_buffer) {
		jit_get_code(&code_size);
		code = lightrec_alloc_code(state, (size_t) code_size);

		if (!code) {
			if (ENABLE_THREADED_COMPILER) {
				/* If we're using the threaded compiler, return
				 * an allocation error here. The threaded
				 * compiler will then empty its job queue and
				 * request a code flush using the reaper. */
				return NULL;
			}

			/* Remove outdated blocks, and try again */
			lightrec_remove_outdated_blocks(state->block_cache, block);

			pr_debug("Retrying to allocate %zu bytes...\n",
				 (size_t) code_size);

			code = lightrec_alloc_code(state, (size_t) code_size);
			if (!code) {
				pr_err("Could not alloc even after removing old blocks!\n");
				return NULL;
			}
		}

		jit_set_code(code, code_size);
	}

	code = jit_emit();

	jit_get_code(&new_code_size);
	lightrec_register(MEM_FOR_CODE, new_code_size);

	if (has_code_buffer) {
		lightrec_realloc_code(state, code, (size_t) new_code_size);

		pr_debug("Creating code block at address 0x%" PRIxPTR ", "
			 "code size: %" PRIuPTR " new: %" PRIuPTR "\n",
			 (uintptr_t) code, (uintptr_t) code_size,
			 (uintptr_t) new_code_size);
	}

	*size = (unsigned int) new_code_size;

	if (state->ops.code_inv)
		state->ops.code_inv(code, new_code_size);

	return code;
}
static struct block * generate_wrapper(struct lightrec_state *state)
{
	struct block *block;
	jit_state_t *_jit;
	unsigned int i;
	jit_node_t *addr[C_WRAPPERS_COUNT - 1];
	jit_node_t *to_end[C_WRAPPERS_COUNT - 1];
	u8 tmp = JIT_R1;

#ifdef __sh__
	/* On SH, GBR-relative loads target the r0 register.
	 * Use it as the temporary register to factorize the move to
	 * the target register. */
	if (LIGHTREC_REG_STATE == _GBR)
		tmp = _R0;
#endif

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block)
		goto err_no_mem;

	_jit = jit_new_state();
	if (!_jit)
		goto err_free_block;

	jit_name("RW wrapper");
	jit_note(__FILE__, __LINE__);

	/* Wrapper entry point */
	jit_prolog();
	jit_tramp(256);

	/* Add entry points */
	for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
		jit_ldxi(tmp, LIGHTREC_REG_STATE,
			 offsetof(struct lightrec_state, c_wrappers[i]));
		to_end[i - 1] = jit_b();
		addr[i - 1] = jit_indirect();
	}

	jit_ldxi(tmp, LIGHTREC_REG_STATE,
		 offsetof(struct lightrec_state, c_wrappers[0]));

	for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
		jit_patch(to_end[i]);
	jit_movr(JIT_R1, tmp);

	jit_epilog();
	jit_prolog();

	/* Save all temporaries on stack */
	for (i = 0; i < NUM_TEMPS; i++) {
		if (i + FIRST_TEMP != 1) {
			jit_stxi(offsetof(struct lightrec_state, wrapper_regs[i]),
				 LIGHTREC_REG_STATE, JIT_R(i + FIRST_TEMP));
		}
	}

	jit_getarg(JIT_R2, jit_arg());

	jit_prepare();
	jit_pushargr(LIGHTREC_REG_STATE);
	jit_pushargr(JIT_R2);

	jit_ldxi_ui(JIT_R2, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, target_cycle));

	/* state->current_cycle = state->target_cycle - delta; */
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, LIGHTREC_REG_CYCLE);
	jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
		   LIGHTREC_REG_STATE, LIGHTREC_REG_CYCLE);

	/* Call the wrapper function */
	jit_finishr(JIT_R1);

	/* delta = state->target_cycle - state->current_cycle */
	jit_ldxi_ui(LIGHTREC_REG_CYCLE, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, current_cycle));
	jit_ldxi_ui(JIT_R1, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, target_cycle));
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R1, LIGHTREC_REG_CYCLE);

	/* Restore temporaries from stack */
	for (i = 0; i < NUM_TEMPS; i++) {
		if (i + FIRST_TEMP != 1) {
			jit_ldxi(JIT_R(i + FIRST_TEMP), LIGHTREC_REG_STATE,
				 offsetof(struct lightrec_state, wrapper_regs[i]));
		}
	}

	jit_ret();
	jit_epilog();

	block->_jit = _jit;
	block->opcode_list = NULL;
	block->flags = BLOCK_NO_OPCODE_LIST;
	block->nb_ops = 0;

	block->function = lightrec_emit_code(state, block, _jit,
					     &block->code_size);
	if (!block->function)
		goto err_free_block;

	state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;

	for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
		state->wrappers_eps[i] = jit_address(addr[i]);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Wrapper block:\n");
		jit_disassemble();
	}

	jit_clear_state();
	return block;

err_free_block:
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
	pr_err("Unable to compile wrapper: Out of memory\n");
	return NULL;
}
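
/* Replacement for the PSX memset routine, called directly by blocks flagged
 * BLOCK_IS_MEMSET: gpr[4] (a0) holds the target address and gpr[5] (a1) the
 * number of 32-bit words to clear. */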
static u32 lightrec_memset(struct lightrec_state *state)
{
	u32 kunseg_pc = kunseg(state->regs.gpr[4]);
	void *host;
	const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
	u32 length = state->regs.gpr[5] * 4;

	if (!map) {
		pr_err("Unable to find memory map for memset target address "PC_FMT"\n",
		       kunseg_pc);
		return 0;
	}

	pr_debug("Calling host memset, "PC_FMT" (host address 0x%"PRIxPTR") for %u bytes\n",
		 kunseg_pc, (uintptr_t)host, length);
	memset(host, 0, length);

	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
		lightrec_invalidate_map(state, map, kunseg_pc, length);

	/* Rough estimation of the number of cycles consumed */
	return 8 + 5 * ((length + 3) / 4);
}
static u32 lightrec_check_load_delay(struct lightrec_state *state, u32 pc, u8 reg)
{
	struct block *block;
	union code first_op;

	first_op = lightrec_read_opcode(state, pc);

	if (likely(!opcode_reads_register(first_op, reg))) {
		state->regs.gpr[reg] = state->temp_reg;
	} else {
		block = lightrec_get_block(state, pc);
		if (unlikely(!block)) {
			pr_err("Unable to get block at "PC_FMT"\n", pc);
			lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
			pc = 0;
		} else {
			pc = lightrec_handle_load_delay(state, block, pc, reg);
		}
	}

	return pc;
}
static void update_cycle_counter_before_c(jit_state_t *_jit)
{
	/* update state->current_cycle */
	jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, target_cycle));
	jit_subr(JIT_R1, JIT_R2, LIGHTREC_REG_CYCLE);
	jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
		   LIGHTREC_REG_STATE, JIT_R1);
}

static void update_cycle_counter_after_c(jit_state_t *_jit)
{
	/* Recalc the delta */
	jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, current_cycle));
	jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
		   offsetof(struct lightrec_state, target_cycle));
	jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
}
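
/* Register convention of the generated code: LIGHTREC_REG_CYCLE holds the
 * number of cycles remaining in the quantum (target_cycle - current_cycle).
 * state->current_cycle is therefore only synced to memory around calls into
 * C code, using the two helpers above. */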
static void sync_next_pc(jit_state_t *_jit)
{
	if (lightrec_store_next_pc()) {
		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
			    offsetof(struct lightrec_state, next_pc));
	}
}
static struct block * generate_dispatcher(struct lightrec_state *state)
{
	struct block *block;
	jit_state_t *_jit;
	jit_node_t *to_end, *loop, *addr, *addr2, *addr3, *addr4, *addr5, *jmp, *jmp2;
	unsigned int i;
	u32 offset;

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block)
		goto err_no_mem;

	_jit = jit_new_state();
	if (!_jit)
		goto err_free_block;

	jit_name("dispatcher");
	jit_note(__FILE__, __LINE__);

	jit_prolog();
	jit_frame(256);

	jit_getarg(LIGHTREC_REG_STATE, jit_arg());
	jit_getarg(JIT_V0, jit_arg());
	jit_getarg(JIT_V1, jit_arg());
	jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());

	/* Force all callee-saved registers to be pushed on the stack */
	for (i = 0; i < NUM_REGS; i++)
		jit_movr(JIT_V(i + FIRST_REG), JIT_V(i + FIRST_REG));

	loop = jit_label();

	/* Call the block's code */
	jit_jmpr(JIT_V1);

	if (OPT_REPLACE_MEMSET) {
		/* Blocks will jump here when they need to call
		 * lightrec_memset() */
		addr3 = jit_indirect();

		jit_movr(JIT_V1, LIGHTREC_REG_CYCLE);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);

		jit_finishi(lightrec_memset);
		jit_retval(LIGHTREC_REG_CYCLE);

		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
			    offsetof(struct lightrec_state, regs.gpr[31]));
		jit_subr(LIGHTREC_REG_CYCLE, JIT_V1, LIGHTREC_REG_CYCLE);

		if (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)
			jmp = jit_b();
	}

	if (OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* Blocks will jump here when they reach a branch that should
		 * be executed with the interpreter, passing the branch's PC
		 * in JIT_V0 and the address of the block in JIT_V1. */
		addr4 = jit_indirect();

		sync_next_pc(_jit);
		update_cycle_counter_before_c(_jit);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);
		jit_pushargr(JIT_V1);
		jit_pushargr(JIT_V0);
		jit_finishi(lightrec_emulate_block);

		jit_retval(JIT_V0);

		update_cycle_counter_after_c(_jit);

		if (OPT_HANDLE_LOAD_DELAYS)
			jmp2 = jit_b();
	}

	if (OPT_HANDLE_LOAD_DELAYS) {
		/* Blocks will jump here when they reach a branch with a load
		 * opcode in its delay slot. The delay slot has already been
		 * executed; the load value is in (state->temp_reg), and the
		 * register number is in JIT_V1.
		 * Jump to a C function which will evaluate the branch target's
		 * first opcode, to make sure that it does not read the register
		 * in question; and if it does, handle it accordingly. */
		addr5 = jit_indirect();

		sync_next_pc(_jit);
		update_cycle_counter_before_c(_jit);

		jit_prepare();
		jit_pushargr(LIGHTREC_REG_STATE);
		jit_pushargr(JIT_V0);
		jit_pushargr(JIT_V1);
		jit_finishi(lightrec_check_load_delay);

		jit_retval(JIT_V0);

		update_cycle_counter_after_c(_jit);
	}

	/* The block will jump here, with the number of cycles remaining in
	 * LIGHTREC_REG_CYCLE */
	addr2 = jit_indirect();

	sync_next_pc(_jit);

	if (OPT_HANDLE_LOAD_DELAYS && OPT_DETECT_IMPOSSIBLE_BRANCHES)
		jit_patch(jmp2);

	if (OPT_REPLACE_MEMSET
	    && (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)) {
		jit_patch(jmp);
	}

	/* Store back the next PC to the lightrec_state structure */
	offset = offsetof(struct lightrec_state, curr_pc);
	jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);

	/* Jump to end if state->target_cycle < state->current_cycle */
	to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);

	/* Convert next PC to KUNSEG and avoid mirrors */
	jit_andi(JIT_V1, JIT_V0, 0x10000000 | (RAM_SIZE - 1));
	jit_rshi_u(JIT_R1, JIT_V1, 28);
	jit_andi(JIT_R2, JIT_V0, BIOS_SIZE - 1);
	jit_addi(JIT_R2, JIT_R2, RAM_SIZE);
	jit_movnr(JIT_V1, JIT_R2, JIT_R1);

	/* If possible, use the code LUT */
	if (!lut_is_32bit(state))
		jit_lshi(JIT_V1, JIT_V1, 1);
	jit_add_state(JIT_V1, JIT_V1);

	offset = offsetof(struct lightrec_state, code_lut);
	if (lut_is_32bit(state))
		jit_ldxi_ui(JIT_V1, JIT_V1, offset);
	else
		jit_ldxi(JIT_V1, JIT_V1, offset);

	/* If we get non-NULL, loop */
	jit_patch_at(jit_bnei(JIT_V1, 0), loop);

	/* The code LUT will be set to this address when the block at the target
	 * PC has been preprocessed but not yet compiled by the threaded
	 * recompiler */
	addr = jit_indirect();

	/* Slow path: call C function get_next_block_func() */

	if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* We may call the interpreter - update state->current_cycle */
		update_cycle_counter_before_c(_jit);
	}

	jit_prepare();
	jit_pushargr(LIGHTREC_REG_STATE);
	jit_pushargr(JIT_V0);

	/* Save the cycles register if needed */
	if (!(ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES))
		jit_movr(JIT_V0, LIGHTREC_REG_CYCLE);

	/* Get the next block */
	jit_finishi(&get_next_block_func);
	jit_retval(JIT_V1);

	if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
		/* The interpreter may have updated state->current_cycle and
		 * state->target_cycle - recalc the delta */
		update_cycle_counter_after_c(_jit);
	} else {
		jit_movr(LIGHTREC_REG_CYCLE, JIT_V0);
	}

	/* Reset JIT_V0 to the next PC */
	jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
		    offsetof(struct lightrec_state, curr_pc));

	/* If we get non-NULL, loop */
	jit_patch_at(jit_bnei(JIT_V1, 0), loop);

	/* When exiting, the recompiled code will jump to that address */
	jit_note(__FILE__, __LINE__);
	jit_patch(to_end);

	jit_retr(LIGHTREC_REG_CYCLE);
	jit_epilog();

	block->_jit = _jit;
	block->opcode_list = NULL;
	block->flags = BLOCK_NO_OPCODE_LIST;
	block->nb_ops = 0;

	block->function = lightrec_emit_code(state, block, _jit,
					     &block->code_size);
	if (!block->function)
		goto err_free_block;

	state->eob_wrapper_func = jit_address(addr2);
	if (OPT_DETECT_IMPOSSIBLE_BRANCHES)
		state->interpreter_func = jit_address(addr4);
	if (OPT_HANDLE_LOAD_DELAYS)
		state->ds_check_func = jit_address(addr5);
	if (OPT_REPLACE_MEMSET)
		state->memset_func = jit_address(addr3);
	state->get_next_block = jit_address(addr);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Dispatcher block:\n");
		jit_disassemble();
	}

	jit_clear_state();
	return block;

err_free_block:
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
err_no_mem:
	pr_err("Unable to compile dispatcher: Out of memory\n");
	return NULL;
}
union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
{
	void *host = NULL;

	lightrec_get_map(state, &host, kunseg(pc));

	const u32 *code = (u32 *)host;
	return (union code) LE32TOH(*code);
}
unsigned int lightrec_cycles_of_opcode(const struct lightrec_state *state,
				       union code code)
{
	return state->cycles_per_op;
}
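
/* Opcode lists are allocated as a single chunk: a struct opcode_list header
 * followed by a flexible array of opcodes. Freeing therefore recovers the
 * header from the array pointer with container_of(). */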
void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
{
	struct opcode_list *list = container_of(ops, struct opcode_list, ops);

	lightrec_free(state, MEM_FOR_IR,
		      sizeof(*list) + list->nb_ops * sizeof(struct opcode),
		      list);
}
static unsigned int lightrec_get_mips_block_len(const u32 *src)
{
	unsigned int i;
	union code c;

	for (i = 1; ; i++) {
		c.opcode = LE32TOH(*src++);

		if (is_syscall(c))
			return i;

		if (is_unconditional_jump(c))
			return i + 1;
	}
}
static struct opcode * lightrec_disassemble(struct lightrec_state *state,
					    const u32 *src, unsigned int *len)
{
	struct opcode_list *list;
	unsigned int i, length;

	length = lightrec_get_mips_block_len(src);

	list = lightrec_malloc(state, MEM_FOR_IR,
			       sizeof(*list) + sizeof(struct opcode) * length);
	if (!list) {
		pr_err("Unable to allocate memory\n");
		return NULL;
	}

	list->nb_ops = (u16) length;

	for (i = 0; i < length; i++) {
		list->ops[i].opcode = LE32TOH(src[i]);
		list->ops[i].flags = 0;
	}

	*len = length * sizeof(u32);

	return list->ops;
}
static struct block * lightrec_precompile_block(struct lightrec_state *state,
						u32 pc)
{
	struct opcode *list;
	struct block *block;
	void *host, *addr;
	const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
	const u32 *code = (u32 *) host;
	unsigned int length;
	bool fully_tagged;
	u32 block_flags = 0;

	if (!map)
		return NULL;

	block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
	if (!block) {
		pr_err("Unable to recompile block: Out of memory\n");
		return NULL;
	}

	list = lightrec_disassemble(state, code, &length);
	if (!list) {
		lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
		return NULL;
	}

	block->pc = pc;
	block->_jit = NULL;
	block->function = NULL;
	block->opcode_list = list;
	block->code = code;
	block->next = NULL;
	block->flags = 0;
	block->code_size = 0;
	block->precompile_date = state->current_cycle;
	block->nb_ops = length / sizeof(u32);

	lightrec_optimize(state, block);

	length = block->nb_ops * sizeof(u32);

	lightrec_register(MEM_FOR_MIPS_CODE, length);

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
		lightrec_print_disassembly(block, code);
	}

	pr_debug("Block size: %hu opcodes\n", block->nb_ops);

	fully_tagged = lightrec_block_is_fully_tagged(block);
	if (fully_tagged)
		block_flags |= BLOCK_FULLY_TAGGED;

	if (block_flags)
		block_set_flags(block, block_flags);

	block->hash = lightrec_calculate_block_hash(block);

	if (OPT_REPLACE_MEMSET && block_has_flag(block, BLOCK_IS_MEMSET))
		addr = state->memset_func;
	else
		addr = state->get_next_block;
	lut_write(state, lut_offset(pc), addr);

	pr_debug("Blocks created: %u\n", ++state->nb_precompile);

	return block;
}
static bool lightrec_block_is_fully_tagged(const struct block *block)
{
	const struct opcode *op;
	unsigned int i;

	for (i = 0; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		/* If we have one branch that must be emulated, we cannot trash
		 * the opcode list. */
		if (should_emulate(op))
			return false;

		/* Check all loads/stores of the opcode list and mark the
		 * block as fully compiled if they all have been tagged. */
		switch (op->c.i.op) {
		case OP_LB: case OP_LBU: case OP_LH: case OP_LHU:
		case OP_LWL: case OP_LWR: case OP_LW:
		case OP_SB: case OP_SH: case OP_SWL: case OP_SWR: case OP_SW:
		case OP_LWC2: case OP_SWC2:
		case OP_META_LWU: case OP_META_SWU:
			if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
				return false;
			break;
		default:
			continue;
		}
	}

	return true;
}
static void lightrec_reap_block(struct lightrec_state *state, void *data)
{
	struct block *block = data;

	pr_debug("Reap dead block at "PC_FMT"\n", block->pc);
	lightrec_unregister_block(state->block_cache, block);
	lightrec_free_block(state, block);
}

static void lightrec_reap_jit(struct lightrec_state *state, void *data)
{
	_jit_destroy_state(data);
}

static void lightrec_free_function(struct lightrec_state *state, void *fn)
{
	if (ENABLE_CODE_BUFFER && state->tlsf) {
		pr_debug("Freeing code block at 0x%" PRIxPTR "\n", (uintptr_t) fn);
		lightrec_free_code(state, fn);
	}
}

static void lightrec_reap_function(struct lightrec_state *state, void *data)
{
	lightrec_free_function(state, data);
}

static void lightrec_reap_opcode_list(struct lightrec_state *state, void *data)
{
	lightrec_free_opcode_list(state, data);
}
int lightrec_compile_block(struct lightrec_cstate *cstate,
			   struct block *block)
{
	struct lightrec_state *state = cstate->state;
	struct lightrec_branch_target *target;
	bool fully_tagged = false;
	struct block *block2;
	struct opcode *elm;
	jit_state_t *_jit, *oldjit;
	jit_node_t *start_of_block;
	bool skip_next = false;
	void *old_fn, *new_fn;
	size_t old_code_size;
	unsigned int i, j;
	u32 old_flags;
	u32 offset;

	fully_tagged = lightrec_block_is_fully_tagged(block);
	if (fully_tagged)
		block_set_flags(block, BLOCK_FULLY_TAGGED);

	_jit = jit_new_state();
	if (!_jit)
		return -ENOMEM;

	oldjit = block->_jit;
	old_fn = block->function;
	old_code_size = block->code_size;
	block->_jit = _jit;

	lightrec_regcache_reset(cstate->reg_cache);

	if (OPT_PRELOAD_PC && (block->flags & BLOCK_PRELOAD_PC))
		lightrec_preload_pc(cstate->reg_cache, _jit);

	cstate->cycles = 0;
	cstate->nb_local_branches = 0;
	cstate->nb_targets = 0;
	cstate->no_load_delay = false;

	jit_prolog();
	jit_tramp(256);

	start_of_block = jit_label();

	for (i = 0; i < block->nb_ops; i++) {
		elm = &block->opcode_list[i];

		if (skip_next) {
			skip_next = false;
			continue;
		}

		if (should_emulate(elm)) {
			pr_debug("Branch at offset 0x%x will be emulated\n",
				 i << 2);

			lightrec_emit_jump_to_interpreter(cstate, block, i);
			skip_next = !op_flag_no_ds(elm->flags);
		} else {
			lightrec_rec_opcode(cstate, block, i);
			skip_next = !op_flag_no_ds(elm->flags) && has_delay_slot(elm->c);

			/* FIXME: GNU Lightning on Windows seems to use our
			 * mapped registers as temporaries. Until the actual bug
			 * is found and fixed, unconditionally mark our
			 * registers as live here. */
			lightrec_regcache_mark_live(cstate->reg_cache, _jit);
		}

		cstate->cycles += lightrec_cycles_of_opcode(state, elm->c);
	}

	for (i = 0; i < cstate->nb_local_branches; i++) {
		struct lightrec_branch *branch = &cstate->local_branches[i];

		pr_debug("Patch local branch to offset 0x%x\n",
			 branch->target << 2);

		if (branch->target == 0) {
			jit_patch_at(branch->branch, start_of_block);
			continue;
		}

		for (j = 0; j < cstate->nb_targets; j++) {
			if (cstate->targets[j].offset == branch->target) {
				jit_patch_at(branch->branch,
					     cstate->targets[j].label);
				break;
			}
		}

		if (j == cstate->nb_targets)
			pr_err("Unable to find branch target\n");
	}

	jit_ret();
	jit_epilog();

	new_fn = lightrec_emit_code(state, block, _jit, &block->code_size);
	if (!new_fn) {
		if (!ENABLE_THREADED_COMPILER)
			pr_err("Unable to compile block!\n");
		block->_jit = oldjit;
		jit_clear_state();
		_jit_destroy_state(_jit);
		return -ENOMEM;
	}

	/* Pause the reaper, because lightrec_reset_lut_offset() may try to set
	 * the old block->function pointer to the code LUT. */
	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_pause(state->reaper);

	block->function = new_fn;
	block_clear_flags(block, BLOCK_SHOULD_RECOMPILE);

	/* Add compiled function to the LUT */
	lut_write(state, lut_offset(block->pc), block->function);

	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_continue(state->reaper);

	/* Detect old blocks that have been covered by the new one */
	for (i = 0; i < cstate->nb_targets; i++) {
		target = &cstate->targets[i];

		if (!target->offset)
			continue;

		offset = block->pc + target->offset * sizeof(u32);

		/* Pause the reaper while we search for the block until we set
		 * the BLOCK_IS_DEAD flag, otherwise the block may be removed
		 * under our feet. */
		if (ENABLE_THREADED_COMPILER)
			lightrec_reaper_pause(state->reaper);

		block2 = lightrec_find_block(state->block_cache, offset);
		if (block2) {
			/* No need to check if block2 is compilable - it must
			 * be, otherwise block wouldn't be compilable either */

			/* Set the "block dead" flag to prevent the dynarec from
			 * recompiling this block */
			old_flags = block_set_flags(block2, BLOCK_IS_DEAD);
		}

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_continue(state->reaper);

			/* If block2 was pending for compilation, cancel it.
			 * If it's being compiled right now, wait until it
			 * finishes. */
			if (block2)
				lightrec_recompiler_remove(state->rec, block2);
		}

		/* We know from now on that block2 (if present) isn't going to
		 * be compiled. We can override the LUT entry with our new
		 * block's entry point. */
		offset = lut_offset(block->pc) + target->offset;
		lut_write(state, offset, jit_address(target->label));

		if (block2) {
			pr_debug("Reap block 0x%08x as it's covered by block "
				 "0x%08x\n", block2->pc, block->pc);

			/* Finally, reap the block. */
			if (!ENABLE_THREADED_COMPILER) {
				lightrec_unregister_block(state->block_cache, block2);
				lightrec_free_block(state, block2);
			} else if (!(old_flags & BLOCK_IS_DEAD)) {
				lightrec_reaper_add(state->reaper,
						    lightrec_reap_block,
						    block2);
			}
		}
	}

	if (ENABLE_DISASSEMBLER) {
		pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
		jit_disassemble();
	}

	jit_clear_state();

	if (fully_tagged)
		old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

	if (fully_tagged && !(old_flags & BLOCK_NO_OPCODE_LIST)) {
		pr_debug("Block "PC_FMT" is fully tagged"
			 " - free opcode list\n", block->pc);

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_opcode_list,
					    block->opcode_list);
		} else {
			lightrec_free_opcode_list(state, block->opcode_list);
		}
	}

	if (oldjit) {
		pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
			 block->pc);

		if (ENABLE_THREADED_COMPILER) {
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_jit, oldjit);
			lightrec_reaper_add(state->reaper,
					    lightrec_reap_function, old_fn);
		} else {
			_jit_destroy_state(oldjit);
			lightrec_free_function(state, old_fn);
		}

		lightrec_unregister(MEM_FOR_CODE, old_code_size);
	}

	pr_debug("Blocks compiled: %u\n", ++state->nb_compile);

	return 0;
}
static void lightrec_print_info(struct lightrec_state *state)
{
	if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
		pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
			"MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
			lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
			lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
			lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
			lightrec_get_total_mem_usage() / 1024,
			lightrec_get_average_ipi());

		state->old_cycle_counter = state->current_cycle & ~0xfffffff;
	}
}
u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
{
	s32 (*func)(struct lightrec_state *, u32, void *, s32) = (void *)state->dispatcher->function;
	void *block_trace;
	s32 cycles_delta;

	state->exit_flags = LIGHTREC_EXIT_NORMAL;

	/* Handle the cycle counter overflowing */
	if (unlikely(target_cycle < state->current_cycle))
		target_cycle = UINT_MAX;

	state->target_cycle = target_cycle;
	state->curr_pc = pc;

	block_trace = get_next_block_func(state, pc);
	if (block_trace) {
		cycles_delta = state->target_cycle - state->current_cycle;

		cycles_delta = (*func)(state, state->curr_pc,
				       block_trace, cycles_delta);

		state->current_cycle = state->target_cycle - cycles_delta;
	}

	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_reap(state->reaper);

	if (LOG_LEVEL >= INFO_L)
		lightrec_print_info(state);

	return state->curr_pc;
}
u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc,
			     u32 target_cycle)
{
	struct block *block;

	state->exit_flags = LIGHTREC_EXIT_NORMAL;
	state->target_cycle = target_cycle;

	do {
		block = lightrec_get_block(state, pc);
		if (!block)
			break;

		pc = lightrec_emulate_block(state, block, pc);

		if (ENABLE_THREADED_COMPILER)
			lightrec_reaper_reap(state->reaper);
	} while (state->current_cycle < state->target_cycle);

	if (LOG_LEVEL >= INFO_L)
		lightrec_print_info(state);

	return pc;
}
void lightrec_free_block(struct lightrec_state *state, struct block *block)
{
	u32 old_flags;

	lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
	old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

	if (!(old_flags & BLOCK_NO_OPCODE_LIST))
		lightrec_free_opcode_list(state, block->opcode_list);
	if (block->_jit)
		_jit_destroy_state(block->_jit);
	if (block->function) {
		lightrec_free_function(state, block->function);
		lightrec_unregister(MEM_FOR_CODE, block->code_size);
	}
	lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
}
struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
{
	struct lightrec_cstate *cstate;

	cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
	if (!cstate)
		return NULL;

	cstate->reg_cache = lightrec_regcache_init(state);
	if (!cstate->reg_cache) {
		lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
		return NULL;
	}

	cstate->state = state;

	return cstate;
}

void lightrec_free_cstate(struct lightrec_cstate *cstate)
{
	lightrec_free_regcache(cstate->reg_cache);
	lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
}
struct lightrec_state * lightrec_init(char *argv0,
				      const struct lightrec_mem_map *map,
				      size_t nb,
				      const struct lightrec_ops *ops)
{
	const struct lightrec_mem_map *codebuf_map = &map[PSX_MAP_CODE_BUFFER];
	struct lightrec_state *state;
	uintptr_t addr;
	void *tlsf = NULL;
	bool with_32bit_lut = false;
	size_t lut_size;

	/* Sanity-check ops */
	if (!ops || !ops->cop2_op || !ops->enable_ram) {
		pr_err("Missing callbacks in lightrec_ops structure\n");
		return NULL;
	}

	if (ops->cop2_notify)
		pr_debug("Optional cop2_notify callback in lightrec_ops\n");
	else
		pr_debug("No optional cop2_notify callback in lightrec_ops\n");

	if (ENABLE_CODE_BUFFER && nb > PSX_MAP_CODE_BUFFER
	    && codebuf_map->address) {
		tlsf = tlsf_create_with_pool(codebuf_map->address,
					     codebuf_map->length);
		if (!tlsf) {
			pr_err("Unable to initialize code buffer\n");
			return NULL;
		}

		if (__WORDSIZE == 64) {
			addr = (uintptr_t) codebuf_map->address + codebuf_map->length - 1;
			with_32bit_lut = addr == (u32) addr;
		}
	}

	if (with_32bit_lut)
		lut_size = CODE_LUT_SIZE * 4;
	else
		lut_size = CODE_LUT_SIZE * sizeof(void *);

	init_jit(argv0);

	state = calloc(1, sizeof(*state) + lut_size);
	if (!state)
		goto err_finish_jit;

	lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) + lut_size);

	state->tlsf = tlsf;
	state->with_32bit_lut = with_32bit_lut;
	state->in_delay_slot_n = 0xff;
	state->cycles_per_op = 2;

	state->block_cache = lightrec_blockcache_init(state);
	if (!state->block_cache)
		goto err_free_state;

	if (ENABLE_THREADED_COMPILER) {
		state->rec = lightrec_recompiler_init(state);
		if (!state->rec)
			goto err_free_block_cache;

		state->reaper = lightrec_reaper_init(state);
		if (!state->reaper)
			goto err_free_recompiler;
	} else {
		state->cstate = lightrec_create_cstate(state);
		if (!state->cstate)
			goto err_free_block_cache;
	}

	state->nb_maps = nb;
	state->maps = map;

	memcpy(&state->ops, ops, sizeof(*ops));

	state->dispatcher = generate_dispatcher(state);
	if (!state->dispatcher)
		goto err_free_reaper;

	state->c_wrapper_block = generate_wrapper(state);
	if (!state->c_wrapper_block)
		goto err_free_dispatcher;

	state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
	state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
	state->c_wrappers[C_WRAPPER_MFC] = lightrec_mfc_cb;
	state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
	state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;

	map = &state->maps[PSX_MAP_BIOS];
	state->offset_bios = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_SCRATCH_PAD];
	state->offset_scratch = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_HW_REGISTERS];
	state->offset_io = (uintptr_t)map->address - map->pc;

	map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
	state->offset_ram = (uintptr_t)map->address - map->pc;

	if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
	    state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
	    state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
		state->mirrors_mapped = true;

	if (state->offset_bios == 0 &&
	    state->offset_scratch == 0 &&
	    state->offset_ram == 0 &&
	    state->offset_io == 0 &&
	    state->mirrors_mapped) {
		pr_info("Memory map is perfect. Emitted code will be best.\n");
	} else {
		pr_info("Memory map is sub-par. Emitted code will be slow.\n");
	}

	if (state->with_32bit_lut)
		pr_info("Using 32-bit LUT\n");

	return state;

err_free_dispatcher:
	lightrec_free_block(state, state->dispatcher);
err_free_reaper:
	if (ENABLE_THREADED_COMPILER)
		lightrec_reaper_destroy(state->reaper);
err_free_recompiler:
	if (ENABLE_THREADED_COMPILER)
		lightrec_free_recompiler(state->rec);
	else
		lightrec_free_cstate(state->cstate);
err_free_block_cache:
	lightrec_free_block_cache(state->block_cache);
err_free_state:
	lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
			    lut_elm_size(state) * CODE_LUT_SIZE);
	free(state);
err_finish_jit:
	finish_jit();
	if (ENABLE_CODE_BUFFER && tlsf)
		tlsf_destroy(tlsf);
	return NULL;
}
void lightrec_destroy(struct lightrec_state *state)
{
	/* Force a print info on destroy */
	state->current_cycle = ~state->current_cycle;
	lightrec_print_info(state);

	lightrec_free_block_cache(state->block_cache);
	lightrec_free_block(state, state->dispatcher);
	lightrec_free_block(state, state->c_wrapper_block);

	if (ENABLE_THREADED_COMPILER) {
		lightrec_free_recompiler(state->rec);
		lightrec_reaper_destroy(state->reaper);
	} else {
		lightrec_free_cstate(state->cstate);
	}

	finish_jit();
	if (ENABLE_CODE_BUFFER && state->tlsf)
		tlsf_destroy(state->tlsf);

	lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
			    lut_elm_size(state) * CODE_LUT_SIZE);
	free(state);
}
void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
{
	u32 kaddr = kunseg(addr & ~0x3);
	enum psx_map idx = lightrec_get_map_idx(state, kaddr);

	switch (idx) {
	case PSX_MAP_MIRROR1:
	case PSX_MAP_MIRROR2:
	case PSX_MAP_MIRROR3:
		/* Handle mirrors */
		kaddr &= RAM_SIZE - 1;
		fallthrough;
	case PSX_MAP_KERNEL_USER_RAM:
		break;
	default:
		return;
	}

	memset(lut_address(state, lut_offset(kaddr)), 0,
	       ((len + 3) / 4) * lut_elm_size(state));
}
void lightrec_invalidate_all(struct lightrec_state *state)
{
	memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
}
void lightrec_set_unsafe_opt_flags(struct lightrec_state *state, u32 flags)
{
	if ((flags ^ state->opt_flags) & LIGHTREC_OPT_INV_DMA_ONLY)
		lightrec_invalidate_all(state);

	state->opt_flags = flags;
}
void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
	if (flags != LIGHTREC_EXIT_NORMAL) {
		state->exit_flags |= flags;
		state->target_cycle = state->current_cycle;
	}
}
u32 lightrec_exit_flags(struct lightrec_state *state)
{
	return state->exit_flags;
}

u32 lightrec_current_cycle_count(const struct lightrec_state *state)
{
	return state->current_cycle;
}

void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
{
	state->current_cycle = cycles;

	if (state->target_cycle < cycles)
		state->target_cycle = cycles;
}

void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
{
	if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
		if (cycles < state->current_cycle)
			cycles = state->current_cycle;

		state->target_cycle = cycles;
	}
}

struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
{
	return &state->regs;
}

void lightrec_set_cycles_per_opcode(struct lightrec_state *state, u32 cycles)
{
	state->cycles_per_op = cycles;
}