{
*(u8 *)host = data;
- if (!state->invalidate_from_dma_only)
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
lightrec_invalidate(state, addr, 1);
}
{
*(u16 *)host = HTOLE16(data);
- if (!state->invalidate_from_dma_only)
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
lightrec_invalidate(state, addr, 2);
}
{
*(u32 *)host = HTOLE32(data);
- if (!state->invalidate_from_dma_only)
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
lightrec_invalidate(state, addr, 4);
}
return LE32TOH(*(u32 *)host);
}
+/* Default handler for the OP_META_LWU meta-opcode — presumably a merged
+ * LWL/LWR pair, i.e. an unaligned 32-bit load (TODO confirm against the
+ * opcode definitions). memcpy is used instead of a pointer dereference so
+ * the access is safe at any alignment and does not violate strict
+ * aliasing; the little-endian value is converted to host endianness. */
+static u32 lightrec_default_lwu(struct lightrec_state *state,
+				u32 opcode, void *host, u32 addr)
+{
+	u32 val;
+
+	/* Unaligned-safe 32-bit read from the host-side mapping. */
+	memcpy(&val, host, 4);
+
+	return LE32TOH(val);
+}
+
+/* Default handler for the OP_META_SWU meta-opcode — presumably a merged
+ * SWL/SWR pair, i.e. an unaligned 32-bit store (TODO confirm). The store
+ * is done through memcpy so any alignment of 'host' is valid. */
+static void lightrec_default_swu(struct lightrec_state *state, u32 opcode,
+				 void *host, u32 addr, u32 data)
+{
+	data = HTOLE32(data);
+
+	memcpy(host, &data, 4);
+
+	if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
+		/* An unaligned 4-byte store can straddle two aligned words,
+		 * so invalidate 8 bytes starting at the word-aligned base. */
+		lightrec_invalidate(state, addr & ~0x3, 8);
+}
+
static const struct lightrec_mem_map_ops lightrec_default_ops = {
	.sb = lightrec_default_sb,
	.sh = lightrec_default_sh,
	.lb = lightrec_default_lb,
	.lh = lightrec_default_lh,
	.lw = lightrec_default_lw,
+	/* New unaligned word access callbacks, dispatched for
+	 * OP_META_LWU / OP_META_SWU (see lightrec_rw below). */
+	.lwu = lightrec_default_lwu,
+	.swu = lightrec_default_swu,
};
static void __segfault_cb(struct lightrec_state *state, u32 addr,
{
lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
pr_err("Segmentation fault in recompiled code: invalid "
- "load/store at address 0x%08x\n", addr);
+ "load/store at address "PC_FMT"\n", addr);
if (block)
- pr_err("Was executing block PC 0x%08x\n", block->pc);
+ pr_err("Was executing block "PC_FMT"\n", block->pc);
}
static void lightrec_swl(struct lightrec_state *state,
old_flags = block_set_flags(block, BLOCK_SHOULD_RECOMPILE);
if (!(old_flags & BLOCK_SHOULD_RECOMPILE)) {
- pr_debug("Opcode of block at PC 0x%08x has been tagged"
+ pr_debug("Opcode of block at "PC_FMT" has been tagged"
" - flag for recompilation\n", block->pc);
lut_write(state, lut_offset(block->pc), NULL);
return lightrec_lwl(state, ops, opcode, host, addr, data);
case OP_LWR:
return lightrec_lwr(state, ops, opcode, host, addr, data);
+ case OP_META_LWU:
+ return ops->lwu(state, opcode, host, addr);
+ case OP_META_SWU:
+ ops->swu(state, opcode, host, addr, data);
+ return 0;
case OP_LW:
default:
return ops->lw(state, opcode, host, addr);
case OP_LWL:
case OP_LWR:
case OP_LW:
+ case OP_META_LWU:
if (OPT_HANDLE_LOAD_DELAYS && unlikely(!state->in_delay_slot_n)) {
state->temp_reg = ret;
state->in_delay_slot_n = 0xff;
block = lightrec_find_block_from_lut(state->block_cache,
arg >> 16, state->curr_pc);
if (unlikely(!block)) {
- pr_err("rw_generic: No block found in LUT for PC 0x%x offset 0x%x\n",
+ pr_err("rw_generic: No block found in LUT for "PC_FMT" offset 0x%"PRIx16"\n",
state->curr_pc, offset);
lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
return;
status = state->regs.cp0[12];
/* Handle software interrupts */
- if (!!(status & cause & 0x300) & status)
+ if ((!!(status & cause & 0x300)) & status)
lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
/* Handle hardware interrupts */
u8 old_flags;
if (block && lightrec_block_is_outdated(state, block)) {
- pr_debug("Block at PC 0x%08x is outdated!\n", block->pc);
+ pr_debug("Block at "PC_FMT" is outdated!\n", block->pc);
old_flags = block_set_flags(block, BLOCK_IS_DEAD);
if (!(old_flags & BLOCK_IS_DEAD)) {
if (!block) {
block = lightrec_precompile_block(state, pc);
if (!block) {
- pr_err("Unable to recompile block at PC 0x%x\n", pc);
+ pr_err("Unable to recompile block at "PC_FMT"\n", pc);
lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
return NULL;
}
!block_has_flag(block, BLOCK_IS_DEAD);
if (unlikely(should_recompile)) {
- pr_debug("Block at PC 0x%08x should recompile\n", pc);
+ pr_debug("Block at "PC_FMT" should recompile\n", pc);
if (ENABLE_THREADED_COMPILER) {
lightrec_recompiler_add(state->rec, block);
u32 length = state->regs.gpr[5] * 4;
if (!map) {
- pr_err("Unable to find memory map for memset target address "
- "0x%x\n", kunseg_pc);
+ pr_err("Unable to find memory map for memset target address "PC_FMT"\n",
+ kunseg_pc);
return 0;
}
- pr_debug("Calling host memset, PC 0x%x (host address 0x%" PRIxPTR ") for %u bytes\n",
+ pr_debug("Calling host memset, "PC_FMT" (host address 0x%"PRIxPTR") for %u bytes\n",
kunseg_pc, (uintptr_t)host, length);
memset(host, 0, length);
- if (!state->invalidate_from_dma_only)
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
lightrec_invalidate_map(state, map, kunseg_pc, length);
/* Rough estimation of the number of cycles consumed */
} else {
block = lightrec_get_block(state, pc);
if (unlikely(!block)) {
- pr_err("Unable to get block at PC 0x%08x\n", pc);
+ pr_err("Unable to get block at "PC_FMT"\n", pc);
lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
pc = 0;
} else {
static void sync_next_pc(jit_state_t *_jit)
{
	if (lightrec_store_next_pc()) {
-		jit_ldxi_i(JIT_V0, LIGHTREC_REG_STATE,
-			   offsetof(struct lightrec_state, next_pc));
+		/* next_pc is an unsigned 32-bit field: use the unsigned
+		 * (zero-extending) load so the value is not sign-extended
+		 * into the upper half of JIT_V0 on 64-bit hosts. */
+		jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
+			    offsetof(struct lightrec_state, next_pc));
	}
}
return (union code) LE32TOH(*code);
}
-__cnst unsigned int lightrec_cycles_of_opcode(union code code)
+/* Cycle cost charged per emulated opcode. Previously a hard-coded
+ * constant (2) and marked __cnst; it now reads the runtime-configurable
+ * state->cycles_per_op (set via lightrec_set_cycles_per_opcode, default
+ * 2 at init), so the const-function attribute no longer applies. */
+unsigned int lightrec_cycles_of_opcode(const struct lightrec_state *state,
+				       union code code)
{
-	return 2;
+	return state->cycles_per_op;
}
void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
case OP_SWR:
case OP_LWC2:
case OP_SWC2:
+ case OP_META_LWU:
+ case OP_META_SWU:
if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
return false;
fallthrough;
{
struct block *block = data;
- pr_debug("Reap dead block at PC 0x%08x\n", block->pc);
+ pr_debug("Reap dead block at "PC_FMT"\n", block->pc);
lightrec_unregister_block(state->block_cache, block);
lightrec_free_block(state, block);
}
block->_jit = _jit;
lightrec_regcache_reset(cstate->reg_cache);
- lightrec_preload_pc(cstate->reg_cache, _jit);
+
+ if (OPT_PRELOAD_PC && (block->flags & BLOCK_PRELOAD_PC))
+ lightrec_preload_pc(cstate->reg_cache, _jit);
cstate->cycles = 0;
cstate->nb_local_branches = 0;
#endif
}
- cstate->cycles += lightrec_cycles_of_opcode(elm->c);
+ cstate->cycles += lightrec_cycles_of_opcode(state, elm->c);
}
for (i = 0; i < cstate->nb_local_branches; i++) {
old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
if (fully_tagged && !(old_flags & BLOCK_NO_OPCODE_LIST)) {
- pr_debug("Block PC 0x%08x is fully tagged"
+ pr_debug("Block "PC_FMT" is fully tagged"
" - free opcode list\n", block->pc);
if (ENABLE_THREADED_COMPILER) {
state->tlsf = tlsf;
state->with_32bit_lut = with_32bit_lut;
state->in_delay_slot_n = 0xff;
+ state->cycles_per_op = 2;
state->block_cache = lightrec_blockcache_init(state);
if (!state->block_cache)
memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
}
-void lightrec_set_invalidate_mode(struct lightrec_state *state, bool dma_only)
+/* Replaces lightrec_set_invalidate_mode(): the single bool is now one bit
+ * (LIGHTREC_OPT_INV_DMA_ONLY) inside a bitmask of unsafe optimizations. */
+void lightrec_set_unsafe_opt_flags(struct lightrec_state *state, u32 flags)
{
-	if (state->invalidate_from_dma_only != dma_only)
+	/* XOR with the current flags isolates the bits that changed; if the
+	 * DMA-only-invalidation bit toggled in either direction, the code
+	 * LUT may be stale, so flush everything. */
+	if ((flags ^ state->opt_flags) & LIGHTREC_OPT_INV_DMA_ONLY)
		lightrec_invalidate_all(state);
-	state->invalidate_from_dma_only = dma_only;
+	state->opt_flags = flags;
}
void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
return &state->regs;
}
+
+/* Public setter for the per-opcode cycle cost consumed by
+ * lightrec_cycles_of_opcode(); initialized to 2 in the state
+ * constructor. Takes effect for subsequently compiled blocks. */
+void lightrec_set_cycles_per_opcode(struct lightrec_state *state, u32 cycles)
+{
+	state->cycles_per_op = cycles;
+}