{
*(u8 *)host = data;
- if (!state->invalidate_from_dma_only)
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
lightrec_invalidate(state, addr, 1);
}
{
*(u16 *)host = HTOLE16(data);
- if (!state->invalidate_from_dma_only)
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
lightrec_invalidate(state, addr, 2);
}
{
*(u32 *)host = HTOLE32(data);
- if (!state->invalidate_from_dma_only)
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
lightrec_invalidate(state, addr, 4);
}
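The old invalidate_from_dma_only boolean is folded into a flags word in these three store handlers. For a frontend that still thinks in terms of the old boolean, a minimal compatibility shim could look like the sketch below; set_invalidate_mode_compat is a hypothetical helper (assuming <stdbool.h> and lightrec.h are included), built on the lightrec_set_unsafe_opt_flags() call introduced further down in this patch.

/* Hypothetical shim, not part of the patch: map the old boolean
 * "invalidate from DMA only" setting onto the new flags word. */
static void set_invalidate_mode_compat(struct lightrec_state *state, bool dma_only)
{
	lightrec_set_unsafe_opt_flags(state,
				      dma_only ? LIGHTREC_OPT_INV_DMA_ONLY : 0);
}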
u16 offset = (u16)arg;
block = lightrec_find_block_from_lut(state->block_cache,
- arg >> 16, state->next_pc);
+ arg >> 16, state->curr_pc);
if (unlikely(!block)) {
pr_err("rw_generic: No block found in LUT for PC 0x%x offset 0x%x\n",
- state->next_pc, offset);
+ state->curr_pc, offset);
lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
return;
}
status = state->regs.cp0[12];
/* Handle software interrupts */
- if (!!(status & cause & 0x300) & status)
+ if ((!!(status & cause & 0x300)) & status)
lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
/* Handle hardware interrupts */
} while (state->exit_flags == LIGHTREC_EXIT_NORMAL
&& state->current_cycle < state->target_cycle);
- state->next_pc = pc;
+ state->curr_pc = pc;
return func;
}
kunseg_pc, (uintptr_t)host, length);
memset(host, 0, length);
- if (!state->invalidate_from_dma_only)
+ if (!(state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY))
lightrec_invalidate_map(state, map, kunseg_pc, length);
/* Rough estimation of the number of cycles consumed */
jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
}
+/* Reload the branch-target PC from state->next_pc into JIT_V0, for
+ * configurations where the generated code stores it to memory instead
+ * of keeping it live in JIT_V0. */
+static void sync_next_pc(jit_state_t *_jit)
+{
+ if (lightrec_store_next_pc()) {
+ jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
+ offsetof(struct lightrec_state, next_pc));
+ }
+}
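lightrec_store_next_pc() itself is not part of this hunk. The store-side counterpart of the load above would look roughly like the sketch below; it is illustrative only and simply reuses the Lightning helpers the dispatcher already uses.

/* Illustrative sketch, not part of the patch: spill the next PC from
 * JIT_V0 into state->next_pc so that sync_next_pc() can reload it. */
if (lightrec_store_next_pc())
	jit_stxi_i(offsetof(struct lightrec_state, next_pc),
		   LIGHTREC_REG_STATE, JIT_V0);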
+
static struct block * generate_dispatcher(struct lightrec_state *state)
{
struct block *block;
* in JIT_V0 and the address of the block in JIT_V1. */
addr4 = jit_indirect();
+ sync_next_pc(_jit);
update_cycle_counter_before_c(_jit);
jit_prepare();
* in question; and if it does, handle it accordingly. */
addr5 = jit_indirect();
+ sync_next_pc(_jit);
update_cycle_counter_before_c(_jit);
jit_prepare();
jit_retval(JIT_V0);
update_cycle_counter_after_c(_jit);
-
- if (OPT_DETECT_IMPOSSIBLE_BRANCHES)
- jit_patch(jmp2);
}
+ /* The block will jump here, with the number of cycles remaining in
+ * LIGHTREC_REG_CYCLE */
+ addr2 = jit_indirect();
+
+ sync_next_pc(_jit);
+
+ if (OPT_HANDLE_LOAD_DELAYS && OPT_DETECT_IMPOSSIBLE_BRANCHES)
+ jit_patch(jmp2);
+
if (OPT_REPLACE_MEMSET
&& (OPT_DETECT_IMPOSSIBLE_BRANCHES || OPT_HANDLE_LOAD_DELAYS)) {
jit_patch(jmp);
}
- /* The block will jump here, with the number of cycles remaining in
- * LIGHTREC_REG_CYCLE */
- addr2 = jit_indirect();
-
- /* Store back the next_pc to the lightrec_state structure */
- offset = offsetof(struct lightrec_state, next_pc);
+ /* Store back the next PC to the lightrec_state structure */
+ offset = offsetof(struct lightrec_state, curr_pc);
jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);
/* Jump to end if state->target_cycle < state->current_cycle */
/* Reset JIT_V0 to the next PC */
jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
- offsetof(struct lightrec_state, next_pc));
+ offsetof(struct lightrec_state, curr_pc));
/* If we get non-NULL, loop */
jit_patch_at(jit_bnei(JIT_V1, 0), loop);
return (union code) LE32TOH(*code);
}
-__cnst unsigned int lightrec_cycles_of_opcode(union code code)
+unsigned int lightrec_cycles_of_opcode(const struct lightrec_state *state,
+ union code code)
{
- return 2;
+ return state->cycles_per_op;
}
void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
block->_jit = _jit;
lightrec_regcache_reset(cstate->reg_cache);
- lightrec_preload_pc(cstate->reg_cache);
+
+ if (OPT_PRELOAD_PC && (block->flags & BLOCK_PRELOAD_PC))
+ lightrec_preload_pc(cstate->reg_cache, _jit);
cstate->cycles = 0;
cstate->nb_local_branches = 0;
#endif
}
- cstate->cycles += lightrec_cycles_of_opcode(elm->c);
+ cstate->cycles += lightrec_cycles_of_opcode(state, elm->c);
}
for (i = 0; i < cstate->nb_local_branches; i++) {
target_cycle = UINT_MAX;
state->target_cycle = target_cycle;
- state->next_pc = pc;
+ state->curr_pc = pc;
block_trace = get_next_block_func(state, pc);
if (block_trace) {
cycles_delta = state->target_cycle - state->current_cycle;
- cycles_delta = (*func)(state, state->next_pc,
+ cycles_delta = (*func)(state, state->curr_pc,
block_trace, cycles_delta);
state->current_cycle = state->target_cycle - cycles_delta;
if (LOG_LEVEL >= INFO_L)
lightrec_print_info(state);
- return state->next_pc;
+ return state->curr_pc;
}
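With next_pc renamed to curr_pc, lightrec_execute() keeps its contract: it takes the entry PC plus a target cycle count and returns the PC to resume from. A minimal frontend loop might look like the sketch below, where next_event_cycle() and handle_events() are placeholders for the caller's own scheduling, not lightrec APIs.

/* Frontend-side sketch; next_event_cycle() and handle_events() are
 * placeholders for the caller's own event scheduling. */
u32 pc = 0xbfc00000;	/* PS1 reset vector */

while (running) {
	pc = lightrec_execute(state, pc, next_event_cycle());
	handle_events(state);
}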
u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc,
state->tlsf = tlsf;
state->with_32bit_lut = with_32bit_lut;
state->in_delay_slot_n = 0xff;
+ state->cycles_per_op = 2;
state->block_cache = lightrec_blockcache_init(state);
if (!state->block_cache)
memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
}
-void lightrec_set_invalidate_mode(struct lightrec_state *state, bool dma_only)
+void lightrec_set_unsafe_opt_flags(struct lightrec_state *state, u32 flags)
{
- if (state->invalidate_from_dma_only != dma_only)
+ if ((flags ^ state->opt_flags) & LIGHTREC_OPT_INV_DMA_ONLY)
lightrec_invalidate_all(state);
- state->invalidate_from_dma_only = dma_only;
+ state->opt_flags = flags;
}
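On the frontend side, enabling the unsafe fast path is now a single call; as shown above, the function flushes the block cache itself whenever the LIGHTREC_OPT_INV_DMA_ONLY bit actually changes.

/* From now on only DMA transfers invalidate compiled code; CPU stores
 * into code regions go unnoticed, hence "unsafe". */
lightrec_set_unsafe_opt_flags(state, LIGHTREC_OPT_INV_DMA_ONLY);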
void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
return &state->regs;
}
+
+void lightrec_set_cycles_per_opcode(struct lightrec_state *state, u32 cycles)
+{
+ state->cycles_per_op = cycles;
+}
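lightrec_cycles_of_opcode() now reads this value instead of the hard-coded 2, which remains the default set in lightrec_init() above. A frontend that wants a faster virtual CPU could, for instance, drop it to one cycle per opcode:

/* Example: count every MIPS opcode as a single cycle (default is 2). */
lightrec_set_cycles_per_opcode(state, 1);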