X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=deps%2Flightrec%2Finterpreter.c;h=74cbca50f7c71cdb5c24d80ad60b44e3da8f0c63;hb=f99193c2c2ec0ab56636773949fae34d847d91b0;hp=80a07f32534d6279ff07f337814db5ab8c4a6558;hpb=384af87540d751ef274e5956d58f4bbc153a34a9;p=pcsx_rearmed.git

diff --git a/deps/lightrec/interpreter.c b/deps/lightrec/interpreter.c
index 80a07f32..74cbca50 100644
--- a/deps/lightrec/interpreter.c
+++ b/deps/lightrec/interpreter.c
@@ -31,6 +31,7 @@ struct interpreter {
 	struct opcode *op;
 	u32 cycles;
 	bool delay_slot;
+	bool load_delay;
 	u16 offset;
 };
 
@@ -74,7 +75,7 @@ static inline u32 jump_skip(struct interpreter *inter)
 
 static inline u32 jump_next(struct interpreter *inter)
 {
-	inter->cycles += lightrec_cycles_of_opcode(inter->op->c);
+	inter->cycles += lightrec_cycles_of_opcode(inter->state, inter->op->c);
 
 	if (unlikely(inter->delay_slot))
 		return 0;
@@ -84,7 +85,7 @@ static inline u32 jump_next(struct interpreter *inter)
 
 static inline u32 jump_after_branch(struct interpreter *inter)
 {
-	inter->cycles += lightrec_cycles_of_opcode(inter->op->c);
+	inter->cycles += lightrec_cycles_of_opcode(inter->state, inter->op->c);
 
 	if (unlikely(inter->delay_slot))
 		return 0;
@@ -100,11 +101,11 @@ static void update_cycles_before_branch(struct interpreter *inter)
 	u32 cycles;
 
 	if (!inter->delay_slot) {
-		cycles = lightrec_cycles_of_opcode(inter->op->c);
+		cycles = lightrec_cycles_of_opcode(inter->state, inter->op->c);
 
 		if (!op_flag_no_ds(inter->op->flags) &&
 		    has_delay_slot(inter->op->c))
-			cycles += lightrec_cycles_of_opcode(next_op(inter)->c);
+			cycles += lightrec_cycles_of_opcode(inter->state, next_op(inter)->c);
 
 		inter->cycles += cycles;
 		inter->state->current_cycle += inter->cycles;
@@ -150,14 +151,13 @@ static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
 		.state = state,
 		.cycles = inter->cycles,
 		.delay_slot = true,
-		.block = NULL,
+		.load_delay = true,
 	};
 	bool run_first_op = false, dummy_ld = false, save_rs = false,
 	     load_in_ds, branch_in_ds = false, branch_at_addr = false,
 	     branch_taken;
-	u32 old_rs, new_rs, new_rt;
-	u32 next_pc, ds_next_pc;
-	u32 cause, epc;
+	u32 new_rt, old_rs = 0, new_rs = 0;
+	u32 next_pc, ds_next_pc, epc;
 
 	if (op->i.op == OP_CP0 && op->r.rs == OP_CP0_RFE) {
 		/* When an IRQ happens, the PSX exception handlers (when done)
@@ -168,11 +168,13 @@ static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
 		 * but on branch boundaries, we need to adjust the return
 		 * address so that the GTE opcode is effectively executed.
 		 */
-		cause = state->regs.cp0[13];
 		epc = state->regs.cp0[14];
 
-		if (!(cause & 0x7c) && epc == pc - 4)
-			pc -= 4;
+		if (epc == pc - 4) {
+			op_next = lightrec_read_opcode(state, epc);
+			if (op_next.i.op == OP_CP2)
+				pc -= 4;
+		}
 	}
 
 	if (inter->delay_slot) {
@@ -236,7 +238,7 @@ static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
 			branch_taken = is_branch_taken(reg_cache, op_next);
 			pr_debug("Target of impossible branch is a branch, "
 				 "%staken.\n", branch_taken ? "" : "not ");
-			inter->cycles += lightrec_cycles_of_opcode(op_next);
+			inter->cycles += lightrec_cycles_of_opcode(inter->state, op_next);
 			old_rs = reg_cache[op_next.r.rs];
 		} else {
 			new_op.c = op_next;
@@ -252,7 +254,7 @@ static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
 				reg_cache[op->r.rs] = old_rs;
 			}
 
-			inter->cycles += lightrec_cycles_of_opcode(op_next);
+			inter->cycles += lightrec_cycles_of_opcode(inter->state, op_next);
 		}
 	} else {
 		next_pc = int_get_ds_pc(inter, 2);
@@ -293,7 +295,7 @@ static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
 	if (dummy_ld)
 		reg_cache[op->r.rt] = new_rt;
 
-	inter->cycles += lightrec_cycles_of_opcode(op->c);
+	inter->cycles += lightrec_cycles_of_opcode(inter->state, op->c);
 
 	if (branch_at_addr && branch_taken) {
 		/* If the branch at the target of the branch opcode is taken,
@@ -306,7 +308,7 @@ static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
 		inter2.op = &new_op;
 		inter2.block = NULL;
 
-		inter->cycles += lightrec_cycles_of_opcode(op_next);
+		inter->cycles += lightrec_cycles_of_opcode(inter->state, op_next);
 
 		pr_debug("Running delay slot of branch at target of impossible "
 			 "branch\n");
@@ -318,9 +320,9 @@ static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
 
 static u32 int_unimplemented(struct interpreter *inter)
 {
-	pr_warn("Unimplemented opcode 0x%08x\n", inter->op->opcode);
+	lightrec_set_exit_flags(inter->state, LIGHTREC_EXIT_UNKNOWN_OP);
 
-	return jump_next(inter);
+	return inter->block->pc + (inter->offset << 2);
 }
 
 static u32 int_jump(struct interpreter *inter, bool link)
@@ -600,7 +602,7 @@ static u32 int_io(struct interpreter *inter, bool is_load)
 	u32 *reg_cache = inter->state->regs.gpr;
 	u32 val, *flags = NULL;
 
-	if (inter->block)
+	if (!inter->load_delay && inter->block)
 		flags = &inter->op->flags;
 
 	val = lightrec_rw(inter->state, inter->op->c,
@@ -997,12 +999,20 @@ static u32 int_META_MULT2(struct interpreter *inter)
 	}
 
 	if (!op_flag_no_hi(inter->op->flags)) {
-		if (c.r.op >= 32)
+		if (c.r.op >= 32) {
 			reg_cache[reg_hi] = rs << (c.r.op - 32);
-		else if (c.i.op == OP_META_MULT2)
-			reg_cache[reg_hi] = (s32) rs >> (32 - c.r.op);
-		else
-			reg_cache[reg_hi] = rs >> (32 - c.r.op);
+		}
+		else if (c.i.op == OP_META_MULT2) {
+			if (c.r.op)
+				reg_cache[reg_hi] = (s32) rs >> (32 - c.r.op);
+			else
+				reg_cache[reg_hi] = (s32) rs >> 31;
+		} else {
+			if (c.r.op)
+				reg_cache[reg_hi] = rs >> (32 - c.r.op);
+			else
+				reg_cache[reg_hi] = 0;
+		}
 	}
 
 	return jump_next(inter);
@@ -1057,6 +1067,8 @@ static const lightrec_int_func_t int_standard[64] = {
 	[OP_META]		= int_META,
 	[OP_META_MULT2]		= int_META_MULT2,
 	[OP_META_MULTU2]	= int_META_MULT2,
+	[OP_META_LWU]		= int_load,
+	[OP_META_SWU]		= int_store,
 };
 
 static const lightrec_int_func_t int_special[64] = {
@@ -1178,20 +1190,18 @@ static u32 int_META(struct interpreter *inter)
 static u32 lightrec_emulate_block_list(struct lightrec_state *state,
 				       struct block *block, u32 offset)
 {
-	struct interpreter inter;
+	struct interpreter inter = {
+		.block = block,
+		.state = state,
+		.offset = offset,
+		.op = &block->opcode_list[offset],
+	};
 	u32 pc;
 
-	inter.block = block;
-	inter.state = state;
-	inter.offset = offset;
-	inter.op = &block->opcode_list[offset];
-	inter.cycles = 0;
-	inter.delay_slot = false;
-
 	pc = lightrec_int_op(&inter);
 
 	/* Add the cycles of the last branch */
-	inter.cycles += lightrec_cycles_of_opcode(inter.op->c);
+	inter.cycles += lightrec_cycles_of_opcode(inter.state, inter.op->c);
 
 	state->current_cycle += inter.cycles;
 
@@ -1205,7 +1215,7 @@ u32 lightrec_emulate_block(struct lightrec_state *state, struct block *block, u3
 	if (offset < block->nb_ops)
 		return lightrec_emulate_block_list(state, block, offset);
 
-	pr_err("PC 0x%x is outside block at PC 0x%x\n", pc, block->pc);
+	pr_err(PC_FMT" is outside block at "PC_FMT"\n", pc, block->pc);
 
 	lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
 
@@ -1243,9 +1253,8 @@ u32 lightrec_handle_load_delay(struct lightrec_state *state,
 	struct interpreter inter = {
 		.block = block,
 		.state = state,
-		.offset = 0,
 		.op = op,
-		.cycles = 0,
+		.load_delay = true,
 	};
 	bool branch_taken;
 	u32 reg_mask, next_pc;
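
Note on the int_META_MULT2 hunk above: the HI-word computation is now split so that a shift amount of zero gets its own path. For 0 < n < 32 the high 32 bits of rs * 2^n can be read off as rs >> (32 - n), but with n == 0 that expression would shift a 32-bit value by 32 bits, which is undefined behaviour in C; the patched code therefore uses the sign extension (s32) rs >> 31 for the signed OP_META_MULT2 case and 0 for the unsigned OP_META_MULTU2 case. The snippet below is a minimal standalone sketch of that rule only, not lightrec code; the helper names are invented for illustration, and it relies on arithmetic right shift for signed values, as the patched code does.

#include <stdint.h>

/* High 32 bits of rs * 2^n for a signed rs, with n < 32.  The n == 0 case
 * must be special-cased: "rs >> (32 - n)" would otherwise shift a 32-bit
 * value by 32, which is undefined behaviour. */
static uint32_t mult2_hi_signed(uint32_t rs, unsigned int n)
{
	if (n == 0)
		return (uint32_t)((int32_t)rs >> 31); /* sign bits only */

	return (uint32_t)((int32_t)rs >> (32 - n));
}

/* Same for an unsigned rs: with n == 0, no bits spill into HI. */
static uint32_t mult2_hi_unsigned(uint32_t rs, unsigned int n)
{
	if (n == 0)
		return 0;

	return rs >> (32 - n);
}

For example, mult2_hi_signed(0x80000000, 0) returns 0xffffffff (the sign extension of the 64-bit product), while mult2_hi_unsigned(0x80000000, 0) returns 0.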