2 * Copyright (C) 2019-2020 Paul Cercueil <paul@crapouillou.net>
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
15 #include "disassembler.h"
16 #include "interpreter.h"
17 #include "lightrec-private.h"
18 #include "optimizer.h"
25 static u32 int_CP0(struct interpreter *inter);
26 static u32 int_CP2(struct interpreter *inter);
27 static u32 int_SPECIAL(struct interpreter *inter);
28 static u32 int_REGIMM(struct interpreter *inter);
29 static u32 int_branch(struct interpreter *inter, u32 pc,
30 union code code, bool branch);
32 typedef u32 (*lightrec_int_func_t)(struct interpreter *inter);
34 static const lightrec_int_func_t int_standard[64];
37 struct lightrec_state *state;
44 static inline u32 execute(lightrec_int_func_t func, struct interpreter *inter)
46 return (*func)(inter);
49 static inline u32 jump_skip(struct interpreter *inter)
51 inter->op = inter->op->next;
53 return execute(int_standard[inter->op->i.op], inter);
56 static inline u32 jump_next(struct interpreter *inter)
58 inter->cycles += lightrec_cycles_of_opcode(inter->op->c);
60 if (unlikely(inter->delay_slot))
63 return jump_skip(inter);
66 static inline u32 jump_after_branch(struct interpreter *inter)
68 inter->cycles += lightrec_cycles_of_opcode(inter->op->c);
70 if (unlikely(inter->delay_slot))
73 inter->op = inter->op->next;
75 return jump_skip(inter);
78 static void update_cycles_before_branch(struct interpreter *inter)
82 if (!inter->delay_slot) {
83 cycles = lightrec_cycles_of_opcode(inter->op->c);
85 if (has_delay_slot(inter->op->c) &&
86 !(inter->op->flags & LIGHTREC_NO_DS))
87 cycles += lightrec_cycles_of_opcode(inter->op->next->c);
89 inter->cycles += cycles;
90 inter->state->current_cycle += inter->cycles;
91 inter->cycles = -cycles;
95 static bool is_branch_taken(const u32 *reg_cache, union code op)
99 return op.r.op == OP_SPECIAL_JR || op.r.op == OP_SPECIAL_JALR;
105 return reg_cache[op.r.rs] == reg_cache[op.r.rt];
108 return reg_cache[op.r.rs] != reg_cache[op.r.rt];
112 case OP_REGIMM_BLTZAL:
113 return (s32)reg_cache[op.r.rs] < 0;
115 case OP_REGIMM_BGEZAL:
116 return (s32)reg_cache[op.r.rs] >= 0;
/* Emulate the delay slot of the branch located at "pc".
 *
 * This is the most intricate part of the interpreter: it handles
 * - the RFE/GTE return-address quirk (see comment below);
 * - branches located in the delay slot of another branch;
 * - delayed loads in the delay slot, which may require executing the first
 *   opcode of the *target* block before the delay slot opcode itself, with
 *   careful save/restore of the involved registers;
 * - an "impossible branch" whose target is itself a branch.
 *
 * "branch" tells whether the branch owning this delay slot was taken.
 * Returns the PC of the next block to execute. */
125 static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
127 struct lightrec_state *state = inter->state;
128 u32 *reg_cache = state->native_reg_cache;
/* "op" is the delay slot opcode; "new_op" is scratch storage for a
 * synthesized opcode (used for the impossible-branch case below). */
129 struct opcode new_op, *op = inter->op->next;
/* Secondary interpreter context used to execute opcodes out of order
 * (first opcode of the target block, then the delay slot opcode). */
131 struct interpreter inter2 = {
133 .cycles = inter->cycles,
137 bool run_first_op = false, dummy_ld = false, save_rs = false,
138 load_in_ds, branch_in_ds = false, branch_at_addr = false,
140 u32 old_rs, new_rs, new_rt;
141 u32 next_pc, ds_next_pc;
144 if (op->i.op == OP_CP0 && op->r.rs == OP_CP0_RFE) {
145 /* When an IRQ happens, the PSX exception handlers (when done)
146 * will jump back to the instruction that was executed right
147 * before the IRQ, unless it was a GTE opcode; in that case, it
148 * jumps to the instruction right after.
149 * Since we will never handle the IRQ right after a GTE opcode,
150 * but on branch boundaries, we need to adjust the return
151 * address so that the GTE opcode is effectively executed.
/* CP0 reg 13 is Cause, reg 14 is EPC; Cause bits 2..6 hold ExcCode,
 * so "!(cause & 0x7c)" means "exception was an interrupt". */
153 cause = (*state->ops.cop0_ops.cfc)(state, 13);
154 epc = (*state->ops.cop0_ops.cfc)(state, 14);
156 if (!(cause & 0x7c) && epc == pc - 4)
/* Nested delay slot: nothing more to do here, just unwind. */
160 if (inter->delay_slot) {
161 /* The branch opcode was in a delay slot of another branch
162 * opcode. Just return the target address of the second
167 /* An opcode located in the delay slot performing a delayed read
168 * requires special handling; we will always resort to using the
169 * interpreter in that case.
170 * Same goes for when we have a branch in a delay slot of another
172 load_in_ds = load_in_delay_slot(op->c);
173 branch_in_ds = has_delay_slot(op->c);
/* Peek at the first opcode of the branch target. */
176 if (load_in_ds || branch_in_ds)
177 op_next = lightrec_read_opcode(state, pc);
180 /* Verify that the next block actually reads the
181 * destination register of the delay slot opcode. */
182 run_first_op = opcode_reads_register(op_next, op->r.rt);
190 if (load_in_ds && run_first_op) {
193 /* If the first opcode of the next block writes the
194 * regiser used as the address for the load, we need to
195 * reset to the old value after it has been executed,
196 * then restore the new value after the delay slot
197 * opcode has been executed. */
198 save_rs = opcode_reads_register(op->c, op->r.rs) &&
199 opcode_writes_register(op_next, op->r.rs);
201 old_rs = reg_cache[op->r.rs];
203 /* If both the first opcode of the next block and the
204 * delay slot opcode write to the same register, the
205 * value written by the delay slot opcode is
207 dummy_ld = opcode_writes_register(op_next, op->r.rt);
212 } else if (has_delay_slot(op_next)) {
213 /* The first opcode of the next block is a branch, so we
214 * cannot execute it here, because of the load delay.
215 * Just check whether or not the branch would be taken,
216 * and save that info into the interpreter struct. */
217 branch_at_addr = true;
218 branch_taken = is_branch_taken(reg_cache, op_next);
219 pr_debug("Target of impossible branch is a branch, "
220 "%staken.\n", branch_taken ? "" : "not ");
228 /* Execute the first opcode of the next block */
229 (*int_standard[inter2.op->i.op])(&inter2);
/* Roll back rs so the delay-slot load uses the pre-branch address. */
232 new_rs = reg_cache[op->r.rs];
233 reg_cache[op->r.rs] = old_rs;
236 inter->cycles += lightrec_cycles_of_opcode(op_next);
/* Default next PC: the instruction right after the delay slot. */
239 next_pc = inter->block->pc
240 + (inter->op->offset + 2) * sizeof(u32);
243 inter2.block = inter->block;
245 inter2.cycles = inter->cycles;
248 new_rt = reg_cache[op->r.rt];
250 /* Execute delay slot opcode */
252 ds_next_pc = int_branch(&inter2, pc, op_next, branch_taken);
254 ds_next_pc = (*int_standard[inter2.op->i.op])(&inter2);
256 if (branch_at_addr && !branch_taken) {
257 /* If the branch at the target of the branch opcode is not
258 * taken, we jump to its delay slot */
259 next_pc = pc + sizeof(u32);
260 } else if (!branch && branch_in_ds) {
261 next_pc = ds_next_pc;
/* Restore the registers clobbered while reordering execution. */
265 reg_cache[op->r.rs] = new_rs;
267 reg_cache[op->r.rt] = new_rt;
269 inter->cycles += lightrec_cycles_of_opcode(op->c);
271 if (branch_at_addr && branch_taken) {
272 /* If the branch at the target of the branch opcode is taken,
273 * we execute its delay slot here, and jump to its target
275 op_next = lightrec_read_opcode(state, pc + 4);
279 new_op.offset = sizeof(u32);
284 inter->cycles += lightrec_cycles_of_opcode(op_next);
286 pr_debug("Running delay slot of branch at target of impossible "
288 (*int_standard[inter2.op->i.op])(&inter2);
294 static u32 int_unimplemented(struct interpreter *inter)
296 pr_warn("Unimplemented opcode 0x%08x\n", inter->op->opcode);
298 return jump_next(inter);
301 static u32 int_jump(struct interpreter *inter, bool link)
303 struct lightrec_state *state = inter->state;
304 u32 old_pc = inter->block->pc + inter->op->offset * sizeof(u32);
305 u32 pc = (old_pc & 0xf0000000) | (inter->op->j.imm << 2);
308 state->native_reg_cache[31] = old_pc + 8;
310 if (inter->op->flags & LIGHTREC_NO_DS)
313 return int_delay_slot(inter, pc, true);
316 static u32 int_J(struct interpreter *inter)
318 return int_jump(inter, false);
321 static u32 int_JAL(struct interpreter *inter)
323 return int_jump(inter, true);
326 static u32 int_jumpr(struct interpreter *inter, u8 link_reg)
328 struct lightrec_state *state = inter->state;
329 u32 old_pc, next_pc = state->native_reg_cache[inter->op->r.rs];
332 old_pc = inter->block->pc + inter->op->offset * sizeof(u32);
333 state->native_reg_cache[link_reg] = old_pc + 8;
336 if (inter->op->flags & LIGHTREC_NO_DS)
339 return int_delay_slot(inter, next_pc, true);
342 static u32 int_special_JR(struct interpreter *inter)
344 return int_jumpr(inter, 0);
347 static u32 int_special_JALR(struct interpreter *inter)
349 return int_jumpr(inter, inter->op->r.rd);
352 static u32 int_do_branch(struct interpreter *inter, u32 old_pc, u32 next_pc)
354 if (!inter->delay_slot &&
355 (inter->op->flags & LIGHTREC_LOCAL_BRANCH) &&
356 (s16)inter->op->c.i.imm >= 0) {
357 next_pc = old_pc + ((1 + (s16)inter->op->c.i.imm) << 2);
358 next_pc = lightrec_emulate_block(inter->block, next_pc);
364 static u32 int_branch(struct interpreter *inter, u32 pc,
365 union code code, bool branch)
367 u32 next_pc = pc + 4 + ((s16)code.i.imm << 2);
369 update_cycles_before_branch(inter);
371 if (inter->op->flags & LIGHTREC_NO_DS) {
373 return int_do_branch(inter, pc, next_pc);
375 return jump_next(inter);
378 if (!inter->delay_slot)
379 next_pc = int_delay_slot(inter, next_pc, branch);
382 return int_do_branch(inter, pc, next_pc);
384 if (inter->op->flags & LIGHTREC_EMULATE_BRANCH)
387 return jump_after_branch(inter);
390 static u32 int_beq(struct interpreter *inter, bool bne)
392 u32 rs, rt, old_pc = inter->block->pc + inter->op->offset * sizeof(u32);
394 rs = inter->state->native_reg_cache[inter->op->i.rs];
395 rt = inter->state->native_reg_cache[inter->op->i.rt];
397 return int_branch(inter, old_pc, inter->op->c, (rs == rt) ^ bne);
400 static u32 int_BEQ(struct interpreter *inter)
402 return int_beq(inter, false);
405 static u32 int_BNE(struct interpreter *inter)
407 return int_beq(inter, true);
410 static u32 int_bgez(struct interpreter *inter, bool link, bool lt, bool regimm)
412 u32 old_pc = inter->block->pc + inter->op->offset * sizeof(u32);
416 inter->state->native_reg_cache[31] = old_pc + 8;
418 rs = (s32)inter->state->native_reg_cache[inter->op->i.rs];
420 return int_branch(inter, old_pc, inter->op->c,
421 ((regimm && !rs) || rs > 0) ^ lt);
424 static u32 int_regimm_BLTZ(struct interpreter *inter)
426 return int_bgez(inter, false, true, true);
429 static u32 int_regimm_BGEZ(struct interpreter *inter)
431 return int_bgez(inter, false, false, true);
434 static u32 int_regimm_BLTZAL(struct interpreter *inter)
436 return int_bgez(inter, true, true, true);
439 static u32 int_regimm_BGEZAL(struct interpreter *inter)
441 return int_bgez(inter, true, false, true);
444 static u32 int_BLEZ(struct interpreter *inter)
446 return int_bgez(inter, false, true, false);
449 static u32 int_BGTZ(struct interpreter *inter)
451 return int_bgez(inter, false, false, false);
454 static u32 int_cfc(struct interpreter *inter)
456 struct lightrec_state *state = inter->state;
457 const struct opcode *op = inter->op;
460 val = lightrec_mfc(state, op->c);
462 if (likely(op->r.rt))
463 state->native_reg_cache[op->r.rt] = val;
465 return jump_next(inter);
468 static u32 int_ctc(struct interpreter *inter)
470 struct lightrec_state *state = inter->state;
471 const struct opcode *op = inter->op;
473 lightrec_mtc(state, op->c, state->native_reg_cache[op->r.rt]);
475 /* If we have a MTC0 or CTC0 to CP0 register 12 (Status) or 13 (Cause),
476 * return early so that the emulator will be able to check software
477 * interrupt status. */
478 if (op->i.op == OP_CP0 && (op->r.rd == 12 || op->r.rd == 13))
479 return inter->block->pc + (op->offset + 1) * sizeof(u32);
481 return jump_next(inter);
484 static u32 int_cp0_RFE(struct interpreter *inter)
486 struct lightrec_state *state = inter->state;
489 /* Read CP0 Status register (r12) */
490 status = state->ops.cop0_ops.mfc(state, 12);
492 /* Switch the bits */
493 status = ((status & 0x3c) >> 2) | (status & ~0xf);
496 state->ops.cop0_ops.ctc(state, 12, status);
498 return jump_next(inter);
501 static u32 int_CP(struct interpreter *inter)
503 struct lightrec_state *state = inter->state;
504 const struct lightrec_cop_ops *ops;
505 const struct opcode *op = inter->op;
507 if ((op->j.imm >> 25) & 1)
508 ops = &state->ops.cop2_ops;
510 ops = &state->ops.cop0_ops;
512 (*ops->op)(state, (op->j.imm) & ~(1 << 25));
514 return jump_next(inter);
517 static u32 int_ADDI(struct interpreter *inter)
519 u32 *reg_cache = inter->state->native_reg_cache;
520 struct opcode_i *op = &inter->op->i;
523 reg_cache[op->rt] = reg_cache[op->rs] + (s32)(s16)op->imm;
525 return jump_next(inter);
528 static u32 int_SLTI(struct interpreter *inter)
530 u32 *reg_cache = inter->state->native_reg_cache;
531 struct opcode_i *op = &inter->op->i;
534 reg_cache[op->rt] = (s32)reg_cache[op->rs] < (s32)(s16)op->imm;
536 return jump_next(inter);
539 static u32 int_SLTIU(struct interpreter *inter)
541 u32 *reg_cache = inter->state->native_reg_cache;
542 struct opcode_i *op = &inter->op->i;
545 reg_cache[op->rt] = reg_cache[op->rs] < (u32)(s32)(s16)op->imm;
547 return jump_next(inter);
550 static u32 int_ANDI(struct interpreter *inter)
552 u32 *reg_cache = inter->state->native_reg_cache;
553 struct opcode_i *op = &inter->op->i;
556 reg_cache[op->rt] = reg_cache[op->rs] & op->imm;
558 return jump_next(inter);
561 static u32 int_ORI(struct interpreter *inter)
563 u32 *reg_cache = inter->state->native_reg_cache;
564 struct opcode_i *op = &inter->op->i;
567 reg_cache[op->rt] = reg_cache[op->rs] | op->imm;
569 return jump_next(inter);
572 static u32 int_XORI(struct interpreter *inter)
574 u32 *reg_cache = inter->state->native_reg_cache;
575 struct opcode_i *op = &inter->op->i;
578 reg_cache[op->rt] = reg_cache[op->rs] ^ op->imm;
580 return jump_next(inter);
583 static u32 int_LUI(struct interpreter *inter)
585 struct opcode_i *op = &inter->op->i;
587 inter->state->native_reg_cache[op->rt] = op->imm << 16;
589 return jump_next(inter);
592 static u32 int_io(struct interpreter *inter, bool is_load)
594 struct opcode_i *op = &inter->op->i;
595 u32 *reg_cache = inter->state->native_reg_cache;
598 val = lightrec_rw(inter->state, inter->op->c,
599 reg_cache[op->rs], reg_cache[op->rt],
602 if (is_load && op->rt)
603 reg_cache[op->rt] = val;
605 return jump_next(inter);
608 static u32 int_load(struct interpreter *inter)
610 return int_io(inter, true);
613 static u32 int_store(struct interpreter *inter)
617 if (likely(!(inter->op->flags & LIGHTREC_SMC)))
618 return int_io(inter, false);
620 lightrec_rw(inter->state, inter->op->c,
621 inter->state->native_reg_cache[inter->op->i.rs],
622 inter->state->native_reg_cache[inter->op->i.rt],
625 next_pc = inter->block->pc + (inter->op->offset + 1) * 4;
627 /* Invalidate next PC, to force the rest of the block to be rebuilt */
628 lightrec_invalidate(inter->state, next_pc, 4);
633 static u32 int_LWC2(struct interpreter *inter)
635 return int_io(inter, false);
638 static u32 int_special_SLL(struct interpreter *inter)
640 struct opcode *op = inter->op;
643 if (op->opcode) { /* Handle NOPs */
644 rt = inter->state->native_reg_cache[op->r.rt];
645 inter->state->native_reg_cache[op->r.rd] = rt << op->r.imm;
648 return jump_next(inter);
651 static u32 int_special_SRL(struct interpreter *inter)
653 struct opcode *op = inter->op;
654 u32 rt = inter->state->native_reg_cache[op->r.rt];
656 inter->state->native_reg_cache[op->r.rd] = rt >> op->r.imm;
658 return jump_next(inter);
661 static u32 int_special_SRA(struct interpreter *inter)
663 struct opcode *op = inter->op;
664 s32 rt = inter->state->native_reg_cache[op->r.rt];
666 inter->state->native_reg_cache[op->r.rd] = rt >> op->r.imm;
668 return jump_next(inter);
671 static u32 int_special_SLLV(struct interpreter *inter)
673 struct opcode *op = inter->op;
674 u32 rs = inter->state->native_reg_cache[op->r.rs];
675 u32 rt = inter->state->native_reg_cache[op->r.rt];
677 inter->state->native_reg_cache[op->r.rd] = rt << (rs & 0x1f);
679 return jump_next(inter);
682 static u32 int_special_SRLV(struct interpreter *inter)
684 struct opcode *op = inter->op;
685 u32 rs = inter->state->native_reg_cache[op->r.rs];
686 u32 rt = inter->state->native_reg_cache[op->r.rt];
688 inter->state->native_reg_cache[op->r.rd] = rt >> (rs & 0x1f);
690 return jump_next(inter);
693 static u32 int_special_SRAV(struct interpreter *inter)
695 struct opcode *op = inter->op;
696 u32 rs = inter->state->native_reg_cache[op->r.rs];
697 s32 rt = inter->state->native_reg_cache[op->r.rt];
699 inter->state->native_reg_cache[op->r.rd] = rt >> (rs & 0x1f);
701 return jump_next(inter);
704 static u32 int_syscall_break(struct interpreter *inter)
707 if (inter->op->r.op == OP_SPECIAL_BREAK)
708 inter->state->exit_flags |= LIGHTREC_EXIT_BREAK;
710 inter->state->exit_flags |= LIGHTREC_EXIT_SYSCALL;
712 return inter->block->pc + inter->op->offset * sizeof(u32);
715 static u32 int_special_MFHI(struct interpreter *inter)
717 u32 *reg_cache = inter->state->native_reg_cache;
718 struct opcode_r *op = &inter->op->r;
721 reg_cache[op->rd] = reg_cache[REG_HI];
723 return jump_next(inter);
726 static u32 int_special_MTHI(struct interpreter *inter)
728 u32 *reg_cache = inter->state->native_reg_cache;
730 reg_cache[REG_HI] = reg_cache[inter->op->r.rs];
732 return jump_next(inter);
735 static u32 int_special_MFLO(struct interpreter *inter)
737 u32 *reg_cache = inter->state->native_reg_cache;
738 struct opcode_r *op = &inter->op->r;
741 reg_cache[op->rd] = reg_cache[REG_LO];
743 return jump_next(inter);
746 static u32 int_special_MTLO(struct interpreter *inter)
748 u32 *reg_cache = inter->state->native_reg_cache;
750 reg_cache[REG_LO] = reg_cache[inter->op->r.rs];
752 return jump_next(inter);
755 static u32 int_special_MULT(struct interpreter *inter)
757 u32 *reg_cache = inter->state->native_reg_cache;
758 s32 rs = reg_cache[inter->op->r.rs];
759 s32 rt = reg_cache[inter->op->r.rt];
760 u64 res = (s64)rs * (s64)rt;
762 if (!(inter->op->flags & LIGHTREC_MULT32))
763 reg_cache[REG_HI] = res >> 32;
764 reg_cache[REG_LO] = res;
766 return jump_next(inter);
769 static u32 int_special_MULTU(struct interpreter *inter)
771 u32 *reg_cache = inter->state->native_reg_cache;
772 u32 rs = reg_cache[inter->op->r.rs];
773 u32 rt = reg_cache[inter->op->r.rt];
774 u64 res = (u64)rs * (u64)rt;
776 if (!(inter->op->flags & LIGHTREC_MULT32))
777 reg_cache[REG_HI] = res >> 32;
778 reg_cache[REG_LO] = res;
780 return jump_next(inter);
783 static u32 int_special_DIV(struct interpreter *inter)
785 u32 *reg_cache = inter->state->native_reg_cache;
786 s32 rs = reg_cache[inter->op->r.rs];
787 s32 rt = reg_cache[inter->op->r.rt];
792 lo = (rs < 0) * 2 - 1;
798 reg_cache[REG_HI] = hi;
799 reg_cache[REG_LO] = lo;
801 return jump_next(inter);
804 static u32 int_special_DIVU(struct interpreter *inter)
806 u32 *reg_cache = inter->state->native_reg_cache;
807 u32 rs = reg_cache[inter->op->r.rs];
808 u32 rt = reg_cache[inter->op->r.rt];
819 reg_cache[REG_HI] = hi;
820 reg_cache[REG_LO] = lo;
822 return jump_next(inter);
825 static u32 int_special_ADD(struct interpreter *inter)
827 u32 *reg_cache = inter->state->native_reg_cache;
828 struct opcode_r *op = &inter->op->r;
829 s32 rs = reg_cache[op->rs];
830 s32 rt = reg_cache[op->rt];
833 reg_cache[op->rd] = rs + rt;
835 return jump_next(inter);
838 static u32 int_special_SUB(struct interpreter *inter)
840 u32 *reg_cache = inter->state->native_reg_cache;
841 struct opcode_r *op = &inter->op->r;
842 u32 rs = reg_cache[op->rs];
843 u32 rt = reg_cache[op->rt];
846 reg_cache[op->rd] = rs - rt;
848 return jump_next(inter);
851 static u32 int_special_AND(struct interpreter *inter)
853 u32 *reg_cache = inter->state->native_reg_cache;
854 struct opcode_r *op = &inter->op->r;
855 u32 rs = reg_cache[op->rs];
856 u32 rt = reg_cache[op->rt];
859 reg_cache[op->rd] = rs & rt;
861 return jump_next(inter);
864 static u32 int_special_OR(struct interpreter *inter)
866 u32 *reg_cache = inter->state->native_reg_cache;
867 struct opcode_r *op = &inter->op->r;
868 u32 rs = reg_cache[op->rs];
869 u32 rt = reg_cache[op->rt];
872 reg_cache[op->rd] = rs | rt;
874 return jump_next(inter);
877 static u32 int_special_XOR(struct interpreter *inter)
879 u32 *reg_cache = inter->state->native_reg_cache;
880 struct opcode_r *op = &inter->op->r;
881 u32 rs = reg_cache[op->rs];
882 u32 rt = reg_cache[op->rt];
885 reg_cache[op->rd] = rs ^ rt;
887 return jump_next(inter);
890 static u32 int_special_NOR(struct interpreter *inter)
892 u32 *reg_cache = inter->state->native_reg_cache;
893 struct opcode_r *op = &inter->op->r;
894 u32 rs = reg_cache[op->rs];
895 u32 rt = reg_cache[op->rt];
898 reg_cache[op->rd] = ~(rs | rt);
900 return jump_next(inter);
903 static u32 int_special_SLT(struct interpreter *inter)
905 u32 *reg_cache = inter->state->native_reg_cache;
906 struct opcode_r *op = &inter->op->r;
907 s32 rs = reg_cache[op->rs];
908 s32 rt = reg_cache[op->rt];
911 reg_cache[op->rd] = rs < rt;
913 return jump_next(inter);
916 static u32 int_special_SLTU(struct interpreter *inter)
918 u32 *reg_cache = inter->state->native_reg_cache;
919 struct opcode_r *op = &inter->op->r;
920 u32 rs = reg_cache[op->rs];
921 u32 rt = reg_cache[op->rt];
924 reg_cache[op->rd] = rs < rt;
926 return jump_next(inter);
929 static u32 int_META_SKIP(struct interpreter *inter)
931 return jump_skip(inter);
934 static u32 int_META_MOV(struct interpreter *inter)
936 u32 *reg_cache = inter->state->native_reg_cache;
937 struct opcode_r *op = &inter->op->r;
940 reg_cache[op->rd] = reg_cache[op->rs];
942 return jump_next(inter);
945 static u32 int_META_SYNC(struct interpreter *inter)
947 inter->state->current_cycle += inter->cycles;
950 return jump_skip(inter);
953 static const lightrec_int_func_t int_standard[64] = {
954 [OP_SPECIAL] = int_SPECIAL,
955 [OP_REGIMM] = int_REGIMM,
960 [OP_BLEZ] = int_BLEZ,
961 [OP_BGTZ] = int_BGTZ,
962 [OP_ADDI] = int_ADDI,
963 [OP_ADDIU] = int_ADDI,
964 [OP_SLTI] = int_SLTI,
965 [OP_SLTIU] = int_SLTIU,
966 [OP_ANDI] = int_ANDI,
968 [OP_XORI] = int_XORI,
981 [OP_SWL] = int_store,
983 [OP_SWR] = int_store,
984 [OP_LWC2] = int_LWC2,
985 [OP_SWC2] = int_store,
987 [OP_META_REG_UNLOAD] = int_META_SKIP,
988 [OP_META_BEQZ] = int_BEQ,
989 [OP_META_BNEZ] = int_BNE,
990 [OP_META_MOV] = int_META_MOV,
991 [OP_META_SYNC] = int_META_SYNC,
994 static const lightrec_int_func_t int_special[64] = {
995 [OP_SPECIAL_SLL] = int_special_SLL,
996 [OP_SPECIAL_SRL] = int_special_SRL,
997 [OP_SPECIAL_SRA] = int_special_SRA,
998 [OP_SPECIAL_SLLV] = int_special_SLLV,
999 [OP_SPECIAL_SRLV] = int_special_SRLV,
1000 [OP_SPECIAL_SRAV] = int_special_SRAV,
1001 [OP_SPECIAL_JR] = int_special_JR,
1002 [OP_SPECIAL_JALR] = int_special_JALR,
1003 [OP_SPECIAL_SYSCALL] = int_syscall_break,
1004 [OP_SPECIAL_BREAK] = int_syscall_break,
1005 [OP_SPECIAL_MFHI] = int_special_MFHI,
1006 [OP_SPECIAL_MTHI] = int_special_MTHI,
1007 [OP_SPECIAL_MFLO] = int_special_MFLO,
1008 [OP_SPECIAL_MTLO] = int_special_MTLO,
1009 [OP_SPECIAL_MULT] = int_special_MULT,
1010 [OP_SPECIAL_MULTU] = int_special_MULTU,
1011 [OP_SPECIAL_DIV] = int_special_DIV,
1012 [OP_SPECIAL_DIVU] = int_special_DIVU,
1013 [OP_SPECIAL_ADD] = int_special_ADD,
1014 [OP_SPECIAL_ADDU] = int_special_ADD,
1015 [OP_SPECIAL_SUB] = int_special_SUB,
1016 [OP_SPECIAL_SUBU] = int_special_SUB,
1017 [OP_SPECIAL_AND] = int_special_AND,
1018 [OP_SPECIAL_OR] = int_special_OR,
1019 [OP_SPECIAL_XOR] = int_special_XOR,
1020 [OP_SPECIAL_NOR] = int_special_NOR,
1021 [OP_SPECIAL_SLT] = int_special_SLT,
1022 [OP_SPECIAL_SLTU] = int_special_SLTU,
1025 static const lightrec_int_func_t int_regimm[64] = {
1026 [OP_REGIMM_BLTZ] = int_regimm_BLTZ,
1027 [OP_REGIMM_BGEZ] = int_regimm_BGEZ,
1028 [OP_REGIMM_BLTZAL] = int_regimm_BLTZAL,
1029 [OP_REGIMM_BGEZAL] = int_regimm_BGEZAL,
1032 static const lightrec_int_func_t int_cp0[64] = {
1033 [OP_CP0_MFC0] = int_cfc,
1034 [OP_CP0_CFC0] = int_cfc,
1035 [OP_CP0_MTC0] = int_ctc,
1036 [OP_CP0_CTC0] = int_ctc,
1037 [OP_CP0_RFE] = int_cp0_RFE,
1040 static const lightrec_int_func_t int_cp2_basic[64] = {
1041 [OP_CP2_BASIC_MFC2] = int_cfc,
1042 [OP_CP2_BASIC_CFC2] = int_cfc,
1043 [OP_CP2_BASIC_MTC2] = int_ctc,
1044 [OP_CP2_BASIC_CTC2] = int_ctc,
1047 static u32 int_SPECIAL(struct interpreter *inter)
1049 lightrec_int_func_t f = int_special[inter->op->r.op];
1051 return execute(f, inter);
1053 return int_unimplemented(inter);
1056 static u32 int_REGIMM(struct interpreter *inter)
1058 lightrec_int_func_t f = int_regimm[inter->op->r.rt];
1060 return execute(f, inter);
1062 return int_unimplemented(inter);
1065 static u32 int_CP0(struct interpreter *inter)
1067 lightrec_int_func_t f = int_cp0[inter->op->r.rs];
1069 return execute(f, inter);
1071 return int_CP(inter);
1074 static u32 int_CP2(struct interpreter *inter)
1076 if (inter->op->r.op == OP_CP2_BASIC) {
1077 lightrec_int_func_t f = int_cp2_basic[inter->op->r.rs];
1079 return execute(f, inter);
1082 return int_CP(inter);
1085 static u32 lightrec_int_op(struct interpreter *inter)
1087 return execute(int_standard[inter->op->i.op], inter);
1090 static u32 lightrec_emulate_block_list(struct block *block, struct opcode *op)
1092 struct interpreter inter;
1095 inter.block = block;
1096 inter.state = block->state;
1099 inter.delay_slot = false;
1101 pc = lightrec_int_op(&inter);
1103 /* Add the cycles of the last branch */
1104 inter.cycles += lightrec_cycles_of_opcode(inter.op->c);
1106 block->state->current_cycle += inter.cycles;
1111 u32 lightrec_emulate_block(struct block *block, u32 pc)
1113 u32 offset = (kunseg(pc) - kunseg(block->pc)) >> 2;
1116 for (op = block->opcode_list;
1117 op && (op->offset < offset); op = op->next);
1119 return lightrec_emulate_block_list(block, op);
1121 pr_err("PC 0x%x is outside block at PC 0x%x\n", pc, block->pc);