// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2019-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "disassembler.h"
#include "interpreter.h"
#include "lightrec-private.h"
#include "optimizer.h"
#include "regcache.h"

#include <stdbool.h>

struct interpreter;

static u32 int_CP0(struct interpreter *inter);
static u32 int_CP2(struct interpreter *inter);
static u32 int_SPECIAL(struct interpreter *inter);
static u32 int_REGIMM(struct interpreter *inter);
static u32 int_branch(struct interpreter *inter, u32 pc,
		      union code code, bool branch);

typedef u32 (*lightrec_int_func_t)(struct interpreter *inter);

static const lightrec_int_func_t int_standard[64];

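/* Per-invocation interpreter context: the lightrec state, the block being
 * executed, a cursor (op/offset) into its opcode list, the number of cycles
 * elapsed since the cycle counter was last synced, and whether we are
 * currently executing a branch's delay slot. */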
struct interpreter {
	struct lightrec_state *state;
	struct block *block;
	struct opcode *op;
	u32 cycles;
	bool delay_slot;
	u16 offset;
};

static u32 int_get_branch_pc(const struct interpreter *inter)
{
	return get_branch_pc(inter->block, inter->offset, 0);
}

static inline u32 int_get_ds_pc(const struct interpreter *inter, s16 imm)
{
	return get_ds_pc(inter->block, inter->offset, imm);
}

static inline struct opcode *next_op(const struct interpreter *inter)
{
	return &inter->block->opcode_list[inter->offset + 1];
}

static inline u32 execute(lightrec_int_func_t func, struct interpreter *inter)
{
	return (*func)(inter);
}

static inline u32 lightrec_int_op(struct interpreter *inter)
{
	return execute(int_standard[inter->op->i.op], inter);
}

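/* Advance to the next opcode in the block and execute it. If the opcode is
 * flagged SYNC, fold the pending cycle count into the state's cycle counter
 * first. */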
static inline u32 jump_skip(struct interpreter *inter)
{
	inter->op = next_op(inter);
	inter->offset++;

	if (op_flag_sync(inter->op->flags)) {
		inter->state->current_cycle += inter->cycles;
		inter->cycles = 0;
	}

	return lightrec_int_op(inter);
}

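/* Account for the current opcode's cycles, then continue with the next
 * opcode. When running inside a delay slot, the caller regains control
 * instead. jump_after_branch additionally skips over the delay slot
 * opcode, which the branch handler already executed. */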
static inline u32 jump_next(struct interpreter *inter)
{
	inter->cycles += lightrec_cycles_of_opcode(inter->op->c);

	if (unlikely(inter->delay_slot))
		return 0;

	return jump_skip(inter);
}

static inline u32 jump_after_branch(struct interpreter *inter)
{
	inter->cycles += lightrec_cycles_of_opcode(inter->op->c);

	if (unlikely(inter->delay_slot))
		return 0;

	inter->op = next_op(inter);
	inter->offset++;

	return jump_skip(inter);
}

static void update_cycles_before_branch(struct interpreter *inter)
{
	u32 cycles;

	if (!inter->delay_slot) {
		cycles = lightrec_cycles_of_opcode(inter->op->c);

		if (!op_flag_no_ds(inter->op->flags) &&
		    has_delay_slot(inter->op->c))
			cycles += lightrec_cycles_of_opcode(next_op(inter)->c);

		inter->cycles += cycles;
		inter->state->current_cycle += inter->cycles;
		inter->cycles = -cycles;
	}
}

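/* Evaluate a branch opcode against the current register values, without
 * executing it, and report whether it would be taken. Register-indirect
 * jumps (JR/JALR) and direct jumps (J/JAL) are always taken. */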
static bool is_branch_taken(const u32 *reg_cache, union code op)
{
	switch (op.i.op) {
	case OP_SPECIAL:
		return op.r.op == OP_SPECIAL_JR || op.r.op == OP_SPECIAL_JALR;
	case OP_J:
	case OP_JAL:
		return true;
	case OP_BEQ:
		return reg_cache[op.r.rs] == reg_cache[op.r.rt];
	case OP_BNE:
		return reg_cache[op.r.rs] != reg_cache[op.r.rt];
	case OP_REGIMM:
		switch (op.r.rt) {
		case OP_REGIMM_BLTZ:
		case OP_REGIMM_BLTZAL:
			return (s32)reg_cache[op.r.rs] < 0;
		case OP_REGIMM_BGEZ:
		case OP_REGIMM_BGEZAL:
			return (s32)reg_cache[op.r.rs] >= 0;
		}
		break;
	default:
		break;
	}

	return false;
}

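/* Execute the delay slot of a branch, and compute the address of the next
 * opcode to run. This is the tricky path: it also covers "impossible"
 * branches, i.e. a branch whose delay slot contains another branch or a
 * delayed load, in which case the first opcode at the target address may
 * have to be executed (or merely evaluated, if it is itself a branch)
 * before the delay slot opcode. */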
static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
{
	struct lightrec_state *state = inter->state;
	u32 *reg_cache = state->regs.gpr;
	struct opcode new_op, *op = next_op(inter);
	union code op_next;
	struct interpreter inter2 = {
		.state = state,
		.cycles = inter->cycles,
		.delay_slot = true,
		.block = NULL,
	};
	bool run_first_op = false, dummy_ld = false, save_rs = false,
	     load_in_ds, branch_in_ds = false, branch_at_addr = false,
	     branch_taken;
	u32 old_rs, new_rs, new_rt;
	u32 next_pc, ds_next_pc;
	u32 cause, epc;

	if (op->i.op == OP_CP0 && op->r.rs == OP_CP0_RFE) {
		/* When an IRQ happens, the PSX exception handlers (when done)
		 * will jump back to the instruction that was executed right
		 * before the IRQ, unless it was a GTE opcode; in that case, it
		 * jumps to the instruction right after.
		 * Since we will never handle the IRQ right after a GTE opcode,
		 * but on branch boundaries, we need to adjust the return
		 * address so that the GTE opcode is effectively executed.
		 */
		cause = state->regs.cp0[13];
		epc = state->regs.cp0[14];

		if (!(cause & 0x7c) && epc == pc - 4)
			pc -= 4;
	}

	if (inter->delay_slot) {
		/* The branch opcode was in a delay slot of another branch
		 * opcode. Just return the target address of the second
		 * branch. */
		return pc;
	}

	/* An opcode located in the delay slot performing a delayed read
	 * requires special handling; we will always resort to using the
	 * interpreter in that case.
	 * Same goes for when we have a branch in a delay slot of another
	 * branch. */
	load_in_ds = load_in_delay_slot(op->c);
	branch_in_ds = has_delay_slot(op->c);

	if (branch) {
		if (load_in_ds || branch_in_ds)
			op_next = lightrec_read_opcode(state, pc);

		if (load_in_ds) {
			/* Verify that the next block actually reads the
			 * destination register of the delay slot opcode. */
			run_first_op = opcode_reads_register(op_next, op->r.rt);
		}

		if (branch_in_ds) {
			run_first_op = true;
			next_pc = pc + 4;
		}

		if (load_in_ds && run_first_op) {
			next_pc = pc + 4;

			/* If the first opcode of the next block writes the
			 * register used as the address for the load, we need to
			 * reset to the old value after it has been executed,
			 * then restore the new value after the delay slot
			 * opcode has been executed. */
			save_rs = opcode_reads_register(op->c, op->r.rs) &&
				  opcode_writes_register(op_next, op->r.rs);
			if (save_rs)
				old_rs = reg_cache[op->r.rs];

			/* If both the first opcode of the next block and the
			 * delay slot opcode write to the same register, the
			 * value written by the delay slot opcode is
			 * discarded. */
			dummy_ld = opcode_writes_register(op_next, op->r.rt);
		}

		if (!run_first_op) {
			next_pc = pc;
		} else if (has_delay_slot(op_next)) {
			/* The first opcode of the next block is a branch, so we
			 * cannot execute it here, because of the load delay.
			 * Just check whether or not the branch would be taken,
			 * and save that info into the interpreter struct. */
			branch_at_addr = true;
			branch_taken = is_branch_taken(reg_cache, op_next);
			pr_debug("Target of impossible branch is a branch, "
				 "%staken.\n", branch_taken ? "" : "not ");
			inter->cycles += lightrec_cycles_of_opcode(op_next);
			old_rs = reg_cache[op_next.r.rs];
		} else {
			new_op.c = op_next;
			new_op.flags = 0;
			inter2.op = &new_op;

			/* Execute the first opcode of the next block */
			lightrec_int_op(&inter2);

			if (save_rs) {
				new_rs = reg_cache[op->r.rs];
				reg_cache[op->r.rs] = old_rs;
			}

			inter->cycles += lightrec_cycles_of_opcode(op_next);
		}
	} else {
		next_pc = int_get_ds_pc(inter, 2);
	}

	inter2.block = inter->block;
	inter2.op = op;
	inter2.cycles = inter->cycles;

	if (dummy_ld)
		new_rt = reg_cache[op->r.rt];

	/* Execute delay slot opcode */
	ds_next_pc = lightrec_int_op(&inter2);

	if (branch_at_addr) {
		if (op_next.i.op == OP_SPECIAL)
			/* TODO: Handle JALR setting $ra */
			ds_next_pc = old_rs;
		else if (op_next.i.op == OP_J || op_next.i.op == OP_JAL)
			/* TODO: Handle JAL setting $ra */
			ds_next_pc = (pc & 0xf0000000) | (op_next.j.imm << 2);
		else
			ds_next_pc = pc + 4 + ((s16)op_next.i.imm << 2);
	}

	if (branch_at_addr && !branch_taken) {
		/* If the branch at the target of the branch opcode is not
		 * taken, we jump to its delay slot */
		next_pc = pc + sizeof(u32);
	} else if (branch_at_addr || (!branch && branch_in_ds)) {
		next_pc = ds_next_pc;
	}

	if (save_rs)
		reg_cache[op->r.rs] = new_rs;
	if (dummy_ld)
		reg_cache[op->r.rt] = new_rt;

	inter->cycles += lightrec_cycles_of_opcode(op->c);

	if (branch_at_addr && branch_taken) {
		/* If the branch at the target of the branch opcode is taken,
		 * we execute its delay slot here, and jump to its target
		 * address. */
		op_next = lightrec_read_opcode(state, pc + 4);

		new_op.c = op_next;
		new_op.flags = 0;
		inter2.op = &new_op;
		inter2.block = NULL;

		inter->cycles += lightrec_cycles_of_opcode(op_next);

		pr_debug("Running delay slot of branch at target of impossible "
			 "branch\n");
		lightrec_int_op(&inter2);
	}

	return next_pc;
}

static u32 int_unimplemented(struct interpreter *inter)
{
	pr_warn("Unimplemented opcode 0x%08x\n", inter->op->opcode);

	return jump_next(inter);
}

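/* J/JAL: the target address keeps the top four bits of the PC and replaces
 * the rest with the 26-bit immediate shifted left by two. JAL additionally
 * stores the return address (branch PC + 8) in $ra. */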
static u32 int_jump(struct interpreter *inter, bool link)
{
	struct lightrec_state *state = inter->state;
	u32 old_pc = int_get_branch_pc(inter);
	u32 pc = (old_pc & 0xf0000000) | (inter->op->j.imm << 2);

	if (link)
		state->regs.gpr[31] = old_pc + 8;

	if (op_flag_no_ds(inter->op->flags))
		return pc;

	return int_delay_slot(inter, pc, true);
}

static u32 int_J(struct interpreter *inter)
{
	return int_jump(inter, false);
}

static u32 int_JAL(struct interpreter *inter)
{
	return int_jump(inter, true);
}

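/* JR/JALR: jump to the address held in $rs. For JALR, link_reg is the
 * destination register that receives the return address; for JR it is 0
 * ($zero), so no link is written. */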
static u32 int_jumpr(struct interpreter *inter, u8 link_reg)
{
	struct lightrec_state *state = inter->state;
	u32 old_pc = int_get_branch_pc(inter);
	u32 next_pc = state->regs.gpr[inter->op->r.rs];

	if (op_flag_emulate_branch(inter->op->flags) && inter->offset) {
		inter->cycles -= lightrec_cycles_of_opcode(inter->op->c);
		return old_pc;
	}

	if (link_reg)
		state->regs.gpr[link_reg] = old_pc + 8;

	if (op_flag_no_ds(inter->op->flags))
		return next_pc;

	return int_delay_slot(inter, next_pc, true);
}

static u32 int_special_JR(struct interpreter *inter)
{
	return int_jumpr(inter, 0);
}

static u32 int_special_JALR(struct interpreter *inter)
{
	return int_jumpr(inter, inter->op->r.rd);
}

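/* Taken branches that stay inside the current block and jump forward are
 * resolved by recursing into lightrec_emulate_block() at the target offset,
 * which keeps execution within the already-decoded opcode list. */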
static u32 int_do_branch(struct interpreter *inter, u32 old_pc, u32 next_pc)
{
	if (!inter->delay_slot && op_flag_local_branch(inter->op->flags) &&
	    (s16)inter->op->c.i.imm >= 0) {
		next_pc = old_pc + ((1 + (s16)inter->op->c.i.imm) << 2);
		next_pc = lightrec_emulate_block(inter->state, inter->block, next_pc);
	}

	return next_pc;
}

static u32 int_branch(struct interpreter *inter, u32 pc,
		      union code code, bool branch)
{
	u32 next_pc = pc + 4 + ((s16)code.i.imm << 2);

	if (op_flag_emulate_branch(inter->op->flags) && inter->offset) {
		inter->cycles -= lightrec_cycles_of_opcode(inter->op->c);
		return pc;
	}

	update_cycles_before_branch(inter);

	if (op_flag_no_ds(inter->op->flags)) {
		if (branch)
			return int_do_branch(inter, pc, next_pc);
		else
			return jump_next(inter);
	}

	if (!inter->delay_slot)
		next_pc = int_delay_slot(inter, next_pc, branch);

	if (branch)
		return int_do_branch(inter, pc, next_pc);

	if (op_flag_emulate_branch(inter->op->flags))
		return pc + 8;

	return jump_after_branch(inter);
}

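/* BEQ and BNE share one implementation; the bne flag simply inverts the
 * equality test. */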
static u32 int_beq(struct interpreter *inter, bool bne)
{
	u32 rs, rt, old_pc = int_get_branch_pc(inter);

	rs = inter->state->regs.gpr[inter->op->i.rs];
	rt = inter->state->regs.gpr[inter->op->i.rt];

	return int_branch(inter, old_pc, inter->op->c, (rs == rt) ^ bne);
}

static u32 int_BEQ(struct interpreter *inter)
{
	return int_beq(inter, false);
}

static u32 int_BNE(struct interpreter *inter)
{
	return int_beq(inter, true);
}

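/* Shared implementation for the sign-test branches. The condition
 * ((regimm && !rs) || rs > 0) ^ lt evaluates "rs >= 0" for the REGIMM
 * opcodes (BLTZ/BGEZ and their linking variants) and "rs > 0" for
 * BLEZ/BGTZ, with lt inverting the result. */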
static u32 int_bgez(struct interpreter *inter, bool link, bool lt, bool regimm)
{
	u32 old_pc = int_get_branch_pc(inter);
	s32 rs;

	if (link)
		inter->state->regs.gpr[31] = old_pc + 8;

	rs = (s32)inter->state->regs.gpr[inter->op->i.rs];

	return int_branch(inter, old_pc, inter->op->c,
			  ((regimm && !rs) || rs > 0) ^ lt);
}

static u32 int_regimm_BLTZ(struct interpreter *inter)
{
	return int_bgez(inter, false, true, true);
}

static u32 int_regimm_BGEZ(struct interpreter *inter)
{
	return int_bgez(inter, false, false, true);
}

static u32 int_regimm_BLTZAL(struct interpreter *inter)
{
	return int_bgez(inter, true, true, true);
}

static u32 int_regimm_BGEZAL(struct interpreter *inter)
{
	return int_bgez(inter, true, false, true);
}

static u32 int_BLEZ(struct interpreter *inter)
{
	return int_bgez(inter, false, true, false);
}

static u32 int_BGTZ(struct interpreter *inter)
{
	return int_bgez(inter, false, false, false);
}

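/* Coprocessor register moves. int_cfc covers MFC/CFC (read a coprocessor
 * register into a GPR), int_ctc covers MTC/CTC (write a GPR into a
 * coprocessor register); the actual transfers go through lightrec_mfc()
 * and lightrec_mtc(). */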
static u32 int_cfc(struct interpreter *inter)
{
	struct lightrec_state *state = inter->state;
	const struct opcode *op = inter->op;
	u32 val;

	val = lightrec_mfc(state, op->c);

	if (likely(op->r.rt))
		state->regs.gpr[op->r.rt] = val;

	return jump_next(inter);
}

static u32 int_ctc(struct interpreter *inter)
{
	struct lightrec_state *state = inter->state;
	const struct opcode *op = inter->op;

	lightrec_mtc(state, op->c, state->regs.gpr[op->r.rt]);

	/* If we have an MTC0 or CTC0 to CP0 register 12 (Status) or 13 (Cause),
	 * return early so that the emulator will be able to check software
	 * interrupt status. */
	if (!op_flag_no_ds(inter->op->flags) &&
	    op->i.op == OP_CP0 && (op->r.rd == 12 || op->r.rd == 13))
		return int_get_ds_pc(inter, 1);

	return jump_next(inter);
}

static u32 int_cp0_RFE(struct interpreter *inter)
{
	lightrec_rfe(inter->state);

	return jump_next(inter);
}

static u32 int_CP(struct interpreter *inter)
{
	lightrec_cp(inter->state, inter->op->c);

	return jump_next(inter);
}

static u32 int_ADDI(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		reg_cache[op->rt] = reg_cache[op->rs] + (s32)(s16)op->imm;

	return jump_next(inter);
}

static u32 int_SLTI(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		reg_cache[op->rt] = (s32)reg_cache[op->rs] < (s32)(s16)op->imm;

	return jump_next(inter);
}

static u32 int_SLTIU(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		reg_cache[op->rt] = reg_cache[op->rs] < (u32)(s32)(s16)op->imm;

	return jump_next(inter);
}

static u32 int_ANDI(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		reg_cache[op->rt] = reg_cache[op->rs] & op->imm;

	return jump_next(inter);
}

static u32 int_ORI(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		reg_cache[op->rt] = reg_cache[op->rs] | op->imm;

	return jump_next(inter);
}

static u32 int_XORI(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		reg_cache[op->rt] = reg_cache[op->rs] ^ op->imm;

	return jump_next(inter);
}

static u32 int_LUI(struct interpreter *inter)
{
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		inter->state->regs.gpr[op->rt] = op->imm << 16;

	return jump_next(inter);
}

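/* Memory accesses are delegated to lightrec_rw(), which performs the
 * address translation and the actual read or write; for loads, the
 * returned value is written back to the target GPR unless that GPR is
 * $zero. */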
static u32 int_io(struct interpreter *inter, bool is_load)
{
	struct opcode_i *op = &inter->op->i;
	u32 *reg_cache = inter->state->regs.gpr;
	u32 val;

	val = lightrec_rw(inter->state, inter->op->c,
			  reg_cache[op->rs], reg_cache[op->rt],
			  &inter->op->flags, inter->block);

	if (is_load && op->rt)
		reg_cache[op->rt] = val;

	return jump_next(inter);
}

static u32 int_load(struct interpreter *inter)
{
	return int_io(inter, true);
}

static u32 int_store(struct interpreter *inter)
{
	u32 next_pc;

	if (likely(!op_flag_smc(inter->op->flags)))
		return int_io(inter, false);

	lightrec_rw(inter->state, inter->op->c,
		    inter->state->regs.gpr[inter->op->i.rs],
		    inter->state->regs.gpr[inter->op->i.rt],
		    &inter->op->flags, inter->block);

	next_pc = int_get_ds_pc(inter, 1);

	/* Invalidate next PC, to force the rest of the block to be rebuilt */
	lightrec_invalidate(inter->state, next_pc, 4);

	return next_pc;
}

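/* LWC2 loads into a GTE (CP2) register rather than a GPR, so it goes
 * through int_io() with is_load set to false: the GPR write-back is
 * skipped, and the coprocessor-side write is left to lightrec_rw(). */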
static u32 int_LWC2(struct interpreter *inter)
{
	return int_io(inter, false);
}

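/* Shift opcodes: the immediate forms take the shift amount from the imm
 * field, the variable forms from the low five bits of $rs. An all-zero
 * SLL opcode word is the canonical MIPS NOP and is skipped entirely. */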
static u32 int_special_SLL(struct interpreter *inter)
{
	struct opcode *op = inter->op;
	u32 rt;

	if (op->opcode) { /* Handle NOPs */
		rt = inter->state->regs.gpr[op->r.rt];
		inter->state->regs.gpr[op->r.rd] = rt << op->r.imm;
	}

	return jump_next(inter);
}

static u32 int_special_SRL(struct interpreter *inter)
{
	struct opcode *op = inter->op;
	u32 rt = inter->state->regs.gpr[op->r.rt];

	inter->state->regs.gpr[op->r.rd] = rt >> op->r.imm;

	return jump_next(inter);
}

static u32 int_special_SRA(struct interpreter *inter)
{
	struct opcode *op = inter->op;
	s32 rt = inter->state->regs.gpr[op->r.rt];

	inter->state->regs.gpr[op->r.rd] = rt >> op->r.imm;

	return jump_next(inter);
}

static u32 int_special_SLLV(struct interpreter *inter)
{
	struct opcode *op = inter->op;
	u32 rs = inter->state->regs.gpr[op->r.rs];
	u32 rt = inter->state->regs.gpr[op->r.rt];

	inter->state->regs.gpr[op->r.rd] = rt << (rs & 0x1f);

	return jump_next(inter);
}

static u32 int_special_SRLV(struct interpreter *inter)
{
	struct opcode *op = inter->op;
	u32 rs = inter->state->regs.gpr[op->r.rs];
	u32 rt = inter->state->regs.gpr[op->r.rt];

	inter->state->regs.gpr[op->r.rd] = rt >> (rs & 0x1f);

	return jump_next(inter);
}

static u32 int_special_SRAV(struct interpreter *inter)
{
	struct opcode *op = inter->op;
	u32 rs = inter->state->regs.gpr[op->r.rs];
	s32 rt = inter->state->regs.gpr[op->r.rt];

	inter->state->regs.gpr[op->r.rd] = rt >> (rs & 0x1f);

	return jump_next(inter);
}

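/* SYSCALL and BREAK cannot be handled inline: set the matching exit flag
 * and return the PC of the offending instruction, so that the caller can
 * raise the exception. */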
static u32 int_syscall_break(struct interpreter *inter)
{
	if (inter->op->r.op == OP_SPECIAL_BREAK)
		inter->state->exit_flags |= LIGHTREC_EXIT_BREAK;
	else
		inter->state->exit_flags |= LIGHTREC_EXIT_SYSCALL;

	return int_get_ds_pc(inter, 0);
}

static u32 int_special_MFHI(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;

	if (likely(op->rd))
		reg_cache[op->rd] = reg_cache[REG_HI];

	return jump_next(inter);
}

static u32 int_special_MTHI(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;

	reg_cache[REG_HI] = reg_cache[inter->op->r.rs];

	return jump_next(inter);
}

static u32 int_special_MFLO(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;

	if (likely(op->rd))
		reg_cache[op->rd] = reg_cache[REG_LO];

	return jump_next(inter);
}

static u32 int_special_MTLO(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;

	reg_cache[REG_LO] = reg_cache[inter->op->r.rs];

	return jump_next(inter);
}

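/* Multiplication and division produce a 64-bit result split across HI and
 * LO. The optimizer may have remapped those registers or flagged one half
 * of the result as unused, hence the get_mult_div_lo()/get_mult_div_hi()
 * lookups and the op_flag_no_hi()/op_flag_no_lo() checks. */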
static u32 int_special_MULT(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	s32 rs = reg_cache[inter->op->r.rs];
	s32 rt = reg_cache[inter->op->r.rt];
	u8 reg_lo = get_mult_div_lo(inter->op->c);
	u8 reg_hi = get_mult_div_hi(inter->op->c);
	u64 res = (s64)rs * (s64)rt;

	if (!op_flag_no_hi(inter->op->flags))
		reg_cache[reg_hi] = res >> 32;
	if (!op_flag_no_lo(inter->op->flags))
		reg_cache[reg_lo] = res;

	return jump_next(inter);
}

static u32 int_special_MULTU(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	u32 rs = reg_cache[inter->op->r.rs];
	u32 rt = reg_cache[inter->op->r.rt];
	u8 reg_lo = get_mult_div_lo(inter->op->c);
	u8 reg_hi = get_mult_div_hi(inter->op->c);
	u64 res = (u64)rs * (u64)rt;

	if (!op_flag_no_hi(inter->op->flags))
		reg_cache[reg_hi] = res >> 32;
	if (!op_flag_no_lo(inter->op->flags))
		reg_cache[reg_lo] = res;

	return jump_next(inter);
}

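/* MIPS division by zero does not trap; it leaves the dividend in HI and a
 * fixed quotient in LO: -1 for DIVU, and for DIV -1 when the dividend is
 * non-negative or +1 when it is negative. The expression
 * (rs < 0) * 2 - 1 below encodes that rule branchlessly. */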
static u32 int_special_DIV(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	s32 rs = reg_cache[inter->op->r.rs];
	s32 rt = reg_cache[inter->op->r.rt];
	u8 reg_lo = get_mult_div_lo(inter->op->c);
	u8 reg_hi = get_mult_div_hi(inter->op->c);
	u32 lo, hi;

	if (rt == 0) {
		hi = rs;
		lo = (rs < 0) * 2 - 1;
	} else {
		lo = rs / rt;
		hi = rs % rt;
	}

	if (!op_flag_no_hi(inter->op->flags))
		reg_cache[reg_hi] = hi;
	if (!op_flag_no_lo(inter->op->flags))
		reg_cache[reg_lo] = lo;

	return jump_next(inter);
}

static u32 int_special_DIVU(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	u32 rs = reg_cache[inter->op->r.rs];
	u32 rt = reg_cache[inter->op->r.rt];
	u8 reg_lo = get_mult_div_lo(inter->op->c);
	u8 reg_hi = get_mult_div_hi(inter->op->c);
	u32 lo, hi;

	if (rt == 0) {
		hi = rs;
		lo = (u32)-1;
	} else {
		lo = rs / rt;
		hi = rs % rt;
	}

	if (!op_flag_no_hi(inter->op->flags))
		reg_cache[reg_hi] = hi;
	if (!op_flag_no_lo(inter->op->flags))
		reg_cache[reg_lo] = lo;

	return jump_next(inter);
}

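/* Three-register ALU opcodes. ADD/ADDU and SUB/SUBU share handlers since
 * the overflow exception is not emulated; writes to $zero are discarded
 * by the rd guard. */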
static u32 int_special_ADD(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;
	s32 rs = reg_cache[op->rs];
	s32 rt = reg_cache[op->rt];

	if (likely(op->rd))
		reg_cache[op->rd] = rs + rt;

	return jump_next(inter);
}

static u32 int_special_SUB(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;
	u32 rs = reg_cache[op->rs];
	u32 rt = reg_cache[op->rt];

	if (likely(op->rd))
		reg_cache[op->rd] = rs - rt;

	return jump_next(inter);
}

static u32 int_special_AND(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;
	u32 rs = reg_cache[op->rs];
	u32 rt = reg_cache[op->rt];

	if (likely(op->rd))
		reg_cache[op->rd] = rs & rt;

	return jump_next(inter);
}

static u32 int_special_OR(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;
	u32 rs = reg_cache[op->rs];
	u32 rt = reg_cache[op->rt];

	if (likely(op->rd))
		reg_cache[op->rd] = rs | rt;

	return jump_next(inter);
}

static u32 int_special_XOR(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;
	u32 rs = reg_cache[op->rs];
	u32 rt = reg_cache[op->rt];

	if (likely(op->rd))
		reg_cache[op->rd] = rs ^ rt;

	return jump_next(inter);
}

static u32 int_special_NOR(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;
	u32 rs = reg_cache[op->rs];
	u32 rt = reg_cache[op->rt];

	if (likely(op->rd))
		reg_cache[op->rd] = ~(rs | rt);

	return jump_next(inter);
}

static u32 int_special_SLT(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;
	s32 rs = reg_cache[op->rs];
	s32 rt = reg_cache[op->rt];

	if (likely(op->rd))
		reg_cache[op->rd] = rs < rt;

	return jump_next(inter);
}

static u32 int_special_SLTU(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;
	u32 rs = reg_cache[op->rs];
	u32 rt = reg_cache[op->rt];

	if (likely(op->rd))
		reg_cache[op->rd] = rs < rt;

	return jump_next(inter);
}

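/* Meta opcodes are not part of the MIPS ISA; the optimizer synthesizes
 * them from common patterns: plain register moves, and sign-extension of
 * bytes (EXTC) or halfwords (EXTS). */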
static u32 int_META_MOV(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_r *op = &inter->op->r;

	if (likely(op->rd))
		reg_cache[op->rd] = reg_cache[op->rs];

	return jump_next(inter);
}

static u32 int_META_EXTC(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		reg_cache[op->rt] = (u32)(s32)(s8)reg_cache[op->rs];

	return jump_next(inter);
}

static u32 int_META_EXTS(struct interpreter *inter)
{
	u32 *reg_cache = inter->state->regs.gpr;
	struct opcode_i *op = &inter->op->i;

	if (likely(op->rt))
		reg_cache[op->rt] = (u32)(s32)(s16)reg_cache[op->rs];

	return jump_next(inter);
}

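/* Dispatch tables, indexed by the opcode's primary or secondary field.
 * SET_DEFAULT_ELM() pre-fills every slot with a fallback handler on
 * toolchains that support it; otherwise the NULL slots are caught at the
 * call sites via HAS_DEFAULT_ELM. */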
static const lightrec_int_func_t int_standard[64] = {
	SET_DEFAULT_ELM(int_standard, int_unimplemented),
	[OP_SPECIAL] = int_SPECIAL,
	[OP_REGIMM] = int_REGIMM,
	[OP_J] = int_J,
	[OP_JAL] = int_JAL,
	[OP_BEQ] = int_BEQ,
	[OP_BNE] = int_BNE,
	[OP_BLEZ] = int_BLEZ,
	[OP_BGTZ] = int_BGTZ,
	[OP_ADDI] = int_ADDI,
	[OP_ADDIU] = int_ADDI,
	[OP_SLTI] = int_SLTI,
	[OP_SLTIU] = int_SLTIU,
	[OP_ANDI] = int_ANDI,
	[OP_ORI] = int_ORI,
	[OP_XORI] = int_XORI,
	[OP_LUI] = int_LUI,
	[OP_CP0] = int_CP0,
	[OP_CP2] = int_CP2,
	[OP_LB] = int_load,
	[OP_LH] = int_load,
	[OP_LWL] = int_load,
	[OP_LW] = int_load,
	[OP_LBU] = int_load,
	[OP_LHU] = int_load,
	[OP_LWR] = int_load,
	[OP_SB] = int_store,
	[OP_SH] = int_store,
	[OP_SWL] = int_store,
	[OP_SW] = int_store,
	[OP_SWR] = int_store,
	[OP_LWC2] = int_LWC2,
	[OP_SWC2] = int_store,

	[OP_META_MOV] = int_META_MOV,
	[OP_META_EXTC] = int_META_EXTC,
	[OP_META_EXTS] = int_META_EXTS,
};

static const lightrec_int_func_t int_special[64] = {
	SET_DEFAULT_ELM(int_special, int_unimplemented),
	[OP_SPECIAL_SLL] = int_special_SLL,
	[OP_SPECIAL_SRL] = int_special_SRL,
	[OP_SPECIAL_SRA] = int_special_SRA,
	[OP_SPECIAL_SLLV] = int_special_SLLV,
	[OP_SPECIAL_SRLV] = int_special_SRLV,
	[OP_SPECIAL_SRAV] = int_special_SRAV,
	[OP_SPECIAL_JR] = int_special_JR,
	[OP_SPECIAL_JALR] = int_special_JALR,
	[OP_SPECIAL_SYSCALL] = int_syscall_break,
	[OP_SPECIAL_BREAK] = int_syscall_break,
	[OP_SPECIAL_MFHI] = int_special_MFHI,
	[OP_SPECIAL_MTHI] = int_special_MTHI,
	[OP_SPECIAL_MFLO] = int_special_MFLO,
	[OP_SPECIAL_MTLO] = int_special_MTLO,
	[OP_SPECIAL_MULT] = int_special_MULT,
	[OP_SPECIAL_MULTU] = int_special_MULTU,
	[OP_SPECIAL_DIV] = int_special_DIV,
	[OP_SPECIAL_DIVU] = int_special_DIVU,
	[OP_SPECIAL_ADD] = int_special_ADD,
	[OP_SPECIAL_ADDU] = int_special_ADD,
	[OP_SPECIAL_SUB] = int_special_SUB,
	[OP_SPECIAL_SUBU] = int_special_SUB,
	[OP_SPECIAL_AND] = int_special_AND,
	[OP_SPECIAL_OR] = int_special_OR,
	[OP_SPECIAL_XOR] = int_special_XOR,
	[OP_SPECIAL_NOR] = int_special_NOR,
	[OP_SPECIAL_SLT] = int_special_SLT,
	[OP_SPECIAL_SLTU] = int_special_SLTU,
};

static const lightrec_int_func_t int_regimm[64] = {
	SET_DEFAULT_ELM(int_regimm, int_unimplemented),
	[OP_REGIMM_BLTZ] = int_regimm_BLTZ,
	[OP_REGIMM_BGEZ] = int_regimm_BGEZ,
	[OP_REGIMM_BLTZAL] = int_regimm_BLTZAL,
	[OP_REGIMM_BGEZAL] = int_regimm_BGEZAL,
};

static const lightrec_int_func_t int_cp0[64] = {
	SET_DEFAULT_ELM(int_cp0, int_CP),
	[OP_CP0_MFC0] = int_cfc,
	[OP_CP0_CFC0] = int_cfc,
	[OP_CP0_MTC0] = int_ctc,
	[OP_CP0_CTC0] = int_ctc,
	[OP_CP0_RFE] = int_cp0_RFE,
};

static const lightrec_int_func_t int_cp2_basic[64] = {
	SET_DEFAULT_ELM(int_cp2_basic, int_CP),
	[OP_CP2_BASIC_MFC2] = int_cfc,
	[OP_CP2_BASIC_CFC2] = int_cfc,
	[OP_CP2_BASIC_MTC2] = int_ctc,
	[OP_CP2_BASIC_CTC2] = int_ctc,
};

static u32 int_SPECIAL(struct interpreter *inter)
{
	lightrec_int_func_t f = int_special[inter->op->r.op];

	if (!HAS_DEFAULT_ELM && unlikely(!f))
		return int_unimplemented(inter);

	return execute(f, inter);
}

static u32 int_REGIMM(struct interpreter *inter)
{
	lightrec_int_func_t f = int_regimm[inter->op->r.rt];

	if (!HAS_DEFAULT_ELM && unlikely(!f))
		return int_unimplemented(inter);

	return execute(f, inter);
}

static u32 int_CP0(struct interpreter *inter)
{
	lightrec_int_func_t f = int_cp0[inter->op->r.rs];

	if (!HAS_DEFAULT_ELM && unlikely(!f))
		return int_CP(inter);

	return execute(f, inter);
}

static u32 int_CP2(struct interpreter *inter)
{
	if (inter->op->r.op == OP_CP2_BASIC) {
		lightrec_int_func_t f = int_cp2_basic[inter->op->r.rs];

		if (HAS_DEFAULT_ELM || likely(f))
			return execute(f, inter);
	}

	return int_CP(inter);
}

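/* Interpret a block starting at the given offset into its opcode list,
 * then fold the cycles spent (including those of the final branch) into
 * the state's cycle counter. Returns the PC to continue execution at. */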
static u32 lightrec_emulate_block_list(struct lightrec_state *state,
				       struct block *block, u32 offset)
{
	struct interpreter inter;
	u32 pc;

	inter.block = block;
	inter.state = state;
	inter.offset = offset;
	inter.op = &block->opcode_list[offset];
	inter.cycles = 0;
	inter.delay_slot = false;

	pc = lightrec_int_op(&inter);

	/* Add the cycles of the last branch */
	inter.cycles += lightrec_cycles_of_opcode(inter.op->c);

	state->current_cycle += inter.cycles;

	return pc;
}

u32 lightrec_emulate_block(struct lightrec_state *state, struct block *block, u32 pc)
{
	u32 offset = (kunseg(pc) - kunseg(block->pc)) >> 2;

	if (offset < block->nb_ops)
		return lightrec_emulate_block_list(state, block, offset);

	pr_err("PC 0x%x is outside block at PC 0x%x\n", pc, block->pc);

	return 0;
}