1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3  * Copyright (C) 2019-2021 Paul Cercueil <paul@crapouillou.net>
4  */
5
6 #include "disassembler.h"
7 #include "interpreter.h"
8 #include "lightrec-private.h"
9 #include "optimizer.h"
10 #include "regcache.h"
11
12 #include <stdbool.h>
13
14 struct interpreter;
15
16 static u32 int_CP0(struct interpreter *inter);
17 static u32 int_CP2(struct interpreter *inter);
18 static u32 int_SPECIAL(struct interpreter *inter);
19 static u32 int_META(struct interpreter *inter);
20 static u32 int_REGIMM(struct interpreter *inter);
21 static u32 int_branch(struct interpreter *inter, u32 pc,
22                       union code code, bool branch);
23
24 typedef u32 (*lightrec_int_func_t)(struct interpreter *inter);
25
26 static const lightrec_int_func_t int_standard[64];
27
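/* Context for one interpreter run. 'op' points at index 'offset' into
 * the opcode list of 'block'; 'cycles' counts the cycles elapsed since
 * the last sync point and is flushed to state->current_cycle when a
 * sync-flagged opcode is reached. 'delay_slot' is set while an opcode
 * inside a delay slot is being interpreted. */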
28 struct interpreter {
29         struct lightrec_state *state;
30         struct block *block;
31         struct opcode *op;
32         u32 cycles;
33         bool delay_slot;
34         u16 offset;
35 };
36
37 static u32 int_get_branch_pc(const struct interpreter *inter)
38 {
39         return get_branch_pc(inter->block, inter->offset, 0);
40 }
41
42 static inline u32 int_get_ds_pc(const struct interpreter *inter, s16 imm)
43 {
44         return get_ds_pc(inter->block, inter->offset, imm);
45 }
46
47 static inline struct opcode *next_op(const struct interpreter *inter)
48 {
49         return &inter->op[1];
50 }
51
52 static inline u32 execute(lightrec_int_func_t func, struct interpreter *inter)
53 {
54         return (*func)(inter);
55 }
56
57 static inline u32 lightrec_int_op(struct interpreter *inter)
58 {
59         return execute(int_standard[inter->op->i.op], inter);
60 }
61
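/* Advance to the next opcode of the block and execute it. Opcodes
 * carrying the sync flag flush the accumulated cycle count to
 * state->current_cycle before being executed. */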
62 static inline u32 jump_skip(struct interpreter *inter)
63 {
64         inter->op = next_op(inter);
65         inter->offset++;
66
67         if (op_flag_sync(inter->op->flags)) {
68                 inter->state->current_cycle += inter->cycles;
69                 inter->cycles = 0;
70         }
71
72         return lightrec_int_op(inter);
73 }
74
75 static inline u32 jump_next(struct interpreter *inter)
76 {
77         inter->cycles += lightrec_cycles_of_opcode(inter->op->c);
78
79         if (unlikely(inter->delay_slot))
80                 return 0;
81
82         return jump_skip(inter);
83 }
84
85 static inline u32 jump_after_branch(struct interpreter *inter)
86 {
87         inter->cycles += lightrec_cycles_of_opcode(inter->op->c);
88
89         if (unlikely(inter->delay_slot))
90                 return 0;
91
92         inter->op = next_op(inter);
93         inter->offset++;
94
95         return jump_skip(inter);
96 }
97
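/* Flush the accumulated cycles, including those of the branch opcode
 * and of its delay slot (if any), to state->current_cycle. 'cycles' is
 * then set to the negated amount, so that the regular per-opcode cycle
 * accounting that happens afterwards does not count the branch and its
 * delay slot twice. */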
98 static void update_cycles_before_branch(struct interpreter *inter)
99 {
100         u32 cycles;
101
102         if (!inter->delay_slot) {
103                 cycles = lightrec_cycles_of_opcode(inter->op->c);
104
105                 if (!op_flag_no_ds(inter->op->flags) &&
106                     has_delay_slot(inter->op->c))
107                         cycles += lightrec_cycles_of_opcode(next_op(inter)->c);
108
109                 inter->cycles += cycles;
110                 inter->state->current_cycle += inter->cycles;
111                 inter->cycles = -cycles;
112         }
113 }
114
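/* Evaluate a branch condition against the current register values,
 * without executing the branch. Unconditional jumps (J/JAL, and JR/JALR
 * in the SPECIAL group) always report as taken. */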
115 static bool is_branch_taken(const u32 *reg_cache, union code op)
116 {
117         switch (op.i.op) {
118         case OP_SPECIAL:
119                 return op.r.op == OP_SPECIAL_JR || op.r.op == OP_SPECIAL_JALR;
120         case OP_J:
121         case OP_JAL:
122                 return true;
123         case OP_BEQ:
124                 return reg_cache[op.r.rs] == reg_cache[op.r.rt];
125         case OP_BNE:
126                 return reg_cache[op.r.rs] != reg_cache[op.r.rt];
127         case OP_REGIMM:
128                 switch (op.r.rt) {
129                 case OP_REGIMM_BLTZ:
130                 case OP_REGIMM_BLTZAL:
131                         return (s32)reg_cache[op.r.rs] < 0;
132                 case OP_REGIMM_BGEZ:
133                 case OP_REGIMM_BGEZAL:
134                         return (s32)reg_cache[op.r.rs] >= 0;
135                 }
136         default:
137                 break;
138         }
139
140         return false;
141 }
142
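/* Interpret the delay slot of the branch pointed to by inter->op, and
 * return the address at which execution should resume. The complex
 * cases are the "impossible" branches: a delayed load in the delay slot
 * whose target register is read by the first opcode of the next block,
 * or a branch located in the delay slot of another branch. In those
 * cases the first opcode at the target address is executed (or, if it
 * is itself a branch, only evaluated) right here, with the load-delay
 * semantics emulated by saving and restoring the affected registers. */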
143 static u32 int_delay_slot(struct interpreter *inter, u32 pc, bool branch)
144 {
145         struct lightrec_state *state = inter->state;
146         u32 *reg_cache = state->regs.gpr;
147         struct opcode new_op, *op = next_op(inter);
148         union code op_next;
149         struct interpreter inter2 = {
150                 .state = state,
151                 .cycles = inter->cycles,
152                 .delay_slot = true,
153                 .block = NULL,
154         };
155         bool run_first_op = false, dummy_ld = false, save_rs = false,
156              load_in_ds, branch_in_ds = false, branch_at_addr = false,
157              branch_taken;
158         u32 old_rs, new_rs, new_rt;
159         u32 next_pc, ds_next_pc;
160         u32 cause, epc;
161
162         if (op->i.op == OP_CP0 && op->r.rs == OP_CP0_RFE) {
163                 /* When an IRQ happens, the PSX exception handlers (when done)
164                  * will jump back to the instruction that was executed right
165                  * before the IRQ, unless it was a GTE opcode; in that case, it
166                  * jumps to the instruction right after.
167                  * Since we only handle IRQs at branch boundaries, never
168                  * right after a GTE opcode, we need to adjust the return
169                  * address so that the GTE opcode is effectively executed.
170                  */
171                 cause = state->regs.cp0[13];
172                 epc = state->regs.cp0[14];
173
174                 if (!(cause & 0x7c) && epc == pc - 4)
175                         pc -= 4;
176         }
177
178         if (inter->delay_slot) {
179                 /* The branch opcode was in a delay slot of another branch
180                  * opcode. Just return the target address of the second
181                  * branch. */
182                 return pc;
183         }
184
185         /* An opcode located in the delay slot performing a delayed read
186          * requires special handling; we will always resort to using the
187          * interpreter in that case.
188          * Same goes for when we have a branch in a delay slot of another
189          * branch. */
190         load_in_ds = opcode_is_load(op->c) || opcode_is_mfc(op->c);
191         branch_in_ds = has_delay_slot(op->c);
192
193         if (branch) {
194                 if (load_in_ds || branch_in_ds)
195                         op_next = lightrec_read_opcode(state, pc);
196
197                 if (load_in_ds) {
198                         /* Verify that the next block actually reads the
199                          * destination register of the delay slot opcode. */
200                         run_first_op = opcode_reads_register(op_next, op->r.rt);
201                 }
202
203                 if (branch_in_ds) {
204                         run_first_op = true;
205                         next_pc = pc + 4;
206                 }
207
208                 if (load_in_ds && run_first_op) {
209                         next_pc = pc + 4;
210
211                         /* If the first opcode of the next block writes the
212          * register used as the address for the load, we need to
213                          * reset to the old value after it has been executed,
214                          * then restore the new value after the delay slot
215                          * opcode has been executed. */
216                         save_rs = opcode_reads_register(op->c, op->r.rs) &&
217                                 opcode_writes_register(op_next, op->r.rs);
218                         if (save_rs)
219                                 old_rs = reg_cache[op->r.rs];
220
221                         /* If both the first opcode of the next block and the
222                          * delay slot opcode write to the same register, the
223                          * value written by the delay slot opcode is
224                          * discarded. */
225                         dummy_ld = opcode_writes_register(op_next, op->r.rt);
226                 }
227
228                 if (!run_first_op) {
229                         next_pc = pc;
230                 } else if (has_delay_slot(op_next)) {
231                         /* The first opcode of the next block is a branch, so we
232                          * cannot execute it here, because of the load delay.
233                          * Just check whether or not the branch would be taken,
234                          * and save that info into the interpreter struct. */
235                         branch_at_addr = true;
236                         branch_taken = is_branch_taken(reg_cache, op_next);
237                         pr_debug("Target of impossible branch is a branch, "
238                                  "%staken.\n", branch_taken ? "" : "not ");
239                         inter->cycles += lightrec_cycles_of_opcode(op_next);
240                         old_rs = reg_cache[op_next.r.rs];
241                 } else {
242                         new_op.c = op_next;
243                         new_op.flags = 0;
244                         inter2.op = &new_op;
245                         inter2.offset = 0;
246
247                         /* Execute the first opcode of the next block */
248                         lightrec_int_op(&inter2);
249
250                         if (save_rs) {
251                                 new_rs = reg_cache[op->r.rs];
252                                 reg_cache[op->r.rs] = old_rs;
253                         }
254
255                         inter->cycles += lightrec_cycles_of_opcode(op_next);
256                 }
257         } else {
258                 next_pc = int_get_ds_pc(inter, 2);
259         }
260
261         inter2.block = inter->block;
262         inter2.op = op;
263         inter2.cycles = inter->cycles;
264         inter2.offset = inter->offset + 1;
265
266         if (dummy_ld)
267                 new_rt = reg_cache[op->r.rt];
268
269         /* Execute delay slot opcode */
270         ds_next_pc = lightrec_int_op(&inter2);
271
272         if (branch_at_addr) {
273                 if (op_next.i.op == OP_SPECIAL)
274                         /* TODO: Handle JALR setting $ra */
275                         ds_next_pc = old_rs;
276                 else if (op_next.i.op == OP_J || op_next.i.op == OP_JAL)
277                         /* TODO: Handle JAL setting $ra */
278                         ds_next_pc = (pc & 0xf0000000) | (op_next.j.imm << 2);
279                 else
280                         ds_next_pc = pc + 4 + ((s16)op_next.i.imm << 2);
281         }
282
283         if (branch_at_addr && !branch_taken) {
284                 /* If the branch at the target of the branch opcode is not
285                  * taken, we jump to its delay slot */
286                 next_pc = pc + sizeof(u32);
287         } else if (branch_at_addr || (!branch && branch_in_ds)) {
288                 next_pc = ds_next_pc;
289         }
290
291         if (save_rs)
292                 reg_cache[op->r.rs] = new_rs;
293         if (dummy_ld)
294                 reg_cache[op->r.rt] = new_rt;
295
296         inter->cycles += lightrec_cycles_of_opcode(op->c);
297
298         if (branch_at_addr && branch_taken) {
299                 /* If the branch at the target of the branch opcode is taken,
300                  * we execute its delay slot here, and jump to its target
301                  * address. */
302                 op_next = lightrec_read_opcode(state, pc + 4);
303
304                 new_op.c = op_next;
305                 new_op.flags = 0;
306                 inter2.op = &new_op;
307                 inter2.block = NULL;
308
309                 inter->cycles += lightrec_cycles_of_opcode(op_next);
310
311                 pr_debug("Running delay slot of branch at target of impossible "
312                          "branch\n");
313                 lightrec_int_op(&inter2);
314         }
315
316         return next_pc;
317 }
318
319 static u32 int_unimplemented(struct interpreter *inter)
320 {
321         pr_warn("Unimplemented opcode 0x%08x\n", inter->op->opcode);
322
323         return jump_next(inter);
324 }
325
326 static u32 int_jump(struct interpreter *inter, bool link)
327 {
328         struct lightrec_state *state = inter->state;
329         u32 old_pc = int_get_branch_pc(inter);
330         u32 pc = (old_pc & 0xf0000000) | (inter->op->j.imm << 2);
331
332         if (link)
333                 state->regs.gpr[31] = old_pc + 8;
334
335         if (op_flag_no_ds(inter->op->flags))
336                 return pc;
337
338         return int_delay_slot(inter, pc, true);
339 }
340
341 static u32 int_J(struct interpreter *inter)
342 {
343         return int_jump(inter, false);
344 }
345
346 static u32 int_JAL(struct interpreter *inter)
347 {
348         return int_jump(inter, true);
349 }
350
351 static u32 int_jumpr(struct interpreter *inter, u8 link_reg)
352 {
353         struct lightrec_state *state = inter->state;
354         u32 old_pc = int_get_branch_pc(inter);
355         u32 next_pc = state->regs.gpr[inter->op->r.rs];
356
357         if (link_reg)
358                 state->regs.gpr[link_reg] = old_pc + 8;
359
360         if (op_flag_no_ds(inter->op->flags))
361                 return next_pc;
362
363         return int_delay_slot(inter, next_pc, true);
364 }
365
366 static u32 int_special_JR(struct interpreter *inter)
367 {
368         return int_jumpr(inter, 0);
369 }
370
371 static u32 int_special_JALR(struct interpreter *inter)
372 {
373         return int_jumpr(inter, inter->op->r.rd);
374 }
375
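/* If the optimizer flagged this as a local branch (a forward branch
 * whose target lies within the current block), keep emulating from the
 * target offset instead of returning to the caller. */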
376 static u32 int_do_branch(struct interpreter *inter, u32 old_pc, u32 next_pc)
377 {
378         if (!inter->delay_slot && op_flag_local_branch(inter->op->flags) &&
379             (s16)inter->op->c.i.imm >= 0) {
380                 next_pc = old_pc + ((1 + (s16)inter->op->c.i.imm) << 2);
381                 next_pc = lightrec_emulate_block(inter->state, inter->block, next_pc);
382         }
383
384         return next_pc;
385 }
386
387 static u32 int_branch(struct interpreter *inter, u32 pc,
388                       union code code, bool branch)
389 {
390         u32 next_pc = pc + 4 + ((s16)code.i.imm << 2);
391
392         update_cycles_before_branch(inter);
393
394         if (op_flag_no_ds(inter->op->flags)) {
395                 if (branch)
396                         return int_do_branch(inter, pc, next_pc);
397                 else
398                         return jump_next(inter);
399         }
400
401         if (!inter->delay_slot)
402                 next_pc = int_delay_slot(inter, next_pc, branch);
403
404         if (branch)
405                 return int_do_branch(inter, pc, next_pc);
406
407         if (op_flag_emulate_branch(inter->op->flags))
408                 return pc + 8;
409         else
410                 return jump_after_branch(inter);
411 }
412
413 static u32 int_beq(struct interpreter *inter, bool bne)
414 {
415         u32 rs, rt, old_pc = int_get_branch_pc(inter);
416
417         rs = inter->state->regs.gpr[inter->op->i.rs];
418         rt = inter->state->regs.gpr[inter->op->i.rt];
419
420         return int_branch(inter, old_pc, inter->op->c, (rs == rt) ^ bne);
421 }
422
423 static u32 int_BEQ(struct interpreter *inter)
424 {
425         return int_beq(inter, false);
426 }
427
428 static u32 int_BNE(struct interpreter *inter)
429 {
430         return int_beq(inter, true);
431 }
432
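/* Common helper for the compare-with-zero branches. 'lt' inverts the
 * condition (BLTZ/BLEZ instead of BGEZ/BGTZ), 'link' writes the return
 * address to $ra, and 'regimm' selects the REGIMM semantics where zero
 * satisfies the "greater" test (rs >= 0 / rs < 0 instead of
 * rs > 0 / rs <= 0). */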
433 static u32 int_bgez(struct interpreter *inter, bool link, bool lt, bool regimm)
434 {
435         u32 old_pc = int_get_branch_pc(inter);
436         s32 rs;
437
438         if (link)
439                 inter->state->regs.gpr[31] = old_pc + 8;
440
441         rs = (s32)inter->state->regs.gpr[inter->op->i.rs];
442
443         return int_branch(inter, old_pc, inter->op->c,
444                           ((regimm && !rs) || rs > 0) ^ lt);
445 }
446
447 static u32 int_regimm_BLTZ(struct interpreter *inter)
448 {
449         return int_bgez(inter, false, true, true);
450 }
451
452 static u32 int_regimm_BGEZ(struct interpreter *inter)
453 {
454         return int_bgez(inter, false, false, true);
455 }
456
457 static u32 int_regimm_BLTZAL(struct interpreter *inter)
458 {
459         return int_bgez(inter, true, true, true);
460 }
461
462 static u32 int_regimm_BGEZAL(struct interpreter *inter)
463 {
464         return int_bgez(inter, true, false, true);
465 }
466
467 static u32 int_BLEZ(struct interpreter *inter)
468 {
469         return int_bgez(inter, false, true, false);
470 }
471
472 static u32 int_BGTZ(struct interpreter *inter)
473 {
474         return int_bgez(inter, false, false, false);
475 }
476
477 static u32 int_cfc(struct interpreter *inter)
478 {
479         struct lightrec_state *state = inter->state;
480         const struct opcode *op = inter->op;
481         u32 val;
482
483         val = lightrec_mfc(state, op->c);
484
485         if (likely(op->r.rt))
486                 state->regs.gpr[op->r.rt] = val;
487
488         return jump_next(inter);
489 }
490
491 static u32 int_ctc(struct interpreter *inter)
492 {
493         struct lightrec_state *state = inter->state;
494         const struct opcode *op = inter->op;
495
496         lightrec_mtc(state, op->c, op->r.rd, state->regs.gpr[op->r.rt]);
497
498         /* If we have an MTC0 or CTC0 to CP0 register 12 (Status) or 13 (Cause),
499          * return early so that the emulator will be able to check software
500          * interrupt status. */
501         if (!op_flag_no_ds(inter->op->flags) &&
502             op->i.op == OP_CP0 && (op->r.rd == 12 || op->r.rd == 13))
503                 return int_get_ds_pc(inter, 1);
504         else
505                 return jump_next(inter);
506 }
507
508 static u32 int_cp0_RFE(struct interpreter *inter)
509 {
510         lightrec_rfe(inter->state);
511
512         return jump_next(inter);
513 }
514
515 static u32 int_CP(struct interpreter *inter)
516 {
517         lightrec_cp(inter->state, inter->op->c);
518
519         return jump_next(inter);
520 }
521
522 static u32 int_ADDI(struct interpreter *inter)
523 {
524         u32 *reg_cache = inter->state->regs.gpr;
525         struct opcode_i *op = &inter->op->i;
526
527         if (likely(op->rt))
528                 reg_cache[op->rt] = reg_cache[op->rs] + (s32)(s16)op->imm;
529
530         return jump_next(inter);
531 }
532
533 static u32 int_SLTI(struct interpreter *inter)
534 {
535         u32 *reg_cache = inter->state->regs.gpr;
536         struct opcode_i *op = &inter->op->i;
537
538         if (likely(op->rt))
539                 reg_cache[op->rt] = (s32)reg_cache[op->rs] < (s32)(s16)op->imm;
540
541         return jump_next(inter);
542 }
543
544 static u32 int_SLTIU(struct interpreter *inter)
545 {
546         u32 *reg_cache = inter->state->regs.gpr;
547         struct opcode_i *op = &inter->op->i;
548
549         if (likely(op->rt))
550                 reg_cache[op->rt] = reg_cache[op->rs] < (u32)(s32)(s16)op->imm;
551
552         return jump_next(inter);
553 }
554
555 static u32 int_ANDI(struct interpreter *inter)
556 {
557         u32 *reg_cache = inter->state->regs.gpr;
558         struct opcode_i *op = &inter->op->i;
559
560         if (likely(op->rt))
561                 reg_cache[op->rt] = reg_cache[op->rs] & op->imm;
562
563         return jump_next(inter);
564 }
565
566 static u32 int_ORI(struct interpreter *inter)
567 {
568         u32 *reg_cache = inter->state->regs.gpr;
569         struct opcode_i *op = &inter->op->i;
570
571         if (likely(op->rt))
572                 reg_cache[op->rt] = reg_cache[op->rs] | op->imm;
573
574         return jump_next(inter);
575 }
576
577 static u32 int_XORI(struct interpreter *inter)
578 {
579         u32 *reg_cache = inter->state->regs.gpr;
580         struct opcode_i *op = &inter->op->i;
581
582         if (likely(op->rt))
583                 reg_cache[op->rt] = reg_cache[op->rs] ^ op->imm;
584
585         return jump_next(inter);
586 }
587
588 static u32 int_LUI(struct interpreter *inter)
589 {
590         struct opcode_i *op = &inter->op->i;
591
592         inter->state->regs.gpr[op->rt] = op->imm << 16;
593
594         return jump_next(inter);
595 }
596
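/* Common path for loads and stores, dispatched through lightrec_rw().
 * A pointer to the opcode flags is only passed when a real block is
 * being interpreted; synthesized opcodes (inter->block == NULL) have no
 * flags to update. */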
597 static u32 int_io(struct interpreter *inter, bool is_load)
598 {
599         struct opcode_i *op = &inter->op->i;
600         u32 *reg_cache = inter->state->regs.gpr;
601         u32 val, *flags = NULL;
602
603         if (inter->block)
604                 flags = &inter->op->flags;
605
606         val = lightrec_rw(inter->state, inter->op->c,
607                           reg_cache[op->rs], reg_cache[op->rt],
608                           flags, inter->block, inter->offset);
609
610         if (is_load && op->rt)
611                 reg_cache[op->rt] = val;
612
613         return jump_next(inter);
614 }
615
616 static u32 int_load(struct interpreter *inter)
617 {
618         return int_io(inter, true);
619 }
620
621 static u32 int_store(struct interpreter *inter)
622 {
623         u32 next_pc;
624
625         if (likely(!op_flag_smc(inter->op->flags)))
626                 return int_io(inter, false);
627
628         lightrec_rw(inter->state, inter->op->c,
629                     inter->state->regs.gpr[inter->op->i.rs],
630                     inter->state->regs.gpr[inter->op->i.rt],
631                     &inter->op->flags, inter->block, inter->offset);
632
633         next_pc = int_get_ds_pc(inter, 1);
634
635         /* Invalidate next PC, to force the rest of the block to be rebuilt */
636         lightrec_invalidate(inter->state, next_pc, 4);
637
638         return next_pc;
639 }
640
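/* LWC2 loads target a GTE register, not a GPR, so the value returned by
 * lightrec_rw() must not be written back to the register cache; hence
 * is_load is false here. */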
641 static u32 int_LWC2(struct interpreter *inter)
642 {
643         return int_io(inter, false);
644 }
645
646 static u32 int_special_SLL(struct interpreter *inter)
647 {
648         struct opcode *op = inter->op;
649         u32 rt;
650
651         if (op->opcode) { /* A zero opcode is a NOP: skip it */
652                 rt = inter->state->regs.gpr[op->r.rt];
653                 inter->state->regs.gpr[op->r.rd] = rt << op->r.imm;
654         }
655
656         return jump_next(inter);
657 }
658
659 static u32 int_special_SRL(struct interpreter *inter)
660 {
661         struct opcode *op = inter->op;
662         u32 rt = inter->state->regs.gpr[op->r.rt];
663
664         inter->state->regs.gpr[op->r.rd] = rt >> op->r.imm;
665
666         return jump_next(inter);
667 }
668
669 static u32 int_special_SRA(struct interpreter *inter)
670 {
671         struct opcode *op = inter->op;
672         s32 rt = inter->state->regs.gpr[op->r.rt];
673
674         inter->state->regs.gpr[op->r.rd] = rt >> op->r.imm;
675
676         return jump_next(inter);
677 }
678
679 static u32 int_special_SLLV(struct interpreter *inter)
680 {
681         struct opcode *op = inter->op;
682         u32 rs = inter->state->regs.gpr[op->r.rs];
683         u32 rt = inter->state->regs.gpr[op->r.rt];
684
685         inter->state->regs.gpr[op->r.rd] = rt << (rs & 0x1f);
686
687         return jump_next(inter);
688 }
689
690 static u32 int_special_SRLV(struct interpreter *inter)
691 {
692         struct opcode *op = inter->op;
693         u32 rs = inter->state->regs.gpr[op->r.rs];
694         u32 rt = inter->state->regs.gpr[op->r.rt];
695
696         inter->state->regs.gpr[op->r.rd] = rt >> (rs & 0x1f);
697
698         return jump_next(inter);
699 }
700
701 static u32 int_special_SRAV(struct interpreter *inter)
702 {
703         struct opcode *op = inter->op;
704         u32 rs = inter->state->regs.gpr[op->r.rs];
705         s32 rt = inter->state->regs.gpr[op->r.rt];
706
707         inter->state->regs.gpr[op->r.rd] = rt >> (rs & 0x1f);
708
709         return jump_next(inter);
710 }
711
712 static u32 int_syscall_break(struct interpreter *inter)
713 {
714
715         if (inter->op->r.op == OP_SPECIAL_BREAK)
716                 lightrec_set_exit_flags(inter->state, LIGHTREC_EXIT_BREAK);
717         else
718                 lightrec_set_exit_flags(inter->state, LIGHTREC_EXIT_SYSCALL);
719
720         return int_get_ds_pc(inter, 0);
721 }
722
723 static u32 int_special_MFHI(struct interpreter *inter)
724 {
725         u32 *reg_cache = inter->state->regs.gpr;
726         struct opcode_r *op = &inter->op->r;
727
728         if (likely(op->rd))
729                 reg_cache[op->rd] = reg_cache[REG_HI];
730
731         return jump_next(inter);
732 }
733
734 static u32 int_special_MTHI(struct interpreter *inter)
735 {
736         u32 *reg_cache = inter->state->regs.gpr;
737
738         reg_cache[REG_HI] = reg_cache[inter->op->r.rs];
739
740         return jump_next(inter);
741 }
742
743 static u32 int_special_MFLO(struct interpreter *inter)
744 {
745         u32 *reg_cache = inter->state->regs.gpr;
746         struct opcode_r *op = &inter->op->r;
747
748         if (likely(op->rd))
749                 reg_cache[op->rd] = reg_cache[REG_LO];
750
751         return jump_next(inter);
752 }
753
754 static u32 int_special_MTLO(struct interpreter *inter)
755 {
756         u32 *reg_cache = inter->state->regs.gpr;
757
758         reg_cache[REG_LO] = reg_cache[inter->op->r.rs];
759
760         return jump_next(inter);
761 }
762
763 static u32 int_special_MULT(struct interpreter *inter)
764 {
765         u32 *reg_cache = inter->state->regs.gpr;
766         s32 rs = reg_cache[inter->op->r.rs];
767         s32 rt = reg_cache[inter->op->r.rt];
768         u8 reg_lo = get_mult_div_lo(inter->op->c);
769         u8 reg_hi = get_mult_div_hi(inter->op->c);
770         u64 res = (s64)rs * (s64)rt;
771
772         if (!op_flag_no_hi(inter->op->flags))
773                 reg_cache[reg_hi] = res >> 32;
774         if (!op_flag_no_lo(inter->op->flags))
775                 reg_cache[reg_lo] = res;
776
777         return jump_next(inter);
778 }
779
780 static u32 int_special_MULTU(struct interpreter *inter)
781 {
782         u32 *reg_cache = inter->state->regs.gpr;
783         u32 rs = reg_cache[inter->op->r.rs];
784         u32 rt = reg_cache[inter->op->r.rt];
785         u8 reg_lo = get_mult_div_lo(inter->op->c);
786         u8 reg_hi = get_mult_div_hi(inter->op->c);
787         u64 res = (u64)rs * (u64)rt;
788
789         if (!op_flag_no_hi(inter->op->flags))
790                 reg_cache[reg_hi] = res >> 32;
791         if (!op_flag_no_lo(inter->op->flags))
792                 reg_cache[reg_lo] = res;
793
794         return jump_next(inter);
795 }
796
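/* Signed division. The R3000 does not trap on division by zero; as on
 * real hardware, HI receives the dividend while LO becomes -1 if the
 * dividend is positive or zero, and +1 if it is negative. DIVU below
 * always yields LO = 0xffffffff in that case. */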
797 static u32 int_special_DIV(struct interpreter *inter)
798 {
799         u32 *reg_cache = inter->state->regs.gpr;
800         s32 rs = reg_cache[inter->op->r.rs];
801         s32 rt = reg_cache[inter->op->r.rt];
802         u8 reg_lo = get_mult_div_lo(inter->op->c);
803         u8 reg_hi = get_mult_div_hi(inter->op->c);
804         u32 lo, hi;
805
806         if (rt == 0) {
807                 hi = rs;
808                 lo = (rs < 0) * 2 - 1;
809         } else {
810                 lo = rs / rt;
811                 hi = rs % rt;
812         }
813
814         if (!op_flag_no_hi(inter->op->flags))
815                 reg_cache[reg_hi] = hi;
816         if (!op_flag_no_lo(inter->op->flags))
817                 reg_cache[reg_lo] = lo;
818
819         return jump_next(inter);
820 }
821
822 static u32 int_special_DIVU(struct interpreter *inter)
823 {
824         u32 *reg_cache = inter->state->regs.gpr;
825         u32 rs = reg_cache[inter->op->r.rs];
826         u32 rt = reg_cache[inter->op->r.rt];
827         u8 reg_lo = get_mult_div_lo(inter->op->c);
828         u8 reg_hi = get_mult_div_hi(inter->op->c);
829         u32 lo, hi;
830
831         if (rt == 0) {
832                 hi = rs;
833                 lo = (u32)-1;
834         } else {
835                 lo = rs / rt;
836                 hi = rs % rt;
837         }
838
839         if (!op_flag_no_hi(inter->op->flags))
840                 reg_cache[reg_hi] = hi;
841         if (!op_flag_no_lo(inter->op->flags))
842                 reg_cache[reg_lo] = lo;
843
844         return jump_next(inter);
845 }
846
847 static u32 int_special_ADD(struct interpreter *inter)
848 {
849         u32 *reg_cache = inter->state->regs.gpr;
850         struct opcode_r *op = &inter->op->r;
851         s32 rs = reg_cache[op->rs];
852         s32 rt = reg_cache[op->rt];
853
854         if (likely(op->rd))
855                 reg_cache[op->rd] = rs + rt;
856
857         return jump_next(inter);
858 }
859
860 static u32 int_special_SUB(struct interpreter *inter)
861 {
862         u32 *reg_cache = inter->state->regs.gpr;
863         struct opcode_r *op = &inter->op->r;
864         u32 rs = reg_cache[op->rs];
865         u32 rt = reg_cache[op->rt];
866
867         if (likely(op->rd))
868                 reg_cache[op->rd] = rs - rt;
869
870         return jump_next(inter);
871 }
872
873 static u32 int_special_AND(struct interpreter *inter)
874 {
875         u32 *reg_cache = inter->state->regs.gpr;
876         struct opcode_r *op = &inter->op->r;
877         u32 rs = reg_cache[op->rs];
878         u32 rt = reg_cache[op->rt];
879
880         if (likely(op->rd))
881                 reg_cache[op->rd] = rs & rt;
882
883         return jump_next(inter);
884 }
885
886 static u32 int_special_OR(struct interpreter *inter)
887 {
888         u32 *reg_cache = inter->state->regs.gpr;
889         struct opcode_r *op = &inter->op->r;
890         u32 rs = reg_cache[op->rs];
891         u32 rt = reg_cache[op->rt];
892
893         if (likely(op->rd))
894                 reg_cache[op->rd] = rs | rt;
895
896         return jump_next(inter);
897 }
898
899 static u32 int_special_XOR(struct interpreter *inter)
900 {
901         u32 *reg_cache = inter->state->regs.gpr;
902         struct opcode_r *op = &inter->op->r;
903         u32 rs = reg_cache[op->rs];
904         u32 rt = reg_cache[op->rt];
905
906         if (likely(op->rd))
907                 reg_cache[op->rd] = rs ^ rt;
908
909         return jump_next(inter);
910 }
911
912 static u32 int_special_NOR(struct interpreter *inter)
913 {
914         u32 *reg_cache = inter->state->regs.gpr;
915         struct opcode_r *op = &inter->op->r;
916         u32 rs = reg_cache[op->rs];
917         u32 rt = reg_cache[op->rt];
918
919         if (likely(op->rd))
920                 reg_cache[op->rd] = ~(rs | rt);
921
922         return jump_next(inter);
923 }
924
925 static u32 int_special_SLT(struct interpreter *inter)
926 {
927         u32 *reg_cache = inter->state->regs.gpr;
928         struct opcode_r *op = &inter->op->r;
929         s32 rs = reg_cache[op->rs];
930         s32 rt = reg_cache[op->rt];
931
932         if (likely(op->rd))
933                 reg_cache[op->rd] = rs < rt;
934
935         return jump_next(inter);
936 }
937
938 static u32 int_special_SLTU(struct interpreter *inter)
939 {
940         u32 *reg_cache = inter->state->regs.gpr;
941         struct opcode_r *op = &inter->op->r;
942         u32 rs = reg_cache[op->rs];
943         u32 rt = reg_cache[op->rt];
944
945         if (likely(op->rd))
946                 reg_cache[op->rd] = rs < rt;
947
948         return jump_next(inter);
949 }
950
951 static u32 int_META_MOV(struct interpreter *inter)
952 {
953         u32 *reg_cache = inter->state->regs.gpr;
954         struct opcode_m *op = &inter->op->m;
955
956         if (likely(op->rd))
957                 reg_cache[op->rd] = reg_cache[op->rs];
958
959         return jump_next(inter);
960 }
961
962 static u32 int_META_EXTC(struct interpreter *inter)
963 {
964         u32 *reg_cache = inter->state->regs.gpr;
965         struct opcode_m *op = &inter->op->m;
966
967         if (likely(op->rd))
968                 reg_cache[op->rd] = (u32)(s32)(s8)reg_cache[op->rs];
969
970         return jump_next(inter);
971 }
972
973 static u32 int_META_EXTS(struct interpreter *inter)
974 {
975         u32 *reg_cache = inter->state->regs.gpr;
976         struct opcode_m *op = &inter->op->m;
977
978         if (likely(op->rd))
979                 reg_cache[op->rd] = (u32)(s32)(s16)reg_cache[op->rs];
980
981         return jump_next(inter);
982 }
983
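/* Meta opcode emitted by the optimizer for multiplications by a power
 * of two: the 64-bit HI/LO result is computed with shifts, with c.r.op
 * holding the shift amount. MULT2 (signed) and MULTU2 (unsigned) only
 * differ in how the high word is filled for shift amounts below 32. */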
984 static u32 int_META_MULT2(struct interpreter *inter)
985 {
986         u32 *reg_cache = inter->state->regs.gpr;
987         union code c = inter->op->c;
988         u32 rs = reg_cache[c.r.rs];
989         u8 reg_lo = get_mult_div_lo(c);
990         u8 reg_hi = get_mult_div_hi(c);
991
992         if (!op_flag_no_lo(inter->op->flags)) {
993                 if (c.r.op < 32)
994                         reg_cache[reg_lo] = rs << c.r.op;
995                 else
996                         reg_cache[reg_lo] = 0;
997         }
998
999         if (!op_flag_no_hi(inter->op->flags)) {
1000                 if (c.r.op >= 32)
1001                         reg_cache[reg_hi] = rs << (c.r.op - 32);
1002                 else if (c.i.op == OP_META_MULT2)
1003                         reg_cache[reg_hi] = (s32) rs >> (32 - c.r.op);
1004                 else
1005                         reg_cache[reg_hi] = rs >> (32 - c.r.op);
1006         }
1007
1008         return jump_next(inter);
1009 }
1010
1011 static u32 int_META_COM(struct interpreter *inter)
1012 {
1013         u32 *reg_cache = inter->state->regs.gpr;
1014         union code c = inter->op->c;
1015
1016         if (likely(c.m.rd))
1017                 reg_cache[c.m.rd] = ~reg_cache[c.m.rs];
1018
1019         return jump_next(inter);
1020 }
1021
1022 static const lightrec_int_func_t int_standard[64] = {
1023         SET_DEFAULT_ELM(int_standard, int_unimplemented),
1024         [OP_SPECIAL]            = int_SPECIAL,
1025         [OP_REGIMM]             = int_REGIMM,
1026         [OP_J]                  = int_J,
1027         [OP_JAL]                = int_JAL,
1028         [OP_BEQ]                = int_BEQ,
1029         [OP_BNE]                = int_BNE,
1030         [OP_BLEZ]               = int_BLEZ,
1031         [OP_BGTZ]               = int_BGTZ,
1032         [OP_ADDI]               = int_ADDI,
1033         [OP_ADDIU]              = int_ADDI,
1034         [OP_SLTI]               = int_SLTI,
1035         [OP_SLTIU]              = int_SLTIU,
1036         [OP_ANDI]               = int_ANDI,
1037         [OP_ORI]                = int_ORI,
1038         [OP_XORI]               = int_XORI,
1039         [OP_LUI]                = int_LUI,
1040         [OP_CP0]                = int_CP0,
1041         [OP_CP2]                = int_CP2,
1042         [OP_LB]                 = int_load,
1043         [OP_LH]                 = int_load,
1044         [OP_LWL]                = int_load,
1045         [OP_LW]                 = int_load,
1046         [OP_LBU]                = int_load,
1047         [OP_LHU]                = int_load,
1048         [OP_LWR]                = int_load,
1049         [OP_SB]                 = int_store,
1050         [OP_SH]                 = int_store,
1051         [OP_SWL]                = int_store,
1052         [OP_SW]                 = int_store,
1053         [OP_SWR]                = int_store,
1054         [OP_LWC2]               = int_LWC2,
1055         [OP_SWC2]               = int_store,
1056
1057         [OP_META]               = int_META,
1058         [OP_META_MULT2]         = int_META_MULT2,
1059         [OP_META_MULTU2]        = int_META_MULT2,
1060 };
1061
1062 static const lightrec_int_func_t int_special[64] = {
1063         SET_DEFAULT_ELM(int_special, int_unimplemented),
1064         [OP_SPECIAL_SLL]        = int_special_SLL,
1065         [OP_SPECIAL_SRL]        = int_special_SRL,
1066         [OP_SPECIAL_SRA]        = int_special_SRA,
1067         [OP_SPECIAL_SLLV]       = int_special_SLLV,
1068         [OP_SPECIAL_SRLV]       = int_special_SRLV,
1069         [OP_SPECIAL_SRAV]       = int_special_SRAV,
1070         [OP_SPECIAL_JR]         = int_special_JR,
1071         [OP_SPECIAL_JALR]       = int_special_JALR,
1072         [OP_SPECIAL_SYSCALL]    = int_syscall_break,
1073         [OP_SPECIAL_BREAK]      = int_syscall_break,
1074         [OP_SPECIAL_MFHI]       = int_special_MFHI,
1075         [OP_SPECIAL_MTHI]       = int_special_MTHI,
1076         [OP_SPECIAL_MFLO]       = int_special_MFLO,
1077         [OP_SPECIAL_MTLO]       = int_special_MTLO,
1078         [OP_SPECIAL_MULT]       = int_special_MULT,
1079         [OP_SPECIAL_MULTU]      = int_special_MULTU,
1080         [OP_SPECIAL_DIV]        = int_special_DIV,
1081         [OP_SPECIAL_DIVU]       = int_special_DIVU,
1082         [OP_SPECIAL_ADD]        = int_special_ADD,
1083         [OP_SPECIAL_ADDU]       = int_special_ADD,
1084         [OP_SPECIAL_SUB]        = int_special_SUB,
1085         [OP_SPECIAL_SUBU]       = int_special_SUB,
1086         [OP_SPECIAL_AND]        = int_special_AND,
1087         [OP_SPECIAL_OR]         = int_special_OR,
1088         [OP_SPECIAL_XOR]        = int_special_XOR,
1089         [OP_SPECIAL_NOR]        = int_special_NOR,
1090         [OP_SPECIAL_SLT]        = int_special_SLT,
1091         [OP_SPECIAL_SLTU]       = int_special_SLTU,
1092 };
1093
1094 static const lightrec_int_func_t int_regimm[64] = {
1095         SET_DEFAULT_ELM(int_regimm, int_unimplemented),
1096         [OP_REGIMM_BLTZ]        = int_regimm_BLTZ,
1097         [OP_REGIMM_BGEZ]        = int_regimm_BGEZ,
1098         [OP_REGIMM_BLTZAL]      = int_regimm_BLTZAL,
1099         [OP_REGIMM_BGEZAL]      = int_regimm_BGEZAL,
1100 };
1101
1102 static const lightrec_int_func_t int_cp0[64] = {
1103         SET_DEFAULT_ELM(int_cp0, int_CP),
1104         [OP_CP0_MFC0]           = int_cfc,
1105         [OP_CP0_CFC0]           = int_cfc,
1106         [OP_CP0_MTC0]           = int_ctc,
1107         [OP_CP0_CTC0]           = int_ctc,
1108         [OP_CP0_RFE]            = int_cp0_RFE,
1109 };
1110
1111 static const lightrec_int_func_t int_cp2_basic[64] = {
1112         SET_DEFAULT_ELM(int_cp2_basic, int_CP),
1113         [OP_CP2_BASIC_MFC2]     = int_cfc,
1114         [OP_CP2_BASIC_CFC2]     = int_cfc,
1115         [OP_CP2_BASIC_MTC2]     = int_ctc,
1116         [OP_CP2_BASIC_CTC2]     = int_ctc,
1117 };
1118
1119 static const lightrec_int_func_t int_meta[64] = {
1120         SET_DEFAULT_ELM(int_meta, int_unimplemented),
1121         [OP_META_MOV]           = int_META_MOV,
1122         [OP_META_EXTC]          = int_META_EXTC,
1123         [OP_META_EXTS]          = int_META_EXTS,
1124         [OP_META_COM]           = int_META_COM,
1125 };
1126
1127 static u32 int_SPECIAL(struct interpreter *inter)
1128 {
1129         lightrec_int_func_t f = int_special[inter->op->r.op];
1130
1131         if (!HAS_DEFAULT_ELM && unlikely(!f))
1132                 return int_unimplemented(inter);
1133
1134         return execute(f, inter);
1135 }
1136
1137 static u32 int_REGIMM(struct interpreter *inter)
1138 {
1139         lightrec_int_func_t f = int_regimm[inter->op->r.rt];
1140
1141         if (!HAS_DEFAULT_ELM && unlikely(!f))
1142                 return int_unimplemented(inter);
1143
1144         return execute(f, inter);
1145 }
1146
1147 static u32 int_CP0(struct interpreter *inter)
1148 {
1149         lightrec_int_func_t f = int_cp0[inter->op->r.rs];
1150
1151         if (!HAS_DEFAULT_ELM && unlikely(!f))
1152                 return int_CP(inter);
1153
1154         return execute(f, inter);
1155 }
1156
1157 static u32 int_CP2(struct interpreter *inter)
1158 {
1159         if (inter->op->r.op == OP_CP2_BASIC) {
1160                 lightrec_int_func_t f = int_cp2_basic[inter->op->r.rs];
1161                 if (HAS_DEFAULT_ELM || likely(f))
1162                         return execute(f, inter);
1163         }
1164
1165         return int_CP(inter);
1166 }
1167
1168 static u32 int_META(struct interpreter *inter)
1169 {
1170         lightrec_int_func_t f = int_meta[inter->op->m.op];
1171
1172         if (!HAS_DEFAULT_ELM && unlikely(!f))
1173                 return int_unimplemented(inter);
1174
1175         return execute(f, inter);
1176 }
1177
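/* Interpret the block's opcode list starting at the given offset, until
 * an opcode hands back a new PC (branch, syscall/break, self-modifying
 * store...). The cycles accumulated by the interpreter are flushed to
 * the state before returning. */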
1178 static u32 lightrec_emulate_block_list(struct lightrec_state *state,
1179                                        struct block *block, u32 offset)
1180 {
1181         struct interpreter inter;
1182         u32 pc;
1183
1184         inter.block = block;
1185         inter.state = state;
1186         inter.offset = offset;
1187         inter.op = &block->opcode_list[offset];
1188         inter.cycles = 0;
1189         inter.delay_slot = false;
1190
1191         pc = lightrec_int_op(&inter);
1192
1193         /* Add the cycles of the last branch */
1194         inter.cycles += lightrec_cycles_of_opcode(inter.op->c);
1195
1196         state->current_cycle += inter.cycles;
1197
1198         return pc;
1199 }
1200
1201 u32 lightrec_emulate_block(struct lightrec_state *state, struct block *block, u32 pc)
1202 {
1203         u32 offset = (kunseg(pc) - kunseg(block->pc)) >> 2;
1204
1205         if (offset < block->nb_ops)
1206                 return lightrec_emulate_block_list(state, block, offset);
1207
1208         pr_err("PC 0x%x is outside block at PC 0x%x\n", pc, block->pc);
1209
1210         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
1211
1212         return 0;
1213 }
1214
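/* Compute the target address of a branch opcode from the current
 * register values, mirroring the address computations performed by the
 * individual branch handlers above. */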
1215 static u32 branch_get_next_pc(struct lightrec_state *state, union code c, u32 pc)
1216 {
1217         switch (c.i.op) {
1218         case OP_SPECIAL:
1219                 /* JR / JALR */
1220                 return state->regs.gpr[c.r.rs];
1221         case OP_J:
1222         case OP_JAL:
1223                 return (pc & 0xf0000000) | (c.j.imm << 2);
1224         default:
1225                 /* Branch opcodes */
1226                 return pc + 4 + ((s16)c.i.imm << 2);
1227         }
1228 }
1229
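/* Handle a load-delay hazard across a block boundary: the value loaded
 * by the previous instruction sits in state->temp_reg, and must only be
 * committed to 'reg' once the opcode at 'pc' has been executed (or, for
 * a branch, evaluated), so that this opcode still observes the pre-load
 * value of the register. */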
1230 u32 lightrec_handle_load_delay(struct lightrec_state *state,
1231                                struct block *block, u32 pc, u32 reg)
1232 {
1233         union code c = lightrec_read_opcode(state, pc);
1234         struct opcode op[2] = {
1235                 {
1236                         .c = c,
1237                         .flags = 0,
1238                 },
1239                 {
1240                         .flags = 0,
1241                 },
1242         };
1243         struct interpreter inter = {
1244                 .block = block,
1245                 .state = state,
1246                 .offset = 0,
1247                 .op = op,
1248                 .cycles = 0,
1249         };
1250         bool branch_taken;
1251         u32 reg_mask, next_pc;
1252
1253         if (has_delay_slot(c)) {
1254                 op[1].c = lightrec_read_opcode(state, pc + 4);
1255
1256                 branch_taken = is_branch_taken(state->regs.gpr, c);
1257                 next_pc = branch_get_next_pc(state, c, pc);
1258
1259                 /* The branch has been evaluated; we can now write the
1260                  * load opcode's target register. */
1261                 state->regs.gpr[reg] = state->temp_reg;
1262
1263                 /* Handle JALR / regimm opcodes setting $ra (or any other
1264                  * register in the case of JALR) */
1265                 reg_mask = (u32)opcode_write_mask(c);
1266                 if (reg_mask)
1267                         state->regs.gpr[ctz32(reg_mask)] = pc + 8;
1268
1269                 /* Handle delay slot of the branch opcode */
1270                 pc = int_delay_slot(&inter, next_pc, branch_taken);
1271         } else {
1272                 /* Make sure we only run one instruction */
1273                 inter.delay_slot = true;
1274
1275                 lightrec_int_op(&inter);
1276                 pc += 4;
1277
1278                 if (!opcode_writes_register(c, reg))
1279                         state->regs.gpr[reg] = state->temp_reg;
1280         }
1281
1282         state->current_cycle += inter.cycles;
1283
1284         return pc;
1285 }