// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "constprop.h"
#include "lightrec-config.h"
#include "disassembler.h"
#include "lightrec.h"
#include "memmanager.h"
#include "optimizer.h"
#include "regcache.h"

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define IF_OPT(opt, ptr) ((opt) ? (ptr) : NULL)

struct optimizer_list {
        void (**optimizers)(struct opcode *);
        unsigned int nb_optimizers;
};

static bool is_nop(union code op);

bool is_unconditional_jump(union code c)
{
        switch (c.i.op) {
        case OP_SPECIAL:
                return c.r.op == OP_SPECIAL_JR || c.r.op == OP_SPECIAL_JALR;
        case OP_J:
        case OP_JAL:
                return true;
        case OP_BEQ:
        case OP_BLEZ:
                return c.i.rs == c.i.rt;
        case OP_REGIMM:
                return (c.r.rt == OP_REGIMM_BGEZ ||
                        c.r.rt == OP_REGIMM_BGEZAL) && c.i.rs == 0;
        default:
                return false;
        }
}

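/* MTC0/CTC0 writes to the CP0 SR (reg 12) or CAUSE (reg 13) registers can
 * raise an interrupt, so they are treated like SYSCALL here and terminate
 * the block. */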
bool is_syscall(union code c)
{
        return (c.i.op == OP_SPECIAL && c.r.op == OP_SPECIAL_SYSCALL) ||
                (c.i.op == OP_CP0 && (c.r.rs == OP_CP0_MTC0 ||
                                        c.r.rs == OP_CP0_CTC0) &&
                 (c.r.rd == 12 || c.r.rd == 13));
}

static u64 opcode_read_mask(union code op)
{
        switch (op.i.op) {
        case OP_SPECIAL:
                switch (op.r.op) {
                case OP_SPECIAL_SYSCALL:
                case OP_SPECIAL_BREAK:
                        return 0;
                case OP_SPECIAL_JR:
                case OP_SPECIAL_JALR:
                case OP_SPECIAL_MTHI:
                case OP_SPECIAL_MTLO:
                        return BIT(op.r.rs);
                case OP_SPECIAL_MFHI:
                        return BIT(REG_HI);
                case OP_SPECIAL_MFLO:
                        return BIT(REG_LO);
                case OP_SPECIAL_SLL:
                        if (!op.r.imm)
                                return 0;
                        fallthrough;
                case OP_SPECIAL_SRL:
                case OP_SPECIAL_SRA:
                        return BIT(op.r.rt);
                default:
                        return BIT(op.r.rs) | BIT(op.r.rt);
                }
        case OP_CP0:
                switch (op.r.rs) {
                case OP_CP0_MTC0:
                case OP_CP0_CTC0:
                        return BIT(op.r.rt);
                default:
                        return 0;
                }
        case OP_CP2:
                if (op.r.op == OP_CP2_BASIC) {
                        switch (op.r.rs) {
                        case OP_CP2_BASIC_MTC2:
                        case OP_CP2_BASIC_CTC2:
                                return BIT(op.r.rt);
                        default:
                                break;
                        }
                }
                return 0;
        case OP_J:
        case OP_JAL:
        case OP_LUI:
                return 0;
        case OP_BEQ:
                if (op.i.rs == op.i.rt)
                        return 0;
                fallthrough;
        case OP_BNE:
        case OP_LWL:
        case OP_LWR:
        case OP_SB:
        case OP_SH:
        case OP_SWL:
        case OP_SW:
        case OP_SWR:
        case OP_META_LWU:
        case OP_META_SWU:
                return BIT(op.i.rs) | BIT(op.i.rt);
        case OP_META:
                return BIT(op.m.rs);
        default:
                return BIT(op.i.rs);
        }
}

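/* With the MULT/DIV optimization enabled, the rd and imm fields of a
 * MULT/DIV opcode may name alternative destination registers for the LO
 * and HI results; a value of 0 keeps the default REG_LO / REG_HI. */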
static u64 mult_div_write_mask(union code op)
{
        u64 flags;

        if (!OPT_FLAG_MULT_DIV)
                return BIT(REG_LO) | BIT(REG_HI);

        if (op.r.rd)
                flags = BIT(op.r.rd);
        else
                flags = BIT(REG_LO);
        if (op.r.imm)
                flags |= BIT(op.r.imm);
        else
                flags |= BIT(REG_HI);

        return flags;
}

u64 opcode_write_mask(union code op)
{
        switch (op.i.op) {
        case OP_META_MULT2:
        case OP_META_MULTU2:
                return mult_div_write_mask(op);
        case OP_META:
                return BIT(op.m.rd);
        case OP_SPECIAL:
                switch (op.r.op) {
                case OP_SPECIAL_JR:
                case OP_SPECIAL_SYSCALL:
                case OP_SPECIAL_BREAK:
                        return 0;
                case OP_SPECIAL_MULT:
                case OP_SPECIAL_MULTU:
                case OP_SPECIAL_DIV:
                case OP_SPECIAL_DIVU:
                        return mult_div_write_mask(op);
                case OP_SPECIAL_MTHI:
                        return BIT(REG_HI);
                case OP_SPECIAL_MTLO:
                        return BIT(REG_LO);
                case OP_SPECIAL_SLL:
                        if (!op.r.imm)
                                return 0;
                        fallthrough;
                default:
                        return BIT(op.r.rd);
                }
        case OP_ADDI:
        case OP_ADDIU:
        case OP_SLTI:
        case OP_SLTIU:
        case OP_ANDI:
        case OP_ORI:
        case OP_XORI:
        case OP_LUI:
        case OP_LB:
        case OP_LH:
        case OP_LWL:
        case OP_LW:
        case OP_LBU:
        case OP_LHU:
        case OP_LWR:
        case OP_META_LWU:
                return BIT(op.i.rt);
        case OP_JAL:
                return BIT(31);
        case OP_CP0:
                switch (op.r.rs) {
                case OP_CP0_MFC0:
                case OP_CP0_CFC0:
                        return BIT(op.i.rt);
                default:
                        return 0;
                }
        case OP_CP2:
                if (op.r.op == OP_CP2_BASIC) {
                        switch (op.r.rs) {
                        case OP_CP2_BASIC_MFC2:
                        case OP_CP2_BASIC_CFC2:
                                return BIT(op.i.rt);
                        default:
                                break;
                        }
                }
                return 0;
        case OP_REGIMM:
                switch (op.r.rt) {
                case OP_REGIMM_BLTZAL:
                case OP_REGIMM_BGEZAL:
                        return BIT(31);
                default:
                        return 0;
                }
        default:
                return 0;
        }
}

bool opcode_reads_register(union code op, u8 reg)
{
        return opcode_read_mask(op) & BIT(reg);
}

bool opcode_writes_register(union code op, u8 reg)
{
        return opcode_write_mask(op) & BIT(reg);
}

static int find_prev_writer(const struct opcode *list, unsigned int offset, u8 reg)
{
        union code c;
        unsigned int i;

        if (op_flag_sync(list[offset].flags))
                return -1;

        for (i = offset; i > 0; i--) {
                c = list[i - 1].c;

                if (opcode_writes_register(c, reg)) {
                        if (i > 1 && has_delay_slot(list[i - 2].c))
                                break;

                        return i - 1;
                }

                if (op_flag_sync(list[i - 1].flags) ||
                    has_delay_slot(c) ||
                    opcode_reads_register(c, reg))
                        break;
        }

        return -1;
}

static int find_next_reader(const struct opcode *list, unsigned int offset, u8 reg)
{
        unsigned int i;
        union code c;

        if (op_flag_sync(list[offset].flags))
                return -1;

        for (i = offset; ; i++) {
                c = list[i].c;

                if (opcode_reads_register(c, reg))
                        return i;

                if (op_flag_sync(list[i].flags)
                    || (op_flag_no_ds(list[i].flags) && has_delay_slot(c))
                    || is_delay_slot(list, i)
                    || opcode_writes_register(c, reg))
                        break;
        }

        return -1;
}

static bool reg_is_dead(const struct opcode *list, unsigned int offset, u8 reg)
{
        unsigned int i;

        if (op_flag_sync(list[offset].flags) || is_delay_slot(list, offset))
                return false;

        for (i = offset + 1; ; i++) {
                if (opcode_reads_register(list[i].c, reg))
                        return false;

                if (opcode_writes_register(list[i].c, reg))
                        return true;

                if (is_syscall(list[i].c))
                        return false;

                if (has_delay_slot(list[i].c)) {
                        if (op_flag_no_ds(list[i].flags) ||
                            opcode_reads_register(list[i + 1].c, reg))
                                return false;

                        return opcode_writes_register(list[i + 1].c, reg);
                }
        }
}

static bool reg_is_read(const struct opcode *list,
                        unsigned int a, unsigned int b, u8 reg)
{
        /* Return true if reg is read in one of the opcodes of the interval
         * [a, b[ */
        for (; a < b; a++) {
                if (!is_nop(list[a].c) && opcode_reads_register(list[a].c, reg))
                        return true;
        }

        return false;
}

static bool reg_is_written(const struct opcode *list,
                           unsigned int a, unsigned int b, u8 reg)
{
        /* Return true if reg is written in one of the opcodes of the interval
         * [a, b[ */

        for (; a < b; a++) {
                if (!is_nop(list[a].c) && opcode_writes_register(list[a].c, reg))
                        return true;
        }

        return false;
}

static bool reg_is_read_or_written(const struct opcode *list,
                                   unsigned int a, unsigned int b, u8 reg)
{
        return reg_is_read(list, a, b, reg) || reg_is_written(list, a, b, reg);
}

bool opcode_is_mfc(union code op)
{
        switch (op.i.op) {
        case OP_CP0:
                switch (op.r.rs) {
                case OP_CP0_MFC0:
                case OP_CP0_CFC0:
                        return true;
                default:
                        break;
                }

                break;
        case OP_CP2:
                if (op.r.op == OP_CP2_BASIC) {
                        switch (op.r.rs) {
                        case OP_CP2_BASIC_MFC2:
                        case OP_CP2_BASIC_CFC2:
                                return true;
                        default:
                                break;
                        }
                }

                break;
        default:
                break;
        }

        return false;
}

bool opcode_is_load(union code op)
{
        switch (op.i.op) {
        case OP_LB:
        case OP_LH:
        case OP_LWL:
        case OP_LW:
        case OP_LBU:
        case OP_LHU:
        case OP_LWR:
        case OP_LWC2:
        case OP_META_LWU:
                return true;
        default:
                return false;
        }
}

static bool opcode_is_store(union code op)
{
        switch (op.i.op) {
        case OP_SB:
        case OP_SH:
        case OP_SW:
        case OP_SWL:
        case OP_SWR:
        case OP_SWC2:
        case OP_META_SWU:
                return true;
        default:
                return false;
        }
}

static u8 opcode_get_io_size(union code op)
{
        switch (op.i.op) {
        case OP_LB:
        case OP_LBU:
        case OP_SB:
                return 8;
        case OP_LH:
        case OP_LHU:
        case OP_SH:
                return 16;
        default:
                return 32;
        }
}

bool opcode_is_io(union code op)
{
        return opcode_is_load(op) || opcode_is_store(op);
}

/* TODO: Complete */
static bool is_nop(union code op)
{
        if (opcode_writes_register(op, 0)) {
                switch (op.i.op) {
                case OP_CP0:
                        return op.r.rs != OP_CP0_MFC0;
                case OP_LB:
                case OP_LH:
                case OP_LWL:
                case OP_LW:
                case OP_LBU:
                case OP_LHU:
                case OP_LWR:
                case OP_META_LWU:
                        return false;
                default:
                        return true;
                }
        }

        switch (op.i.op) {
        case OP_SPECIAL:
                switch (op.r.op) {
                case OP_SPECIAL_AND:
                        return op.r.rd == op.r.rt && op.r.rd == op.r.rs;
                case OP_SPECIAL_ADD:
                case OP_SPECIAL_ADDU:
                        return (op.r.rd == op.r.rt && op.r.rs == 0) ||
                                (op.r.rd == op.r.rs && op.r.rt == 0);
                case OP_SPECIAL_SUB:
                case OP_SPECIAL_SUBU:
                        return op.r.rd == op.r.rs && op.r.rt == 0;
                case OP_SPECIAL_OR:
                        if (op.r.rd == op.r.rt)
                                return op.r.rd == op.r.rs || op.r.rs == 0;
                        else
                                return (op.r.rd == op.r.rs) && op.r.rt == 0;
                case OP_SPECIAL_SLL:
                case OP_SPECIAL_SRA:
                case OP_SPECIAL_SRL:
                        return op.r.rd == op.r.rt && op.r.imm == 0;
                case OP_SPECIAL_MFHI:
                case OP_SPECIAL_MFLO:
                        return op.r.rd == 0;
                default:
                        return false;
                }
        case OP_ORI:
        case OP_ADDI:
        case OP_ADDIU:
                return op.i.rt == op.i.rs && op.i.imm == 0;
        case OP_BGTZ:
                /* Never taken (rs == 0), or branching to the opcode right
                 * after the delay slot (imm == 1): effectively a NOP. */
                return (op.i.rs == 0 || op.i.imm == 1);
        case OP_REGIMM:
                /* The REGIMM sub-opcode lives in the rt field. */
                return (op.r.rt == OP_REGIMM_BLTZ ||
                        op.r.rt == OP_REGIMM_BLTZAL) &&
                        (op.i.rs == 0 || op.i.imm == 1);
        case OP_BNE:
                return (op.i.rs == op.i.rt || op.i.imm == 1);
        default:
                return false;
        }
}

static void lightrec_optimize_sll_sra(struct opcode *list, unsigned int offset,
                                      struct constprop_data *v)
{
        struct opcode *ldop = NULL, *curr = &list[offset], *next;
        struct opcode *to_change, *to_nop;
        int idx, idx2;

        if (curr->r.imm != 24 && curr->r.imm != 16)
                return;

        if (is_delay_slot(list, offset))
                return;

        idx = find_next_reader(list, offset + 1, curr->r.rd);
        if (idx < 0)
                return;

        next = &list[idx];

        if (next->i.op != OP_SPECIAL || next->r.op != OP_SPECIAL_SRA ||
            next->r.imm != curr->r.imm || next->r.rt != curr->r.rd)
                return;

        if (curr->r.rd != curr->r.rt && next->r.rd != next->r.rt) {
                /* sll rY, rX, 16
                 * ...
                 * sra rZ, rY, 16 */

                if (!reg_is_dead(list, idx, curr->r.rd) ||
                    reg_is_read_or_written(list, offset, idx, next->r.rd))
                        return;

                /* If rY is dead after the SRA, and rZ is not used after the
                 * SLL, we can change rY to rZ */

                pr_debug("Detected SLL/SRA with middle temp register\n");
                curr->r.rd = next->r.rd;
                next->r.rt = curr->r.rd;
        }

        /* We got a SLL/SRA combo. If imm #16, that's a cast to s16.
         * If imm #24, that's a cast to s8.
         *
         * First of all, make sure that the target register of the SLL is not
         * read after the SRA. */

        if (curr->r.rd == curr->r.rt) {
                /* sll rX, rX, 16
                 * ...
                 * sra rY, rX, 16 */
                to_change = next;
                to_nop = curr;

                /* rX is used after the SRA - we cannot convert it. */
                if (curr->r.rd != next->r.rd && !reg_is_dead(list, idx, curr->r.rd))
                        return;
        } else {
                /* sll rY, rX, 16
                 * ...
                 * sra rY, rY, 16 */
                to_change = curr;
                to_nop = next;
        }

        idx2 = find_prev_writer(list, offset, curr->r.rt);
        if (idx2 >= 0) {
                /* Note that PSX games sometimes do casts after
                 * a LHU or LBU; in this case we can change the
                 * load opcode to a LH or LB, and the cast can
                 * be changed to a MOV or a simple NOP. */
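                /* For instance:
                 *   lhu  $t0, 0($a0)
                 *   sll  $t1, $t0, 16
                 *   sra  $t1, $t1, 16
                 * can be reduced to a single:
                 *   lh   $t1, 0($a0) */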

                ldop = &list[idx2];

                if (next->r.rd != ldop->i.rt &&
                    !reg_is_dead(list, idx, ldop->i.rt))
                        ldop = NULL;
                else if (curr->r.imm == 16 && ldop->i.op == OP_LHU)
                        ldop->i.op = OP_LH;
                else if (curr->r.imm == 24 && ldop->i.op == OP_LBU)
                        ldop->i.op = OP_LB;
                else
                        ldop = NULL;

                if (ldop) {
                        if (next->r.rd == ldop->i.rt) {
                                to_change->opcode = 0;
                        } else if (reg_is_dead(list, idx, ldop->i.rt) &&
                                   !reg_is_read_or_written(list, idx2 + 1, idx, next->r.rd)) {
                                /* The target register of the SRA is dead after the
                                 * LBU/LHU; we can change the target register of the
                                 * LBU/LHU to the one of the SRA. */
                                v[ldop->i.rt].known = 0;
                                v[ldop->i.rt].sign = 0;
                                ldop->i.rt = next->r.rd;
                                to_change->opcode = 0;
                        } else {
                                to_change->i.op = OP_META;
                                to_change->m.op = OP_META_MOV;
                                to_change->m.rd = next->r.rd;
                                to_change->m.rs = ldop->i.rt;
                        }

                        if (to_nop->r.imm == 24)
                                pr_debug("Convert LBU+SLL+SRA to LB\n");
                        else
                                pr_debug("Convert LHU+SLL+SRA to LH\n");

                        v[ldop->i.rt].known = 0;
                        v[ldop->i.rt].sign = 0xffffff80 << (24 - curr->r.imm);
                }
        }

        if (!ldop) {
                pr_debug("Convert SLL/SRA #%u to EXT%c\n",
                         curr->r.imm, curr->r.imm == 24 ? 'C' : 'S');

                to_change->m.rs = curr->r.rt;
                to_change->m.op = to_nop->r.imm == 24 ? OP_META_EXTC : OP_META_EXTS;
                to_change->i.op = OP_META;
        }

        to_nop->opcode = 0;
}

static void
lightrec_remove_useless_lui(struct block *block, unsigned int offset,
                            const struct constprop_data *v)
{
        struct opcode *list = block->opcode_list,
                      *op = &block->opcode_list[offset];
        int reader;

        if (!op_flag_sync(op->flags) && is_known(v, op->i.rt) &&
            v[op->i.rt].value == op->i.imm << 16) {
                pr_debug("Converting duplicated LUI to NOP\n");
                op->opcode = 0x0;
                return;
        }

        if (op->i.imm != 0 || op->i.rt == 0 || offset == block->nb_ops - 1)
                return;

        reader = find_next_reader(list, offset + 1, op->i.rt);
        if (reader <= 0)
                return;

        if (opcode_writes_register(list[reader].c, op->i.rt) ||
            reg_is_dead(list, reader, op->i.rt)) {
                pr_debug("Removing useless LUI 0x0\n");

                if (list[reader].i.rs == op->i.rt)
                        list[reader].i.rs = 0;
                if (list[reader].i.op == OP_SPECIAL &&
                    list[reader].i.rt == op->i.rt)
                        list[reader].i.rt = 0;
                op->opcode = 0x0;
        }
}

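/* Tag LUI + ORI/ADDI/ADDIU pairs that build a full 32-bit constant, e.g.:
 *   lui  $t0, 0x8001
 *   ori  $t0, $t0, 0x1234
 * with the LIGHTREC_MOVI flag, so that they can later be compiled as a
 * single 32-bit constant load. */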
static void lightrec_lui_to_movi(struct block *block, unsigned int offset)
{
        struct opcode *ori, *lui = &block->opcode_list[offset];
        int next;

        if (lui->i.op != OP_LUI)
                return;

        next = find_next_reader(block->opcode_list, offset + 1, lui->i.rt);
        if (next > 0) {
                ori = &block->opcode_list[next];

                switch (ori->i.op) {
                case OP_ORI:
                case OP_ADDI:
                case OP_ADDIU:
                        if (ori->i.rs == ori->i.rt && ori->i.imm) {
                                ori->flags |= LIGHTREC_MOVI;
                                lui->flags |= LIGHTREC_MOVI;
                        }
                        break;
                }
        }
}

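/* If the register written by a LUI is only used as a load address before
 * being overwritten, its immediate can be converted to the kunseg
 * (physical segment) equivalent ahead of time, e.g.:
 *   lui $t0, 0xa001   ->   lui $t0, 0x0001
 * which saves masking the address at runtime. */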
static void lightrec_modify_lui(struct block *block, unsigned int offset)
{
        union code c, *lui = &block->opcode_list[offset].c;
        bool stop = false, stop_next = false;
        unsigned int i;

        for (i = offset + 1; !stop && i < block->nb_ops; i++) {
                c = block->opcode_list[i].c;
                stop = stop_next;

                if ((opcode_is_store(c) && c.i.rt == lui->i.rt)
                    || (!opcode_is_load(c) && opcode_reads_register(c, lui->i.rt)))
                        break;

                if (opcode_writes_register(c, lui->i.rt)) {
                        if (c.i.op == OP_LWL || c.i.op == OP_LWR) {
                                /* LWL/LWR only partially write their target register;
                                 * therefore the LUI should not write a different value. */
                                break;
                        }

                        pr_debug("Convert LUI at offset 0x%x to kuseg\n",
                                 (i - 1) << 2);
                        lui->i.imm = kunseg(lui->i.imm << 16) >> 16;
                        break;
                }

                if (has_delay_slot(c))
                        stop_next = true;
        }
}

static int lightrec_transform_branches(struct lightrec_state *state,
                                       struct block *block)
{
        struct opcode *op;
        unsigned int i;
        s32 offset;

        for (i = 0; i < block->nb_ops; i++) {
                op = &block->opcode_list[i];

                switch (op->i.op) {
                case OP_J:
                        /* Transform J opcode into BEQ $zero, $zero if possible. */
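                        /* BEQ $zero, $zero is always taken; unlike J it is
                         * PC-relative, so the branch stays local to the
                         * block. This is only possible when the distance
                         * fits in the signed 16-bit branch immediate. */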
                        offset = (s32)((block->pc & 0xf0000000) >> 2 | op->j.imm)
                                - (s32)(block->pc >> 2) - (s32)i - 1;

                        if (offset == (s16)offset) {
                                pr_debug("Transform J into BEQ $zero, $zero\n");
                                op->i.op = OP_BEQ;
                                op->i.rs = 0;
                                op->i.rt = 0;
                                op->i.imm = offset;
                        }
                        fallthrough;
                default:
                        break;
                }
        }

        return 0;
}

static inline bool is_power_of_two(u32 value)
{
        return popcount32(value) == 1;
}

static void lightrec_patch_known_zero(struct opcode *op,
                                      const struct constprop_data *v)
{
        switch (op->i.op) {
        case OP_SPECIAL:
                switch (op->r.op) {
                case OP_SPECIAL_JR:
                case OP_SPECIAL_JALR:
                case OP_SPECIAL_MTHI:
                case OP_SPECIAL_MTLO:
                        if (is_known_zero(v, op->r.rs))
                                op->r.rs = 0;
                        break;
                default:
                        if (is_known_zero(v, op->r.rs))
                                op->r.rs = 0;
                        fallthrough;
                case OP_SPECIAL_SLL:
                case OP_SPECIAL_SRL:
                case OP_SPECIAL_SRA:
                        if (is_known_zero(v, op->r.rt))
                                op->r.rt = 0;
                        break;
                case OP_SPECIAL_SYSCALL:
                case OP_SPECIAL_BREAK:
                case OP_SPECIAL_MFHI:
                case OP_SPECIAL_MFLO:
                        break;
                }
                break;
        case OP_CP0:
                switch (op->r.rs) {
                case OP_CP0_MTC0:
                case OP_CP0_CTC0:
                        if (is_known_zero(v, op->r.rt))
                                op->r.rt = 0;
                        break;
                default:
                        break;
                }
                break;
        case OP_CP2:
                if (op->r.op == OP_CP2_BASIC) {
                        switch (op->r.rs) {
                        case OP_CP2_BASIC_MTC2:
                        case OP_CP2_BASIC_CTC2:
                                if (is_known_zero(v, op->r.rt))
                                        op->r.rt = 0;
                                break;
                        default:
                                break;
                        }
                }
                break;
        case OP_BEQ:
        case OP_BNE:
                if (is_known_zero(v, op->i.rt))
                        op->i.rt = 0;
                fallthrough;
        case OP_REGIMM:
        case OP_BLEZ:
        case OP_BGTZ:
        case OP_ADDI:
        case OP_ADDIU:
        case OP_SLTI:
        case OP_SLTIU:
        case OP_ANDI:
        case OP_ORI:
        case OP_XORI:
        case OP_META_MULT2:
        case OP_META_MULTU2:
        case OP_META:
                if (is_known_zero(v, op->m.rs))
                        op->m.rs = 0;
                break;
        case OP_SB:
        case OP_SH:
        case OP_SWL:
        case OP_SW:
        case OP_SWR:
        case OP_META_SWU:
                if (is_known_zero(v, op->i.rt))
                        op->i.rt = 0;
                fallthrough;
        case OP_LB:
        case OP_LH:
        case OP_LWL:
        case OP_LW:
        case OP_LBU:
        case OP_LHU:
        case OP_LWR:
        case OP_LWC2:
        case OP_SWC2:
        case OP_META_LWU:
                if (is_known(v, op->i.rs)
                    && kunseg(v[op->i.rs].value) == 0)
                        op->i.rs = 0;
                break;
        default:
                break;
        }
}

static void lightrec_reset_syncs(struct block *block)
{
        struct opcode *op, *list = block->opcode_list;
        unsigned int i;
        s32 offset;

        for (i = 0; i < block->nb_ops; i++)
                list[i].flags &= ~LIGHTREC_SYNC;

        for (i = 0; i < block->nb_ops; i++) {
                op = &list[i];

                if (has_delay_slot(op->c)) {
                        if (op_flag_local_branch(op->flags)) {
                                offset = i + 1 - op_flag_no_ds(op->flags) + (s16)op->i.imm;
                                list[offset].flags |= LIGHTREC_SYNC;
                        }

                        if (op_flag_emulate_branch(op->flags) && i + 2 < block->nb_ops)
                                list[i + 2].flags |= LIGHTREC_SYNC;
                }
        }
}

static void maybe_remove_load_delay(struct opcode *op)
{
        if (op_flag_load_delay(op->flags) && opcode_is_load(op->c))
                op->flags &= ~LIGHTREC_LOAD_DELAY;
}

static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
{
        struct opcode *op, *list = block->opcode_list;
        struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
        unsigned int i;
        bool local;
        int idx;
        u8 tmp;

        for (i = 0; i < block->nb_ops; i++) {
                op = &list[i];

                lightrec_consts_propagate(block, i, v);

                lightrec_patch_known_zero(op, v);

                /* Transform all opcodes detected as useless to real NOPs
                 * (0x0: SLL r0, r0, #0) */
                if (op->opcode != 0 && is_nop(op->c)) {
                        pr_debug("Converting useless opcode 0x%08x to NOP\n",
                                        op->opcode);
                        op->opcode = 0x0;
                }

                if (!op->opcode)
                        continue;

                switch (op->i.op) {
                case OP_BEQ:
                        if (op->i.rs == op->i.rt ||
                            (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
                             v[op->i.rs].value == v[op->i.rt].value)) {
                                if (op->i.rs != op->i.rt)
                                        pr_debug("Found always-taken BEQ\n");

                                op->i.rs = 0;
                                op->i.rt = 0;
                        } else if (v[op->i.rs].known & v[op->i.rt].known &
                                   (v[op->i.rs].value ^ v[op->i.rt].value)) {
                                /* A bit known in both operands differs:
                                 * the operands can never be equal. */
                                pr_debug("Found never-taken BEQ\n");

                                if (!op_flag_no_ds(op->flags))
                                        maybe_remove_load_delay(&list[i + 1]);

                                local = op_flag_local_branch(op->flags);
                                op->opcode = 0;
                                op->flags = 0;

                                if (local)
                                        lightrec_reset_syncs(block);
                        } else if (op->i.rs == 0) {
                                op->i.rs = op->i.rt;
                                op->i.rt = 0;
                        }
                        break;

                case OP_BNE:
                        if (v[op->i.rs].known & v[op->i.rt].known &
                            (v[op->i.rs].value ^ v[op->i.rt].value)) {
                                /* A bit known in both operands differs:
                                 * the branch is always taken. */
                                pr_debug("Found always-taken BNE\n");

                                op->i.op = OP_BEQ;
                                op->i.rs = 0;
                                op->i.rt = 0;
                        } else if (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
                                   v[op->i.rs].value == v[op->i.rt].value) {
                                pr_debug("Found never-taken BNE\n");

                                if (!op_flag_no_ds(op->flags))
                                        maybe_remove_load_delay(&list[i + 1]);

                                local = op_flag_local_branch(op->flags);
                                op->opcode = 0;
                                op->flags = 0;

                                if (local)
                                        lightrec_reset_syncs(block);
                        } else if (op->i.rs == 0) {
                                op->i.rs = op->i.rt;
                                op->i.rt = 0;
                        }
                        break;

                case OP_BLEZ:
                        if (v[op->i.rs].known & BIT(31) &&
                            v[op->i.rs].value & BIT(31)) {
                                pr_debug("Found always-taken BLEZ\n");

                                op->i.op = OP_BEQ;
                                op->i.rs = 0;
                                op->i.rt = 0;
                        }
                        break;

                case OP_BGTZ:
                        if (v[op->i.rs].known & BIT(31) &&
                            v[op->i.rs].value & BIT(31)) {
                                pr_debug("Found never-taken BGTZ\n");

                                if (!op_flag_no_ds(op->flags))
                                        maybe_remove_load_delay(&list[i + 1]);

                                local = op_flag_local_branch(op->flags);
                                op->opcode = 0;
                                op->flags = 0;

                                if (local)
                                        lightrec_reset_syncs(block);
                        }
                        break;

                case OP_LUI:
                        if (i == 0 || !has_delay_slot(list[i - 1].c))
                                lightrec_modify_lui(block, i);
                        lightrec_remove_useless_lui(block, i, v);
                        if (i == 0 || !has_delay_slot(list[i - 1].c))
                                lightrec_lui_to_movi(block, i);
                        break;

                /* Transform ORI/ADDI/ADDIU with imm #0 or OR/ADD/ADDU/SUB/SUBU
                 * with register $zero to the MOV meta-opcode */
                case OP_ORI:
                case OP_ADDI:
                case OP_ADDIU:
                        if (op->i.imm == 0) {
                                pr_debug("Convert ORI/ADDI/ADDIU #0 to MOV\n");
                                op->m.rd = op->i.rt;
                                op->m.op = OP_META_MOV;
                                op->i.op = OP_META;
                        }
                        break;
                case OP_ANDI:
                        if (bits_are_known_zero(v, op->i.rs, ~op->i.imm)) {
                                pr_debug("Found useless ANDI 0x%x\n", op->i.imm);

                                if (op->i.rs == op->i.rt) {
                                        op->opcode = 0;
                                } else {
                                        op->m.rd = op->i.rt;
                                        op->m.op = OP_META_MOV;
                                        op->i.op = OP_META;
                                }
                        }
                        break;
                case OP_LWL:
                case OP_LWR:
                        if (i == 0 || !has_delay_slot(list[i - 1].c)) {
                                idx = find_next_reader(list, i + 1, op->i.rt);
                                if (idx > 0 && list[idx].i.op == (op->i.op ^ 0x4)
                                    && list[idx].i.rs == op->i.rs
                                    && list[idx].i.rt == op->i.rt
                                    && abs((s16)op->i.imm - (s16)list[idx].i.imm) == 3) {
                                        /* Replace a LWL/LWR combo with a META_LWU */
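                                        /* e.g. the unaligned load sequence:
                                         *   lwl $t0, 3($a0)
                                         *   lwr $t0, 0($a0)
                                         * becomes one META_LWU at offset 0. */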
                                        if (op->i.op == OP_LWL)
                                                op->i.imm -= 3;
                                        op->i.op = OP_META_LWU;
                                        list[idx].opcode = 0;
                                        pr_debug("Convert LWL/LWR to LWU\n");
                                }
                        }
                        break;
                case OP_SWL:
                case OP_SWR:
                        if (i == 0 || !has_delay_slot(list[i - 1].c)) {
                                idx = find_next_reader(list, i + 1, op->i.rt);
                                if (idx > 0 && list[idx].i.op == (op->i.op ^ 0x4)
                                    && list[idx].i.rs == op->i.rs
                                    && list[idx].i.rt == op->i.rt
                                    && abs((s16)op->i.imm - (s16)list[idx].i.imm) == 3) {
                                        /* Replace a SWL/SWR combo with a META_SWU */
                                        if (op->i.op == OP_SWL)
                                                op->i.imm -= 3;
                                        op->i.op = OP_META_SWU;
                                        list[idx].opcode = 0;
                                        pr_debug("Convert SWL/SWR to SWU\n");
                                }
                        }
                        break;
                case OP_REGIMM:
                        switch (op->r.rt) {
                        case OP_REGIMM_BLTZ:
                        case OP_REGIMM_BGEZ:
                                if (!(v[op->r.rs].known & BIT(31)))
                                        break;

                                if (!!(v[op->r.rs].value & BIT(31))
                                    ^ (op->r.rt == OP_REGIMM_BGEZ)) {
                                        pr_debug("Found always-taken BLTZ/BGEZ\n");
                                        op->i.op = OP_BEQ;
                                        op->i.rs = 0;
                                        op->i.rt = 0;
                                } else {
                                        pr_debug("Found never-taken BLTZ/BGEZ\n");

                                        if (!op_flag_no_ds(op->flags))
                                                maybe_remove_load_delay(&list[i + 1]);

                                        local = op_flag_local_branch(op->flags);
                                        op->opcode = 0;
                                        op->flags = 0;

                                        if (local)
                                                lightrec_reset_syncs(block);
                                }
                                break;
                        case OP_REGIMM_BLTZAL:
                        case OP_REGIMM_BGEZAL:
                                /* TODO: Detect always-taken and replace with JAL */
                                break;
                        }
                        break;
                case OP_SPECIAL:
                        switch (op->r.op) {
                        case OP_SPECIAL_SRAV:
                                if ((v[op->r.rs].known & 0x1f) != 0x1f)
                                        break;

                                pr_debug("Convert SRAV to SRA\n");
                                op->r.imm = v[op->r.rs].value & 0x1f;
                                op->r.op = OP_SPECIAL_SRA;

                                fallthrough;
                        case OP_SPECIAL_SRA:
                                if (op->r.imm == 0) {
                                        pr_debug("Convert SRA #0 to MOV\n");
                                        op->m.rs = op->r.rt;
                                        op->m.op = OP_META_MOV;
                                        op->i.op = OP_META;
                                }
                                break;

                        case OP_SPECIAL_SLLV:
                                if ((v[op->r.rs].known & 0x1f) != 0x1f)
                                        break;

                                pr_debug("Convert SLLV to SLL\n");
                                op->r.imm = v[op->r.rs].value & 0x1f;
                                op->r.op = OP_SPECIAL_SLL;

                                fallthrough;
                        case OP_SPECIAL_SLL:
                                if (op->r.imm == 0) {
                                        pr_debug("Convert SLL #0 to MOV\n");
                                        op->m.rs = op->r.rt;
                                        op->m.op = OP_META_MOV;
                                        op->i.op = OP_META;
                                }

                                lightrec_optimize_sll_sra(block->opcode_list, i, v);
                                break;

                        case OP_SPECIAL_SRLV:
                                if ((v[op->r.rs].known & 0x1f) != 0x1f)
                                        break;

                                pr_debug("Convert SRLV to SRL\n");
                                op->r.imm = v[op->r.rs].value & 0x1f;
                                op->r.op = OP_SPECIAL_SRL;

                                fallthrough;
                        case OP_SPECIAL_SRL:
                                if (op->r.imm == 0) {
                                        pr_debug("Convert SRL #0 to MOV\n");
                                        op->m.rs = op->r.rt;
                                        op->m.op = OP_META_MOV;
                                        op->i.op = OP_META;
                                }
                                break;

                        case OP_SPECIAL_MULT:
                        case OP_SPECIAL_MULTU:
                                if (is_known(v, op->r.rs) &&
                                    is_power_of_two(v[op->r.rs].value)) {
                                        tmp = op->c.i.rs;
                                        op->c.i.rs = op->c.i.rt;
                                        op->c.i.rt = tmp;
                                } else if (!is_known(v, op->r.rt) ||
                                           !is_power_of_two(v[op->r.rt].value)) {
                                        break;
                                }

                                pr_debug("Multiply by power-of-two: %u\n",
                                         v[op->r.rt].value);

                                if (op->r.op == OP_SPECIAL_MULT)
                                        op->i.op = OP_META_MULT2;
                                else
                                        op->i.op = OP_META_MULTU2;

                                /* The shift amount is stored in the
                                 * sub-opcode (function) field. */
                                op->r.op = ctz32(v[op->r.rt].value);
                                break;
                        case OP_SPECIAL_NOR:
                                if (op->r.rs == 0 || op->r.rt == 0) {
                                        pr_debug("Convert NOR $zero to COM\n");
                                        op->i.op = OP_META;
                                        op->m.op = OP_META_COM;
                                        if (!op->m.rs)
                                                op->m.rs = op->r.rt;
                                }
                                break;
                        case OP_SPECIAL_OR:
                        case OP_SPECIAL_ADD:
                        case OP_SPECIAL_ADDU:
                                if (op->r.rs == 0) {
                                        pr_debug("Convert OR/ADD $zero to MOV\n");
                                        op->m.rs = op->r.rt;
                                        op->m.op = OP_META_MOV;
                                        op->i.op = OP_META;
                                }
                                fallthrough;
                        case OP_SPECIAL_SUB:
                        case OP_SPECIAL_SUBU:
                                if (op->r.rt == 0) {
                                        pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
                                        op->m.op = OP_META_MOV;
                                        op->i.op = OP_META;
                                }
                                fallthrough;
                        default:
                                break;
                        }
                        fallthrough;
                default:
                        break;
                }
        }

        return 0;
}

static bool lightrec_can_switch_delay_slot(union code op, union code next_op)
{
        switch (op.i.op) {
        case OP_SPECIAL:
                switch (op.r.op) {
                case OP_SPECIAL_JALR:
                        if (opcode_reads_register(next_op, op.r.rd) ||
                            opcode_writes_register(next_op, op.r.rd))
                                return false;
                        fallthrough;
                case OP_SPECIAL_JR:
                        if (opcode_writes_register(next_op, op.r.rs))
                                return false;
                        fallthrough;
                default:
                        break;
                }
                fallthrough;
        case OP_J:
                break;
        case OP_JAL:
                if (opcode_reads_register(next_op, 31) ||
                    opcode_writes_register(next_op, 31))
                        return false;

                break;
        case OP_BEQ:
        case OP_BNE:
                if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
                        return false;
                fallthrough;
        case OP_BLEZ:
        case OP_BGTZ:
                if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
                        return false;
                break;
        case OP_REGIMM:
                switch (op.r.rt) {
                case OP_REGIMM_BLTZAL:
                case OP_REGIMM_BGEZAL:
                        if (opcode_reads_register(next_op, 31) ||
                            opcode_writes_register(next_op, 31))
                                return false;
                        fallthrough;
                case OP_REGIMM_BLTZ:
                case OP_REGIMM_BGEZ:
                        if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
                                return false;
                        break;
                }
                fallthrough;
        default:
                break;
        }

        return true;
}

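/* Executing the delay-slot opcode before the branch removes the need to
 * emulate the delay slot at all, e.g.:
 *
 *   beq   $v0, $zero, target            addiu $a0, $a0, 4
 *   addiu $a0, $a0, 4          ->       beq   $v0, $zero, target  (NO_DS)
 *
 * This is only valid when the two opcodes do not depend on each other, as
 * checked by lightrec_can_switch_delay_slot() above. */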
static int lightrec_switch_delay_slots(struct lightrec_state *state, struct block *block)
{
        struct opcode *list, *next = &block->opcode_list[0];
        unsigned int i;
        union code op, next_op;
        u32 flags;

        for (i = 0; i < block->nb_ops - 1; i++) {
                list = next;
                next = &block->opcode_list[i + 1];
                next_op = next->c;
                op = list->c;

                if (!has_delay_slot(op) || op_flag_no_ds(list->flags) ||
                    op_flag_emulate_branch(list->flags) ||
                    op.opcode == 0 || next_op.opcode == 0)
                        continue;

                if (is_delay_slot(block->opcode_list, i))
                        continue;

                if (op_flag_sync(next->flags))
                        continue;

                if (op_flag_load_delay(next->flags) && opcode_is_load(next_op))
                        continue;

                if (!lightrec_can_switch_delay_slot(list->c, next_op))
                        continue;

                pr_debug("Swap branch and delay slot opcodes "
                         "at offsets 0x%x / 0x%x\n",
                         i << 2, (i + 1) << 2);

                flags = next->flags | (list->flags & LIGHTREC_SYNC);
                list->c = next_op;
                next->c = op;
                next->flags = (list->flags | LIGHTREC_NO_DS) & ~LIGHTREC_SYNC;
                list->flags = flags | LIGHTREC_NO_DS;
        }

        return 0;
}

static int lightrec_detect_impossible_branches(struct lightrec_state *state,
                                               struct block *block)
{
        struct opcode *op, *list = block->opcode_list, *next = &list[0];
        unsigned int i;
        int ret = 0;

        for (i = 0; i < block->nb_ops - 1; i++) {
                op = next;
                next = &list[i + 1];

                if (!has_delay_slot(op->c) ||
                    (!has_delay_slot(next->c) &&
                     !opcode_is_mfc(next->c) &&
                     !(next->i.op == OP_CP0 && next->r.rs == OP_CP0_RFE)))
                        continue;

                if (op->c.opcode == next->c.opcode) {
                        /* The delay slot is the exact same opcode as the branch
                         * opcode: this is effectively a NOP */
                        next->c.opcode = 0;
                        continue;
                }

                op->flags |= LIGHTREC_EMULATE_BRANCH;

                if (OPT_LOCAL_BRANCHES && i + 2 < block->nb_ops) {
                        /* The interpreter will only emulate the branch, then
                         * return to the compiled code. Add a SYNC after the
                         * branch + delay slot in the case where the branch
                         * was not taken. */
                        list[i + 2].flags |= LIGHTREC_SYNC;
                }
        }

        return ret;
}

static bool is_local_branch(const struct block *block, unsigned int idx)
{
        const struct opcode *op = &block->opcode_list[idx];
        s32 offset;

        switch (op->c.i.op) {
        case OP_BEQ:
        case OP_BNE:
        case OP_BLEZ:
        case OP_BGTZ:
        case OP_REGIMM:
                offset = idx + 1 + (s16)op->c.i.imm;
                if (offset >= 0 && offset < block->nb_ops)
                        return true;
                fallthrough;
        default:
                return false;
        }
}

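/* The R3000 has a one-instruction load delay: the instruction that follows
 * a load still sees the old value of the loaded register. E.g.:
 *
 *   bne  $v0, $zero, target
 *   lw   $t0, 0($a0)           <- load in the branch delay slot
 *
 * If the opcode at 'target' reads $t0, it must see the pre-load value, so
 * the load delay has to be emulated. */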
static int lightrec_handle_load_delays(struct lightrec_state *state,
                                       struct block *block)
{
        struct opcode *op, *list = block->opcode_list;
        unsigned int i;
        s16 imm;

        for (i = 0; i < block->nb_ops; i++) {
                op = &list[i];

                if (!opcode_is_load(op->c) || !op->c.i.rt || op->c.i.op == OP_LWC2)
                        continue;

                if (!is_delay_slot(list, i)) {
                        /* Only handle load delays in delay slots.
                         * PSX games never abused load delay slots otherwise. */
                        continue;
                }

                if (is_local_branch(block, i - 1)) {
                        imm = (s16)list[i - 1].c.i.imm;

                        if (!opcode_reads_register(list[i + imm].c, op->c.i.rt)) {
                                /* The target opcode of the branch is inside
                                 * the block, and it does not read the register
                                 * written to by the load opcode; we can ignore
                                 * the load delay. */
                                continue;
                        }
                }

                op->flags |= LIGHTREC_LOAD_DELAY;
        }

        return 0;
}

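/* A load followed by an opcode that reads the loaded register can simply be
 * reordered, since under load-delay rules that opcode must see the old
 * value anyway, e.g.:
 *
 *   lw    $t0, 0($a0)                 addiu $v0, $t0, 1
 *   addiu $v0, $t0, 1        ->       lw    $t0, 0($a0)
 *
 * This is only done when the second opcode is not itself a branch or a
 * LWL/LWR, and does not write the load's base register. */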
1415 static int lightrec_swap_load_delays(struct lightrec_state *state,
1416                                      struct block *block)
1417 {
1418         unsigned int i;
1419         union code c, next;
1420         bool in_ds = false, skip_next = false;
1421         struct opcode op;
1422
1423         if (block->nb_ops < 2)
1424                 return 0;
1425
1426         for (i = 0; i < block->nb_ops - 2; i++) {
1427                 c = block->opcode_list[i].c;
1428
1429                 if (skip_next) {
1430                         skip_next = false;
1431                 } else if (!in_ds && opcode_is_load(c) && c.i.op != OP_LWC2) {
1432                         next = block->opcode_list[i + 1].c;
1433
1434                         switch (next.i.op) {
1435                         case OP_LWL:
1436                         case OP_LWR:
1437                         case OP_REGIMM:
1438                         case OP_BEQ:
1439                         case OP_BNE:
1440                         case OP_BLEZ:
1441                         case OP_BGTZ:
1442                                 continue;
1443                         }
1444
1445                         if (opcode_reads_register(next, c.i.rt)
1446                             && !opcode_writes_register(next, c.i.rs)) {
1447                                 pr_debug("Swapping opcodes at offset 0x%x to "
1448                                          "respect load delay\n", i << 2);
1449
1450                                 op = block->opcode_list[i];
1451                                 block->opcode_list[i] = block->opcode_list[i + 1];
1452                                 block->opcode_list[i + 1] = op;
1453                                 skip_next = true;
1454                         }
1455                 }
1456
1457                 in_ds = has_delay_slot(c);
1458         }
1459
1460         return 0;
1461 }
1462
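/* Flag branches whose target lies inside the block with
 * LIGHTREC_LOCAL_BRANCH, unless the branch or its target cannot be
 * handled inline (emulated branch, load delay in the delay slot, or a
 * target that is itself a delay slot). Sync points are recomputed
 * afterwards. */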
1463 static int lightrec_local_branches(struct lightrec_state *state, struct block *block)
1464 {
1465         const struct opcode *ds;
1466         struct opcode *list;
1467         unsigned int i;
1468         s32 offset;
1469
1470         for (i = 0; i < block->nb_ops; i++) {
1471                 list = &block->opcode_list[i];
1472
1473                 if (should_emulate(list) || !is_local_branch(block, i))
1474                         continue;
1475
1476                 offset = i + 1 + (s16)list->c.i.imm;
1477
1478                 pr_debug("Found local branch to offset 0x%x\n", offset << 2);
1479
1480                 ds = get_delay_slot(block->opcode_list, i);
1481                 if (op_flag_load_delay(ds->flags) && opcode_is_load(ds->c)) {
1482                         pr_debug("Branch delay slot has a load delay - skip\n");
1483                         continue;
1484                 }
1485
1486                 if (should_emulate(&block->opcode_list[offset])) {
1487                         pr_debug("Branch target must be emulated - skip\n");
1488                         continue;
1489                 }
1490
1491                 if (offset && has_delay_slot(block->opcode_list[offset - 1].c)) {
1492                         pr_debug("Branch target is a delay slot - skip\n");
1493                         continue;
1494                 }
1495
1496                 list->flags |= LIGHTREC_LOCAL_BRANCH;
1497         }
1498
1499         lightrec_reset_syncs(block);
1500
1501         return 0;
1502 }
1503
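/* Returns true if the opcode is a jump or branch, and is therefore
 * followed by a delay slot. */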
1504 bool has_delay_slot(union code op)
1505 {
1506         switch (op.i.op) {
1507         case OP_SPECIAL:
1508                 switch (op.r.op) {
1509                 case OP_SPECIAL_JR:
1510                 case OP_SPECIAL_JALR:
1511                         return true;
1512                 default:
1513                         return false;
1514                 }
1515         case OP_J:
1516         case OP_JAL:
1517         case OP_BEQ:
1518         case OP_BNE:
1519         case OP_BLEZ:
1520         case OP_BGTZ:
1521         case OP_REGIMM:
1522                 return true;
1523         default:
1524                 return false;
1525         }
1526 }
1527
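/* Returns true if the opcode at 'offset' sits in the delay slot of the
 * previous opcode, and that delay slot has not been optimized away. */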
1528 bool is_delay_slot(const struct opcode *list, unsigned int offset)
1529 {
1530         return offset > 0
1531                 && !op_flag_no_ds(list[offset - 1].flags)
1532                 && has_delay_slot(list[offset - 1].c);
1533 }
1534
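/* Returns true if the branch at this opcode was flagged to be emulated
 * by the interpreter instead of being compiled. */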
1535 bool should_emulate(const struct opcode *list)
1536 {
1537         return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c);
1538 }
1539
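/* OP_SPECIAL and OP_META opcodes encode their destination register in
 * the 'rd' field. */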
1540 static bool op_writes_rd(union code c)
1541 {
1542         switch (c.i.op) {
1543         case OP_SPECIAL:
1544         case OP_META:
1545                 return true;
1546         default:
1547                 return false;
1548         }
1549 }
1550
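/* Set an unload/clean/discard flag for 'reg' on the rd, rs or rt slot
 * of the opcode, whichever operand matches the register. */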
1551 static void lightrec_add_reg_op(struct opcode *op, u8 reg, u32 reg_op)
1552 {
1553         if (op_writes_rd(op->c) && reg == op->r.rd)
1554                 op->flags |= LIGHTREC_REG_RD(reg_op);
1555         else if (op->i.rs == reg)
1556                 op->flags |= LIGHTREC_REG_RS(reg_op);
1557         else if (op->i.rt == reg)
1558                 op->flags |= LIGHTREC_REG_RT(reg_op);
1559         else
1560                 pr_debug("Cannot add unload/clean/discard flag: "
1561                          "opcode does not touch register %s!\n",
1562                          lightrec_reg_name(reg));
1563 }
1564
1565 static void lightrec_add_unload(struct opcode *op, u8 reg)
1566 {
1567         lightrec_add_reg_op(op, reg, LIGHTREC_REG_UNLOAD);
1568 }
1569
1570 static void lightrec_add_discard(struct opcode *op, u8 reg)
1571 {
1572         lightrec_add_reg_op(op, reg, LIGHTREC_REG_DISCARD);
1573 }
1574
1575 static void lightrec_add_clean(struct opcode *op, u8 reg)
1576 {
1577         lightrec_add_reg_op(op, reg, LIGHTREC_REG_CLEAN);
1578 }
1579
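/* Unload every tracked register at its last point of use, then reset
 * the tracking arrays (32 GPRs plus REG_LO and REG_HI). */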
1580 static void
1581 lightrec_early_unload_sync(struct opcode *list, s16 *last_r, s16 *last_w)
1582 {
1583         unsigned int reg;
1584         s16 offset;
1585
1586         for (reg = 0; reg < 34; reg++) {
1587                 offset = s16_max(last_w[reg], last_r[reg]);
1588
1589                 if (offset >= 0)
1590                         lightrec_add_unload(&list[offset], reg);
1591         }
1592
1593         memset(last_r, 0xff, sizeof(*last_r) * 34);
1594         memset(last_w, 0xff, sizeof(*last_w) * 34);
1595 }
1596
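/* Walk the block and flag registers for clean/unload/discard so that
 * host registers are released as early as possible, according to the
 * rules described below. */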
1597 static int lightrec_early_unload(struct lightrec_state *state, struct block *block)
1598 {
1599         u16 i, offset;
1600         struct opcode *op;
1601         s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0;
1602         u64 mask_r, mask_w, dirty = 0, loaded = 0;
1603         u8 reg, load_delay_reg = 0;
1604
1605         memset(last_r, 0xff, sizeof(last_r));
1606         memset(last_w, 0xff, sizeof(last_w));
1607
1608         /*
1609          * Clean if:
1610          * - the register is dirty, and is read again after a branch opcode
1611          *
1612          * Unload if:
1613          * - the register is dirty or loaded, and is not read again
1614          * - the register is dirty or loaded, and is written again after a branch opcode
1615          * - the next opcode has the SYNC flag set
1616          *
1617          * Discard if:
1618          * - the register is dirty or loaded, and is written again
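         *
         * For example, in "lw v0,0(s0); addu v1,v0,v0" where v0 is never
         * used afterwards, v0's unload flag lands on the ADDU opcode, at
         * its last point of use.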
1619          */
1620
1621         for (i = 0; i < block->nb_ops; i++) {
1622                 op = &block->opcode_list[i];
1623
1624                 if (OPT_HANDLE_LOAD_DELAYS && load_delay_reg) {
1625                         /* Handle delayed register write from load opcodes in
1626                          * delay slots */
1627                         last_w[load_delay_reg] = i;
1628                         load_delay_reg = 0;
1629                 }
1630
1631                 if (op_flag_sync(op->flags) || should_emulate(op)) {
1632                         /* The next opcode has the SYNC flag set, or is a branch
1633                          * that should be emulated: unload all registers. */
1634                         lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
1635                         dirty = 0;
1636                         loaded = 0;
1637                 }
1638
1639                 if (next_sync == i) {
1640                         last_sync = i;
1641                         pr_debug("Last sync: 0x%x\n", last_sync << 2);
1642                 }
1643
1644                 if (has_delay_slot(op->c)) {
1645                         next_sync = i + 1 + !op_flag_no_ds(op->flags);
1646                         pr_debug("Next sync: 0x%x\n", next_sync << 2);
1647                 }
1648
1649                 mask_r = opcode_read_mask(op->c);
1650                 mask_w = opcode_write_mask(op->c);
1651
1652                 if (op_flag_load_delay(op->flags) && opcode_is_load(op->c)) {
1653                         /* If we have a load opcode in a delay slot, its target
1654                          * register is actually not written there but at a
1655                          * later point, in the dispatcher. Prevent the algorithm
1656                          * from discarding its previous value. */
1657                         load_delay_reg = op->c.i.rt;
1658                         mask_w &= ~BIT(op->c.i.rt);
1659                 }
1660
1661                 for (reg = 0; reg < 34; reg++) {
1662                         if (mask_r & BIT(reg)) {
1663                                 if (dirty & BIT(reg) && last_w[reg] < last_sync) {
1664                                         /* The register is dirty, and is read
1665                                          * again after a branch: clean it */
1666
1667                                         lightrec_add_clean(&block->opcode_list[last_w[reg]], reg);
1668                                         dirty &= ~BIT(reg);
1669                                         loaded |= BIT(reg);
1670                                 }
1671
1672                                 last_r[reg] = i;
1673                         }
1674
1675                         if (mask_w & BIT(reg)) {
1676                                 if ((dirty & BIT(reg) && last_w[reg] < last_sync) ||
1677                                     (loaded & BIT(reg) && last_r[reg] < last_sync)) {
1678                                         /* The register is dirty or loaded, and
1679                                          * is written again after a branch:
1680                                          * unload it */
1681
1682                                         offset = s16_max(last_w[reg], last_r[reg]);
1683                                         lightrec_add_unload(&block->opcode_list[offset], reg);
1684                                         dirty &= ~BIT(reg);
1685                                         loaded &= ~BIT(reg);
1686                                 } else if (!(mask_r & BIT(reg)) &&
1687                                            ((dirty & BIT(reg) && last_w[reg] > last_sync) ||
1688                                            (loaded & BIT(reg) && last_r[reg] > last_sync))) {
1689                                         /* The register is dirty or loaded, and
1690                                          * is written again: discard it */
1691
1692                                         offset = s16_max(last_w[reg], last_r[reg]);
1693                                         lightrec_add_discard(&block->opcode_list[offset], reg);
1694                                         dirty &= ~BIT(reg);
1695                                         loaded &= ~BIT(reg);
1696                                 }
1697
1698                                 last_w[reg] = i;
1699                         }
1700
1701                 }
1702
1703                 dirty |= mask_w;
1704                 loaded |= mask_r;
1705         }
1706
1707         /* Unload all registers that are dirty or loaded at the end of block. */
1708         lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
1709
1710         return 0;
1711 }
1712
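/* Using constant propagation, classify each memory access by the PSX
 * memory region it targets (RAM, BIOS, scratchpad, hardware registers)
 * and set the matching I/O flags, so that the generated code can skip
 * the generic address lookup. Stores that cannot hit a code page are
 * also flagged as not requiring invalidation. */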
1713 static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
1714 {
1715         struct opcode *list;
1716         enum psx_map psx_map;
1717         struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
1718         unsigned int i;
1719         u32 val, kunseg_val;
1720         bool no_mask;
1721
1722         for (i = 0; i < block->nb_ops; i++) {
1723                 list = &block->opcode_list[i];
1724
1725                 lightrec_consts_propagate(block, i, v);
1726
1727                 switch (list->i.op) {
1728                 case OP_SB:
1729                 case OP_SH:
1730                 case OP_SW:
1731                         /* Mark all store operations that target $sp or $gp
1732                          * as not requiring code invalidation. This is based
1733                          * on the heuristic that stores using one of these
1734                          * registers as the address will never hit a code page. */
1735                         if (list->i.rs >= 28 && list->i.rs <= 29 &&
1736                             !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1737                                 pr_debug("Flagging opcode 0x%08x as not requiring invalidation\n",
1738                                          list->opcode);
1739                                 list->flags |= LIGHTREC_NO_INVALIDATE;
1740                         }
1741
1742                         /* Detect writes whose destination address is inside the
1743                          * current block, using constant propagation. When these
1744                          * occur, we mark the blocks as not compilable. */
1745                         if (is_known(v, list->i.rs) &&
1746                             kunseg(v[list->i.rs].value) >= kunseg(block->pc) &&
1747                             kunseg(v[list->i.rs].value) < (kunseg(block->pc) + block->nb_ops * 4)) {
1748                                 pr_debug("Self-modifying block detected\n");
1749                                 block_set_flags(block, BLOCK_NEVER_COMPILE);
1750                                 list->flags |= LIGHTREC_SMC;
1751                         }
1752                         fallthrough;
1753                 case OP_SWL:
1754                 case OP_SWR:
1755                 case OP_SWC2:
1756                 case OP_LB:
1757                 case OP_LBU:
1758                 case OP_LH:
1759                 case OP_LHU:
1760                 case OP_LW:
1761                 case OP_LWL:
1762                 case OP_LWR:
1763                 case OP_LWC2:
1764                         if (v[list->i.rs].known | v[list->i.rs].sign) {
1765                                 psx_map = lightrec_get_constprop_map(state, v,
1766                                                                      list->i.rs,
1767                                                                      (s16) list->i.imm);
1768
1769                                 if (psx_map != PSX_MAP_UNKNOWN && !is_known(v, list->i.rs))
1770                                         pr_debug("Detected map thanks to bit-level const propagation!\n");
1771
1772                                 list->flags &= ~LIGHTREC_IO_MASK;
1773
1774                                 val = v[list->i.rs].value + (s16) list->i.imm;
1775                                 kunseg_val = kunseg(val);
1776
1777                                 no_mask = (v[list->i.rs].known & ~v[list->i.rs].value
1778                                            & 0xe0000000) == 0xe0000000;
1779
1780                                 switch (psx_map) {
1781                                 case PSX_MAP_KERNEL_USER_RAM:
1782                                         if (no_mask)
1783                                                 list->flags |= LIGHTREC_NO_MASK;
1784                                         fallthrough;
1785                                 case PSX_MAP_MIRROR1:
1786                                 case PSX_MAP_MIRROR2:
1787                                 case PSX_MAP_MIRROR3:
1788                                         pr_debug("Flagging opcode %u as RAM access\n", i);
1789                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1790                                         if (no_mask && state->mirrors_mapped)
1791                                                 list->flags |= LIGHTREC_NO_MASK;
1792                                         break;
1793                                 case PSX_MAP_BIOS:
1794                                         pr_debug("Flagging opcode %u as BIOS access\n", i);
1795                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_BIOS);
1796                                         if (no_mask)
1797                                                 list->flags |= LIGHTREC_NO_MASK;
1798                                         break;
1799                                 case PSX_MAP_SCRATCH_PAD:
1800                                         pr_debug("Flagging opcode %u as scratchpad access\n", i);
1801                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_SCRATCH);
1802                                         if (no_mask)
1803                                                 list->flags |= LIGHTREC_NO_MASK;
1804
1805                                         /* Consider that we're never going to run code from
1806                                          * the scratchpad. */
1807                                         list->flags |= LIGHTREC_NO_INVALIDATE;
1808                                         break;
1809                                 case PSX_MAP_HW_REGISTERS:
1810                                         if (state->ops.hw_direct &&
1811                                             state->ops.hw_direct(kunseg_val,
1812                                                                  opcode_is_store(list->c),
1813                                                                  opcode_get_io_size(list->c))) {
1814                                                 pr_debug("Flagging opcode %u as direct I/O access\n",
1815                                                          i);
1816                                                 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT_HW);
1817
1818                                                 if (no_mask)
1819                                                         list->flags |= LIGHTREC_NO_MASK;
1820                                         } else {
1821                                                 pr_debug("Flagging opcode %u as I/O access\n",
1822                                                          i);
1823                                                 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
1824                                         }
1825                                         break;
1826                                 default:
1827                                         break;
1828                                 }
1829                         }
1830
1831                         if (!LIGHTREC_FLAGS_GET_IO_MODE(list->flags)
1832                             && list->i.rs >= 28 && list->i.rs <= 29
1833                             && !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1834                                 /* Assume that all I/O operations that target
1835                                  * $sp or $gp will only ever target mapped
1836                                  * memory (RAM, BIOS, scratchpad). */
1837                                 if (state->opt_flags & LIGHTREC_OPT_SP_GP_HIT_RAM)
1838                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1839                                 else
1840                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
1841                         }
1842
1843                         fallthrough;
1844                 default:
1845                         break;
1846                 }
1847         }
1848
1849         return 0;
1850 }
1851
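/* Find which register can receive the HI or LO result of the
 * multiplication or division preceding 'offset'. Returns the destination
 * of a later MFHI/MFLO when the result can be written there directly,
 * REG_HI/REG_LO when the value must stay in its dedicated register, or
 * 0 if the result is never read. */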
1852 static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset,
1853                             const struct opcode *last,
1854                             u32 mask, bool sync, bool mflo, bool another)
1855 {
1856         const struct opcode *op, *next = &block->opcode_list[offset];
1857         u32 old_mask;
1858         u8 reg2, reg = mflo ? REG_LO : REG_HI;
1859         u16 branch_offset;
1860         unsigned int i;
1861
1862         for (i = offset; i < block->nb_ops; i++) {
1863                 op = next;
1864                 next = &block->opcode_list[i + 1];
1865                 old_mask = mask;
1866
1867                 /* If any other opcode reads or writes the register
1868                  * we'd use, then we cannot use it anymore. */
1869                 mask |= opcode_read_mask(op->c);
1870                 mask |= opcode_write_mask(op->c);
1871
1872                 if (op_flag_sync(op->flags))
1873                         sync = true;
1874
1875                 switch (op->i.op) {
1876                 case OP_BEQ:
1877                 case OP_BNE:
1878                 case OP_BLEZ:
1879                 case OP_BGTZ:
1880                 case OP_REGIMM:
1881                         /* TODO: handle backwards branches too */
1882                         if (!last && op_flag_local_branch(op->flags) &&
1883                             (s16)op->c.i.imm >= 0) {
1884                                 branch_offset = i + 1 + (s16)op->c.i.imm
1885                                         - !!op_flag_no_ds(op->flags);
1886
1887                                 reg = get_mfhi_mflo_reg(block, branch_offset, NULL,
1888                                                         mask, sync, mflo, false);
1889                                 reg2 = get_mfhi_mflo_reg(block, offset + 1, next,
1890                                                          mask, sync, mflo, false);
1891                                 if (reg > 0 && reg == reg2)
1892                                         return reg;
1893                                 if (!reg && !reg2)
1894                                         return 0;
1895                         }
1896
1897                         return mflo ? REG_LO : REG_HI;
1898                 case OP_META_MULT2:
1899                 case OP_META_MULTU2:
1900                         return 0;
1901                 case OP_SPECIAL:
1902                         switch (op->r.op) {
1903                         case OP_SPECIAL_MULT:
1904                         case OP_SPECIAL_MULTU:
1905                         case OP_SPECIAL_DIV:
1906                         case OP_SPECIAL_DIVU:
1907                                 return 0;
1908                         case OP_SPECIAL_MTHI:
1909                                 if (!mflo)
1910                                         return 0;
1911                                 continue;
1912                         case OP_SPECIAL_MTLO:
1913                                 if (mflo)
1914                                         return 0;
1915                                 continue;
1916                         case OP_SPECIAL_JR:
1917                                 if (op->r.rs != 31)
1918                                         return reg;
1919
1920                                 if (!sync && !op_flag_no_ds(op->flags) &&
1921                                     (next->i.op == OP_SPECIAL) &&
1922                                     ((!mflo && next->r.op == OP_SPECIAL_MFHI) ||
1923                                     (mflo && next->r.op == OP_SPECIAL_MFLO)))
1924                                         return next->r.rd;
1925
1926                                 return 0;
1927                         case OP_SPECIAL_JALR:
1928                                 return reg;
1929                         case OP_SPECIAL_MFHI:
1930                                 if (!mflo) {
1931                                         if (another)
1932                                                 return op->r.rd;
1933                                         /* Must use REG_HI if there is another MFHI target */
1934                                         reg2 = get_mfhi_mflo_reg(block, i + 1, next,
1935                                                          0, sync, mflo, true);
1936                                         if (reg2 > 0 && reg2 != REG_HI)
1937                                                 return REG_HI;
1938
1939                                         if (!sync && !(old_mask & BIT(op->r.rd)))
1940                                                 return op->r.rd;
1941                                         else
1942                                                 return REG_HI;
1943                                 }
1944                                 continue;
1945                         case OP_SPECIAL_MFLO:
1946                                 if (mflo) {
1947                                         if (another)
1948                                                 return op->r.rd;
1949                                         /* Must use REG_LO if there is another MFLO target */
1950                                         reg2 = get_mfhi_mflo_reg(block, i + 1, next,
1951                                                          0, sync, mflo, true);
1952                                         if (reg2 > 0 && reg2 != REG_LO)
1953                                                 return REG_LO;
1954
1955                                         if (!sync && !(old_mask & BIT(op->r.rd)))
1956                                                 return op->r.rd;
1957                                         else
1958                                                 return REG_LO;
1959                                 }
1960                                 continue;
1961                         default:
1962                                 break;
1963                         }
1964
1965                         fallthrough;
1966                 default:
1967                         continue;
1968                 }
1969         }
1970
1971         return reg;
1972 }
1973
1974 static void lightrec_replace_lo_hi(struct block *block, u16 offset,
1975                                    u16 last, bool lo)
1976 {
1977         unsigned int i;
1978         u32 branch_offset;
1979
1980         /* This function will remove the following MFLO/MFHI. It must be called
1981          * only if get_mfhi_mflo_reg() returned a non-zero value. */
1982
1983         for (i = offset; i < last; i++) {
1984                 struct opcode *op = &block->opcode_list[i];
1985
1986                 switch (op->i.op) {
1987                 case OP_BEQ:
1988                 case OP_BNE:
1989                 case OP_BLEZ:
1990                 case OP_BGTZ:
1991                 case OP_REGIMM:
1992                         /* TODO: handle backwards branches too */
1993                         if (op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) {
1994                                 branch_offset = i + 1 + (s16)op->c.i.imm
1995                                         - !!op_flag_no_ds(op->flags);
1996
1997                                 lightrec_replace_lo_hi(block, branch_offset, last, lo);
1998                                 lightrec_replace_lo_hi(block, i + 1, branch_offset, lo);
1999                         }
2000                         break;
2001
2002                 case OP_SPECIAL:
2003                         if (lo && op->r.op == OP_SPECIAL_MFLO) {
2004                                 pr_debug("Removing MFLO opcode at offset 0x%x\n",
2005                                          i << 2);
2006                                 op->opcode = 0;
2007                                 return;
2008                         } else if (!lo && op->r.op == OP_SPECIAL_MFHI) {
2009                                 pr_debug("Removing MFHI opcode at offset 0x%x\n",
2010                                          i << 2);
2011                                 op->opcode = 0;
2012                                 return;
2013                         }
2014
2015                         fallthrough;
2016                 default:
2017                         break;
2018                 }
2019         }
2020 }
2021
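/* Note: presumably safe because on a MIPS host the emitted DIV/DIVU
 * opcodes match the guest R3000's behaviour on a zero divisor, so the
 * check can always be skipped there. */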
2022 static bool lightrec_always_skip_div_check(void)
2023 {
2024 #ifdef __mips__
2025         return true;
2026 #else
2027         return false;
2028 #endif
2029 }
2030
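/* Flag each MULT/DIV opcode with LIGHTREC_NO_LO/LIGHTREC_NO_HI when the
 * corresponding result is never read, and when possible store the result
 * directly into the register targeted by a later MFLO/MFHI, which is
 * then removed. The chosen registers are stashed in the opcode's rd and
 * imm fields. */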
2031 static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block *block)
2032 {
2033         struct opcode *list = NULL;
2034         struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
2035         u8 reg_hi, reg_lo;
2036         unsigned int i;
2037
2038         for (i = 0; i < block->nb_ops - 1; i++) {
2039                 list = &block->opcode_list[i];
2040
2041                 lightrec_consts_propagate(block, i, v);
2042
2043                 switch (list->i.op) {
2044                 case OP_SPECIAL:
2045                         switch (list->r.op) {
2046                         case OP_SPECIAL_DIV:
2047                         case OP_SPECIAL_DIVU:
2048                                 /* If we are dividing by a non-zero constant, don't
2049                                  * emit the div-by-zero check. */
2050                                 if (lightrec_always_skip_div_check() ||
2051                                     (v[list->r.rt].known & v[list->r.rt].value)) {
2052                                         list->flags |= LIGHTREC_NO_DIV_CHECK;
2053                                 }
2054                                 fallthrough;
2055                         case OP_SPECIAL_MULT:
2056                         case OP_SPECIAL_MULTU:
2057                                 break;
2058                         default:
2059                                 continue;
2060                         }
2061                         fallthrough;
2062                 case OP_META_MULT2:
2063                 case OP_META_MULTU2:
2064                         break;
2065                 default:
2066                         continue;
2067                 }
2068
2069                 /* Don't support opcodes in delay slots */
2070                 if (is_delay_slot(block->opcode_list, i) ||
2071                     op_flag_no_ds(list->flags)) {
2072                         continue;
2073                 }
2074
2075                 reg_lo = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, true, false);
2076                 if (reg_lo == 0) {
2077                         pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
2078                                  " not writing LO\n", i << 2);
2079                         list->flags |= LIGHTREC_NO_LO;
2080                 }
2081
2082                 reg_hi = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, false, false);
2083                 if (reg_hi == 0) {
2084                         pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
2085                                  " not writing HI\n", i << 2);
2086                         list->flags |= LIGHTREC_NO_HI;
2087                 }
2088
2089                 if (!reg_lo && !reg_hi) {
2090                         pr_debug("Both LO/HI unused in this block; they will "
2091                                  "probably be used in the parent block - removing "
2092                                  "flags.\n");
2093                         list->flags &= ~(LIGHTREC_NO_LO | LIGHTREC_NO_HI);
2094                 }
2095
2096                 if (reg_lo > 0 && reg_lo != REG_LO) {
2097                         pr_debug("Found register %s to hold LO (rs = %u, rt = %u)\n",
2098                                  lightrec_reg_name(reg_lo), list->r.rs, list->r.rt);
2099
2100                         lightrec_replace_lo_hi(block, i + 1, block->nb_ops, true);
2101                         list->r.rd = reg_lo;
2102                 } else {
2103                         list->r.rd = 0;
2104                 }
2105
2106                 if (reg_hi > 0 && reg_hi != REG_HI) {
2107                         pr_debug("Found register %s to hold HI (rs = %u, rt = %u)\n",
2108                                  lightrec_reg_name(reg_hi), list->r.rs, list->r.rt);
2109
2110                         lightrec_replace_lo_hi(block, i + 1, block->nb_ops, false);
2111                         list->r.imm = reg_hi;
2112                 } else {
2113                         list->r.imm = 0;
2114                 }
2115         }
2116
2117         return 0;
2118 }
2119
2120 static bool remove_div_sequence(struct block *block, unsigned int offset)
2121 {
2122         struct opcode *op;
2123         unsigned int i, found = 0;
2124
2125         /*
2126          * Scan for the zero-checking sequence that GCC automatically introduced
2127          * after most DIV/DIVU opcodes. This sequence checks the value of the
2128          * divisor, and if zero, executes a BREAK opcode, causing the BIOS
2129          * handler to crash the PS1.
2130          *
2131          * For DIV opcodes, this sequence additionally checks that the signed
2132          * operation does not overflow.
2133          *
2134          * Assuming that games never crashed the PS1, we can infer that they
2135          * never divided by zero or overflowed, and that these check sequences
2136          * can safely be removed.
2137          */
2138
2139         for (i = offset; i < block->nb_ops; i++) {
2140                 op = &block->opcode_list[i];
2141
2142                 if (!found) {
2143                         if (op->i.op == OP_SPECIAL &&
2144                             (op->r.op == OP_SPECIAL_DIV || op->r.op == OP_SPECIAL_DIVU))
2145                                 break;
2146
2147                         if ((op->opcode & 0xfc1fffff) == 0x14000002) {
2148                                 /* BNE ???, zero, +8 */
2149                                 found++;
2150                         } else {
2151                                 offset++;
2152                         }
2153                 } else if (found == 1 && !op->opcode) {
2154                         /* NOP */
2155                         found++;
2156                 } else if (found == 2 && op->opcode == 0x0007000d) {
2157                         /* BREAK 0x1c00 */
2158                         found++;
2159                 } else if (found == 3 && op->opcode == 0x2401ffff) {
2160                         /* LI at, -1 */
2161                         found++;
2162                 } else if (found == 4 && (op->opcode & 0xfc1fffff) == 0x14010004) {
2163                         /* BNE ???, at, +16 */
2164                         found++;
2165                 } else if (found == 5 && op->opcode == 0x3c018000) {
2166                         /* LUI at, 0x8000 */
2167                         found++;
2168                 } else if (found == 6 && (op->opcode & 0xfc1fffff) == 0x14010002) {
2169                         /* BNE ???, at, +8 */
2170                         found++;
2171                 } else if (found == 7 && !op->opcode) {
2172                         /* NOP */
2173                         found++;
2174                 } else if (found == 8 && op->opcode == 0x0006000d) {
2175                         /* BREAK 0x1800 */
2176                         found++;
2177                         break;
2178                 } else {
2179                         break;
2180                 }
2181         }
2182
2183         if (found >= 3) {
2184                 if (found != 9)
2185                         found = 3;
2186
2187                 pr_debug("Removing DIV%s sequence at offset 0x%x\n",
2188                          found == 9 ? "" : "U", offset << 2);
2189
2190                 for (i = 0; i < found; i++)
2191                         block->opcode_list[offset + i].opcode = 0;
2192
2193                 return true;
2194         }
2195
2196         return false;
2197 }
2198
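/* Flag DIV/DIVU opcodes with LIGHTREC_NO_DIV_CHECK whenever the
 * GCC-generated zero/overflow check sequence that follows them could be
 * removed. */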
2199 static int lightrec_remove_div_by_zero_check_sequence(struct lightrec_state *state,
2200                                                       struct block *block)
2201 {
2202         struct opcode *op;
2203         unsigned int i;
2204
2205         for (i = 0; i < block->nb_ops; i++) {
2206                 op = &block->opcode_list[i];
2207
2208                 if (op->i.op == OP_SPECIAL &&
2209                     (op->r.op == OP_SPECIAL_DIVU || op->r.op == OP_SPECIAL_DIV) &&
2210                     remove_div_sequence(block, i + 1))
2211                         op->flags |= LIGHTREC_NO_DIV_CHECK;
2212         }
2213
2214         return 0;
2215 }
2216
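/* Opcode pattern of a simple word-fill (memset) loop. Blocks matching
 * it word for word are special-cased by lightrec_replace_memset(). */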
2217 static const u32 memset_code[] = {
2218         0x10a00006,     // beqz         a1, 2f
2219         0x24a2ffff,     // addiu        v0,a1,-1
2220         0x2403ffff,     // li           v1,-1
2221         0xac800000,     // 1: sw        zero,0(a0)
2222         0x2442ffff,     // addiu        v0,v0,-1
2223         0x1443fffd,     // bne          v0,v1, 1b
2224         0x24840004,     // addiu        a0,a0,4
2225         0x03e00008,     // 2: jr        ra
2226         0x00000000,     // nop
2227 };
2228
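/* If the block matches the word-fill pattern above exactly, flag it so
 * that it is executed through a native helper instead of being compiled.
 * Returns non-zero to skip the remaining optimizer passes. */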
2229 static int lightrec_replace_memset(struct lightrec_state *state, struct block *block)
2230 {
2231         unsigned int i;
2232         union code c;
2233
2234         for (i = 0; i < block->nb_ops; i++) {
2235                 c = block->opcode_list[i].c;
2236
2237                 if (c.opcode != memset_code[i])
2238                         return 0;
2239
2240                 if (i == ARRAY_SIZE(memset_code) - 1) {
2241                         /* success! */
2242                         pr_debug("Block at PC 0x%x is a memset\n", block->pc);
2243                         block_set_flags(block,
2244                                         BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE);
2245
2246                         /* Return non-zero to skip other optimizers. */
2247                         return 1;
2248                 }
2249         }
2250
2251         return 0;
2252 }
2253
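/* Flag the block with BLOCK_PRELOAD_PC when an opcode that needs the
 * current PC value (J/JAL, branch-and-link, non-local branches, JALR
 * with a destination register, SYSCALL/BREAK) is found before the first
 * SYNC point. */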
2254 static int lightrec_test_preload_pc(struct lightrec_state *state, struct block *block)
2255 {
2256         unsigned int i;
2257         union code c;
2258         u32 flags;
2259
2260         for (i = 0; i < block->nb_ops; i++) {
2261                 c = block->opcode_list[i].c;
2262                 flags = block->opcode_list[i].flags;
2263
2264                 if (op_flag_sync(flags))
2265                         break;
2266
2267                 switch (c.i.op) {
2268                 case OP_J:
2269                 case OP_JAL:
2270                         block->flags |= BLOCK_PRELOAD_PC;
2271                         return 0;
2272
2273                 case OP_REGIMM:
2274                         switch (c.r.rt) {
2275                         case OP_REGIMM_BLTZAL:
2276                         case OP_REGIMM_BGEZAL:
2277                                 block->flags |= BLOCK_PRELOAD_PC;
2278                                 return 0;
2279                         default:
2280                                 break;
2281                         }
2282                         fallthrough;
2283                 case OP_BEQ:
2284                 case OP_BNE:
2285                 case OP_BLEZ:
2286                 case OP_BGTZ:
2287                         if (!op_flag_local_branch(flags)) {
2288                                 block->flags |= BLOCK_PRELOAD_PC;
2289                                 return 0;
2290                         }
2291                         break;
2292                 case OP_SPECIAL:
2293                         switch (c.r.op) {
2294                         case OP_SPECIAL_JALR:
2295                                 if (c.r.rd) {
2296                                         block->flags |= BLOCK_PRELOAD_PC;
2297                                         return 0;
2298                                 }
2299                                 break;
2300                         case OP_SPECIAL_SYSCALL:
2301                         case OP_SPECIAL_BREAK:
2302                                 block->flags |= BLOCK_PRELOAD_PC;
2303                                 return 0;
2304                         default:
2305                                 break;
2306                         }
2307                         break;
2308                 }
2309         }
2310
2311         return 0;
2312 }
2313
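/* Optimizer passes, executed in order. Passes disabled at build time
 * through their OPT_* switch become NULL entries and are skipped. */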
2314 static int (*lightrec_optimizers[])(struct lightrec_state *state, struct block *) = {
2315         IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence),
2316         IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset),
2317         IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches),
2318         IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_handle_load_delays),
2319         IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_swap_load_delays),
2320         IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches),
2321         IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches),
2322         IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops),
2323         IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),
2324         IF_OPT(OPT_FLAG_IO, &lightrec_flag_io),
2325         IF_OPT(OPT_FLAG_MULT_DIV, &lightrec_flag_mults_divs),
2326         IF_OPT(OPT_EARLY_UNLOAD, &lightrec_early_unload),
2327         IF_OPT(OPT_PRELOAD_PC, &lightrec_test_preload_pc),
2328 };
2329
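/* Run every enabled optimizer pass on the block, in order. A pass
 * returning a non-zero value stops the chain, and that value is
 * propagated to the caller (see lightrec_replace_memset()). */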
2330 int lightrec_optimize(struct lightrec_state *state, struct block *block)
2331 {
2332         unsigned int i;
2333         int ret;
2334
2335         for (i = 0; i < ARRAY_SIZE(lightrec_optimizers); i++) {
2336                 if (lightrec_optimizers[i]) {
2337                         ret = (*lightrec_optimizers[i])(state, block);
2338                         if (ret)
2339                                 return ret;
2340                 }
2341         }
2342
2343         return 0;
2344 }