deps/lightrec/optimizer.c
1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3  * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
4  */
5
6 #include "constprop.h"
7 #include "lightrec-config.h"
8 #include "disassembler.h"
9 #include "lightrec.h"
10 #include "memmanager.h"
11 #include "optimizer.h"
12 #include "regcache.h"
13
14 #include <errno.h>
15 #include <stdbool.h>
16 #include <stdlib.h>
17 #include <string.h>
18
19 #define IF_OPT(opt, ptr) ((opt) ? (ptr) : NULL)
20
21 struct optimizer_list {
22         void (**optimizers)(struct opcode *);
23         unsigned int nb_optimizers;
24 };
25
26 static bool is_nop(union code op);
27
28 bool is_unconditional_jump(union code c)
29 {
30         switch (c.i.op) {
31         case OP_SPECIAL:
32                 return c.r.op == OP_SPECIAL_JR || c.r.op == OP_SPECIAL_JALR;
33         case OP_J:
34         case OP_JAL:
35                 return true;
36         case OP_BEQ:
37         case OP_BLEZ:
38                 return c.i.rs == c.i.rt;
39         case OP_REGIMM:
40                 return (c.r.rt == OP_REGIMM_BGEZ ||
41                         c.r.rt == OP_REGIMM_BGEZAL) && c.i.rs == 0;
42         default:
43                 return false;
44         }
45 }
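/* Examples of encodings caught above (illustrative):
 *   beq  $t0, $t0, off   - a register always equals itself
 *   blez $zero, off      - 0 <= 0 always holds
 *   bgez $zero, off      - 0 >= 0 always holds */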
46
47 bool is_syscall(union code c)
48 {
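        /* MTC0/CTC0 writes to the CP0 Status (12) or Cause (13)
         * registers can trigger an exception, so they get the same
         * treatment as SYSCALL. */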
49         return (c.i.op == OP_SPECIAL && c.r.op == OP_SPECIAL_SYSCALL) ||
50                 (c.i.op == OP_CP0 && (c.r.rs == OP_CP0_MTC0 ||
51                                         c.r.rs == OP_CP0_CTC0) &&
52                  (c.r.rd == 12 || c.r.rd == 13));
53 }
54
55 static u64 opcode_read_mask(union code op)
56 {
57         switch (op.i.op) {
58         case OP_SPECIAL:
59                 switch (op.r.op) {
60                 case OP_SPECIAL_SYSCALL:
61                 case OP_SPECIAL_BREAK:
62                         return 0;
63                 case OP_SPECIAL_JR:
64                 case OP_SPECIAL_JALR:
65                 case OP_SPECIAL_MTHI:
66                 case OP_SPECIAL_MTLO:
67                         return BIT(op.r.rs);
68                 case OP_SPECIAL_MFHI:
69                         return BIT(REG_HI);
70                 case OP_SPECIAL_MFLO:
71                         return BIT(REG_LO);
72                 case OP_SPECIAL_SLL:
73                         if (!op.r.imm)
74                                 return 0;
75                         fallthrough;
76                 case OP_SPECIAL_SRL:
77                 case OP_SPECIAL_SRA:
78                         return BIT(op.r.rt);
79                 default:
80                         return BIT(op.r.rs) | BIT(op.r.rt);
81                 }
82         case OP_CP0:
83                 switch (op.r.rs) {
84                 case OP_CP0_MTC0:
85                 case OP_CP0_CTC0:
86                         return BIT(op.r.rt);
87                 default:
88                         return 0;
89                 }
90         case OP_CP2:
91                 if (op.r.op == OP_CP2_BASIC) {
92                         switch (op.r.rs) {
93                         case OP_CP2_BASIC_MTC2:
94                         case OP_CP2_BASIC_CTC2:
95                                 return BIT(op.r.rt);
96                         default:
97                                 break;
98                         }
99                 }
100                 return 0;
101         case OP_J:
102         case OP_JAL:
103         case OP_LUI:
104                 return 0;
105         case OP_BEQ:
106                 if (op.i.rs == op.i.rt)
107                         return 0;
108                 fallthrough;
109         case OP_BNE:
110         case OP_LWL:
111         case OP_LWR:
112         case OP_SB:
113         case OP_SH:
114         case OP_SWL:
115         case OP_SW:
116         case OP_SWR:
117         case OP_META_LWU:
118         case OP_META_SWU:
119                 return BIT(op.i.rs) | BIT(op.i.rt);
120         case OP_META:
121                 return BIT(op.m.rs);
122         default:
123                 return BIT(op.i.rs);
124         }
125 }
126
127 static u64 mult_div_write_mask(union code op)
128 {
129         u64 flags;
130
131         if (!OPT_FLAG_MULT_DIV)
132                 return BIT(REG_LO) | BIT(REG_HI);
133
134         if (op.r.rd)
135                 flags = BIT(op.r.rd);
136         else
137                 flags = BIT(REG_LO);
138         if (op.r.imm)
139                 flags |= BIT(op.r.imm);
140         else
141                 flags |= BIT(REG_HI);
142
143         return flags;
144 }
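/* Illustration: a plain MULT/DIV writes BIT(REG_LO) | BIT(REG_HI). When
 * OPT_FLAG_MULT_DIV is enabled, a non-zero rd or imm field (repurposed
 * by the optimizer) redirects the LO or HI result to that general
 * register instead. */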
145
146 u64 opcode_write_mask(union code op)
147 {
148         switch (op.i.op) {
149         case OP_META_MULT2:
150         case OP_META_MULTU2:
151                 return mult_div_write_mask(op);
152         case OP_META:
153                 return BIT(op.m.rd);
154         case OP_SPECIAL:
155                 switch (op.r.op) {
156                 case OP_SPECIAL_JR:
157                 case OP_SPECIAL_SYSCALL:
158                 case OP_SPECIAL_BREAK:
159                         return 0;
160                 case OP_SPECIAL_MULT:
161                 case OP_SPECIAL_MULTU:
162                 case OP_SPECIAL_DIV:
163                 case OP_SPECIAL_DIVU:
164                         return mult_div_write_mask(op);
165                 case OP_SPECIAL_MTHI:
166                         return BIT(REG_HI);
167                 case OP_SPECIAL_MTLO:
168                         return BIT(REG_LO);
169                 case OP_SPECIAL_SLL:
170                         if (!op.r.imm)
171                                 return 0;
172                         fallthrough;
173                 default:
174                         return BIT(op.r.rd);
175                 }
176         case OP_ADDI:
177         case OP_ADDIU:
178         case OP_SLTI:
179         case OP_SLTIU:
180         case OP_ANDI:
181         case OP_ORI:
182         case OP_XORI:
183         case OP_LUI:
184         case OP_LB:
185         case OP_LH:
186         case OP_LWL:
187         case OP_LW:
188         case OP_LBU:
189         case OP_LHU:
190         case OP_LWR:
191         case OP_META_LWU:
192                 return BIT(op.i.rt);
193         case OP_JAL:
194                 return BIT(31);
195         case OP_CP0:
196                 switch (op.r.rs) {
197                 case OP_CP0_MFC0:
198                 case OP_CP0_CFC0:
199                         return BIT(op.i.rt);
200                 default:
201                         return 0;
202                 }
203         case OP_CP2:
204                 if (op.r.op == OP_CP2_BASIC) {
205                         switch (op.r.rs) {
206                         case OP_CP2_BASIC_MFC2:
207                         case OP_CP2_BASIC_CFC2:
208                                 return BIT(op.i.rt);
209                         default:
210                                 break;
211                         }
212                 }
213                 return 0;
214         case OP_REGIMM:
215                 switch (op.r.rt) {
216                 case OP_REGIMM_BLTZAL:
217                 case OP_REGIMM_BGEZAL:
218                         return BIT(31);
219                 default:
220                         return 0;
221                 }
222         default:
223                 return 0;
224         }
225 }
226
227 bool opcode_reads_register(union code op, u8 reg)
228 {
229         return opcode_read_mask(op) & BIT(reg);
230 }
231
232 bool opcode_writes_register(union code op, u8 reg)
233 {
234         return opcode_write_mask(op) & BIT(reg);
235 }
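/* Illustration: for "addu $v0, $a0, $a1" ($v0 = 2, $a0 = 4, $a1 = 5),
 * opcode_read_mask() is BIT(4) | BIT(5) and opcode_write_mask() is
 * BIT(2), so opcode_reads_register(op, 4) and
 * opcode_writes_register(op, 2) both return true. */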
236
237 static int find_prev_writer(const struct opcode *list, unsigned int offset, u8 reg)
238 {
239         union code c;
240         unsigned int i;
241
242         if (op_flag_sync(list[offset].flags))
243                 return -1;
244
245         for (i = offset; i > 0; i--) {
246                 c = list[i - 1].c;
247
248                 if (opcode_writes_register(c, reg)) {
249                         if (i > 1 && has_delay_slot(list[i - 2].c))
250                                 break;
251
252                         return i - 1;
253                 }
254
255                 if (op_flag_sync(list[i - 1].flags) ||
256                     has_delay_slot(c) ||
257                     opcode_reads_register(c, reg))
258                         break;
259         }
260
261         return -1;
262 }
263
264 static int find_next_reader(const struct opcode *list, unsigned int offset, u8 reg)
265 {
266         unsigned int i;
267         union code c;
268
269         if (op_flag_sync(list[offset].flags))
270                 return -1;
271
272         for (i = offset; ; i++) {
273                 c = list[i].c;
274
275                 if (opcode_reads_register(c, reg))
276                         return i;
277
278                 if (op_flag_sync(list[i].flags)
279                     || (op_flag_no_ds(list[i].flags) && has_delay_slot(c))
280                     || is_delay_slot(list, i)
281                     || opcode_writes_register(c, reg))
282                         break;
283         }
284
285         return -1;
286 }
287
288 static bool reg_is_dead(const struct opcode *list, unsigned int offset, u8 reg)
289 {
290         unsigned int i;
291
292         if (op_flag_sync(list[offset].flags) || is_delay_slot(list, offset))
293                 return false;
294
295         for (i = offset + 1; ; i++) {
296                 if (opcode_reads_register(list[i].c, reg))
297                         return false;
298
299                 if (opcode_writes_register(list[i].c, reg))
300                         return true;
301
302                 if (has_delay_slot(list[i].c)) {
303                         if (op_flag_no_ds(list[i].flags) ||
304                             opcode_reads_register(list[i + 1].c, reg))
305                                 return false;
306
307                         return opcode_writes_register(list[i + 1].c, reg);
308                 }
309         }
310 }
311
312 static bool reg_is_read(const struct opcode *list,
313                         unsigned int a, unsigned int b, u8 reg)
314 {
315         /* Return true if reg is read in one of the opcodes of the interval
316          * [a, b[ */
317         for (; a < b; a++) {
318                 if (!is_nop(list[a].c) && opcode_reads_register(list[a].c, reg))
319                         return true;
320         }
321
322         return false;
323 }
324
325 static bool reg_is_written(const struct opcode *list,
326                            unsigned int a, unsigned int b, u8 reg)
327 {
328         /* Return true if reg is written in one of the opcodes of the interval
329          * [a, b[ */
330
331         for (; a < b; a++) {
332                 if (!is_nop(list[a].c) && opcode_writes_register(list[a].c, reg))
333                         return true;
334         }
335
336         return false;
337 }
338
339 static bool reg_is_read_or_written(const struct opcode *list,
340                                    unsigned int a, unsigned int b, u8 reg)
341 {
342         return reg_is_read(list, a, b, reg) || reg_is_written(list, a, b, reg);
343 }
344
345 bool opcode_is_mfc(union code op)
346 {
347         switch (op.i.op) {
348         case OP_CP0:
349                 switch (op.r.rs) {
350                 case OP_CP0_MFC0:
351                 case OP_CP0_CFC0:
352                         return true;
353                 default:
354                         break;
355                 }
356
357                 break;
358         case OP_CP2:
359                 if (op.r.op == OP_CP2_BASIC) {
360                         switch (op.r.rs) {
361                         case OP_CP2_BASIC_MFC2:
362                         case OP_CP2_BASIC_CFC2:
363                                 return true;
364                         default:
365                                 break;
366                         }
367                 }
368
369                 break;
370         default:
371                 break;
372         }
373
374         return false;
375 }
376
377 bool opcode_is_load(union code op)
378 {
379         switch (op.i.op) {
380         case OP_LB:
381         case OP_LH:
382         case OP_LWL:
383         case OP_LW:
384         case OP_LBU:
385         case OP_LHU:
386         case OP_LWR:
387         case OP_LWC2:
388         case OP_META_LWU:
389                 return true;
390         default:
391                 return false;
392         }
393 }
394
395 static bool opcode_is_store(union code op)
396 {
397         switch (op.i.op) {
398         case OP_SB:
399         case OP_SH:
400         case OP_SW:
401         case OP_SWL:
402         case OP_SWR:
403         case OP_SWC2:
404         case OP_META_SWU:
405                 return true;
406         default:
407                 return false;
408         }
409 }
410
411 static u8 opcode_get_io_size(union code op)
412 {
413         switch (op.i.op) {
414         case OP_LB:
415         case OP_LBU:
416         case OP_SB:
417                 return 8;
418         case OP_LH:
419         case OP_LHU:
420         case OP_SH:
421                 return 16;
422         default:
423                 return 32;
424         }
425 }
426
427 bool opcode_is_io(union code op)
428 {
429         return opcode_is_load(op) || opcode_is_store(op);
430 }
431
432 /* TODO: Complete */
433 static bool is_nop(union code op)
434 {
435         if (opcode_writes_register(op, 0)) {
436                 switch (op.i.op) {
437                 case OP_CP0:
438                         return op.r.rs != OP_CP0_MFC0;
439                 case OP_LB:
440                 case OP_LH:
441                 case OP_LWL:
442                 case OP_LW:
443                 case OP_LBU:
444                 case OP_LHU:
445                 case OP_LWR:
446                 case OP_META_LWU:
447                         return false;
448                 default:
449                         return true;
450                 }
451         }
452
453         switch (op.i.op) {
454         case OP_SPECIAL:
455                 switch (op.r.op) {
456                 case OP_SPECIAL_AND:
457                         return op.r.rd == op.r.rt && op.r.rd == op.r.rs;
458                 case OP_SPECIAL_ADD:
459                 case OP_SPECIAL_ADDU:
460                         return (op.r.rd == op.r.rt && op.r.rs == 0) ||
461                                 (op.r.rd == op.r.rs && op.r.rt == 0);
462                 case OP_SPECIAL_SUB:
463                 case OP_SPECIAL_SUBU:
464                         return op.r.rd == op.r.rs && op.r.rt == 0;
465                 case OP_SPECIAL_OR:
466                         if (op.r.rd == op.r.rt)
467                                 return op.r.rd == op.r.rs || op.r.rs == 0;
468                         else
469                                 return (op.r.rd == op.r.rs) && op.r.rt == 0;
470                 case OP_SPECIAL_SLL:
471                 case OP_SPECIAL_SRA:
472                 case OP_SPECIAL_SRL:
473                         return op.r.rd == op.r.rt && op.r.imm == 0;
474                 case OP_SPECIAL_MFHI:
475                 case OP_SPECIAL_MFLO:
476                         return op.r.rd == 0;
477                 default:
478                         return false;
479                 }
480         case OP_ORI:
481         case OP_ADDI:
482         case OP_ADDIU:
483                 return op.i.rt == op.i.rs && op.i.imm == 0;
484         case OP_BGTZ:
485                 return (op.i.rs == 0 || op.i.imm == 1);
486         case OP_REGIMM:
487                 /* BLTZAL writes $ra even when not taken, so only BLTZ here */
488                 return op.r.rt == OP_REGIMM_BLTZ &&
489                         (op.i.rs == 0 || op.i.imm == 1);
490         case OP_BNE:
491                 return (op.i.rs == op.i.rt || op.i.imm == 1);
492         default:
493                 return false;
494         }
495 }
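/* Examples of opcodes classified as NOPs (illustrative):
 *   sll   $zero, $t0, 4   - writes only $zero
 *   or    $t1, $t1, $t1   - OR of a register with itself
 *   addiu $t2, $t2, 0     - adds zero in place */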
496
497 static void lightrec_optimize_sll_sra(struct opcode *list, unsigned int offset,
498                                       struct constprop_data *v)
499 {
500         struct opcode *ldop = NULL, *curr = &list[offset], *next;
501         struct opcode *to_change, *to_nop;
502         int idx, idx2;
503
504         if (curr->r.imm != 24 && curr->r.imm != 16)
505                 return;
506
507         if (is_delay_slot(list, offset))
508                 return;
509
510         idx = find_next_reader(list, offset + 1, curr->r.rd);
511         if (idx < 0)
512                 return;
513
514         next = &list[idx];
515
516         if (next->i.op != OP_SPECIAL || next->r.op != OP_SPECIAL_SRA ||
517             next->r.imm != curr->r.imm || next->r.rt != curr->r.rd)
518                 return;
519
520         if (curr->r.rd != curr->r.rt && next->r.rd != next->r.rt) {
521                 /* sll rY, rX, 16
522                  * ...
523                  * sra rZ, rY, 16 */
524
525                 if (!reg_is_dead(list, idx, curr->r.rd) ||
526                     reg_is_read_or_written(list, offset, idx, next->r.rd))
527                         return;
528
529                 /* If rY is dead after the SRA, and rZ is not used after the SLL,
530                  * we can change rY to rZ */
531
532                 pr_debug("Detected SLL/SRA with middle temp register\n");
533                 curr->r.rd = next->r.rd;
534                 next->r.rt = curr->r.rd;
535         }
536
537         /* We got an SLL/SRA combo. If imm #16, that's a cast to s16.
538          * If imm #24, that's a cast to s8.
539          *
540          * First of all, make sure that the target register of the SLL is not
541          * read after the SRA. */
542
543         if (curr->r.rd == curr->r.rt) {
544                 /* sll rX, rX, 16
545                  * ...
546                  * sra rY, rX, 16 */
547                 to_change = next;
548                 to_nop = curr;
549
550                 /* rX is used after the SRA - we cannot convert it. */
551                 if (curr->r.rd != next->r.rd && !reg_is_dead(list, idx, curr->r.rd))
552                         return;
553         } else {
554                 /* sll rY, rX, 16
555                  * ...
556                  * sra rY, rY, 16 */
557                 to_change = curr;
558                 to_nop = next;
559         }
560
561         idx2 = find_prev_writer(list, offset, curr->r.rt);
562         if (idx2 >= 0) {
563                 /* Note that PSX games sometimes do casts after
564                  * a LHU or LBU; in this case we can change the
565                  * load opcode to a LH or LB, and the cast can
566                  * be changed to a MOV or a simple NOP. */
567
568                 ldop = &list[idx2];
569
570                 if (next->r.rd != ldop->i.rt &&
571                     !reg_is_dead(list, idx, ldop->i.rt))
572                         ldop = NULL;
573                 else if (curr->r.imm == 16 && ldop->i.op == OP_LHU)
574                         ldop->i.op = OP_LH;
575                 else if (curr->r.imm == 24 && ldop->i.op == OP_LBU)
576                         ldop->i.op = OP_LB;
577                 else
578                         ldop = NULL;
579
580                 if (ldop) {
581                         if (next->r.rd == ldop->i.rt) {
582                                 to_change->opcode = 0;
583                         } else if (reg_is_dead(list, idx, ldop->i.rt) &&
584                                    !reg_is_read_or_written(list, idx2 + 1, idx, next->r.rd)) {
585                                 /* The target register of the SRA is dead after the
586                                  * LBU/LHU; we can change the target register of the
587                                  * LBU/LHU to the one of the SRA. */
588                                 v[ldop->i.rt].known = 0;
589                                 v[ldop->i.rt].sign = 0;
590                                 ldop->i.rt = next->r.rd;
591                                 to_change->opcode = 0;
592                         } else {
593                                 to_change->i.op = OP_META;
594                                 to_change->m.op = OP_META_MOV;
595                                 to_change->m.rd = next->r.rd;
596                                 to_change->m.rs = ldop->i.rt;
597                         }
598
599                         if (to_nop->r.imm == 24)
600                                 pr_debug("Convert LBU+SLL+SRA to LB\n");
601                         else
602                                 pr_debug("Convert LHU+SLL+SRA to LH\n");
603
604                         v[ldop->i.rt].known = 0;
605                         v[ldop->i.rt].sign = 0xffffff80 << (24 - curr->r.imm);
606                 }
607         }
608
609         if (!ldop) {
610                 pr_debug("Convert SLL/SRA #%u to EXT%c\n",
611                          curr->r.imm, curr->r.imm == 24 ? 'C' : 'S');
612
613                 to_change->m.rs = curr->r.rt;
614                 to_change->m.op = to_nop->r.imm == 24 ? OP_META_EXTC : OP_META_EXTS;
615                 to_change->i.op = OP_META;
616         }
617
618         to_nop->opcode = 0;
619 }
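/* Example of the transformation above (illustrative):
 *   lhu $t0, 0($a0)              lh  $t0, 0($a0)
 *   sll $t0, $t0, 16      -->    nop
 *   sra $t0, $t0, 16             nop
 * Without a preceding LHU/LBU, the SLL/SRA pair instead collapses into
 * a single META EXTS (imm 16) or EXTC (imm 24) opcode. */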
620
621 static void
622 lightrec_remove_useless_lui(struct block *block, unsigned int offset,
623                             const struct constprop_data *v)
624 {
625         struct opcode *list = block->opcode_list,
626                       *op = &block->opcode_list[offset];
627         int reader;
628
629         if (!op_flag_sync(op->flags) && is_known(v, op->i.rt) &&
630             v[op->i.rt].value == op->i.imm << 16) {
631                 pr_debug("Converting duplicated LUI to NOP\n");
632                 op->opcode = 0x0;
633                 return;
634         }
635
636         if (op->i.imm != 0 || op->i.rt == 0 || offset == block->nb_ops - 1)
637                 return;
638
639         reader = find_next_reader(list, offset + 1, op->i.rt);
640         if (reader <= 0)
641                 return;
642
643         if (opcode_writes_register(list[reader].c, op->i.rt) ||
644             reg_is_dead(list, reader, op->i.rt)) {
645                 pr_debug("Removing useless LUI 0x0\n");
646
647                 if (list[reader].i.rs == op->i.rt)
648                         list[reader].i.rs = 0;
649                 if (list[reader].i.op == OP_SPECIAL &&
650                     list[reader].i.rt == op->i.rt)
651                         list[reader].i.rt = 0;
652                 op->opcode = 0x0;
653         }
654 }
655
656 static void lightrec_lui_to_movi(struct block *block, unsigned int offset)
657 {
658         struct opcode *ori, *lui = &block->opcode_list[offset];
659         int next;
660
661         if (lui->i.op != OP_LUI)
662                 return;
663
664         next = find_next_reader(block->opcode_list, offset + 1, lui->i.rt);
665         if (next > 0) {
666                 ori = &block->opcode_list[next];
667
668                 switch (ori->i.op) {
669                 case OP_ORI:
670                 case OP_ADDI:
671                 case OP_ADDIU:
672                         if (ori->i.rs == ori->i.rt && ori->i.imm) {
673                                 ori->flags |= LIGHTREC_MOVI;
674                                 lui->flags |= LIGHTREC_MOVI;
675                         }
676                         break;
677                 }
678         }
679 }
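/* Example pair tagged with LIGHTREC_MOVI (illustrative):
 *   lui $t0, 0x8001
 *   ori $t0, $t0, 0xa010
 * which together load the 32-bit constant 0x8001a010 and can be
 * treated as a single "load immediate". */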
680
681 static void lightrec_modify_lui(struct block *block, unsigned int offset)
682 {
683         union code c, *lui = &block->opcode_list[offset].c;
684         bool stop = false, stop_next = false;
685         unsigned int i;
686
687         for (i = offset + 1; !stop && i < block->nb_ops; i++) {
688                 c = block->opcode_list[i].c;
689                 stop = stop_next;
690
691                 if ((opcode_is_store(c) && c.i.rt == lui->i.rt)
692                     || (!opcode_is_load(c) && opcode_reads_register(c, lui->i.rt)))
693                         break;
694
695                 if (opcode_writes_register(c, lui->i.rt)) {
696                         if (c.i.op == OP_LWL || c.i.op == OP_LWR) {
697                                 /* LWL/LWR only partially write their target register;
698                                  * therefore the LUI should not write a different value. */
699                                 break;
700                         }
701
702                         pr_debug("Convert LUI at offset 0x%x to kuseg\n",
703                                  offset << 2);
704                         lui->i.imm = kunseg(lui->i.imm << 16) >> 16;
705                         break;
706                 }
707
708                 if (has_delay_slot(c))
709                         stop_next = true;
710         }
711 }
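/* Example (illustrative): when the register set by "lui $t0, 0x8012" is
 * only ever consumed as a load/store address, the immediate becomes
 * kunseg(0x80120000) >> 16 == 0x0012, pointing directly into kuseg. */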
712
713 static int lightrec_transform_branches(struct lightrec_state *state,
714                                        struct block *block)
715 {
716         struct opcode *op;
717         unsigned int i;
718         s32 offset;
719
720         for (i = 0; i < block->nb_ops; i++) {
721                 op = &block->opcode_list[i];
722
723                 switch (op->i.op) {
724                 case OP_J:
725                         /* Transform J opcode into BEQ $zero, $zero if possible. */
726                         offset = (s32)((block->pc & 0xf0000000) >> 2 | op->j.imm)
727                                 - (s32)(block->pc >> 2) - (s32)i - 1;
728
729                         if (offset == (s16)offset) {
730                                 pr_debug("Transform J into BEQ $zero, $zero\n");
731                                 op->i.op = OP_BEQ;
732                                 op->i.rs = 0;
733                                 op->i.rt = 0;
734                                 op->i.imm = offset;
735
736                         }
737                         fallthrough;
738                 default:
739                         break;
740                 }
741         }
742
743         return 0;
744 }
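/* Worked example (illustrative): for a block at PC 0x80030000, a
 * "j 0x80030040" in the first slot (i == 0) yields
 *   offset = 0x80030040 / 4 - 0x80030000 / 4 - 0 - 1 = 15,
 * which fits in a s16, so the J becomes "beq $zero, $zero, +15". */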
745
746 static inline bool is_power_of_two(u32 value)
747 {
748         return popcount32(value) == 1;
749 }
750
751 static void lightrec_patch_known_zero(struct opcode *op,
752                                       const struct constprop_data *v)
753 {
754         switch (op->i.op) {
755         case OP_SPECIAL:
756                 switch (op->r.op) {
757                 case OP_SPECIAL_JR:
758                 case OP_SPECIAL_JALR:
759                 case OP_SPECIAL_MTHI:
760                 case OP_SPECIAL_MTLO:
761                         if (is_known_zero(v, op->r.rs))
762                                 op->r.rs = 0;
763                         break;
764                 default:
765                         if (is_known_zero(v, op->r.rs))
766                                 op->r.rs = 0;
767                         fallthrough;
768                 case OP_SPECIAL_SLL:
769                 case OP_SPECIAL_SRL:
770                 case OP_SPECIAL_SRA:
771                         if (is_known_zero(v, op->r.rt))
772                                 op->r.rt = 0;
773                         break;
774                 case OP_SPECIAL_SYSCALL:
775                 case OP_SPECIAL_BREAK:
776                 case OP_SPECIAL_MFHI:
777                 case OP_SPECIAL_MFLO:
778                         break;
779                 }
780                 break;
781         case OP_CP0:
782                 switch (op->r.rs) {
783                 case OP_CP0_MTC0:
784                 case OP_CP0_CTC0:
785                         if (is_known_zero(v, op->r.rt))
786                                 op->r.rt = 0;
787                         break;
788                 default:
789                         break;
790                 }
791                 break;
792         case OP_CP2:
793                 if (op->r.op == OP_CP2_BASIC) {
794                         switch (op->r.rs) {
795                         case OP_CP2_BASIC_MTC2:
796                         case OP_CP2_BASIC_CTC2:
797                                 if (is_known_zero(v, op->r.rt))
798                                         op->r.rt = 0;
799                                 break;
800                         default:
801                                 break;
802                         }
803                 }
804                 break;
805         case OP_BEQ:
806         case OP_BNE:
807                 if (is_known_zero(v, op->i.rt))
808                         op->i.rt = 0;
809                 fallthrough;
810         case OP_REGIMM:
811         case OP_BLEZ:
812         case OP_BGTZ:
813         case OP_ADDI:
814         case OP_ADDIU:
815         case OP_SLTI:
816         case OP_SLTIU:
817         case OP_ANDI:
818         case OP_ORI:
819         case OP_XORI:
820         case OP_META_MULT2:
821         case OP_META_MULTU2:
822         case OP_META:
823                 if (is_known_zero(v, op->m.rs))
824                         op->m.rs = 0;
825                 break;
826         case OP_SB:
827         case OP_SH:
828         case OP_SWL:
829         case OP_SW:
830         case OP_SWR:
831         case OP_META_SWU:
832                 if (is_known_zero(v, op->i.rt))
833                         op->i.rt = 0;
834                 fallthrough;
835         case OP_LB:
836         case OP_LH:
837         case OP_LWL:
838         case OP_LW:
839         case OP_LBU:
840         case OP_LHU:
841         case OP_LWR:
842         case OP_LWC2:
843         case OP_SWC2:
844         case OP_META_LWU:
845                 if (is_known(v, op->i.rs)
846                     && kunseg(v[op->i.rs].value) == 0)
847                         op->i.rs = 0;
848                 break;
849         default:
850                 break;
851         }
852 }
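/* Example (illustrative): once constant propagation proves $t1 == 0,
 * "addu $v0, $t1, $t2" is patched into "addu $v0, $zero, $t2", which
 * lightrec_transform_ops() below then reduces to a MOV meta-opcode. */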
853
854 static void lightrec_reset_syncs(struct block *block)
855 {
856         struct opcode *op, *list = block->opcode_list;
857         unsigned int i;
858         s32 offset;
859
860         for (i = 0; i < block->nb_ops; i++)
861                 list[i].flags &= ~LIGHTREC_SYNC;
862
863         for (i = 0; i < block->nb_ops; i++) {
864                 op = &list[i];
865
866                 if (has_delay_slot(op->c)) {
867                         if (op_flag_local_branch(op->flags)) {
868                                 offset = i + 1 - op_flag_no_ds(op->flags) + (s16)op->i.imm;
869                                 list[offset].flags |= LIGHTREC_SYNC;
870                         }
871
872                         if (op_flag_emulate_branch(op->flags) && i + 2 < block->nb_ops)
873                                 list[i + 2].flags |= LIGHTREC_SYNC;
874                 }
875         }
876 }
877
878 static void maybe_remove_load_delay(struct opcode *op)
879 {
880         if (op_flag_load_delay(op->flags) && opcode_is_load(op->c))
881                 op->flags &= ~LIGHTREC_LOAD_DELAY;
882 }
883
884 static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
885 {
886         struct opcode *op, *list = block->opcode_list;
887         struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
888         unsigned int i;
889         bool local;
890         int idx;
891         u8 tmp;
892
893         for (i = 0; i < block->nb_ops; i++) {
894                 op = &list[i];
895
896                 lightrec_consts_propagate(block, i, v);
897
898                 lightrec_patch_known_zero(op, v);
899
900                 /* Transform all opcodes detected as useless to real NOPs
901                  * (0x0: SLL r0, r0, #0) */
902                 if (op->opcode != 0 && is_nop(op->c)) {
903                         pr_debug("Converting useless opcode 0x%08x to NOP\n",
904                                         op->opcode);
905                         op->opcode = 0x0;
906                 }
907
908                 if (!op->opcode)
909                         continue;
910
911                 switch (op->i.op) {
912                 case OP_BEQ:
913                         if (op->i.rs == op->i.rt ||
914                             (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
915                              v[op->i.rs].value == v[op->i.rt].value)) {
916                                 if (op->i.rs != op->i.rt)
917                                         pr_debug("Found always-taken BEQ\n");
918
919                                 op->i.rs = 0;
920                                 op->i.rt = 0;
921                         } else if (v[op->i.rs].known & v[op->i.rt].known &
922                                    (v[op->i.rs].value ^ v[op->i.rt].value)) {
923                                 pr_debug("Found never-taken BEQ\n");
924
925                                 if (!op_flag_no_ds(op->flags))
926                                         maybe_remove_load_delay(&list[i + 1]);
927
928                                 local = op_flag_local_branch(op->flags);
929                                 op->opcode = 0;
930                                 op->flags = 0;
931
932                                 if (local)
933                                         lightrec_reset_syncs(block);
934                         } else if (op->i.rs == 0) {
935                                 op->i.rs = op->i.rt;
936                                 op->i.rt = 0;
937                         }
938                         break;
939
940                 case OP_BNE:
941                         if (v[op->i.rs].known & v[op->i.rt].known &
942                             (v[op->i.rs].value ^ v[op->i.rt].value)) {
943                                 pr_debug("Found always-taken BNE\n");
944
945                                 op->i.op = OP_BEQ;
946                                 op->i.rs = 0;
947                                 op->i.rt = 0;
948                         } else if (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
949                                    v[op->i.rs].value == v[op->i.rt].value) {
950                                 pr_debug("Found never-taken BNE\n");
951
952                                 if (!op_flag_no_ds(op->flags))
953                                         maybe_remove_load_delay(&list[i + 1]);
954
955                                 local = op_flag_local_branch(op->flags);
956                                 op->opcode = 0;
957                                 op->flags = 0;
958
959                                 if (local)
960                                         lightrec_reset_syncs(block);
961                         } else if (op->i.rs == 0) {
962                                 op->i.rs = op->i.rt;
963                                 op->i.rt = 0;
964                         }
965                         break;
966
967                 case OP_BLEZ:
968                         if (v[op->i.rs].known & BIT(31) &&
969                             v[op->i.rs].value & BIT(31)) {
970                                 pr_debug("Found always-taken BLEZ\n");
971
972                                 op->i.op = OP_BEQ;
973                                 op->i.rs = 0;
974                                 op->i.rt = 0;
975                         }
976                         break;
977
978                 case OP_BGTZ:
979                         if (v[op->i.rs].known & BIT(31) &&
980                             v[op->i.rs].value & BIT(31)) {
981                                 pr_debug("Found never-taken BGTZ\n");
982
983                                 if (!op_flag_no_ds(op->flags))
984                                         maybe_remove_load_delay(&list[i + 1]);
985
986                                 local = op_flag_local_branch(op->flags);
987                                 op->opcode = 0;
988                                 op->flags = 0;
989
990                                 if (local)
991                                         lightrec_reset_syncs(block);
992                         }
993                         break;
994
995                 case OP_LUI:
996                         if (i == 0 || !has_delay_slot(list[i - 1].c))
997                                 lightrec_modify_lui(block, i);
998                         lightrec_remove_useless_lui(block, i, v);
999                         if (i == 0 || !has_delay_slot(list[i - 1].c))
1000                                 lightrec_lui_to_movi(block, i);
1001                         break;
1002
1003                 /* Transform ORI/ADDI/ADDIU with imm #0 or OR/ADD/ADDU/SUB/SUBU
1004                  * with register $zero to the MOV meta-opcode */
1005                 case OP_ORI:
1006                 case OP_ADDI:
1007                 case OP_ADDIU:
1008                         if (op->i.imm == 0) {
1009                                 pr_debug("Convert ORI/ADDI/ADDIU #0 to MOV\n");
1010                                 op->m.rd = op->i.rt;
1011                                 op->m.op = OP_META_MOV;
1012                                 op->i.op = OP_META;
1013                         }
1014                         break;
1015                 case OP_ANDI:
1016                         if (bits_are_known_zero(v, op->i.rs, ~op->i.imm)) {
1017                                 pr_debug("Found useless ANDI 0x%x\n", op->i.imm);
1018
1019                                 if (op->i.rs == op->i.rt) {
1020                                         op->opcode = 0;
1021                                 } else {
1022                                         op->m.rd = op->i.rt;
1023                                         op->m.op = OP_META_MOV;
1024                                         op->i.op = OP_META;
1025                                 }
1026                         }
1027                         break;
1028                 case OP_LWL:
1029                 case OP_LWR:
1030                         if (i == 0 || !has_delay_slot(list[i - 1].c)) {
1031                                 idx = find_next_reader(list, i + 1, op->i.rt);
1032                                 if (idx > 0 && list[idx].i.op == (op->i.op ^ 0x4)
1033                                     && list[idx].i.rs == op->i.rs
1034                                     && list[idx].i.rt == op->i.rt
1035                                     && abs((s16)op->i.imm - (s16)list[idx].i.imm) == 3) {
1036                                         /* Replace a LWL/LWR combo with a META_LWU */
1037                                         if (op->i.op == OP_LWL)
1038                                                 op->i.imm -= 3;
1039                                         op->i.op = OP_META_LWU;
1040                                         list[idx].opcode = 0;
1041                                         pr_debug("Convert LWL/LWR to LWU\n");
1042                                 }
1043                         }
1044                         break;
1045                 case OP_SWL:
1046                 case OP_SWR:
1047                         if (i == 0 || !has_delay_slot(list[i - 1].c)) {
1048                                 idx = find_next_reader(list, i + 1, op->i.rt);
1049                                 if (idx > 0 && list[idx].i.op == (op->i.op ^ 0x4)
1050                                     && list[idx].i.rs == op->i.rs
1051                                     && list[idx].i.rt == op->i.rt
1052                                     && abs((s16)op->i.imm - (s16)list[idx].i.imm) == 3) {
1053                                         /* Replace a SWL/SWR combo with a META_SWU */
1054                                         if (op->i.op == OP_SWL)
1055                                                 op->i.imm -= 3;
1056                                         op->i.op = OP_META_SWU;
1057                                         list[idx].opcode = 0;
1058                                         pr_debug("Convert SWL/SWR to SWU\n");
1059                                 }
1060                         }
1061                         break;
1062                 case OP_REGIMM:
1063                         switch (op->r.rt) {
1064                         case OP_REGIMM_BLTZ:
1065                         case OP_REGIMM_BGEZ:
1066                                 if (!(v[op->r.rs].known & BIT(31)))
1067                                         break;
1068
1069                                 if (!!(v[op->r.rs].value & BIT(31))
1070                                     ^ (op->r.rt == OP_REGIMM_BGEZ)) {
1071                                         pr_debug("Found always-taken BLTZ/BGEZ\n");
1072                                         op->i.op = OP_BEQ;
1073                                         op->i.rs = 0;
1074                                         op->i.rt = 0;
1075                                 } else {
1076                                         pr_debug("Found never-taken BLTZ/BGEZ\n");
1077
1078                                         if (!op_flag_no_ds(op->flags))
1079                                                 maybe_remove_load_delay(&list[i + 1]);
1080
1081                                         local = op_flag_local_branch(op->flags);
1082                                         op->opcode = 0;
1083                                         op->flags = 0;
1084
1085                                         if (local)
1086                                                 lightrec_reset_syncs(block);
1087                                 }
1088                                 break;
1089                         case OP_REGIMM_BLTZAL:
1090                         case OP_REGIMM_BGEZAL:
1091                                 /* TODO: Detect always-taken and replace with JAL */
1092                                 break;
1093                         }
1094                         break;
1095                 case OP_SPECIAL:
1096                         switch (op->r.op) {
1097                         case OP_SPECIAL_SRAV:
1098                                 if ((v[op->r.rs].known & 0x1f) != 0x1f)
1099                                         break;
1100
1101                                 pr_debug("Convert SRAV to SRA\n");
1102                                 op->r.imm = v[op->r.rs].value & 0x1f;
1103                                 op->r.op = OP_SPECIAL_SRA;
1104
1105                                 fallthrough;
1106                         case OP_SPECIAL_SRA:
1107                                 if (op->r.imm == 0) {
1108                                         pr_debug("Convert SRA #0 to MOV\n");
1109                                         op->m.rs = op->r.rt;
1110                                         op->m.op = OP_META_MOV;
1111                                         op->i.op = OP_META;
1112                                         break;
1113                                 }
1114                                 break;
1115
1116                         case OP_SPECIAL_SLLV:
1117                                 if ((v[op->r.rs].known & 0x1f) != 0x1f)
1118                                         break;
1119
1120                                 pr_debug("Convert SLLV to SLL\n");
1121                                 op->r.imm = v[op->r.rs].value & 0x1f;
1122                                 op->r.op = OP_SPECIAL_SLL;
1123
1124                                 fallthrough;
1125                         case OP_SPECIAL_SLL:
1126                                 if (op->r.imm == 0) {
1127                                         pr_debug("Convert SLL #0 to MOV\n");
1128                                         op->m.rs = op->r.rt;
1129                                         op->m.op = OP_META_MOV;
1130                                         op->i.op = OP_META;
1131                                 }
1132
1133                                 lightrec_optimize_sll_sra(block->opcode_list, i, v);
1134                                 break;
1135
1136                         case OP_SPECIAL_SRLV:
1137                                 if ((v[op->r.rs].known & 0x1f) != 0x1f)
1138                                         break;
1139
1140                                 pr_debug("Convert SRLV to SRL\n");
1141                                 op->r.imm = v[op->r.rs].value & 0x1f;
1142                                 op->r.op = OP_SPECIAL_SRL;
1143
1144                                 fallthrough;
1145                         case OP_SPECIAL_SRL:
1146                                 if (op->r.imm == 0) {
1147                                         pr_debug("Convert SRL #0 to MOV\n");
1148                                         op->m.rs = op->r.rt;
1149                                         op->m.op = OP_META_MOV;
1150                                         op->i.op = OP_META;
1151                                 }
1152                                 break;
1153
1154                         case OP_SPECIAL_MULT:
1155                         case OP_SPECIAL_MULTU:
1156                                 if (is_known(v, op->r.rs) &&
1157                                     is_power_of_two(v[op->r.rs].value)) {
1158                                         tmp = op->c.i.rs;
1159                                         op->c.i.rs = op->c.i.rt;
1160                                         op->c.i.rt = tmp;
1161                                 } else if (!is_known(v, op->r.rt) ||
1162                                            !is_power_of_two(v[op->r.rt].value)) {
1163                                         break;
1164                                 }
1165
1166                                 pr_debug("Multiply by power-of-two: %u\n",
1167                                          v[op->r.rt].value);
1168
1169                                 if (op->r.op == OP_SPECIAL_MULT)
1170                                         op->i.op = OP_META_MULT2;
1171                                 else
1172                                         op->i.op = OP_META_MULTU2;
1173
1174                                 op->r.op = ctz32(v[op->r.rt].value);
1175                                 break;
1176                         case OP_SPECIAL_NOR:
1177                                 if (op->r.rs == 0 || op->r.rt == 0) {
1178                                         pr_debug("Convert NOR $zero to COM\n");
1179                                         op->i.op = OP_META;
1180                                         op->m.op = OP_META_COM;
1181                                         if (!op->m.rs)
1182                                                 op->m.rs = op->r.rt;
1183                                 }
1184                                 break;
1185                         case OP_SPECIAL_OR:
1186                         case OP_SPECIAL_ADD:
1187                         case OP_SPECIAL_ADDU:
1188                                 if (op->r.rs == 0) {
1189                                         pr_debug("Convert OR/ADD $zero to MOV\n");
1190                                         op->m.rs = op->r.rt;
1191                                         op->m.op = OP_META_MOV;
1192                                         op->i.op = OP_META;
1193                                 }
1194                                 fallthrough;
1195                         case OP_SPECIAL_SUB:
1196                         case OP_SPECIAL_SUBU:
1197                                 if (op->r.rt == 0) {
1198                                         pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
1199                                         op->m.op = OP_META_MOV;
1200                                         op->i.op = OP_META;
1201                                 }
1202                                 fallthrough;
1203                         default:
1204                                 break;
1205                         }
1206                         fallthrough;
1207                 default:
1208                         break;
1209                 }
1210         }
1211
1212         return 0;
1213 }
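/* Example of the MULT strength reduction above (illustrative): with $t1
 * known to hold 8, "mult $t0, $t1" becomes META_MULT2 with a shift
 * amount of ctz32(8) == 3, i.e. the 64-bit multiply is replaced by a
 * shift by 3. */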
1214
1215 static bool lightrec_can_switch_delay_slot(union code op, union code next_op)
1216 {
1217         switch (op.i.op) {
1218         case OP_SPECIAL:
1219                 switch (op.r.op) {
1220                 case OP_SPECIAL_JALR:
1221                         if (opcode_reads_register(next_op, op.r.rd) ||
1222                             opcode_writes_register(next_op, op.r.rd))
1223                                 return false;
1224                         fallthrough;
1225                 case OP_SPECIAL_JR:
1226                         if (opcode_writes_register(next_op, op.r.rs))
1227                                 return false;
1228                         fallthrough;
1229                 default:
1230                         break;
1231                 }
1232                 fallthrough;
1233         case OP_J:
1234                 break;
1235         case OP_JAL:
1236                 if (opcode_reads_register(next_op, 31) ||
1237                     opcode_writes_register(next_op, 31))
1238                         return false;
1239
1240                 break;
1241         case OP_BEQ:
1242         case OP_BNE:
1243                 if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
1244                         return false;
1245                 fallthrough;
1246         case OP_BLEZ:
1247         case OP_BGTZ:
1248                 if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
1249                         return false;
1250                 break;
1251         case OP_REGIMM:
1252                 switch (op.r.rt) {
1253                 case OP_REGIMM_BLTZAL:
1254                 case OP_REGIMM_BGEZAL:
1255                         if (opcode_reads_register(next_op, 31) ||
1256                             opcode_writes_register(next_op, 31))
1257                                 return false;
1258                         fallthrough;
1259                 case OP_REGIMM_BLTZ:
1260                 case OP_REGIMM_BGEZ:
1261                         if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
1262                                 return false;
1263                         break;
1264                 }
1265                 fallthrough;
1266         default:
1267                 break;
1268         }
1269
1270         return true;
1271 }
1272
1273 static int lightrec_switch_delay_slots(struct lightrec_state *state, struct block *block)
1274 {
1275         struct opcode *list, *next = &block->opcode_list[0];
1276         unsigned int i;
1277         union code op, next_op;
1278         u32 flags;
1279
1280         for (i = 0; i < block->nb_ops - 1; i++) {
1281                 list = next;
1282                 next = &block->opcode_list[i + 1];
1283                 next_op = next->c;
1284                 op = list->c;
1285
1286                 if (!has_delay_slot(op) || op_flag_no_ds(list->flags) ||
1287                     op_flag_emulate_branch(list->flags) ||
1288                     op.opcode == 0 || next_op.opcode == 0)
1289                         continue;
1290
1291                 if (is_delay_slot(block->opcode_list, i))
1292                         continue;
1293
1294                 if (op_flag_sync(next->flags))
1295                         continue;
1296
1297                 if (op_flag_load_delay(next->flags) && opcode_is_load(next_op))
1298                         continue;
1299
1300                 if (!lightrec_can_switch_delay_slot(list->c, next_op))
1301                         continue;
1302
1303                 pr_debug("Swap branch and delay slot opcodes "
1304                          "at offsets 0x%x / 0x%x\n",
1305                          i << 2, (i + 1) << 2);
1306
1307                 flags = next->flags | (list->flags & LIGHTREC_SYNC);
1308                 list->c = next_op;
1309                 next->c = op;
1310                 next->flags = (list->flags | LIGHTREC_NO_DS) & ~LIGHTREC_SYNC;
1311                 list->flags = flags | LIGHTREC_NO_DS;
1312         }
1313
1314         return 0;
1315 }
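/* Example swap (illustrative):
 *   beq   $a0, $zero, target          addiu $v0, $v0, 1
 *   addiu $v0, $v0, 1          -->    beq   $a0, $zero, target  (NO_DS)
 * The ADDIU neither reads nor writes $a0, so it can run before the
 * branch, and the branch no longer needs delay-slot handling. */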
1316
1317 static int lightrec_detect_impossible_branches(struct lightrec_state *state,
1318                                                struct block *block)
1319 {
1320         struct opcode *op, *list = block->opcode_list, *next = &list[0];
1321         unsigned int i;
1322         int ret = 0;
1323
1324         for (i = 0; i < block->nb_ops - 1; i++) {
1325                 op = next;
1326                 next = &list[i + 1];
1327
1328                 if (!has_delay_slot(op->c) ||
1329                     (!has_delay_slot(next->c) &&
1330                      !opcode_is_mfc(next->c) &&
1331                      !(next->i.op == OP_CP0 && next->r.rs == OP_CP0_RFE)))
1332                         continue;
1333
1334                 if (op->c.opcode == next->c.opcode) {
1335                         /* The delay slot is the exact same opcode as the branch
1336                          * opcode: this is effectively a NOP */
1337                         next->c.opcode = 0;
1338                         continue;
1339                 }
1340
1341                 op->flags |= LIGHTREC_EMULATE_BRANCH;
1342
1343                 if (OPT_LOCAL_BRANCHES && i + 2 < block->nb_ops) {
1344                         /* The interpreter will only emulate the branch, then
1345                          * return to the compiled code. Add a SYNC after the
1346                          * branch + delay slot in the case where the branch
1347                          * was not taken. */
1348                         list[i + 2].flags |= LIGHTREC_SYNC;
1349                 }
1350         }
1351
1352         return ret;
1353 }
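/* An "impossible" branch is one whose delay slot is itself a branch,
 * an MFC, or an RFE: the JIT cannot compile these directly, so they
 * are flagged to be emulated by the interpreter. */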
1354
1355 static bool is_local_branch(const struct block *block, unsigned int idx)
1356 {
1357         const struct opcode *op = &block->opcode_list[idx];
1358         s32 offset;
1359
1360         switch (op->c.i.op) {
1361         case OP_BEQ:
1362         case OP_BNE:
1363         case OP_BLEZ:
1364         case OP_BGTZ:
1365         case OP_REGIMM:
1366                 offset = idx + 1 + (s16)op->c.i.imm;
1367                 if (offset >= 0 && offset < block->nb_ops)
1368                         return true;
1369                 fallthrough;
1370         default:
1371                 return false;
1372         }
1373 }
1374
1375 static int lightrec_handle_load_delays(struct lightrec_state *state,
1376                                        struct block *block)
1377 {
1378         struct opcode *op, *list = block->opcode_list;
1379         unsigned int i;
1380         s16 imm;
1381
1382         for (i = 0; i < block->nb_ops; i++) {
1383                 op = &list[i];
1384
1385                 if (!opcode_is_load(op->c) || !op->c.i.rt || op->c.i.op == OP_LWC2)
1386                         continue;
1387
1388                 if (!is_delay_slot(list, i)) {
1389                         /* Only handle load delays in delay slots.
1390                          * PSX games never abused load delay slots otherwise. */
1391                         continue;
1392                 }
1393
1394                 if (is_local_branch(block, i - 1)) {
1395                         imm = (s16)list[i - 1].c.i.imm;
1396
1397                         if (!opcode_reads_register(list[i + imm].c, op->c.i.rt)) {
1398                                 /* The target opcode of the branch is inside
1399                                  * the block, and it does not read the register
1400                                  * written to by the load opcode; we can ignore
1401                                  * the load delay. */
1402                                 continue;
1403                         }
1404                 }
1405
1406                 op->flags |= LIGHTREC_LOAD_DELAY;
1407         }
1408
1409         return 0;
1410 }
1411
1412 static int lightrec_swap_load_delays(struct lightrec_state *state,
1413                                      struct block *block)
1414 {
1415         unsigned int i;
1416         union code c, next;
1417         bool in_ds = false, skip_next = false;
1418         struct opcode op;
1419
1420         if (block->nb_ops < 2)
1421                 return 0;
1422
1423         for (i = 0; i < block->nb_ops - 2; i++) {
1424                 c = block->opcode_list[i].c;
1425
1426                 if (skip_next) {
1427                         skip_next = false;
1428                 } else if (!in_ds && opcode_is_load(c) && c.i.op != OP_LWC2) {
1429                         next = block->opcode_list[i + 1].c;
1430
1431                         switch (next.i.op) {
1432                         case OP_LWL:
1433                         case OP_LWR:
1434                         case OP_REGIMM:
1435                         case OP_BEQ:
1436                         case OP_BNE:
1437                         case OP_BLEZ:
1438                         case OP_BGTZ:
1439                                 continue;
1440                         }
1441
1442                         if (opcode_reads_register(next, c.i.rt)
1443                             && !opcode_writes_register(next, c.i.rs)) {
1444                                 pr_debug("Swapping opcodes at offset 0x%x to "
1445                                          "respect load delay\n", i << 2);
1446
1447                                 op = block->opcode_list[i];
1448                                 block->opcode_list[i] = block->opcode_list[i + 1];
1449                                 block->opcode_list[i + 1] = op;
1450                                 skip_next = true;
1451                         }
1452                 }
1453
1454                 in_ds = has_delay_slot(c);
1455         }
1456
1457         return 0;
1458 }
1459
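     /* Give the LIGHTREC_LOCAL_BRANCH flag to branch opcodes whose target
      * lies inside the block and is compilable, so that the code emitter
      * can branch directly within the generated code. */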
1460 static int lightrec_local_branches(struct lightrec_state *state, struct block *block)
1461 {
1462         const struct opcode *ds;
1463         struct opcode *list;
1464         unsigned int i;
1465         s32 offset;
1466
1467         for (i = 0; i < block->nb_ops; i++) {
1468                 list = &block->opcode_list[i];
1469
1470                 if (should_emulate(list) || !is_local_branch(block, i))
1471                         continue;
1472
1473                 offset = i + 1 + (s16)list->c.i.imm;
1474
1475                 pr_debug("Found local branch to offset 0x%x\n", offset << 2);
1476
1477                 ds = get_delay_slot(block->opcode_list, i);
1478                 if (op_flag_load_delay(ds->flags) && opcode_is_load(ds->c)) {
1479                         pr_debug("Branch delay slot has a load delay - skip\n");
1480                         continue;
1481                 }
1482
1483                 if (should_emulate(&block->opcode_list[offset])) {
1484                         pr_debug("Branch target must be emulated - skip\n");
1485                         continue;
1486                 }
1487
1488                 if (offset && has_delay_slot(block->opcode_list[offset - 1].c)) {
1489                         pr_debug("Branch target is a delay slot - skip\n");
1490                         continue;
1491                 }
1492
1493                 list->flags |= LIGHTREC_LOCAL_BRANCH;
1494         }
1495
1496         lightrec_reset_syncs(block);
1497
1498         return 0;
1499 }
1500
1501 bool has_delay_slot(union code op)
1502 {
1503         switch (op.i.op) {
1504         case OP_SPECIAL:
1505                 switch (op.r.op) {
1506                 case OP_SPECIAL_JR:
1507                 case OP_SPECIAL_JALR:
1508                         return true;
1509                 default:
1510                         return false;
1511                 }
1512         case OP_J:
1513         case OP_JAL:
1514         case OP_BEQ:
1515         case OP_BNE:
1516         case OP_BLEZ:
1517         case OP_BGTZ:
1518         case OP_REGIMM:
1519                 return true;
1520         default:
1521                 return false;
1522         }
1523 }
1524
1525 bool is_delay_slot(const struct opcode *list, unsigned int offset)
1526 {
1527         return offset > 0
1528                 && !op_flag_no_ds(list[offset - 1].flags)
1529                 && has_delay_slot(list[offset - 1].c);
1530 }
1531
1532 bool should_emulate(const struct opcode *list)
1533 {
1534         return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c);
1535 }
1536
1537 static bool op_writes_rd(union code c)
1538 {
1539         switch (c.i.op) {
1540         case OP_SPECIAL:
1541         case OP_META:
1542                 return true;
1543         default:
1544                 return false;
1545         }
1546 }
1547
1548 static void lightrec_add_reg_op(struct opcode *op, u8 reg, u32 reg_op)
1549 {
1550         if (op_writes_rd(op->c) && reg == op->r.rd)
1551                 op->flags |= LIGHTREC_REG_RD(reg_op);
1552         else if (op->i.rs == reg)
1553                 op->flags |= LIGHTREC_REG_RS(reg_op);
1554         else if (op->i.rt == reg)
1555                 op->flags |= LIGHTREC_REG_RT(reg_op);
1556         else
1557                 pr_debug("Cannot add unload/clean/discard flag: "
1558                          "opcode does not touch register %s!\n",
1559                          lightrec_reg_name(reg));
1560 }
1561
1562 static void lightrec_add_unload(struct opcode *op, u8 reg)
1563 {
1564         lightrec_add_reg_op(op, reg, LIGHTREC_REG_UNLOAD);
1565 }
1566
1567 static void lightrec_add_discard(struct opcode *op, u8 reg)
1568 {
1569         lightrec_add_reg_op(op, reg, LIGHTREC_REG_DISCARD);
1570 }
1571
1572 static void lightrec_add_clean(struct opcode *op, u8 reg)
1573 {
1574         lightrec_add_reg_op(op, reg, LIGHTREC_REG_CLEAN);
1575 }
1576
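     /* Flush the last_r/last_w trackers: every register still tracked
      * gets an unload flag at the last opcode that touched it, then the
      * trackers are reset. The 34 entries cover the 32 GPRs plus the LO
      * and HI registers. */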
1577 static void
1578 lightrec_early_unload_sync(struct opcode *list, s16 *last_r, s16 *last_w)
1579 {
1580         unsigned int reg;
1581         s16 offset;
1582
1583         for (reg = 0; reg < 34; reg++) {
1584                 offset = s16_max(last_w[reg], last_r[reg]);
1585
1586                 if (offset >= 0)
1587                         lightrec_add_unload(&list[offset], reg);
1588         }
1589
1590         memset(last_r, 0xff, sizeof(*last_r) * 34);
1591         memset(last_w, 0xff, sizeof(*last_w) * 34);
1592 }
1593
1594 static int lightrec_early_unload(struct lightrec_state *state, struct block *block)
1595 {
1596         u16 i, offset;
1597         struct opcode *op;
1598         s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0;
1599         u64 mask_r, mask_w, dirty = 0, loaded = 0;
1600         u8 reg, load_delay_reg = 0;
1601
1602         memset(last_r, 0xff, sizeof(last_r));
1603         memset(last_w, 0xff, sizeof(last_w));
1604
1605         /*
1606          * Clean if:
1607          * - the register is dirty, and is read again after a branch opcode
1608          *
1609          * Unload if:
1610          * - the register is dirty or loaded, and is not read again
1611          * - the register is dirty or loaded, and is written again after a branch opcode
1612          * - the next opcode has the SYNC flag set
1613          *
1614          * Discard if:
1615          * - the register is dirty or loaded, and is written again
1616          */
1617
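             /* For instance (illustrative), given:
              *
              *     lw   v0, 0(a0)
              *     addu v1, v0, a1     <- last use of v0
              *     ...                    (v0 never touched again)
              *
              * the ADDU opcode gets an unload flag for v0, so that the
              * register cache can release v0 right after that point. */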
1618         for (i = 0; i < block->nb_ops; i++) {
1619                 op = &block->opcode_list[i];
1620
1621                 if (OPT_HANDLE_LOAD_DELAYS && load_delay_reg) {
1622                         /* Handle delayed register write from load opcodes in
1623                          * delay slots */
1624                         last_w[load_delay_reg] = i;
1625                         load_delay_reg = 0;
1626                 }
1627
1628                 if (op_flag_sync(op->flags) || should_emulate(op)) {
1629                         /* The next opcode has the SYNC flag set, or is a branch
1630                          * that should be emulated: unload all registers. */
1631                         lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
1632                         dirty = 0;
1633                         loaded = 0;
1634                 }
1635
1636                 if (next_sync == i) {
1637                         last_sync = i;
1638                         pr_debug("Last sync: 0x%x\n", last_sync << 2);
1639                 }
1640
1641                 if (has_delay_slot(op->c)) {
1642                         next_sync = i + 1 + !op_flag_no_ds(op->flags);
1643                         pr_debug("Next sync: 0x%x\n", next_sync << 2);
1644                 }
1645
1646                 mask_r = opcode_read_mask(op->c);
1647                 mask_w = opcode_write_mask(op->c);
1648
1649                 if (op_flag_load_delay(op->flags) && opcode_is_load(op->c)) {
1650                         /* If we have a load opcode in a delay slot, its target
1651                          * register is actually not written there but at a
1652                          * later point, in the dispatcher. Prevent the algorithm
1653                          * from discarding its previous value. */
1654                         load_delay_reg = op->c.i.rt;
1655                         mask_w &= ~BIT(op->c.i.rt);
1656                 }
1657
1658                 for (reg = 0; reg < 34; reg++) {
1659                         if (mask_r & BIT(reg)) {
1660                                 if (dirty & BIT(reg) && last_w[reg] < last_sync) {
1661                                         /* The register is dirty, and is read
1662                                          * again after a branch: clean it */
1663
1664                                         lightrec_add_clean(&block->opcode_list[last_w[reg]], reg);
1665                                         dirty &= ~BIT(reg);
1666                                         loaded |= BIT(reg);
1667                                 }
1668
1669                                 last_r[reg] = i;
1670                         }
1671
1672                         if (mask_w & BIT(reg)) {
1673                                 if ((dirty & BIT(reg) && last_w[reg] < last_sync) ||
1674                                     (loaded & BIT(reg) && last_r[reg] < last_sync)) {
1675                                         /* The register is dirty or loaded, and
1676                                          * is written again after a branch:
1677                                          * unload it */
1678
1679                                         offset = s16_max(last_w[reg], last_r[reg]);
1680                                         lightrec_add_unload(&block->opcode_list[offset], reg);
1681                                         dirty &= ~BIT(reg);
1682                                         loaded &= ~BIT(reg);
1683                                 } else if (!(mask_r & BIT(reg)) &&
1684                                            ((dirty & BIT(reg) && last_w[reg] > last_sync) ||
1685                                            (loaded & BIT(reg) && last_r[reg] > last_sync))) {
1686                                         /* The register is dirty or loaded, and
1687                                          * is written again: discard it */
1688
1689                                         offset = s16_max(last_w[reg], last_r[reg]);
1690                                         lightrec_add_discard(&block->opcode_list[offset], reg);
1691                                         dirty &= ~BIT(reg);
1692                                         loaded &= ~BIT(reg);
1693                                 }
1694
1695                                 last_w[reg] = i;
1696                         }
1697
1698                 }
1699
1700                 dirty |= mask_w;
1701                 loaded |= mask_r;
1702         }
1703
1704         /* Unload all registers that are dirty or loaded at the end of block. */
1705         lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
1706
1707         return 0;
1708 }
1709
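     /* Use constant propagation to figure out which PSX memory map each
      * load/store opcode targets, and flag the opcodes accordingly, so
      * that faster access paths can be generated instead of the generic
      * fallback. */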
1710 static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
1711 {
1712         struct opcode *list;
1713         enum psx_map psx_map;
1714         struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
1715         unsigned int i;
1716         u32 val, kunseg_val;
1717         bool no_mask;
1718
1719         for (i = 0; i < block->nb_ops; i++) {
1720                 list = &block->opcode_list[i];
1721
1722                 lightrec_consts_propagate(block, i, v);
1723
1724                 switch (list->i.op) {
1725                 case OP_SB:
1726                 case OP_SH:
1727                 case OP_SW:
1728                         /* Mark all store operations that target $sp or $gp
1729                          * as not requiring code invalidation. This is based
1730                          * on the heuristic that stores that use one of these
1731                          * registers as their address will never hit a code page. */
1732                         if (list->i.rs >= 28 && list->i.rs <= 29 &&
1733                             !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1734                                 pr_debug("Flagging opcode 0x%08x as not requiring invalidation\n",
1735                                          list->opcode);
1736                                 list->flags |= LIGHTREC_NO_INVALIDATE;
1737                         }
1738
1739                         /* Detect writes whose destination address is inside the
1740                          * current block, using constant propagation. When these
1741                          * occur, we mark the blocks as not compilable. */
1742                         if (is_known(v, list->i.rs) &&
1743                             kunseg(v[list->i.rs].value) >= kunseg(block->pc) &&
1744                             kunseg(v[list->i.rs].value) < (kunseg(block->pc) + block->nb_ops * 4)) {
1745                                 pr_debug("Self-modifying block detected\n");
1746                                 block_set_flags(block, BLOCK_NEVER_COMPILE);
1747                                 list->flags |= LIGHTREC_SMC;
1748                         }
1749                         fallthrough;
1750                 case OP_SWL:
1751                 case OP_SWR:
1752                 case OP_SWC2:
1753                 case OP_LB:
1754                 case OP_LBU:
1755                 case OP_LH:
1756                 case OP_LHU:
1757                 case OP_LW:
1758                 case OP_LWL:
1759                 case OP_LWR:
1760                 case OP_LWC2:
1761                         if (v[list->i.rs].known | v[list->i.rs].sign) {
1762                                 psx_map = lightrec_get_constprop_map(state, v,
1763                                                                      list->i.rs,
1764                                                                      (s16) list->i.imm);
1765
1766                                 if (psx_map != PSX_MAP_UNKNOWN && !is_known(v, list->i.rs))
1767                                         pr_debug("Detected map thanks to bit-level const propagation!\n");
1768
1769                                 list->flags &= ~LIGHTREC_IO_MASK;
1770
1771                                 val = v[list->i.rs].value + (s16) list->i.imm;
1772                                 kunseg_val = kunseg(val);
1773
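                                     /* The three top address bits select
                                      * the KUSEG/KSEG0/KSEG1 mirrors; if
                                      * they are known to be zero, the
                                      * address is already its kunseg'd
                                      * value and needs no masking. */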
1774                                 no_mask = (v[list->i.rs].known & ~v[list->i.rs].value
1775                                            & 0xe0000000) == 0xe0000000;
1776
1777                                 switch (psx_map) {
1778                                 case PSX_MAP_KERNEL_USER_RAM:
1779                                         if (no_mask)
1780                                                 list->flags |= LIGHTREC_NO_MASK;
1781                                         fallthrough;
1782                                 case PSX_MAP_MIRROR1:
1783                                 case PSX_MAP_MIRROR2:
1784                                 case PSX_MAP_MIRROR3:
1785                                         pr_debug("Flagging opcode %u as RAM access\n", i);
1786                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1787                                         if (no_mask && state->mirrors_mapped)
1788                                                 list->flags |= LIGHTREC_NO_MASK;
1789                                         break;
1790                                 case PSX_MAP_BIOS:
1791                                         pr_debug("Flagging opcode %u as BIOS access\n", i);
1792                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_BIOS);
1793                                         if (no_mask)
1794                                                 list->flags |= LIGHTREC_NO_MASK;
1795                                         break;
1796                                 case PSX_MAP_SCRATCH_PAD:
1797                                         pr_debug("Flagging opcode %u as scratchpad access\n", i);
1798                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_SCRATCH);
1799                                         if (no_mask)
1800                                                 list->flags |= LIGHTREC_NO_MASK;
1801
1802                                         /* Consider that we're never going to run code from
1803                                          * the scratchpad. */
1804                                         list->flags |= LIGHTREC_NO_INVALIDATE;
1805                                         break;
1806                                 case PSX_MAP_HW_REGISTERS:
1807                                         if (state->ops.hw_direct &&
1808                                             state->ops.hw_direct(kunseg_val,
1809                                                                  opcode_is_store(list->c),
1810                                                                  opcode_get_io_size(list->c))) {
1811                                                 pr_debug("Flagging opcode %u as direct I/O access\n",
1812                                                          i);
1813                                                 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT_HW);
1814
1815                                                 if (no_mask)
1816                                                         list->flags |= LIGHTREC_NO_MASK;
1817                                         } else {
1818                                                 pr_debug("Flagging opcode %u as I/O access\n",
1819                                                          i);
1820                                                 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
1821                                         }
1822                                         break;
1823                                 default:
1824                                         break;
1825                                 }
1826                         }
1827
1828                         if (!LIGHTREC_FLAGS_GET_IO_MODE(list->flags)
1829                             && list->i.rs >= 28 && list->i.rs <= 29
1830                             && !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1831                                 /* Assume that all I/O operations that target
1832                                  * $sp or $gp will only ever hit directly mapped
1833                                  * memory (RAM, BIOS, scratchpad). */
1834                                 if (state->opt_flags & LIGHTREC_OPT_SP_GP_HIT_RAM)
1835                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1836                                 else
1837                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
1838                         }
1839
1840                         fallthrough;
1841                 default:
1842                         break;
1843                 }
1844         }
1845
1846         return 0;
1847 }
1848
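     /* Starting at "offset", find which register will receive the HI or
      * LO result of a MULT/DIV opcode. Returns 0 if the value is never
      * read before being overwritten, the rd of a single MFHI/MFLO
      * consumer if the result can be written to that register directly,
      * or REG_HI / REG_LO if the value must stay in the dedicated
      * register. */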
1849 static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset,
1850                             const struct opcode *last,
1851                             u32 mask, bool sync, bool mflo, bool another)
1852 {
1853         const struct opcode *op, *next = &block->opcode_list[offset];
1854         u32 old_mask;
1855         u8 reg2, reg = mflo ? REG_LO : REG_HI;
1856         u16 branch_offset;
1857         unsigned int i;
1858
1859         for (i = offset; i < block->nb_ops; i++) {
1860                 op = next;
1861                 next = &block->opcode_list[i + 1];
1862                 old_mask = mask;
1863
1864                 /* If any other opcode reads or writes the register
1865                  * we'd use, then we cannot use it anymore. */
1866                 mask |= opcode_read_mask(op->c);
1867                 mask |= opcode_write_mask(op->c);
1868
1869                 if (op_flag_sync(op->flags))
1870                         sync = true;
1871
1872                 switch (op->i.op) {
1873                 case OP_BEQ:
1874                 case OP_BNE:
1875                 case OP_BLEZ:
1876                 case OP_BGTZ:
1877                 case OP_REGIMM:
1878                         /* TODO: handle backwards branches too */
1879                         if (!last && op_flag_local_branch(op->flags) &&
1880                             (s16)op->c.i.imm >= 0) {
1881                                 branch_offset = i + 1 + (s16)op->c.i.imm
1882                                         - !!op_flag_no_ds(op->flags);
1883
1884                                 reg = get_mfhi_mflo_reg(block, branch_offset, NULL,
1885                                                         mask, sync, mflo, false);
1886                                 reg2 = get_mfhi_mflo_reg(block, offset + 1, next,
1887                                                          mask, sync, mflo, false);
1888                                 if (reg > 0 && reg == reg2)
1889                                         return reg;
1890                                 if (!reg && !reg2)
1891                                         return 0;
1892                         }
1893
1894                         return mflo ? REG_LO : REG_HI;
1895                 case OP_META_MULT2:
1896                 case OP_META_MULTU2:
1897                         return 0;
1898                 case OP_SPECIAL:
1899                         switch (op->r.op) {
1900                         case OP_SPECIAL_MULT:
1901                         case OP_SPECIAL_MULTU:
1902                         case OP_SPECIAL_DIV:
1903                         case OP_SPECIAL_DIVU:
1904                                 return 0;
1905                         case OP_SPECIAL_MTHI:
1906                                 if (!mflo)
1907                                         return 0;
1908                                 continue;
1909                         case OP_SPECIAL_MTLO:
1910                                 if (mflo)
1911                                         return 0;
1912                                 continue;
1913                         case OP_SPECIAL_JR:
1914                                 if (op->r.rs != 31)
1915                                         return reg;
1916
1917                                 if (!sync && !op_flag_no_ds(op->flags) &&
1918                                     (next->i.op == OP_SPECIAL) &&
1919                                     ((!mflo && next->r.op == OP_SPECIAL_MFHI) ||
1920                                     (mflo && next->r.op == OP_SPECIAL_MFLO)))
1921                                         return next->r.rd;
1922
1923                                 return 0;
1924                         case OP_SPECIAL_JALR:
1925                                 return reg;
1926                         case OP_SPECIAL_MFHI:
1927                                 if (!mflo) {
1928                                         if (another)
1929                                                 return op->r.rd;
1930                                         /* Must use REG_HI if there is another MFHI target */
1931                                         reg2 = get_mfhi_mflo_reg(block, i + 1, next,
1932                                                          0, sync, mflo, true);
1933                                         if (reg2 > 0 && reg2 != REG_HI)
1934                                                 return REG_HI;
1935
1936                                         if (!sync && !(old_mask & BIT(op->r.rd)))
1937                                                 return op->r.rd;
1938                                         else
1939                                                 return REG_HI;
1940                                 }
1941                                 continue;
1942                         case OP_SPECIAL_MFLO:
1943                                 if (mflo) {
1944                                         if (another)
1945                                                 return op->r.rd;
1946                                         /* Must use REG_LO if there is another MFLO target */
1947                                         reg2 = get_mfhi_mflo_reg(block, i + 1, next,
1948                                                          0, sync, mflo, true);
1949                                         if (reg2 > 0 && reg2 != REG_LO)
1950                                                 return REG_LO;
1951
1952                                         if (!sync && !(old_mask & BIT(op->r.rd)))
1953                                                 return op->r.rd;
1954                                         else
1955                                                 return REG_LO;
1956                                 }
1957                                 continue;
1958                         default:
1959                                 break;
1960                         }
1961
1962                         fallthrough;
1963                 default:
1964                         continue;
1965                 }
1966         }
1967
1968         return reg;
1969 }
1970
1971 static void lightrec_replace_lo_hi(struct block *block, u16 offset,
1972                                    u16 last, bool lo)
1973 {
1974         unsigned int i;
1975         u32 branch_offset;
1976
1977         /* This function will remove the following MFLO/MFHI. It must be called
1978          * only if get_mfhi_mflo_reg() returned a non-zero value. */
1979
1980         for (i = offset; i < last; i++) {
1981                 struct opcode *op = &block->opcode_list[i];
1982
1983                 switch (op->i.op) {
1984                 case OP_BEQ:
1985                 case OP_BNE:
1986                 case OP_BLEZ:
1987                 case OP_BGTZ:
1988                 case OP_REGIMM:
1989                         /* TODO: handle backwards branches too */
1990                         if (op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) {
1991                                 branch_offset = i + 1 + (s16)op->c.i.imm
1992                                         - !!op_flag_no_ds(op->flags);
1993
1994                                 lightrec_replace_lo_hi(block, branch_offset, last, lo);
1995                                 lightrec_replace_lo_hi(block, i + 1, branch_offset, lo);
1996                         }
1997                         break;
1998
1999                 case OP_SPECIAL:
2000                         if (lo && op->r.op == OP_SPECIAL_MFLO) {
2001                                 pr_debug("Removing MFLO opcode at offset 0x%x\n",
2002                                          i << 2);
2003                                 op->opcode = 0;
2004                                 return;
2005                         } else if (!lo && op->r.op == OP_SPECIAL_MFHI) {
2006                                 pr_debug("Removing MFHI opcode at offset 0x%x\n",
2007                                          i << 2);
2008                                 op->opcode = 0;
2009                                 return;
2010                         }
2011
2012                         fallthrough;
2013                 default:
2014                         break;
2015                 }
2016         }
2017 }
2018
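     /* On a MIPS host the native div/divu opcodes, like the PS1 CPU's,
      * do not trap on a zero divisor, so the generated code presumably
      * never needs the explicit check there. */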
2019 static bool lightrec_always_skip_div_check(void)
2020 {
2021 #ifdef __mips__
2022         return true;
2023 #else
2024         return false;
2025 #endif
2026 }
2027
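     /* For each MULT/DIV opcode, use get_mfhi_mflo_reg() to find where
      * the HI/LO results are consumed. Unused halves get the
      * LIGHTREC_NO_LO / LIGHTREC_NO_HI flags; when a single MFLO/MFHI is
      * the only consumer, it is removed, and its target register is
      * encoded directly into the MULT/DIV opcode (rd for LO, imm for HI). */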
2028 static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block *block)
2029 {
2030         struct opcode *list = NULL;
2031         struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
2032         u8 reg_hi, reg_lo;
2033         unsigned int i;
2034
2035         for (i = 0; i < block->nb_ops - 1; i++) {
2036                 list = &block->opcode_list[i];
2037
2038                 lightrec_consts_propagate(block, i, v);
2039
2040                 switch (list->i.op) {
2041                 case OP_SPECIAL:
2042                         switch (list->r.op) {
2043                         case OP_SPECIAL_DIV:
2044                         case OP_SPECIAL_DIVU:
2045                                 /* If we are dividing by a non-zero constant, don't
2046                                  * emit the div-by-zero check. */
2047                                 if (lightrec_always_skip_div_check() ||
2048                                     (v[list->r.rt].known & v[list->r.rt].value)) {
2049                                         list->flags |= LIGHTREC_NO_DIV_CHECK;
2050                                 }
2051                                 fallthrough;
2052                         case OP_SPECIAL_MULT:
2053                         case OP_SPECIAL_MULTU:
2054                                 break;
2055                         default:
2056                                 continue;
2057                         }
2058                         fallthrough;
2059                 case OP_META_MULT2:
2060                 case OP_META_MULTU2:
2061                         break;
2062                 default:
2063                         continue;
2064                 }
2065
2066                 /* Don't optimize MULT/DIV opcodes located in delay slots */
2067                 if (is_delay_slot(block->opcode_list, i) ||
2068                     op_flag_no_ds(list->flags)) {
2069                         continue;
2070                 }
2071
2072                 reg_lo = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, true, false);
2073                 if (reg_lo == 0) {
2074                         pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
2075                                  " not writing LO\n", i << 2);
2076                         list->flags |= LIGHTREC_NO_LO;
2077                 }
2078
2079                 reg_hi = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, false, false);
2080                 if (reg_hi == 0) {
2081                         pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
2082                                  " not writing HI\n", i << 2);
2083                         list->flags |= LIGHTREC_NO_HI;
2084                 }
2085
2086                 if (!reg_lo && !reg_hi) {
2087                         pr_debug("Both LO and HI unused in this block; they will "
2088                                  "probably be used in the parent block - removing "
2089                                  "flags.\n");
2090                         list->flags &= ~(LIGHTREC_NO_LO | LIGHTREC_NO_HI);
2091                 }
2092
2093                 if (reg_lo > 0 && reg_lo != REG_LO) {
2094                         pr_debug("Found register %s to hold LO (rs = %u, rt = %u)\n",
2095                                  lightrec_reg_name(reg_lo), list->r.rs, list->r.rt);
2096
2097                         lightrec_replace_lo_hi(block, i + 1, block->nb_ops, true);
2098                         list->r.rd = reg_lo;
2099                 } else {
2100                         list->r.rd = 0;
2101                 }
2102
2103                 if (reg_hi > 0 && reg_hi != REG_HI) {
2104                         pr_debug("Found register %s to hold HI (rs = %u, rt = %u)\n",
2105                                  lightrec_reg_name(reg_hi), list->r.rs, list->r.rt);
2106
2107                         lightrec_replace_lo_hi(block, i + 1, block->nb_ops, false);
2108                         list->r.imm = reg_hi;
2109                 } else {
2110                         list->r.imm = 0;
2111                 }
2112         }
2113
2114         return 0;
2115 }
2116
2117 static bool remove_div_sequence(struct block *block, unsigned int offset)
2118 {
2119         struct opcode *op;
2120         unsigned int i, found = 0;
2121
2122         /*
2123          * Scan for the zero-checking sequence that GCC automatically introduced
2124          * after most DIV/DIVU opcodes. This sequence checks the value of the
2125          * divisor, and if zero, executes a BREAK opcode, causing the BIOS
2126          * handler to crash the PS1.
2127          *
2128          * For DIV opcodes, this sequence additionally checks that the signed
2129          * operation does not overflow.
2130          *
2131          * Since games presumably never crashed the PS1 this way, we can
2132          * assume that they never divided by zero or overflowed, which means
2133          * these sequences can be safely removed.
2134          */
2135
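             /* The matched sequence looks roughly like this ("$d" being
              * the divisor, "$n" the dividend; DIVU sequences stop after
              * the first BREAK):
              *
              *     bne   $d, zero, 1f
              *     nop
              *     break 0x1c00          # division by zero
              * 1:  li    at, -1
              *     bne   $d, at, 2f
              *     lui   at, 0x8000
              *     bne   $n, at, 2f
              *     nop
              *     break 0x1800          # 0x80000000 / -1 overflow
              * 2:  ...
              */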
2136         for (i = offset; i < block->nb_ops; i++) {
2137                 op = &block->opcode_list[i];
2138
2139                 if (!found) {
2140                         if (op->i.op == OP_SPECIAL &&
2141                             (op->r.op == OP_SPECIAL_DIV || op->r.op == OP_SPECIAL_DIVU))
2142                                 break;
2143
2144                         if ((op->opcode & 0xfc1fffff) == 0x14000002) {
2145                                 /* BNE ???, zero, +8 */
2146                                 found++;
2147                         } else {
2148                                 offset++;
2149                         }
2150                 } else if (found == 1 && !op->opcode) {
2151                         /* NOP */
2152                         found++;
2153                 } else if (found == 2 && op->opcode == 0x0007000d) {
2154                         /* BREAK 0x1c00 */
2155                         found++;
2156                 } else if (found == 3 && op->opcode == 0x2401ffff) {
2157                         /* LI at, -1 */
2158                         found++;
2159                 } else if (found == 4 && (op->opcode & 0xfc1fffff) == 0x14010004) {
2160                         /* BNE ???, at, +16 */
2161                         found++;
2162                 } else if (found == 5 && op->opcode == 0x3c018000) {
2163                         /* LUI at, 0x8000 */
2164                         found++;
2165                 } else if (found == 6 && (op->opcode & 0xfc1fffff) == 0x14010002) {
2166                         /* BNE ???, at, +8 */
2167                         found++;
2168                 } else if (found == 7 && !op->opcode) {
2169                         /* NOP */
2170                         found++;
2171                 } else if (found == 8 && op->opcode == 0x0006000d) {
2172                         /* BREAK 0x1800 */
2173                         found++;
2174                         break;
2175                 } else {
2176                         break;
2177                 }
2178         }
2179
2180         if (found >= 3) {
2181                 if (found != 9)
2182                         found = 3;
2183
2184                 pr_debug("Removing DIV%s sequence at offset 0x%x\n",
2185                          found == 9 ? "" : "U", offset << 2);
2186
2187                 for (i = 0; i < found; i++)
2188                         block->opcode_list[offset + i].opcode = 0;
2189
2190                 return true;
2191         }
2192
2193         return false;
2194 }
2195
2196 static int lightrec_remove_div_by_zero_check_sequence(struct lightrec_state *state,
2197                                                       struct block *block)
2198 {
2199         struct opcode *op;
2200         unsigned int i;
2201
2202         for (i = 0; i < block->nb_ops; i++) {
2203                 op = &block->opcode_list[i];
2204
2205                 if (op->i.op == OP_SPECIAL &&
2206                     (op->r.op == OP_SPECIAL_DIVU || op->r.op == OP_SPECIAL_DIV) &&
2207                     remove_div_sequence(block, i + 1))
2208                         op->flags |= LIGHTREC_NO_DIV_CHECK;
2209         }
2210
2211         return 0;
2212 }
2213
2214 static const u32 memset_code[] = {
2215         0x10a00006,     // beqz         a1, 2f
2216         0x24a2ffff,     // addiu        v0,a1,-1
2217         0x2403ffff,     // li           v1,-1
2218         0xac800000,     // 1: sw        zero,0(a0)
2219         0x2442ffff,     // addiu        v0,v0,-1
2220         0x1443fffd,     // bne          v0,v1, 1b
2221         0x24840004,     // addiu        a0,a0,4
2222         0x03e00008,     // 2: jr        ra
2223         0x00000000,     // nop
2224 };
2225
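     /* If a block's opcodes match the memset routine above, flag it as
      * BLOCK_IS_MEMSET so the core can special-case it, and as
      * BLOCK_NEVER_COMPILE since compiling the loop would be pointless. */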
2226 static int lightrec_replace_memset(struct lightrec_state *state, struct block *block)
2227 {
2228         unsigned int i;
2229         union code c;
2230
2231         for (i = 0; i < block->nb_ops; i++) {
2232                 c = block->opcode_list[i].c;
2233
2234                 if (c.opcode != memset_code[i])
2235                         return 0;
2236
2237                 if (i == ARRAY_SIZE(memset_code) - 1) {
2238                         /* success! */
2239                         pr_debug("Block at PC 0x%x is a memset\n", block->pc);
2240                         block_set_flags(block,
2241                                         BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE);
2242
2243                         /* Return non-zero to skip other optimizers. */
2244                         return 1;
2245                 }
2246         }
2247
2248         return 0;
2249 }
2250
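     /* Set BLOCK_PRELOAD_PC on blocks containing an opcode that needs the
      * current PC value at run time (jumps and linking calls,
      * SYSCALL/BREAK, and branches that leave the block), so that the PC
      * can be loaded up-front when the block is entered. */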
2251 static int lightrec_test_preload_pc(struct lightrec_state *state, struct block *block)
2252 {
2253         unsigned int i;
2254         union code c;
2255         u32 flags;
2256
2257         for (i = 0; i < block->nb_ops; i++) {
2258                 c = block->opcode_list[i].c;
2259                 flags = block->opcode_list[i].flags;
2260
2261                 if (op_flag_sync(flags))
2262                         break;
2263
2264                 switch (c.i.op) {
2265                 case OP_J:
2266                 case OP_JAL:
2267                         block->flags |= BLOCK_PRELOAD_PC;
2268                         return 0;
2269
2270                 case OP_REGIMM:
2271                         switch (c.r.rt) {
2272                         case OP_REGIMM_BLTZAL:
2273                         case OP_REGIMM_BGEZAL:
2274                                 block->flags |= BLOCK_PRELOAD_PC;
2275                                 return 0;
2276                         default:
2277                                 break;
2278                         }
2279                         fallthrough;
2280                 case OP_BEQ:
2281                 case OP_BNE:
2282                 case OP_BLEZ:
2283                 case OP_BGTZ:
2284                         if (!op_flag_local_branch(flags)) {
2285                                 block->flags |= BLOCK_PRELOAD_PC;
2286                                 return 0;
2287                         }
                             break;
2288
2289                 case OP_SPECIAL:
2290                         switch (c.r.op) {
2291                         case OP_SPECIAL_JALR:
2292                                 if (c.r.rd) {
2293                                         block->flags |= BLOCK_PRELOAD_PC;
2294                                         return 0;
2295                                 }
2296                                 break;
2297                         case OP_SPECIAL_SYSCALL:
2298                         case OP_SPECIAL_BREAK:
2299                                 block->flags |= BLOCK_PRELOAD_PC;
2300                                 return 0;
2301                         default:
2302                                 break;
2303                         }
2304                         break;
2305                 }
2306         }
2307
2308         return 0;
2309 }
2310
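     /* Optimizer passes, run in order. Passes disabled at build time are
      * compiled out to NULL through IF_OPT() and skipped; a pass returning
      * a non-zero value stops the whole chain, which
      * lightrec_replace_memset() uses to skip the remaining passes. */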
2311 static int (*lightrec_optimizers[])(struct lightrec_state *state, struct block *) = {
2312         IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence),
2313         IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset),
2314         IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches),
2315         IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_handle_load_delays),
2316         IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_swap_load_delays),
2317         IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches),
2318         IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches),
2319         IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops),
2320         IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),
2321         IF_OPT(OPT_FLAG_IO, &lightrec_flag_io),
2322         IF_OPT(OPT_FLAG_MULT_DIV, &lightrec_flag_mults_divs),
2323         IF_OPT(OPT_EARLY_UNLOAD, &lightrec_early_unload),
2324         IF_OPT(OPT_PRELOAD_PC, &lightrec_test_preload_pc),
2325 };
2326
2327 int lightrec_optimize(struct lightrec_state *state, struct block *block)
2328 {
2329         unsigned int i;
2330         int ret;
2331
2332         for (i = 0; i < ARRAY_SIZE(lightrec_optimizers); i++) {
2333                 if (lightrec_optimizers[i]) {
2334                         ret = (*lightrec_optimizers[i])(state, block);
2335                         if (ret)
2336                                 return ret;
2337                 }
2338         }
2339
2340         return 0;
2341 }