pcsx_rearmed.git: deps/lightrec/optimizer.c
// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "constprop.h"
#include "lightrec-config.h"
#include "disassembler.h"
#include "lightrec.h"
#include "memmanager.h"
#include "optimizer.h"
#include "regcache.h"

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define IF_OPT(opt, ptr) ((opt) ? (ptr) : NULL)

struct optimizer_list {
	void (**optimizers)(struct opcode *);
	unsigned int nb_optimizers;
};

static bool is_nop(union code op);

bool is_unconditional_jump(union code c)
{
	switch (c.i.op) {
	case OP_SPECIAL:
		return c.r.op == OP_SPECIAL_JR || c.r.op == OP_SPECIAL_JALR;
	case OP_J:
	case OP_JAL:
		return true;
	case OP_BEQ:
	case OP_BLEZ:
		return c.i.rs == c.i.rt;
	case OP_REGIMM:
		return (c.r.rt == OP_REGIMM_BGEZ ||
			c.r.rt == OP_REGIMM_BGEZAL) && c.i.rs == 0;
	default:
		return false;
	}
}
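
/*
 * Illustrative examples of the patterns matched above:
 *   beq  $t0, $t0, offset  -> always taken: a register always equals itself
 *   blez $zero, offset     -> always taken: 0 <= 0
 *   bgez $zero, offset     -> always taken: 0 >= 0
 */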

bool is_syscall(union code c)
{
	return (c.i.op == OP_SPECIAL && c.r.op == OP_SPECIAL_SYSCALL) ||
		(c.i.op == OP_CP0 && (c.r.rs == OP_CP0_MTC0 ||
					c.r.rs == OP_CP0_CTC0) &&
		 (c.r.rd == 12 || c.r.rd == 13));
}

static u64 opcode_read_mask(union code op)
{
	switch (op.i.op) {
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_SYSCALL:
		case OP_SPECIAL_BREAK:
			return 0;
		case OP_SPECIAL_JR:
		case OP_SPECIAL_JALR:
		case OP_SPECIAL_MTHI:
		case OP_SPECIAL_MTLO:
			return BIT(op.r.rs);
		case OP_SPECIAL_MFHI:
			return BIT(REG_HI);
		case OP_SPECIAL_MFLO:
			return BIT(REG_LO);
		case OP_SPECIAL_SLL:
			if (!op.r.imm)
				return 0;
			fallthrough;
		case OP_SPECIAL_SRL:
		case OP_SPECIAL_SRA:
			return BIT(op.r.rt);
		default:
			return BIT(op.r.rs) | BIT(op.r.rt);
		}
	case OP_CP0:
		switch (op.r.rs) {
		case OP_CP0_MTC0:
		case OP_CP0_CTC0:
			return BIT(op.r.rt);
		default:
			return 0;
		}
	case OP_CP2:
		if (op.r.op == OP_CP2_BASIC) {
			switch (op.r.rs) {
			case OP_CP2_BASIC_MTC2:
			case OP_CP2_BASIC_CTC2:
				return BIT(op.r.rt);
			default:
				break;
			}
		}
		return 0;
	case OP_J:
	case OP_JAL:
	case OP_LUI:
		return 0;
	case OP_BEQ:
		if (op.i.rs == op.i.rt)
			return 0;
		fallthrough;
	case OP_BNE:
	case OP_LWL:
	case OP_LWR:
	case OP_SB:
	case OP_SH:
	case OP_SWL:
	case OP_SW:
	case OP_SWR:
	case OP_META_LWU:
	case OP_META_SWU:
		return BIT(op.i.rs) | BIT(op.i.rt);
	case OP_META:
		return BIT(op.m.rs);
	default:
		return BIT(op.i.rs);
	}
}

static u64 mult_div_write_mask(union code op)
{
	u64 flags;

	if (!OPT_FLAG_MULT_DIV)
		return BIT(REG_LO) | BIT(REG_HI);

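	/* With the MULT/DIV optimization enabled, the rd and imm fields of
	 * the opcode are repurposed to name the registers that receive the
	 * LO and HI results; a zero field means the architectural LO/HI
	 * register is used. */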
	if (op.r.rd)
		flags = BIT(op.r.rd);
	else
		flags = BIT(REG_LO);
	if (op.r.imm)
		flags |= BIT(op.r.imm);
	else
		flags |= BIT(REG_HI);

	return flags;
}

u64 opcode_write_mask(union code op)
{
	switch (op.i.op) {
	case OP_META_MULT2:
	case OP_META_MULTU2:
		return mult_div_write_mask(op);
	case OP_META:
		return BIT(op.m.rd);
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_JR:
		case OP_SPECIAL_SYSCALL:
		case OP_SPECIAL_BREAK:
			return 0;
		case OP_SPECIAL_MULT:
		case OP_SPECIAL_MULTU:
		case OP_SPECIAL_DIV:
		case OP_SPECIAL_DIVU:
			return mult_div_write_mask(op);
		case OP_SPECIAL_MTHI:
			return BIT(REG_HI);
		case OP_SPECIAL_MTLO:
			return BIT(REG_LO);
		case OP_SPECIAL_SLL:
			if (!op.r.imm)
				return 0;
			fallthrough;
		default:
			return BIT(op.r.rd);
		}
	case OP_ADDI:
	case OP_ADDIU:
	case OP_SLTI:
	case OP_SLTIU:
	case OP_ANDI:
	case OP_ORI:
	case OP_XORI:
	case OP_LUI:
	case OP_LB:
	case OP_LH:
	case OP_LWL:
	case OP_LW:
	case OP_LBU:
	case OP_LHU:
	case OP_LWR:
	case OP_META_LWU:
		return BIT(op.i.rt);
	case OP_JAL:
		return BIT(31);
	case OP_CP0:
		switch (op.r.rs) {
		case OP_CP0_MFC0:
		case OP_CP0_CFC0:
			return BIT(op.i.rt);
		default:
			return 0;
		}
	case OP_CP2:
		if (op.r.op == OP_CP2_BASIC) {
			switch (op.r.rs) {
			case OP_CP2_BASIC_MFC2:
			case OP_CP2_BASIC_CFC2:
				return BIT(op.i.rt);
			default:
				break;
			}
		}
		return 0;
	case OP_REGIMM:
		switch (op.r.rt) {
		case OP_REGIMM_BLTZAL:
		case OP_REGIMM_BGEZAL:
			return BIT(31);
		default:
			return 0;
		}
	default:
		return 0;
	}
}

bool opcode_reads_register(union code op, u8 reg)
{
	return opcode_read_mask(op) & BIT(reg);
}

bool opcode_writes_register(union code op, u8 reg)
{
	return opcode_write_mask(op) & BIT(reg);
}

static int find_prev_writer(const struct opcode *list, unsigned int offset, u8 reg)
{
	union code c;
	unsigned int i;

	if (op_flag_sync(list[offset].flags))
		return -1;

	for (i = offset; i > 0; i--) {
		c = list[i - 1].c;

		if (opcode_writes_register(c, reg)) {
			if (i > 1 && has_delay_slot(list[i - 2].c))
				break;

			return i - 1;
		}

		if (op_flag_sync(list[i - 1].flags) ||
		    has_delay_slot(c) ||
		    opcode_reads_register(c, reg))
			break;
	}

	return -1;
}

static int find_next_reader(const struct opcode *list, unsigned int offset, u8 reg)
{
	unsigned int i;
	union code c;

	if (op_flag_sync(list[offset].flags))
		return -1;

	for (i = offset; ; i++) {
		c = list[i].c;

		if (opcode_reads_register(c, reg))
			return i;

		if (op_flag_sync(list[i].flags)
		    || (op_flag_no_ds(list[i].flags) && has_delay_slot(c))
		    || is_delay_slot(list, i)
		    || opcode_writes_register(c, reg))
			break;
	}

	return -1;
}

static bool reg_is_dead(const struct opcode *list, unsigned int offset, u8 reg)
{
	unsigned int i;

	if (op_flag_sync(list[offset].flags) || is_delay_slot(list, offset))
		return false;

	for (i = offset + 1; ; i++) {
		if (opcode_reads_register(list[i].c, reg))
			return false;

		if (opcode_writes_register(list[i].c, reg))
			return true;

		if (is_syscall(list[i].c))
			return false;

		if (has_delay_slot(list[i].c)) {
			if (op_flag_no_ds(list[i].flags) ||
			    opcode_reads_register(list[i + 1].c, reg))
				return false;

			return opcode_writes_register(list[i + 1].c, reg);
		}
	}
}
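
/* Illustrative example: in the sequence
 *   addiu $t0, $zero, 1 ; xor $v0, $a0, $a1 ; or $t0, $a0, $a1
 * $t0 is dead right after the ADDIU, since it is written again (by the OR)
 * before ever being read. */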

static bool reg_is_read(const struct opcode *list,
			unsigned int a, unsigned int b, u8 reg)
{
	/* Return true if reg is read in one of the opcodes of the interval
	 * [a, b[ */
	for (; a < b; a++) {
		if (!is_nop(list[a].c) && opcode_reads_register(list[a].c, reg))
			return true;
	}

	return false;
}

static bool reg_is_written(const struct opcode *list,
			   unsigned int a, unsigned int b, u8 reg)
{
	/* Return true if reg is written in one of the opcodes of the interval
	 * [a, b[ */

	for (; a < b; a++) {
		if (!is_nop(list[a].c) && opcode_writes_register(list[a].c, reg))
			return true;
	}

	return false;
}

static bool reg_is_read_or_written(const struct opcode *list,
				   unsigned int a, unsigned int b, u8 reg)
{
	return reg_is_read(list, a, b, reg) || reg_is_written(list, a, b, reg);
}

static bool opcode_is_mfc(union code op)
{
	switch (op.i.op) {
	case OP_CP0:
		switch (op.r.rs) {
		case OP_CP0_MFC0:
		case OP_CP0_CFC0:
			return true;
		default:
			break;
		}

		break;
	case OP_CP2:
		if (op.r.op == OP_CP2_BASIC) {
			switch (op.r.rs) {
			case OP_CP2_BASIC_MFC2:
			case OP_CP2_BASIC_CFC2:
				return true;
			default:
				break;
			}
		}

		break;
	default:
		break;
	}

	return false;
}

static bool opcode_is_load(union code op)
{
	switch (op.i.op) {
	case OP_LB:
	case OP_LH:
	case OP_LWL:
	case OP_LW:
	case OP_LBU:
	case OP_LHU:
	case OP_LWR:
	case OP_LWC2:
	case OP_META_LWU:
		return true;
	default:
		return false;
	}
}

static bool opcode_is_store(union code op)
{
	switch (op.i.op) {
	case OP_SB:
	case OP_SH:
	case OP_SW:
	case OP_SWL:
	case OP_SWR:
	case OP_SWC2:
	case OP_META_SWU:
		return true;
	default:
		return false;
	}
}

bool opcode_has_load_delay(union code op)
{
	return (opcode_is_load(op) && op.i.rt && op.i.op != OP_LWC2)
		|| opcode_is_mfc(op);
}
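
/* On MIPS I, the value fetched by a load (or by MFC0/MFC2) only becomes
 * visible one instruction later; the instruction placed right after the
 * load still sees the old register value. Illustrative example:
 *   lw   $t0, 0($a0)
 *   move $a1, $t0       <- $a1 receives the previous value of $t0
 */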

static u8 opcode_get_io_size(union code op)
{
	switch (op.i.op) {
	case OP_LB:
	case OP_LBU:
	case OP_SB:
		return 8;
	case OP_LH:
	case OP_LHU:
	case OP_SH:
		return 16;
	default:
		return 32;
	}
}

bool opcode_is_io(union code op)
{
	return opcode_is_load(op) || opcode_is_store(op);
}

/* TODO: Complete */
static bool is_nop(union code op)
{
	if (opcode_writes_register(op, 0)) {
		switch (op.i.op) {
		case OP_CP0:
			return op.r.rs != OP_CP0_MFC0;
		case OP_LB:
		case OP_LH:
		case OP_LWL:
		case OP_LW:
		case OP_LBU:
		case OP_LHU:
		case OP_LWR:
		case OP_META_LWU:
			return false;
		default:
			return true;
		}
	}

	switch (op.i.op) {
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_AND:
			return op.r.rd == op.r.rt && op.r.rd == op.r.rs;
		case OP_SPECIAL_ADD:
		case OP_SPECIAL_ADDU:
			return (op.r.rd == op.r.rt && op.r.rs == 0) ||
				(op.r.rd == op.r.rs && op.r.rt == 0);
		case OP_SPECIAL_SUB:
		case OP_SPECIAL_SUBU:
			return op.r.rd == op.r.rs && op.r.rt == 0;
		case OP_SPECIAL_OR:
			if (op.r.rd == op.r.rt)
				return op.r.rd == op.r.rs || op.r.rs == 0;
			else
				return (op.r.rd == op.r.rs) && op.r.rt == 0;
		case OP_SPECIAL_SLL:
		case OP_SPECIAL_SRA:
		case OP_SPECIAL_SRL:
			return op.r.rd == op.r.rt && op.r.imm == 0;
		case OP_SPECIAL_MFHI:
		case OP_SPECIAL_MFLO:
			return op.r.rd == 0;
		default:
			return false;
		}
	case OP_ORI:
	case OP_ADDI:
	case OP_ADDIU:
		return op.i.rt == op.i.rs && op.i.imm == 0;
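	/* A branch with imm == 1 targets the instruction that directly
	 * follows its delay slot, which is exactly where execution would
	 * fall through to anyway: taken or not, the result is the same. */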
	case OP_BGTZ:
		return (op.i.rs == 0 || op.i.imm == 1);
	case OP_REGIMM:
		return (op.r.rt == OP_REGIMM_BLTZ ||
				op.r.rt == OP_REGIMM_BLTZAL) &&
			(op.i.rs == 0 || op.i.imm == 1);
	case OP_BNE:
		return (op.i.rs == op.i.rt || op.i.imm == 1);
	default:
		return false;
	}
}

static void lightrec_optimize_sll_sra(struct opcode *list, unsigned int offset,
				      struct constprop_data *v)
{
	struct opcode *ldop = NULL, *curr = &list[offset], *next;
	struct opcode *to_change, *to_nop;
	int idx, idx2;

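	/* The pattern handled here is the sign-extension idiom emitted for
	 * casts to s8/s16 on MIPS (which has no SEB/SEH instructions), e.g.:
	 *   sll $v0, $a0, 24
	 *   sra $v0, $v0, 24    ; $v0 = (s8)$a0
	 */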
	if (curr->r.imm != 24 && curr->r.imm != 16)
		return;

	if (is_delay_slot(list, offset))
		return;

	idx = find_next_reader(list, offset + 1, curr->r.rd);
	if (idx < 0)
		return;

	next = &list[idx];

	if (next->i.op != OP_SPECIAL || next->r.op != OP_SPECIAL_SRA ||
	    next->r.imm != curr->r.imm || next->r.rt != curr->r.rd)
		return;

	if (curr->r.rd != curr->r.rt && next->r.rd != next->r.rt) {
		/* sll rY, rX, 16
		 * ...
		 * sra rZ, rY, 16 */

		if (!reg_is_dead(list, idx, curr->r.rd) ||
		    reg_is_read_or_written(list, offset, idx, next->r.rd))
			return;

		/* If rY is dead after the SRA, and rZ is not used after the SLL,
		 * we can change rY to rZ */

		pr_debug("Detected SLL/SRA with middle temp register\n");
		curr->r.rd = next->r.rd;
		next->r.rt = curr->r.rd;
	}

	/* We have an SLL/SRA combo. If imm is 16, that's a cast to s16;
	 * if imm is 24, that's a cast to s8.
	 *
	 * First of all, make sure that the target register of the SLL is not
	 * read after the SRA. */

	if (curr->r.rd == curr->r.rt) {
		/* sll rX, rX, 16
		 * ...
		 * sra rY, rX, 16 */
		to_change = next;
		to_nop = curr;

		/* rX is used after the SRA - we cannot convert it. */
		if (curr->r.rd != next->r.rd && !reg_is_dead(list, idx, curr->r.rd))
			return;
	} else {
		/* sll rY, rX, 16
		 * ...
		 * sra rY, rY, 16 */
		to_change = curr;
		to_nop = next;
	}

	idx2 = find_prev_writer(list, offset, curr->r.rt);
	if (idx2 >= 0) {
		/* Note that PSX games sometimes do casts after
		 * a LHU or LBU; in this case we can change the
		 * load opcode to a LH or LB, and the cast can
		 * be changed to a MOV or a simple NOP. */

		ldop = &list[idx2];

		if (next->r.rd != ldop->i.rt &&
		    !reg_is_dead(list, idx, ldop->i.rt))
			ldop = NULL;
		else if (curr->r.imm == 16 && ldop->i.op == OP_LHU)
			ldop->i.op = OP_LH;
		else if (curr->r.imm == 24 && ldop->i.op == OP_LBU)
			ldop->i.op = OP_LB;
		else
			ldop = NULL;

		if (ldop) {
			if (next->r.rd == ldop->i.rt) {
				to_change->opcode = 0;
			} else if (reg_is_dead(list, idx, ldop->i.rt) &&
				   !reg_is_read_or_written(list, idx2 + 1, idx, next->r.rd)) {
				/* The target register of the SRA is dead after the
				 * LBU/LHU; we can change the target register of the
				 * LBU/LHU to the one of the SRA. */
				v[ldop->i.rt].known = 0;
				v[ldop->i.rt].sign = 0;
				ldop->i.rt = next->r.rd;
				to_change->opcode = 0;
			} else {
				to_change->i.op = OP_META;
				to_change->m.op = OP_META_MOV;
				to_change->m.rd = next->r.rd;
				to_change->m.rs = ldop->i.rt;
			}

			if (to_nop->r.imm == 24)
				pr_debug("Convert LBU+SLL+SRA to LB\n");
			else
				pr_debug("Convert LHU+SLL+SRA to LH\n");

			v[ldop->i.rt].known = 0;
			v[ldop->i.rt].sign = 0xffffff80 << (24 - curr->r.imm);
		}
	}

	if (!ldop) {
		pr_debug("Convert SLL/SRA #%u to EXT%c\n",
			 curr->r.imm, curr->r.imm == 24 ? 'C' : 'S');

		to_change->m.rs = curr->r.rt;
		to_change->m.op = to_nop->r.imm == 24 ? OP_META_EXTC : OP_META_EXTS;
		to_change->i.op = OP_META;
	}

	to_nop->opcode = 0;
}

static void
lightrec_remove_useless_lui(struct block *block, unsigned int offset,
			    const struct constprop_data *v)
{
	struct opcode *list = block->opcode_list,
		      *op = &block->opcode_list[offset];
	int reader;

	if (!op_flag_sync(op->flags) && is_known(v, op->i.rt) &&
	    v[op->i.rt].value == op->i.imm << 16) {
		pr_debug("Converting duplicated LUI to NOP\n");
		op->opcode = 0x0;
		return;
	}

	if (op->i.imm != 0 || op->i.rt == 0 || offset == block->nb_ops - 1)
		return;

	reader = find_next_reader(list, offset + 1, op->i.rt);
	if (reader <= 0)
		return;

	if (opcode_writes_register(list[reader].c, op->i.rt) ||
	    reg_is_dead(list, reader, op->i.rt)) {
		pr_debug("Removing useless LUI 0x0\n");

		if (list[reader].i.rs == op->i.rt)
			list[reader].i.rs = 0;
		if (list[reader].i.op == OP_SPECIAL &&
		    list[reader].i.rt == op->i.rt)
			list[reader].i.rt = 0;
		op->opcode = 0x0;
	}
}

static void lightrec_lui_to_movi(struct block *block, unsigned int offset)
{
	struct opcode *ori, *lui = &block->opcode_list[offset];
	int next;

	if (lui->i.op != OP_LUI)
		return;

	next = find_next_reader(block->opcode_list, offset + 1, lui->i.rt);
	if (next > 0) {
		ori = &block->opcode_list[next];

		switch (ori->i.op) {
		case OP_ORI:
		case OP_ADDI:
		case OP_ADDIU:
			if (ori->i.rs == ori->i.rt && ori->i.imm) {
				ori->flags |= LIGHTREC_MOVI;
				lui->flags |= LIGHTREC_MOVI;
			}
			break;
		}
	}
}
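
/* Illustrative example: the pair
 *   lui $t0, 0x1f80
 *   ori $t0, $t0, 0x1070
 * is flagged LIGHTREC_MOVI so that it can be compiled as a single 32-bit
 * constant load instead of two dependent ALU operations. */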

static void lightrec_modify_lui(struct block *block, unsigned int offset)
{
	union code c, *lui = &block->opcode_list[offset].c;
	bool stop = false, stop_next = false;
	unsigned int i;

	for (i = offset + 1; !stop && i < block->nb_ops; i++) {
		c = block->opcode_list[i].c;
		stop = stop_next;

		if ((opcode_is_store(c) && c.i.rt == lui->i.rt)
		    || (!opcode_is_load(c) && opcode_reads_register(c, lui->i.rt)))
			break;

		if (opcode_writes_register(c, lui->i.rt)) {
			if (c.i.op == OP_LWL || c.i.op == OP_LWR) {
				/* LWL/LWR only partially write their target register;
				 * therefore the LUI should not write a different value. */
				break;
			}

			pr_debug("Convert LUI at offset 0x%x to kuseg\n",
				 (i - 1) << 2);
			lui->i.imm = kunseg(lui->i.imm << 16) >> 16;
			break;
		}

		if (has_delay_slot(c))
			stop_next = true;
	}
}
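
/* Illustrative example: "lui $t0, 0x8009" loads a kseg0 address. If $t0 is
 * only ever used as the base register of loads/stores until it is
 * overwritten, the LUI can be rewritten as "lui $t0, 0x0009" (the kuseg
 * equivalent, kunseg(0x80090000) >> 16), which maps to the same physical
 * RAM. */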

static int lightrec_transform_branches(struct lightrec_state *state,
				       struct block *block)
{
	struct opcode *op;
	unsigned int i;
	s32 offset;

	for (i = 0; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		switch (op->i.op) {
		case OP_J:
			/* Transform J opcode into BEQ $zero, $zero if possible. */
			offset = (s32)((block->pc & 0xf0000000) >> 2 | op->j.imm)
				- (s32)(block->pc >> 2) - (s32)i - 1;
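			/* The offset is counted in instructions; the extra
			 * "- 1" accounts for the BEQ immediate being
			 * relative to the delay slot rather than to the
			 * branch itself. */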
			if (offset == (s16)offset) {
				pr_debug("Transform J into BEQ $zero, $zero\n");
				op->i.op = OP_BEQ;
				op->i.rs = 0;
				op->i.rt = 0;
				op->i.imm = offset;
			}
			fallthrough;
		default:
			break;
		}
	}

	return 0;
}

static inline bool is_power_of_two(u32 value)
{
	return popcount32(value) == 1;
}

static void lightrec_patch_known_zero(struct opcode *op,
				      const struct constprop_data *v)
{
	switch (op->i.op) {
	case OP_SPECIAL:
		switch (op->r.op) {
		case OP_SPECIAL_JR:
		case OP_SPECIAL_JALR:
		case OP_SPECIAL_MTHI:
		case OP_SPECIAL_MTLO:
			if (is_known_zero(v, op->r.rs))
				op->r.rs = 0;
			break;
		default:
			if (is_known_zero(v, op->r.rs))
				op->r.rs = 0;
			fallthrough;
		case OP_SPECIAL_SLL:
		case OP_SPECIAL_SRL:
		case OP_SPECIAL_SRA:
			if (is_known_zero(v, op->r.rt))
				op->r.rt = 0;
			break;
		case OP_SPECIAL_SYSCALL:
		case OP_SPECIAL_BREAK:
		case OP_SPECIAL_MFHI:
		case OP_SPECIAL_MFLO:
			break;
		}
		break;
	case OP_CP0:
		switch (op->r.rs) {
		case OP_CP0_MTC0:
		case OP_CP0_CTC0:
			if (is_known_zero(v, op->r.rt))
				op->r.rt = 0;
			break;
		default:
			break;
		}
		break;
	case OP_CP2:
		if (op->r.op == OP_CP2_BASIC) {
			switch (op->r.rs) {
			case OP_CP2_BASIC_MTC2:
			case OP_CP2_BASIC_CTC2:
				if (is_known_zero(v, op->r.rt))
					op->r.rt = 0;
				break;
			default:
				break;
			}
		}
		break;
	case OP_BEQ:
	case OP_BNE:
		if (is_known_zero(v, op->i.rt))
			op->i.rt = 0;
		fallthrough;
	case OP_REGIMM:
	case OP_BLEZ:
	case OP_BGTZ:
	case OP_ADDI:
	case OP_ADDIU:
	case OP_SLTI:
	case OP_SLTIU:
	case OP_ANDI:
	case OP_ORI:
	case OP_XORI:
	case OP_META_MULT2:
	case OP_META_MULTU2:
	case OP_META:
		if (is_known_zero(v, op->m.rs))
			op->m.rs = 0;
		break;
	case OP_SB:
	case OP_SH:
	case OP_SWL:
	case OP_SW:
	case OP_SWR:
	case OP_META_SWU:
		if (is_known_zero(v, op->i.rt))
			op->i.rt = 0;
		fallthrough;
	case OP_LB:
	case OP_LH:
	case OP_LWL:
	case OP_LW:
	case OP_LBU:
	case OP_LHU:
	case OP_LWR:
	case OP_LWC2:
	case OP_SWC2:
	case OP_META_LWU:
		if (is_known(v, op->i.rs)
		    && kunseg(v[op->i.rs].value) == 0)
			op->i.rs = 0;
		break;
	default:
		break;
	}
}
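
/* Illustrative example: if constant propagation proved that $t0 holds zero,
 * "addu $v0, $t0, $t1" is patched into "addu $v0, $zero, $t1", which the
 * transform pass below then turns into a MOV meta-opcode. */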

static void lightrec_reset_syncs(struct block *block)
{
	struct opcode *op, *list = block->opcode_list;
	unsigned int i;
	s32 offset;

	for (i = 0; i < block->nb_ops; i++)
		list[i].flags &= ~LIGHTREC_SYNC;

	for (i = 0; i < block->nb_ops; i++) {
		op = &list[i];

		if (has_delay_slot(op->c)) {
			if (op_flag_local_branch(op->flags)) {
				offset = i + 1 - op_flag_no_ds(op->flags) + (s16)op->i.imm;
				list[offset].flags |= LIGHTREC_SYNC;
			}

			if (op_flag_emulate_branch(op->flags) && i + 2 < block->nb_ops)
				list[i + 2].flags |= LIGHTREC_SYNC;
		}
	}
}

static void maybe_remove_load_delay(struct opcode *op)
{
	if (op_flag_load_delay(op->flags) && opcode_is_load(op->c))
		op->flags &= ~LIGHTREC_LOAD_DELAY;
}

static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
{
	struct opcode *op, *list = block->opcode_list;
	struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
	unsigned int i;
	bool local;
	int idx;
	u8 tmp;

	for (i = 0; i < block->nb_ops; i++) {
		op = &list[i];

		lightrec_consts_propagate(block, i, v);

		lightrec_patch_known_zero(op, v);

		/* Transform all opcodes detected as useless to real NOPs
		 * (0x0: SLL r0, r0, #0) */
		if (op->opcode != 0 && is_nop(op->c)) {
			pr_debug("Converting useless opcode 0x%08x to NOP\n",
					op->opcode);
			op->opcode = 0x0;
		}

		if (!op->opcode)
			continue;

		switch (op->i.op) {
		case OP_BEQ:
			if (op->i.rs == op->i.rt ||
			    (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
			     v[op->i.rs].value == v[op->i.rt].value)) {
				if (op->i.rs != op->i.rt)
					pr_debug("Found always-taken BEQ\n");

				op->i.rs = 0;
				op->i.rt = 0;
			} else if (v[op->i.rs].known & v[op->i.rt].known &
				   (v[op->i.rs].value ^ v[op->i.rt].value)) {
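				/* At least one bit is known in both registers
				 * and differs between them, so the two values
				 * can never compare equal. */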
				pr_debug("Found never-taken BEQ\n");

				if (!op_flag_no_ds(op->flags))
					maybe_remove_load_delay(&list[i + 1]);

				local = op_flag_local_branch(op->flags);
				op->opcode = 0;
				op->flags = 0;

				if (local)
					lightrec_reset_syncs(block);
			} else if (op->i.rs == 0) {
				op->i.rs = op->i.rt;
				op->i.rt = 0;
			}
			break;

		case OP_BNE:
			if (v[op->i.rs].known & v[op->i.rt].known &
			    (v[op->i.rs].value ^ v[op->i.rt].value)) {
				pr_debug("Found always-taken BNE\n");

				op->i.op = OP_BEQ;
				op->i.rs = 0;
				op->i.rt = 0;
			} else if (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
				   v[op->i.rs].value == v[op->i.rt].value) {
				pr_debug("Found never-taken BNE\n");

				if (!op_flag_no_ds(op->flags))
					maybe_remove_load_delay(&list[i + 1]);

				local = op_flag_local_branch(op->flags);
				op->opcode = 0;
				op->flags = 0;

				if (local)
					lightrec_reset_syncs(block);
			} else if (op->i.rs == 0) {
				op->i.rs = op->i.rt;
				op->i.rt = 0;
			}
			break;

		case OP_BLEZ:
			if (v[op->i.rs].known & BIT(31) &&
			    v[op->i.rs].value & BIT(31)) {
				pr_debug("Found always-taken BLEZ\n");

				op->i.op = OP_BEQ;
				op->i.rs = 0;
				op->i.rt = 0;
			}
			break;

		case OP_BGTZ:
			if (v[op->i.rs].known & BIT(31) &&
			    v[op->i.rs].value & BIT(31)) {
				pr_debug("Found never-taken BGTZ\n");

				if (!op_flag_no_ds(op->flags))
					maybe_remove_load_delay(&list[i + 1]);

				local = op_flag_local_branch(op->flags);
				op->opcode = 0;
				op->flags = 0;

				if (local)
					lightrec_reset_syncs(block);
			}
			break;

		case OP_LUI:
			if (i == 0 || !has_delay_slot(list[i - 1].c))
				lightrec_modify_lui(block, i);
			lightrec_remove_useless_lui(block, i, v);
			if (i == 0 || !has_delay_slot(list[i - 1].c))
				lightrec_lui_to_movi(block, i);
			break;

		/* Transform ORI/ADDI/ADDIU with imm #0 or OR/ADD/ADDU/SUB/SUBU
		 * with register $zero to the MOV meta-opcode */
		case OP_ORI:
		case OP_ADDI:
		case OP_ADDIU:
			if (op->i.imm == 0) {
				pr_debug("Convert ORI/ADDI/ADDIU #0 to MOV\n");
				op->m.rd = op->i.rt;
				op->m.op = OP_META_MOV;
				op->i.op = OP_META;
			}
			break;
		case OP_ANDI:
			if (bits_are_known_zero(v, op->i.rs, ~op->i.imm)) {
				pr_debug("Found useless ANDI 0x%x\n", op->i.imm);

				if (op->i.rs == op->i.rt) {
					op->opcode = 0;
				} else {
					op->m.rd = op->i.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
			}
			break;
		case OP_LWL:
		case OP_LWR:
			if (i == 0 || !has_delay_slot(list[i - 1].c)) {
				idx = find_next_reader(list, i + 1, op->i.rt);
				if (idx > 0 && list[idx].i.op == (op->i.op ^ 0x4)
				    && list[idx].i.rs == op->i.rs
				    && list[idx].i.rt == op->i.rt
				    && abs((s16)op->i.imm - (s16)list[idx].i.imm) == 3) {
					/* Replace a LWL/LWR combo with a META_LWU */
					if (op->i.op == OP_LWL)
						op->i.imm -= 3;
					op->i.op = OP_META_LWU;
					list[idx].opcode = 0;
					pr_debug("Convert LWL/LWR to LWU\n");
				}
			}
			break;
		case OP_SWL:
		case OP_SWR:
			if (i == 0 || !has_delay_slot(list[i - 1].c)) {
				idx = find_next_reader(list, i + 1, op->i.rt);
				if (idx > 0 && list[idx].i.op == (op->i.op ^ 0x4)
				    && list[idx].i.rs == op->i.rs
				    && list[idx].i.rt == op->i.rt
				    && abs((s16)op->i.imm - (s16)list[idx].i.imm) == 3) {
					/* Replace a SWL/SWR combo with a META_SWU */
					if (op->i.op == OP_SWL)
						op->i.imm -= 3;
					op->i.op = OP_META_SWU;
					list[idx].opcode = 0;
					pr_debug("Convert SWL/SWR to SWU\n");
				}
			}
			break;
		case OP_REGIMM:
			switch (op->r.rt) {
			case OP_REGIMM_BLTZ:
			case OP_REGIMM_BGEZ:
				if (!(v[op->r.rs].known & BIT(31)))
					break;

				if (!!(v[op->r.rs].value & BIT(31))
				    ^ (op->r.rt == OP_REGIMM_BGEZ)) {
					pr_debug("Found always-taken BLTZ/BGEZ\n");
					op->i.op = OP_BEQ;
					op->i.rs = 0;
					op->i.rt = 0;
				} else {
					pr_debug("Found never-taken BLTZ/BGEZ\n");

					if (!op_flag_no_ds(op->flags))
						maybe_remove_load_delay(&list[i + 1]);

					local = op_flag_local_branch(op->flags);
					op->opcode = 0;
					op->flags = 0;

					if (local)
						lightrec_reset_syncs(block);
				}
				break;
			case OP_REGIMM_BLTZAL:
			case OP_REGIMM_BGEZAL:
				/* TODO: Detect always-taken and replace with JAL */
				break;
			}
			break;
		case OP_SPECIAL:
			switch (op->r.op) {
			case OP_SPECIAL_SRAV:
				if ((v[op->r.rs].known & 0x1f) != 0x1f)
					break;

				pr_debug("Convert SRAV to SRA\n");
				op->r.imm = v[op->r.rs].value & 0x1f;
				op->r.op = OP_SPECIAL_SRA;

				fallthrough;
			case OP_SPECIAL_SRA:
				if (op->r.imm == 0) {
					pr_debug("Convert SRA #0 to MOV\n");
					op->m.rs = op->r.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
				break;

			case OP_SPECIAL_SLLV:
				if ((v[op->r.rs].known & 0x1f) != 0x1f)
					break;

				pr_debug("Convert SLLV to SLL\n");
				op->r.imm = v[op->r.rs].value & 0x1f;
				op->r.op = OP_SPECIAL_SLL;

				fallthrough;
			case OP_SPECIAL_SLL:
				if (op->r.imm == 0) {
					pr_debug("Convert SLL #0 to MOV\n");
					op->m.rs = op->r.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}

				lightrec_optimize_sll_sra(block->opcode_list, i, v);
				break;

			case OP_SPECIAL_SRLV:
				if ((v[op->r.rs].known & 0x1f) != 0x1f)
					break;

				pr_debug("Convert SRLV to SRL\n");
				op->r.imm = v[op->r.rs].value & 0x1f;
				op->r.op = OP_SPECIAL_SRL;

				fallthrough;
			case OP_SPECIAL_SRL:
				if (op->r.imm == 0) {
					pr_debug("Convert SRL #0 to MOV\n");
					op->m.rs = op->r.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
				break;

			case OP_SPECIAL_MULT:
			case OP_SPECIAL_MULTU:
				if (is_known(v, op->r.rs) &&
				    is_power_of_two(v[op->r.rs].value)) {
					tmp = op->c.i.rs;
					op->c.i.rs = op->c.i.rt;
					op->c.i.rt = tmp;
				} else if (!is_known(v, op->r.rt) ||
					   !is_power_of_two(v[op->r.rt].value)) {
					break;
				}

				pr_debug("Multiply by power-of-two: %u\n",
					 v[op->r.rt].value);

				if (op->r.op == OP_SPECIAL_MULT)
					op->i.op = OP_META_MULT2;
				else
					op->i.op = OP_META_MULTU2;

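				/* e.g. a multiplication by 8 becomes a MULT2
				 * meta-opcode with shift amount
				 * ctz32(8) == 3. */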
				op->r.op = ctz32(v[op->r.rt].value);
				break;
			case OP_SPECIAL_NOR:
				if (op->r.rs == 0 || op->r.rt == 0) {
					pr_debug("Convert NOR $zero to COM\n");
					op->i.op = OP_META;
					op->m.op = OP_META_COM;
					if (!op->m.rs)
						op->m.rs = op->r.rt;
				}
				break;
			case OP_SPECIAL_OR:
			case OP_SPECIAL_ADD:
			case OP_SPECIAL_ADDU:
				if (op->r.rs == 0) {
					pr_debug("Convert OR/ADD $zero to MOV\n");
					op->m.rs = op->r.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
				fallthrough;
			case OP_SPECIAL_SUB:
			case OP_SPECIAL_SUBU:
				if (op->r.rt == 0) {
					pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
				fallthrough;
			default:
				break;
			}
			fallthrough;
		default:
			break;
		}
	}

	return 0;
}

static bool lightrec_can_switch_delay_slot(union code op, union code next_op)
{
	switch (op.i.op) {
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_JALR:
			if (opcode_reads_register(next_op, op.r.rd) ||
			    opcode_writes_register(next_op, op.r.rd))
				return false;
			fallthrough;
		case OP_SPECIAL_JR:
			if (opcode_writes_register(next_op, op.r.rs))
				return false;
			fallthrough;
		default:
			break;
		}
		fallthrough;
	case OP_J:
		break;
	case OP_JAL:
		if (opcode_reads_register(next_op, 31) ||
		    opcode_writes_register(next_op, 31))
			return false;

		break;
	case OP_BEQ:
	case OP_BNE:
		if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
			return false;
		fallthrough;
	case OP_BLEZ:
	case OP_BGTZ:
		if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
			return false;
		break;
	case OP_REGIMM:
		switch (op.r.rt) {
		case OP_REGIMM_BLTZAL:
		case OP_REGIMM_BGEZAL:
			if (opcode_reads_register(next_op, 31) ||
			    opcode_writes_register(next_op, 31))
				return false;
			fallthrough;
		case OP_REGIMM_BLTZ:
		case OP_REGIMM_BGEZ:
			if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
				return false;
			break;
		}
		fallthrough;
	default:
		break;
	}

	return true;
}
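
/* Illustrative example of a legal swap performed by the pass below:
 *   beq   $t0, $t1, target            addiu $a0, $a0, 4
 *   addiu $a0, $a0, 4        ->       beq   $t0, $t1, target   (NO_DS)
 * The swap is refused whenever the delay slot writes a register that the
 * branch reads, or touches the link register of a JAL/JALR/BxxZAL. */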

static int lightrec_switch_delay_slots(struct lightrec_state *state, struct block *block)
{
	struct opcode *list, *next = &block->opcode_list[0];
	unsigned int i;
	union code op, next_op;
	u32 flags;

	for (i = 0; i < block->nb_ops - 1; i++) {
		list = next;
		next = &block->opcode_list[i + 1];
		next_op = next->c;
		op = list->c;

		if (!has_delay_slot(op) || op_flag_no_ds(list->flags) ||
		    op_flag_emulate_branch(list->flags) ||
		    op.opcode == 0 || next_op.opcode == 0)
			continue;

		if (is_delay_slot(block->opcode_list, i))
			continue;

		if (op_flag_sync(next->flags))
			continue;

		if (op_flag_load_delay(next->flags) && opcode_is_load(next_op))
			continue;

		if (!lightrec_can_switch_delay_slot(list->c, next_op))
			continue;

		pr_debug("Swap branch and delay slot opcodes "
			 "at offsets 0x%x / 0x%x\n",
			 i << 2, (i + 1) << 2);

		flags = next->flags | (list->flags & LIGHTREC_SYNC);
		list->c = next_op;
		next->c = op;
		next->flags = (list->flags | LIGHTREC_NO_DS) & ~LIGHTREC_SYNC;
		list->flags = flags | LIGHTREC_NO_DS;
	}

	return 0;
}

static int lightrec_detect_impossible_branches(struct lightrec_state *state,
					       struct block *block)
{
	struct opcode *op, *list = block->opcode_list, *next = &list[0];
	unsigned int i;
	int ret = 0;

	for (i = 0; i < block->nb_ops - 1; i++) {
		op = next;
		next = &list[i + 1];

		if (!has_delay_slot(op->c) ||
		    (!has_delay_slot(next->c) &&
		     !opcode_is_mfc(next->c) &&
		     !(next->i.op == OP_CP0 && next->r.rs == OP_CP0_RFE)))
			continue;

		if (op->c.opcode == next->c.opcode) {
			/* The delay slot is the exact same opcode as the branch
			 * opcode: this is effectively a NOP */
			next->c.opcode = 0;
			continue;
		}

		op->flags |= LIGHTREC_EMULATE_BRANCH;

		if (OPT_LOCAL_BRANCHES && i + 2 < block->nb_ops) {
			/* The interpreter will only emulate the branch, then
			 * return to the compiled code. Add a SYNC after the
			 * branch + delay slot in the case where the branch
			 * was not taken. */
			list[i + 2].flags |= LIGHTREC_SYNC;
		}
	}

	return ret;
}

static bool is_local_branch(const struct block *block, unsigned int idx)
{
	const struct opcode *op = &block->opcode_list[idx];
	s32 offset;

	switch (op->c.i.op) {
	case OP_BEQ:
	case OP_BNE:
	case OP_BLEZ:
	case OP_BGTZ:
	case OP_REGIMM:
		offset = idx + 1 + (s16)op->c.i.imm;
		if (offset >= 0 && offset < block->nb_ops)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static int lightrec_handle_load_delays(struct lightrec_state *state,
				       struct block *block)
{
	struct opcode *op, *list = block->opcode_list;
	unsigned int i;
	s16 imm;

	for (i = 0; i < block->nb_ops; i++) {
		op = &list[i];

		if (!opcode_has_load_delay(op->c))
			continue;

		if (!is_delay_slot(list, i)) {
			/* Only handle load delays in delay slots.
			 * PSX games never abused load delay slots otherwise. */
			continue;
		}

		if (is_local_branch(block, i - 1)) {
			imm = (s16)list[i - 1].c.i.imm;

			if (!opcode_reads_register(list[i + imm].c, op->c.i.rt)) {
				/* The target opcode of the branch is inside
				 * the block, and it does not read the register
				 * written to by the load opcode; we can ignore
				 * the load delay. */
				continue;
			}
		}

		op->flags |= LIGHTREC_LOAD_DELAY;
	}

	return 0;
}

static int lightrec_swap_load_delays(struct lightrec_state *state,
				     struct block *block)
{
	unsigned int i;
	union code c, next;
	bool in_ds = false, skip_next = false;
	struct opcode op;

	if (block->nb_ops < 2)
		return 0;

	for (i = 0; i < block->nb_ops - 2; i++) {
		c = block->opcode_list[i].c;

		if (skip_next) {
			skip_next = false;
		} else if (!in_ds && opcode_is_load(c) && c.i.op != OP_LWC2) {
			next = block->opcode_list[i + 1].c;

			switch (next.i.op) {
			case OP_LWL:
			case OP_LWR:
			case OP_REGIMM:
			case OP_BEQ:
			case OP_BNE:
			case OP_BLEZ:
			case OP_BGTZ:
				continue;
			}

1451                         if (opcode_reads_register(next, c.i.rt)
1452                             && !opcode_writes_register(next, c.i.rs)) {
1453                                 pr_debug("Swapping opcodes at offset 0x%x to "
1454                                          "respect load delay\n", i << 2);
1455
1456                                 op = block->opcode_list[i];
1457                                 block->opcode_list[i] = block->opcode_list[i + 1];
1458                                 block->opcode_list[i + 1] = op;
1459                                 skip_next = true;
1460                         }
1461                 }
1462
1463                 in_ds = has_delay_slot(c);
1464         }
1465
1466         return 0;
1467 }
1468
1469 static int lightrec_local_branches(struct lightrec_state *state, struct block *block)
1470 {
1471         const struct opcode *ds;
1472         struct opcode *list;
1473         unsigned int i;
1474         s32 offset;
1475
1476         for (i = 0; i < block->nb_ops; i++) {
1477                 list = &block->opcode_list[i];
1478
1479                 if (should_emulate(list) || !is_local_branch(block, i))
1480                         continue;
1481
1482                 offset = i + 1 + (s16)list->c.i.imm;
1483
1484                 pr_debug("Found local branch to offset 0x%x\n", offset << 2);
1485
1486                 ds = get_delay_slot(block->opcode_list, i);
1487                 if (op_flag_load_delay(ds->flags) && opcode_is_load(ds->c)) {
1488                         pr_debug("Branch delay slot has a load delay - skip\n");
1489                         continue;
1490                 }
1491
1492                 if (should_emulate(&block->opcode_list[offset])) {
1493                         pr_debug("Branch target must be emulated - skip\n");
1494                         continue;
1495                 }
1496
1497                 if (offset && has_delay_slot(block->opcode_list[offset - 1].c)) {
1498                         pr_debug("Branch target is a delay slot - skip\n");
1499                         continue;
1500                 }
1501
1502                 list->flags |= LIGHTREC_LOCAL_BRANCH;
1503         }
1504
1505         lightrec_reset_syncs(block);
1506
1507         return 0;
1508 }
1509
1510 bool has_delay_slot(union code op)
1511 {
1512         switch (op.i.op) {
1513         case OP_SPECIAL:
1514                 switch (op.r.op) {
1515                 case OP_SPECIAL_JR:
1516                 case OP_SPECIAL_JALR:
1517                         return true;
1518                 default:
1519                         return false;
1520                 }
1521         case OP_J:
1522         case OP_JAL:
1523         case OP_BEQ:
1524         case OP_BNE:
1525         case OP_BLEZ:
1526         case OP_BGTZ:
1527         case OP_REGIMM:
1528                 return true;
1529         default:
1530                 return false;
1531         }
1532 }
1533
1534 bool is_delay_slot(const struct opcode *list, unsigned int offset)
1535 {
1536         return offset > 0
1537                 && !op_flag_no_ds(list[offset - 1].flags)
1538                 && has_delay_slot(list[offset - 1].c);
1539 }
1540
1541 bool should_emulate(const struct opcode *list)
1542 {
1543         return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c);
1544 }
1545
1546 static bool op_writes_rd(union code c)
1547 {
1548         switch (c.i.op) {
1549         case OP_SPECIAL:
1550         case OP_META:
1551                 return true;
1552         default:
1553                 return false;
1554         }
1555 }
1556
1557 static void lightrec_add_reg_op(struct opcode *op, u8 reg, u32 reg_op)
1558 {
1559         if (op_writes_rd(op->c) && reg == op->r.rd)
1560                 op->flags |= LIGHTREC_REG_RD(reg_op);
1561         else if (op->i.rs == reg)
1562                 op->flags |= LIGHTREC_REG_RS(reg_op);
1563         else if (op->i.rt == reg)
1564                 op->flags |= LIGHTREC_REG_RT(reg_op);
1565         else
1566                 pr_debug("Cannot add unload/clean/discard flag: "
1567                          "opcode does not touch register %s!\n",
1568                          lightrec_reg_name(reg));
1569 }
1570
1571 static void lightrec_add_unload(struct opcode *op, u8 reg)
1572 {
1573         lightrec_add_reg_op(op, reg, LIGHTREC_REG_UNLOAD);
1574 }
1575
1576 static void lightrec_add_discard(struct opcode *op, u8 reg)
1577 {
1578         lightrec_add_reg_op(op, reg, LIGHTREC_REG_DISCARD);
1579 }
1580
1581 static void lightrec_add_clean(struct opcode *op, u8 reg)
1582 {
1583         lightrec_add_reg_op(op, reg, LIGHTREC_REG_CLEAN);
1584 }
1585
1586 static void
1587 lightrec_early_unload_sync(struct opcode *list, s16 *last_r, s16 *last_w)
1588 {
1589         unsigned int reg;
1590         s16 offset;
1591
1592         for (reg = 0; reg < 34; reg++) {
1593                 offset = s16_max(last_w[reg], last_r[reg]);
1594
1595                 if (offset >= 0)
1596                         lightrec_add_unload(&list[offset], reg);
1597         }
1598
1599         memset(last_r, 0xff, sizeof(*last_r) * 34);
1600         memset(last_w, 0xff, sizeof(*last_w) * 34);
1601 }
1602
1603 static int lightrec_early_unload(struct lightrec_state *state, struct block *block)
1604 {
1605         u16 i, offset;
1606         struct opcode *op;
1607         s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0;
1608         u64 mask_r, mask_w, dirty = 0, loaded = 0;
1609         u8 reg, load_delay_reg = 0;
1610
1611         memset(last_r, 0xff, sizeof(last_r));
1612         memset(last_w, 0xff, sizeof(last_w));
1613
1614         /*
1615          * Clean if:
1616          * - the register is dirty, and is read again after a branch opcode
1617          *
1618          * Unload if:
1619          * - the register is dirty or loaded, and is not read again
1620          * - the register is dirty or loaded, and is written again after a branch opcode
1621          * - the next opcode has the SYNC flag set
1622          *
1623          * Discard if:
1624          * - the register is dirty or loaded, and is written again
1625          */
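        /* Rough example (hypothetical stream):
         *
         *   addu v0, a0, a1      <- v0 becomes dirty
         *   beq  v1, zero, 1f    <- branch: sync point after the delay slot
         *   ...
         *   lw   v0, 0(a2)       <- v0 written again after the branch:
         *                           unload its old value at its last use
         *
         * Without the branch in between, the first value of v0 would be
         * discarded instead. */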
1626
1627         for (i = 0; i < block->nb_ops; i++) {
1628                 op = &block->opcode_list[i];
1629
1630                 if (OPT_HANDLE_LOAD_DELAYS && load_delay_reg) {
1631                         /* Handle delayed register write from load opcodes in
1632                          * delay slots */
1633                         last_w[load_delay_reg] = i;
1634                         load_delay_reg = 0;
1635                 }
1636
1637                 if (op_flag_sync(op->flags) || should_emulate(op)) {
1638                         /* The next opcode has the SYNC flag set, or is a branch
1639                          * that should be emulated: unload all registers. */
1640                         lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
1641                         dirty = 0;
1642                         loaded = 0;
1643                 }
1644
1645                 if (next_sync == i) {
1646                         last_sync = i;
1647                         pr_debug("Last sync: 0x%x\n", last_sync << 2);
1648                 }
1649
1650                 if (has_delay_slot(op->c)) {
1651                         next_sync = i + 1 + !op_flag_no_ds(op->flags);
1652                         pr_debug("Next sync: 0x%x\n", next_sync << 2);
1653                 }
1654
1655                 mask_r = opcode_read_mask(op->c);
1656                 mask_w = opcode_write_mask(op->c);
1657
1658                 if (op_flag_load_delay(op->flags) && opcode_is_load(op->c)) {
1659                         /* If we have a load opcode in a delay slot, its target
1660                          * register is actually not written there but at a
1661                          * later point, in the dispatcher. Prevent the algorithm
1662                          * from discarding its previous value. */
1663                         load_delay_reg = op->c.i.rt;
1664                         mask_w &= ~BIT(op->c.i.rt);
1665                 }
1666
1667                 for (reg = 0; reg < 34; reg++) {
1668                         if (mask_r & BIT(reg)) {
1669                                 if (dirty & BIT(reg) && last_w[reg] < last_sync) {
1670                                         /* The register is dirty, and is read
1671                                          * again after a branch: clean it */
1672
1673                                         lightrec_add_clean(&block->opcode_list[last_w[reg]], reg);
1674                                         dirty &= ~BIT(reg);
1675                                         loaded |= BIT(reg);
1676                                 }
1677
1678                                 last_r[reg] = i;
1679                         }
1680
1681                         if (mask_w & BIT(reg)) {
1682                                 if ((dirty & BIT(reg) && last_w[reg] < last_sync) ||
1683                                     (loaded & BIT(reg) && last_r[reg] < last_sync)) {
1684                                         /* The register is dirty or loaded, and
1685                                          * is written again after a branch:
1686                                          * unload it */
1687
1688                                         offset = s16_max(last_w[reg], last_r[reg]);
1689                                         lightrec_add_unload(&block->opcode_list[offset], reg);
1690                                         dirty &= ~BIT(reg);
1691                                         loaded &= ~BIT(reg);
1692                                 } else if (!(mask_r & BIT(reg)) &&
1693                                            ((dirty & BIT(reg) && last_w[reg] > last_sync) ||
1694                                            (loaded & BIT(reg) && last_r[reg] > last_sync))) {
1695                                         /* The register is dirty or loaded, and
1696                                          * is written again: discard it */
1697
1698                                         offset = s16_max(last_w[reg], last_r[reg]);
1699                                         lightrec_add_discard(&block->opcode_list[offset], reg);
1700                                         dirty &= ~BIT(reg);
1701                                         loaded &= ~BIT(reg);
1702                                 }
1703
1704                                 last_w[reg] = i;
1705                         }
1706
1707                 }
1708
1709                 dirty |= mask_w;
1710                 loaded |= mask_r;
1711         }
1712
1713         /* Unload all registers that are dirty or loaded at the end of the block. */
1714         lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
1715
1716         return 0;
1717 }
1718
1719 static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
1720 {
1721         struct opcode *list;
1722         enum psx_map psx_map;
1723         struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
1724         unsigned int i;
1725         u32 val, kunseg_val;
1726         bool no_mask;
1727
1728         for (i = 0; i < block->nb_ops; i++) {
1729                 list = &block->opcode_list[i];
1730
1731                 lightrec_consts_propagate(block, i, v);
1732
1733                 switch (list->i.op) {
1734                 case OP_SB:
1735                 case OP_SH:
1736                 case OP_SW:
1737                         /* Mark all store operations that go through $sp
1738                          * or $gp as not requiring code invalidation. This
1739                          * is based on the heuristic that stores addressed
1740                          * through these registers never hit a code page. */
1741                         if (list->i.rs >= 28 && list->i.rs <= 29 &&
1742                             !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1743                                 pr_debug("Flagging opcode 0x%08x as not requiring invalidation\n",
1744                                          list->opcode);
1745                                 list->flags |= LIGHTREC_NO_INVALIDATE;
1746                         }
1747
1748                         /* Detect writes whose destination address is inside the
1749                          * current block, using constant propagation. When these
1750                          * occur, we mark the block as not compilable. */
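                        /* For instance (hypothetical), a block at PC
                         * 0x80010000 with 16 opcodes covers the kunseg'd
                         * range [0x10000, 0x10040); a store whose target
                         * address is known to fall inside that window flags
                         * the block. */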
1751                         if (is_known(v, list->i.rs) &&
1752                             kunseg(v[list->i.rs].value) >= kunseg(block->pc) &&
1753                             kunseg(v[list->i.rs].value) < (kunseg(block->pc) + block->nb_ops * 4)) {
1754                                 pr_debug("Self-modifying block detected\n");
1755                                 block_set_flags(block, BLOCK_NEVER_COMPILE);
1756                                 list->flags |= LIGHTREC_SMC;
1757                         }
1758                         fallthrough;
1759                 case OP_SWL:
1760                 case OP_SWR:
1761                 case OP_SWC2:
1762                 case OP_LB:
1763                 case OP_LBU:
1764                 case OP_LH:
1765                 case OP_LHU:
1766                 case OP_LW:
1767                 case OP_LWL:
1768                 case OP_LWR:
1769                 case OP_LWC2:
1770                         if (v[list->i.rs].known | v[list->i.rs].sign) {
1771                                 psx_map = lightrec_get_constprop_map(state, v,
1772                                                                      list->i.rs,
1773                                                                      (s16) list->i.imm);
1774
1775                                 if (psx_map != PSX_MAP_UNKNOWN && !is_known(v, list->i.rs))
1776                                         pr_debug("Detected map thanks to bit-level const propagation!\n");
1777
1778                                 list->flags &= ~LIGHTREC_IO_MASK;
1779
1780                                 val = v[list->i.rs].value + (s16) list->i.imm;
1781                                 kunseg_val = kunseg(val);
1782
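                                /* The access can skip the address-masking
                                 * step when the top three address bits are
                                 * known to be zero, i.e. the pointer is a
                                 * plain KUSEG address with no mirror bits
                                 * set. */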
1783                                 no_mask = (v[list->i.rs].known & ~v[list->i.rs].value
1784                                            & 0xe0000000) == 0xe0000000;
1785
1786                                 switch (psx_map) {
1787                                 case PSX_MAP_KERNEL_USER_RAM:
1788                                         if (no_mask)
1789                                                 list->flags |= LIGHTREC_NO_MASK;
1790                                         fallthrough;
1791                                 case PSX_MAP_MIRROR1:
1792                                 case PSX_MAP_MIRROR2:
1793                                 case PSX_MAP_MIRROR3:
1794                                         pr_debug("Flagging opcode %u as RAM access\n", i);
1795                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1796                                         if (no_mask && state->mirrors_mapped)
1797                                                 list->flags |= LIGHTREC_NO_MASK;
1798                                         break;
1799                                 case PSX_MAP_BIOS:
1800                                         pr_debug("Flagging opcode %u as BIOS access\n", i);
1801                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_BIOS);
1802                                         if (no_mask)
1803                                                 list->flags |= LIGHTREC_NO_MASK;
1804                                         break;
1805                                 case PSX_MAP_SCRATCH_PAD:
1806                                         pr_debug("Flagging opcode %u as scratchpad access\n", i);
1807                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_SCRATCH);
1808                                         if (no_mask)
1809                                                 list->flags |= LIGHTREC_NO_MASK;
1810
1811                                         /* Consider that we're never going to run code from
1812                                          * the scratchpad. */
1813                                         list->flags |= LIGHTREC_NO_INVALIDATE;
1814                                         break;
1815                                 case PSX_MAP_HW_REGISTERS:
1816                                         if (state->ops.hw_direct &&
1817                                             state->ops.hw_direct(kunseg_val,
1818                                                                  opcode_is_store(list->c),
1819                                                                  opcode_get_io_size(list->c))) {
1820                                                 pr_debug("Flagging opcode %u as direct I/O access\n",
1821                                                          i);
1822                                                 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT_HW);
1823
1824                                                 if (no_mask)
1825                                                         list->flags |= LIGHTREC_NO_MASK;
1826                                         } else {
1827                                                 pr_debug("Flagging opcode %u as I/O access\n",
1828                                                          i);
1829                                                 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
1830                                         }
1831                                         break;
1832                                 default:
1833                                         break;
1834                                 }
1835                         }
1836
1837                         if (!LIGHTREC_FLAGS_GET_IO_MODE(list->flags)
1838                             && list->i.rs >= 28 && list->i.rs <= 29
1839                             && !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1840                                 /* Assume that all I/O operations based on
1841                                  * $sp or $gp will only ever target mapped
1842                                  * memory (RAM, BIOS, scratchpad). */
1843                                 if (state->opt_flags & LIGHTREC_OPT_SP_GP_HIT_RAM)
1844                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1845                                 else
1846                                         list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
1847                         }
1848
1849                         fallthrough;
1850                 default:
1851                         break;
1852                 }
1853         }
1854
1855         return 0;
1856 }
1857
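/* Scan the block from "offset" to decide where the HI or LO result of a
 * multiplication/division can live. Roughly (a reading of the logic below):
 * returns 0 when the value is never consumed, the rd of a reachable
 * MFHI/MFLO when the result can be written to that register directly, or
 * REG_HI/REG_LO when the value must stay in its dedicated register. */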
1858 static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset,
1859                             const struct opcode *last,
1860                             u32 mask, bool sync, bool mflo, bool another)
1861 {
1862         const struct opcode *op, *next = &block->opcode_list[offset];
1863         u32 old_mask;
1864         u8 reg2, reg = mflo ? REG_LO : REG_HI;
1865         u16 branch_offset;
1866         unsigned int i;
1867
1868         for (i = offset; i < block->nb_ops; i++) {
1869                 op = next;
1870                 next = &block->opcode_list[i + 1];
1871                 old_mask = mask;
1872
1873                 /* If any other opcode reads or writes the register
1874                  * we'd use, then we cannot use it anymore. */
1875                 mask |= opcode_read_mask(op->c);
1876                 mask |= opcode_write_mask(op->c);
1877
1878                 if (op_flag_sync(op->flags))
1879                         sync = true;
1880
1881                 switch (op->i.op) {
1882                 case OP_BEQ:
1883                 case OP_BNE:
1884                 case OP_BLEZ:
1885                 case OP_BGTZ:
1886                 case OP_REGIMM:
1887                         /* TODO: handle backwards branches too */
1888                         if (!last && op_flag_local_branch(op->flags) &&
1889                             (s16)op->c.i.imm >= 0) {
1890                                 branch_offset = i + 1 + (s16)op->c.i.imm
1891                                         - !!op_flag_no_ds(op->flags);
1892
1893                                 reg = get_mfhi_mflo_reg(block, branch_offset, NULL,
1894                                                         mask, sync, mflo, false);
1895                                 reg2 = get_mfhi_mflo_reg(block, offset + 1, next,
1896                                                          mask, sync, mflo, false);
1897                                 if (reg > 0 && reg == reg2)
1898                                         return reg;
1899                                 if (!reg && !reg2)
1900                                         return 0;
1901                         }
1902
1903                         return mflo ? REG_LO : REG_HI;
1904                 case OP_META_MULT2:
1905                 case OP_META_MULTU2:
1906                         return 0;
1907                 case OP_SPECIAL:
1908                         switch (op->r.op) {
1909                         case OP_SPECIAL_MULT:
1910                         case OP_SPECIAL_MULTU:
1911                         case OP_SPECIAL_DIV:
1912                         case OP_SPECIAL_DIVU:
1913                                 return 0;
1914                         case OP_SPECIAL_MTHI:
1915                                 if (!mflo)
1916                                         return 0;
1917                                 continue;
1918                         case OP_SPECIAL_MTLO:
1919                                 if (mflo)
1920                                         return 0;
1921                                 continue;
1922                         case OP_SPECIAL_JR:
1923                                 if (op->r.rs != 31)
1924                                         return reg;
1925
1926                                 if (!sync && !op_flag_no_ds(op->flags) &&
1927                                     (next->i.op == OP_SPECIAL) &&
1928                                     ((!mflo && next->r.op == OP_SPECIAL_MFHI) ||
1929                                     (mflo && next->r.op == OP_SPECIAL_MFLO)))
1930                                         return next->r.rd;
1931
1932                                 return 0;
1933                         case OP_SPECIAL_JALR:
1934                                 return reg;
1935                         case OP_SPECIAL_MFHI:
1936                                 if (!mflo) {
1937                                         if (another)
1938                                                 return op->r.rd;
1939                                         /* Must use REG_HI if there is another MFHI target */
1940                                         reg2 = get_mfhi_mflo_reg(block, i + 1, next,
1941                                                          0, sync, mflo, true);
1942                                         if (reg2 > 0 && reg2 != REG_HI)
1943                                                 return REG_HI;
1944
1945                                         if (!sync && !(old_mask & BIT(op->r.rd)))
1946                                                 return op->r.rd;
1947                                         else
1948                                                 return REG_HI;
1949                                 }
1950                                 continue;
1951                         case OP_SPECIAL_MFLO:
1952                                 if (mflo) {
1953                                         if (another)
1954                                                 return op->r.rd;
1955                                         /* Must use REG_LO if there is another MFLO target */
1956                                         reg2 = get_mfhi_mflo_reg(block, i + 1, next,
1957                                                          0, sync, mflo, true);
1958                                         if (reg2 > 0 && reg2 != REG_LO)
1959                                                 return REG_LO;
1960
1961                                         if (!sync && !(old_mask & BIT(op->r.rd)))
1962                                                 return op->r.rd;
1963                                         else
1964                                                 return REG_LO;
1965                                 }
1966                                 continue;
1967                         default:
1968                                 break;
1969                         }
1970
1971                         fallthrough;
1972                 default:
1973                         continue;
1974                 }
1975         }
1976
1977         return reg;
1978 }
1979
1980 static void lightrec_replace_lo_hi(struct block *block, u16 offset,
1981                                    u16 last, bool lo)
1982 {
1983         unsigned int i;
1984         u32 branch_offset;
1985
1986         /* This function removes the next MFLO/MFHI opcode found. It must be
1987          * called only if get_mfhi_mflo_reg() returned a non-zero value. */
1988
1989         for (i = offset; i < last; i++) {
1990                 struct opcode *op = &block->opcode_list[i];
1991
1992                 switch (op->i.op) {
1993                 case OP_BEQ:
1994                 case OP_BNE:
1995                 case OP_BLEZ:
1996                 case OP_BGTZ:
1997                 case OP_REGIMM:
1998                         /* TODO: handle backwards branches too */
1999                         if (op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) {
2000                                 branch_offset = i + 1 + (s16)op->c.i.imm
2001                                         - !!op_flag_no_ds(op->flags);
2002
2003                                 lightrec_replace_lo_hi(block, branch_offset, last, lo);
2004                                 lightrec_replace_lo_hi(block, i + 1, branch_offset, lo);
2005                         }
2006                         break;
2007
2008                 case OP_SPECIAL:
2009                         if (lo && op->r.op == OP_SPECIAL_MFLO) {
2010                                 pr_debug("Removing MFLO opcode at offset 0x%x\n",
2011                                          i << 2);
2012                                 op->opcode = 0;
2013                                 return;
2014                         } else if (!lo && op->r.op == OP_SPECIAL_MFHI) {
2015                                 pr_debug("Removing MFHI opcode at offset 0x%x\n",
2016                                          i << 2);
2017                                 op->opcode = 0;
2018                                 return;
2019                         }
2020
2021                         fallthrough;
2022                 default:
2023                         break;
2024                 }
2025         }
2026 }
2027
2028 static bool lightrec_always_skip_div_check(void)
2029 {
2030 #ifdef __mips__
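        /* Native MIPS divisions do not trap on a zero divisor (the result is
         * simply undefined, matching the R3000's behaviour), so the check can
         * always be skipped. */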
2031         return true;
2032 #else
2033         return false;
2034 #endif
2035 }
2036
2037 static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block *block)
2038 {
2039         struct opcode *list = NULL;
2040         struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
2041         u8 reg_hi, reg_lo;
2042         unsigned int i;
2043
2044         for (i = 0; i < block->nb_ops - 1; i++) {
2045                 list = &block->opcode_list[i];
2046
2047                 lightrec_consts_propagate(block, i, v);
2048
2049                 switch (list->i.op) {
2050                 case OP_SPECIAL:
2051                         switch (list->r.op) {
2052                         case OP_SPECIAL_DIV:
2053                         case OP_SPECIAL_DIVU:
2054                                 /* If any divisor bit is known to be set (so it
2055                                  * cannot be zero), don't emit the div-by-zero check. */
2056                                 if (lightrec_always_skip_div_check() ||
2057                                     (v[list->r.rt].known & v[list->r.rt].value)) {
2058                                         list->flags |= LIGHTREC_NO_DIV_CHECK;
2059                                 }
2060                                 fallthrough;
2061                         case OP_SPECIAL_MULT:
2062                         case OP_SPECIAL_MULTU:
2063                                 break;
2064                         default:
2065                                 continue;
2066                         }
2067                         fallthrough;
2068                 case OP_META_MULT2:
2069                 case OP_META_MULTU2:
2070                         break;
2071                 default:
2072                         continue;
2073                 }
2074
2075                 /* Don't support opcodes in delay slots */
2076                 if (is_delay_slot(block->opcode_list, i) ||
2077                     op_flag_no_ds(list->flags)) {
2078                         continue;
2079                 }
2080
2081                 reg_lo = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, true, false);
2082                 if (reg_lo == 0) {
2083                         pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
2084                                  " not writing LO\n", i << 2);
2085                         list->flags |= LIGHTREC_NO_LO;
2086                 }
2087
2088                 reg_hi = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, false, false);
2089                 if (reg_hi == 0) {
2090                         pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
2091                                  " not writing HI\n", i << 2);
2092                         list->flags |= LIGHTREC_NO_HI;
2093                 }
2094
2095                 if (!reg_lo && !reg_hi) {
2096                         pr_debug("Both LO/HI are unused in this block; they "
2097                                  "will probably be used in the parent block - "
2098                                  "removing flags.\n");
2099                         list->flags &= ~(LIGHTREC_NO_LO | LIGHTREC_NO_HI);
2100                 }
2101
2102                 if (reg_lo > 0 && reg_lo != REG_LO) {
2103                         pr_debug("Found register %s to hold LO (rs = %u, rt = %u)\n",
2104                                  lightrec_reg_name(reg_lo), list->r.rs, list->r.rt);
2105
2106                         lightrec_replace_lo_hi(block, i + 1, block->nb_ops, true);
2107                         list->r.rd = reg_lo;
2108                 } else {
2109                         list->r.rd = 0;
2110                 }
2111
2112                 if (reg_hi > 0 && reg_hi != REG_HI) {
2113                         pr_debug("Found register %s to hold HI (rs = %u, rt = %u)\n",
2114                                  lightrec_reg_name(reg_hi), list->r.rs, list->r.rt);
2115
2116                         lightrec_replace_lo_hi(block, i + 1, block->nb_ops, false);
2117                         list->r.imm = reg_hi;
2118                 } else {
2119                         list->r.imm = 0;
2120                 }
2121         }
2122
2123         return 0;
2124 }
2125
2126 static bool remove_div_sequence(struct block *block, unsigned int offset)
2127 {
2128         struct opcode *op;
2129         unsigned int i, found = 0;
2130
2131         /*
2132          * Scan for the zero-checking sequence that GCC automatically introduced
2133          * after most DIV/DIVU opcodes. This sequence checks the value of the
2134          * divisor, and if zero, executes a BREAK opcode, causing the BIOS
2135          * handler to crash the PS1.
2136          *
2137          * For DIV opcodes, this sequence additionally checks that the signed
2138          * operation does not overflow.
2139          *
2140          * Since the games presumably never crashed the PS1 this way, we can
2141          * assume that they never divided by zero or overflowed, and these
2142          * sequences can be safely removed.
2143          */
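        /* The full signed-DIV guard looks roughly like this (register names
         * are illustrative):
         *
         *      div   rs, rt
         *      bne   rt, zero, 1f
         *      nop
         *      break 0x1c00          <- divide by zero
         * 1:   li    at, -1
         *      bne   rt, at, 2f
         *      lui   at, 0x8000
         *      bne   rs, at, 2f
         *      nop
         *      break 0x1800          <- 0x80000000 / -1 overflow
         * 2:
         *
         * while DIVU only gets the first three opcodes after the division. */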
2144
2145         for (i = offset; i < block->nb_ops; i++) {
2146                 op = &block->opcode_list[i];
2147
2148                 if (!found) {
2149                         if (op->i.op == OP_SPECIAL &&
2150                             (op->r.op == OP_SPECIAL_DIV || op->r.op == OP_SPECIAL_DIVU))
2151                                 break;
2152
2153                         if ((op->opcode & 0xfc1fffff) == 0x14000002) {
2154                                 /* BNE ???, zero, +8 */
2155                                 found++;
2156                         } else {
2157                                 offset++;
2158                         }
2159                 } else if (found == 1 && !op->opcode) {
2160                         /* NOP */
2161                         found++;
2162                 } else if (found == 2 && op->opcode == 0x0007000d) {
2163                         /* BREAK 0x1c00 */
2164                         found++;
2165                 } else if (found == 3 && op->opcode == 0x2401ffff) {
2166                         /* LI at, -1 */
2167                         found++;
2168                 } else if (found == 4 && (op->opcode & 0xfc1fffff) == 0x14010004) {
2169                         /* BNE ???, at, +16 */
2170                         found++;
2171                 } else if (found == 5 && op->opcode == 0x3c018000) {
2172                         /* LUI at, 0x8000 */
2173                         found++;
2174                 } else if (found == 6 && (op->opcode & 0xfc1fffff) == 0x14010002) {
2175                         /* BNE ???, at, +8 */
2176                         found++;
2177                 } else if (found == 7 && !op->opcode) {
2178                         /* NOP */
2179                         found++;
2180                 } else if (found == 8 && op->opcode == 0x0006000d) {
2181                         /* BREAK 0x1800 */
2182                         found++;
2183                         break;
2184                 } else {
2185                         break;
2186                 }
2187         }
2188
2189         if (found >= 3) {
2190                 if (found != 9)
2191                         found = 3;
2192
2193                 pr_debug("Removing DIV%s sequence at offset 0x%x\n",
2194                          found == 9 ? "" : "U", offset << 2);
2195
2196                 for (i = 0; i < found; i++)
2197                         block->opcode_list[offset + i].opcode = 0;
2198
2199                 return true;
2200         }
2201
2202         return false;
2203 }
2204
2205 static int lightrec_remove_div_by_zero_check_sequence(struct lightrec_state *state,
2206                                                       struct block *block)
2207 {
2208         struct opcode *op;
2209         unsigned int i;
2210
2211         for (i = 0; i < block->nb_ops; i++) {
2212                 op = &block->opcode_list[i];
2213
2214                 if (op->i.op == OP_SPECIAL &&
2215                     (op->r.op == OP_SPECIAL_DIVU || op->r.op == OP_SPECIAL_DIV) &&
2216                     remove_div_sequence(block, i + 1))
2217                         op->flags |= LIGHTREC_NO_DIV_CHECK;
2218         }
2219
2220         return 0;
2221 }
2222
2223 static const u32 memset_code[] = {
2224         0x10a00006,     // beqz         a1, 2f
2225         0x24a2ffff,     // addiu        v0,a1,-1
2226         0x2403ffff,     // li           v1,-1
2227         0xac800000,     // 1: sw        zero,0(a0)
2228         0x2442ffff,     // addiu        v0,v0,-1
2229         0x1443fffd,     // bne          v0,v1, 1b
2230         0x24840004,     // addiu        a0,a0,4
2231         0x03e00008,     // 2: jr        ra
2232         0x00000000,     // nop
2233 };
2234
2235 static int lightrec_replace_memset(struct lightrec_state *state, struct block *block)
2236 {
2237         unsigned int i;
2238         union code c;
2239
2240         for (i = 0; i < block->nb_ops; i++) {
2241                 c = block->opcode_list[i].c;
2242
2243                 if (c.opcode != memset_code[i])
2244                         return 0;
2245
2246                 if (i == ARRAY_SIZE(memset_code) - 1) {
2247                         /* success! */
2248                         pr_debug("Block at PC 0x%x is a memset\n", block->pc);
2249                         block_set_flags(block,
2250                                         BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE);
2251
2252                         /* Return non-zero to skip other optimizers. */
2253                         return 1;
2254                 }
2255         }
2256
2257         return 0;
2258 }
2259
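/* Decide whether the compiled block needs the emulated PC value preloaded:
 * this is the case (before the first sync point) for opcodes that expose
 * the PC, e.g. jumps and calls that store a return address, non-local
 * branches, SYSCALL and BREAK. This summary is a reading of the checks
 * below. */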
2260 static int lightrec_test_preload_pc(struct lightrec_state *state, struct block *block)
2261 {
2262         unsigned int i;
2263         union code c;
2264         u32 flags;
2265
2266         for (i = 0; i < block->nb_ops; i++) {
2267                 c = block->opcode_list[i].c;
2268                 flags = block->opcode_list[i].flags;
2269
2270                 if (op_flag_sync(flags))
2271                         break;
2272
2273                 switch (c.i.op) {
2274                 case OP_J:
2275                 case OP_JAL:
2276                         block->flags |= BLOCK_PRELOAD_PC;
2277                         return 0;
2278
2279                 case OP_REGIMM:
2280                         switch (c.r.rt) {
2281                         case OP_REGIMM_BLTZAL:
2282                         case OP_REGIMM_BGEZAL:
2283                                 block->flags |= BLOCK_PRELOAD_PC;
2284                                 return 0;
2285                         default:
2286                                 break;
2287                         }
2288                         fallthrough;
2289                 case OP_BEQ:
2290                 case OP_BNE:
2291                 case OP_BLEZ:
2292                 case OP_BGTZ:
2293                         if (!op_flag_local_branch(flags)) {
2294                                 block->flags |= BLOCK_PRELOAD_PC;
2295                                 return 0;
2296                         }
2297                         break;
2298                 case OP_SPECIAL:
2299                         switch (c.r.op) {
2300                         case OP_SPECIAL_JALR:
2301                                 if (c.r.rd) {
2302                                         block->flags |= BLOCK_PRELOAD_PC;
2303                                         return 0;
2304                                 }
2305                                 break;
2306                         case OP_SPECIAL_SYSCALL:
2307                         case OP_SPECIAL_BREAK:
2308                                 block->flags |= BLOCK_PRELOAD_PC;
2309                                 return 0;
2310                         default:
2311                                 break;
2312                         }
2313                         break;
2314                 }
2315         }
2316
2317         return 0;
2318 }
2319
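/* The passes below run in this order; compile-time disabled passes become
 * NULL entries via IF_OPT() and are skipped, and a pass returning non-zero
 * (e.g. lightrec_replace_memset() on a match) stops the chain early. */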
2320 static int (*lightrec_optimizers[])(struct lightrec_state *state, struct block *) = {
2321         IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence),
2322         IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset),
2323         IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches),
2324         IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_handle_load_delays),
2325         IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_swap_load_delays),
2326         IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches),
2327         IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches),
2328         IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops),
2329         IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),
2330         IF_OPT(OPT_FLAG_IO, &lightrec_flag_io),
2331         IF_OPT(OPT_FLAG_MULT_DIV, &lightrec_flag_mults_divs),
2332         IF_OPT(OPT_EARLY_UNLOAD, &lightrec_early_unload),
2333         IF_OPT(OPT_PRELOAD_PC, &lightrec_test_preload_pc),
2334 };
2335
2336 int lightrec_optimize(struct lightrec_state *state, struct block *block)
2337 {
2338         unsigned int i;
2339         int ret;
2340
2341         for (i = 0; i < ARRAY_SIZE(lightrec_optimizers); i++) {
2342                 if (lightrec_optimizers[i]) {
2343                         ret = (*lightrec_optimizers[i])(state, block);
2344                         if (ret)
2345                                 return ret;
2346                 }
2347         }
2348
2349         return 0;
2350 }