git subrepo pull --force deps/lightrec
[pcsx_rearmed.git] / deps / lightrec / optimizer.c
// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "constprop.h"
#include "lightrec-config.h"
#include "disassembler.h"
#include "lightrec.h"
#include "memmanager.h"
#include "optimizer.h"
#include "regcache.h"

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define IF_OPT(opt, ptr) ((opt) ? (ptr) : NULL)
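
/* IF_OPT() lets a whole optimizer pass be compiled out: when the
 * corresponding OPT_* configuration flag is zero, the pass pointer in the
 * lightrec_optimizers[] table at the bottom of this file becomes NULL and
 * is simply skipped at run time. */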

struct optimizer_list {
	void (**optimizers)(struct opcode *);
	unsigned int nb_optimizers;
};

static bool is_nop(union code op);

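/* A jump is "unconditional" when its condition can never be false.
 * Example (sketch): "beq $t0, $t0, label" compares a register with itself,
 * and "bgez $zero, label" tests $zero >= 0, so both are always taken. */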
bool is_unconditional_jump(union code c)
{
	switch (c.i.op) {
	case OP_SPECIAL:
		return c.r.op == OP_SPECIAL_JR || c.r.op == OP_SPECIAL_JALR;
	case OP_J:
	case OP_JAL:
		return true;
	case OP_BEQ:
	case OP_BLEZ:
		return c.i.rs == c.i.rt;
	case OP_REGIMM:
		return (c.r.rt == OP_REGIMM_BGEZ ||
			c.r.rt == OP_REGIMM_BGEZAL) && c.i.rs == 0;
	default:
		return false;
	}
}

bool is_syscall(union code c)
{
	return (c.i.op == OP_SPECIAL && c.r.op == OP_SPECIAL_SYSCALL) ||
	       (c.i.op == OP_CP0 && (c.r.rs == OP_CP0_MTC0 ||
				     c.r.rs == OP_CP0_CTC0) &&
		(c.r.rd == 12 || c.r.rd == 13));
}

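/* Register usage is tracked with 64-bit masks where bit N stands for
 * register N. Bits 0-31 are the MIPS GPRs; REG_LO and REG_HI get the two
 * extra indices (the 34-entry liveness arrays used later assume the same
 * layout). A minimal example: "addu $v0, $a0, $a1" reads BIT(4) | BIT(5)
 * and writes BIT(2). */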
static u64 opcode_read_mask(union code op)
{
	switch (op.i.op) {
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_SYSCALL:
		case OP_SPECIAL_BREAK:
			return 0;
		case OP_SPECIAL_JR:
		case OP_SPECIAL_JALR:
		case OP_SPECIAL_MTHI:
		case OP_SPECIAL_MTLO:
			return BIT(op.r.rs);
		case OP_SPECIAL_MFHI:
			return BIT(REG_HI);
		case OP_SPECIAL_MFLO:
			return BIT(REG_LO);
		case OP_SPECIAL_SLL:
			if (!op.r.imm)
				return 0;
			fallthrough;
		case OP_SPECIAL_SRL:
		case OP_SPECIAL_SRA:
			return BIT(op.r.rt);
		default:
			return BIT(op.r.rs) | BIT(op.r.rt);
		}
	case OP_CP0:
		switch (op.r.rs) {
		case OP_CP0_MTC0:
		case OP_CP0_CTC0:
			return BIT(op.r.rt);
		default:
			return 0;
		}
	case OP_CP2:
		if (op.r.op == OP_CP2_BASIC) {
			switch (op.r.rs) {
			case OP_CP2_BASIC_MTC2:
			case OP_CP2_BASIC_CTC2:
				return BIT(op.r.rt);
			default:
				break;
			}
		}
		return 0;
	case OP_J:
	case OP_JAL:
	case OP_LUI:
		return 0;
	case OP_BEQ:
		if (op.i.rs == op.i.rt)
			return 0;
		fallthrough;
	case OP_BNE:
	case OP_LWL:
	case OP_LWR:
	case OP_SB:
	case OP_SH:
	case OP_SWL:
	case OP_SW:
	case OP_SWR:
		return BIT(op.i.rs) | BIT(op.i.rt);
	case OP_META:
		return BIT(op.m.rs);
	default:
		return BIT(op.i.rs);
	}
}

static u64 mult_div_write_mask(union code op)
{
	u64 flags;

	if (!OPT_FLAG_MULT_DIV)
		return BIT(REG_LO) | BIT(REG_HI);

	if (op.r.rd)
		flags = BIT(op.r.rd);
	else
		flags = BIT(REG_LO);
	if (op.r.imm)
		flags |= BIT(op.r.imm);
	else
		flags |= BIT(REG_HI);

	return flags;
}

u64 opcode_write_mask(union code op)
{
	switch (op.i.op) {
	case OP_META_MULT2:
	case OP_META_MULTU2:
		return mult_div_write_mask(op);
	case OP_META:
		return BIT(op.m.rd);
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_JR:
		case OP_SPECIAL_SYSCALL:
		case OP_SPECIAL_BREAK:
			return 0;
		case OP_SPECIAL_MULT:
		case OP_SPECIAL_MULTU:
		case OP_SPECIAL_DIV:
		case OP_SPECIAL_DIVU:
			return mult_div_write_mask(op);
		case OP_SPECIAL_MTHI:
			return BIT(REG_HI);
		case OP_SPECIAL_MTLO:
			return BIT(REG_LO);
		case OP_SPECIAL_SLL:
			if (!op.r.imm)
				return 0;
			fallthrough;
		default:
			return BIT(op.r.rd);
		}
	case OP_ADDI:
	case OP_ADDIU:
	case OP_SLTI:
	case OP_SLTIU:
	case OP_ANDI:
	case OP_ORI:
	case OP_XORI:
	case OP_LUI:
	case OP_LB:
	case OP_LH:
	case OP_LWL:
	case OP_LW:
	case OP_LBU:
	case OP_LHU:
	case OP_LWR:
		return BIT(op.i.rt);
	case OP_JAL:
		return BIT(31);
	case OP_CP0:
		switch (op.r.rs) {
		case OP_CP0_MFC0:
		case OP_CP0_CFC0:
			return BIT(op.i.rt);
		default:
			return 0;
		}
	case OP_CP2:
		if (op.r.op == OP_CP2_BASIC) {
			switch (op.r.rs) {
			case OP_CP2_BASIC_MFC2:
			case OP_CP2_BASIC_CFC2:
				return BIT(op.i.rt);
			default:
				break;
			}
		}
		return 0;
	case OP_REGIMM:
		switch (op.r.rt) {
		case OP_REGIMM_BLTZAL:
		case OP_REGIMM_BGEZAL:
			return BIT(31);
		default:
			return 0;
		}
	default:
		return 0;
	}
}

bool opcode_reads_register(union code op, u8 reg)
{
	return opcode_read_mask(op) & BIT(reg);
}

bool opcode_writes_register(union code op, u8 reg)
{
	return opcode_write_mask(op) & BIT(reg);
}

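/* The helpers below do simple, conservative data-flow scans over the
 * opcode list. They stop at SYNC points and around delay slots, so a
 * negative result only means "nothing found inside this safe window". */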
static int find_prev_writer(const struct opcode *list, unsigned int offset, u8 reg)
{
	union code c;
	unsigned int i;

	if (op_flag_sync(list[offset].flags))
		return -1;

	for (i = offset; i > 0; i--) {
		c = list[i - 1].c;

		if (opcode_writes_register(c, reg)) {
			if (i > 1 && has_delay_slot(list[i - 2].c))
				break;

			return i - 1;
		}

		if (op_flag_sync(list[i - 1].flags) ||
		    has_delay_slot(c) ||
		    opcode_reads_register(c, reg))
			break;
	}

	return -1;
}

static int find_next_reader(const struct opcode *list, unsigned int offset, u8 reg)
{
	unsigned int i;
	union code c;

	if (op_flag_sync(list[offset].flags))
		return -1;

	for (i = offset; ; i++) {
		c = list[i].c;

		if (opcode_reads_register(c, reg))
			return i;

		if (op_flag_sync(list[i].flags)
		    || (op_flag_no_ds(list[i].flags) && has_delay_slot(c))
		    || is_delay_slot(list, i)
		    || opcode_writes_register(c, reg))
			break;
	}

	return -1;
}

static bool reg_is_dead(const struct opcode *list, unsigned int offset, u8 reg)
{
	unsigned int i;

	if (op_flag_sync(list[offset].flags) || is_delay_slot(list, offset))
		return false;

	for (i = offset + 1; ; i++) {
		if (opcode_reads_register(list[i].c, reg))
			return false;

		if (opcode_writes_register(list[i].c, reg))
			return true;

		if (has_delay_slot(list[i].c)) {
			if (op_flag_no_ds(list[i].flags) ||
			    opcode_reads_register(list[i + 1].c, reg))
				return false;

			return opcode_writes_register(list[i + 1].c, reg);
		}
	}
}

static bool reg_is_read(const struct opcode *list,
			unsigned int a, unsigned int b, u8 reg)
{
	/* Return true if reg is read in one of the opcodes of the interval
	 * [a, b[ */
	for (; a < b; a++) {
		if (!is_nop(list[a].c) && opcode_reads_register(list[a].c, reg))
			return true;
	}

	return false;
}

static bool reg_is_written(const struct opcode *list,
			   unsigned int a, unsigned int b, u8 reg)
{
	/* Return true if reg is written in one of the opcodes of the interval
	 * [a, b[ */

	for (; a < b; a++) {
		if (!is_nop(list[a].c) && opcode_writes_register(list[a].c, reg))
			return true;
	}

	return false;
}

static bool reg_is_read_or_written(const struct opcode *list,
				   unsigned int a, unsigned int b, u8 reg)
{
	return reg_is_read(list, a, b, reg) || reg_is_written(list, a, b, reg);
}

bool opcode_is_mfc(union code op)
{
	switch (op.i.op) {
	case OP_CP0:
		switch (op.r.rs) {
		case OP_CP0_MFC0:
		case OP_CP0_CFC0:
			return true;
		default:
			break;
		}

		break;
	case OP_CP2:
		if (op.r.op == OP_CP2_BASIC) {
			switch (op.r.rs) {
			case OP_CP2_BASIC_MFC2:
			case OP_CP2_BASIC_CFC2:
				return true;
			default:
				break;
			}
		}

		break;
	default:
		break;
	}

	return false;
}

bool opcode_is_load(union code op)
{
	switch (op.i.op) {
	case OP_LB:
	case OP_LH:
	case OP_LWL:
	case OP_LW:
	case OP_LBU:
	case OP_LHU:
	case OP_LWR:
	case OP_LWC2:
		return true;
	default:
		return false;
	}
}

static bool opcode_is_store(union code op)
{
	switch (op.i.op) {
	case OP_SB:
	case OP_SH:
	case OP_SW:
	case OP_SWL:
	case OP_SWR:
	case OP_SWC2:
		return true;
	default:
		return false;
	}
}

static u8 opcode_get_io_size(union code op)
{
	switch (op.i.op) {
	case OP_LB:
	case OP_LBU:
	case OP_SB:
		return 8;
	case OP_LH:
	case OP_LHU:
	case OP_SH:
		return 16;
	default:
		return 32;
	}
}

bool opcode_is_io(union code op)
{
	return opcode_is_load(op) || opcode_is_store(op);
}

/* TODO: Complete */
static bool is_nop(union code op)
{
	if (opcode_writes_register(op, 0)) {
		switch (op.i.op) {
		case OP_CP0:
			return op.r.rs != OP_CP0_MFC0;
		case OP_LB:
		case OP_LH:
		case OP_LWL:
		case OP_LW:
		case OP_LBU:
		case OP_LHU:
		case OP_LWR:
			return false;
		default:
			return true;
		}
	}

	switch (op.i.op) {
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_AND:
			return op.r.rd == op.r.rt && op.r.rd == op.r.rs;
		case OP_SPECIAL_ADD:
		case OP_SPECIAL_ADDU:
			return (op.r.rd == op.r.rt && op.r.rs == 0) ||
			       (op.r.rd == op.r.rs && op.r.rt == 0);
		case OP_SPECIAL_SUB:
		case OP_SPECIAL_SUBU:
			return op.r.rd == op.r.rs && op.r.rt == 0;
		case OP_SPECIAL_OR:
			if (op.r.rd == op.r.rt)
				return op.r.rd == op.r.rs || op.r.rs == 0;
			else
				return (op.r.rd == op.r.rs) && op.r.rt == 0;
		case OP_SPECIAL_SLL:
		case OP_SPECIAL_SRA:
		case OP_SPECIAL_SRL:
			return op.r.rd == op.r.rt && op.r.imm == 0;
		case OP_SPECIAL_MFHI:
		case OP_SPECIAL_MFLO:
			return op.r.rd == 0;
		default:
			return false;
		}
	case OP_ORI:
	case OP_ADDI:
	case OP_ADDIU:
		return op.i.rt == op.i.rs && op.i.imm == 0;
	case OP_BGTZ:
		return (op.i.rs == 0 || op.i.imm == 1);
	case OP_REGIMM:
		return (op.r.rt == OP_REGIMM_BLTZ ||
			op.r.rt == OP_REGIMM_BLTZAL) &&
		       (op.i.rs == 0 || op.i.imm == 1);
	case OP_BNE:
		return (op.i.rs == op.i.rt || op.i.imm == 1);
	default:
		return false;
	}
}

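/* Worked example of the pattern handled below (a sketch):
 *
 *     lhu  $v0, 0($a0)
 *     sll  $v0, $v0, 16
 *     sra  $v0, $v0, 16
 *
 * The SLL/SRA pair by 16 is a sign-extension to s16 (24 would be s8), so
 * the whole sequence collapses to "lh $v0, 0($a0)". Without a matching
 * load, the pair becomes a single EXTS/EXTC meta-opcode instead. */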
static void lightrec_optimize_sll_sra(struct opcode *list, unsigned int offset,
				      struct constprop_data *v)
{
	struct opcode *ldop = NULL, *curr = &list[offset], *next;
	struct opcode *to_change, *to_nop;
	int idx, idx2;

	if (curr->r.imm != 24 && curr->r.imm != 16)
		return;

	if (is_delay_slot(list, offset))
		return;

	idx = find_next_reader(list, offset + 1, curr->r.rd);
	if (idx < 0)
		return;

	next = &list[idx];

	if (next->i.op != OP_SPECIAL || next->r.op != OP_SPECIAL_SRA ||
	    next->r.imm != curr->r.imm || next->r.rt != curr->r.rd)
		return;

	if (curr->r.rd != curr->r.rt && next->r.rd != next->r.rt) {
		/* sll rY, rX, 16
		 * ...
		 * sra rZ, rY, 16 */

		if (!reg_is_dead(list, idx, curr->r.rd) ||
		    reg_is_read_or_written(list, offset, idx, next->r.rd))
			return;

		/* If rY is dead after the SRA, and rZ is not used after the
		 * SLL, we can change rY to rZ */

		pr_debug("Detected SLL/SRA with middle temp register\n");
		curr->r.rd = next->r.rd;
		next->r.rt = curr->r.rd;
	}

	/* We got a SLL/SRA combo. If imm #16, that's a cast to s16.
	 * If imm #24, that's a cast to s8.
	 *
	 * First of all, make sure that the target register of the SLL is not
	 * read after the SRA. */

	if (curr->r.rd == curr->r.rt) {
		/* sll rX, rX, 16
		 * ...
		 * sra rY, rX, 16 */
		to_change = next;
		to_nop = curr;

		/* rX is used after the SRA - we cannot convert it. */
		if (curr->r.rd != next->r.rd && !reg_is_dead(list, idx, curr->r.rd))
			return;
	} else {
		/* sll rY, rX, 16
		 * ...
		 * sra rY, rY, 16 */
		to_change = curr;
		to_nop = next;
	}

	idx2 = find_prev_writer(list, offset, curr->r.rt);
	if (idx2 >= 0) {
		/* Note that PSX games sometimes do casts after
		 * a LHU or LBU; in this case we can change the
		 * load opcode to a LH or LB, and the cast can
		 * be changed to a MOV or a simple NOP. */

		ldop = &list[idx2];

		if (next->r.rd != ldop->i.rt &&
		    !reg_is_dead(list, idx, ldop->i.rt))
			ldop = NULL;
		else if (curr->r.imm == 16 && ldop->i.op == OP_LHU)
			ldop->i.op = OP_LH;
		else if (curr->r.imm == 24 && ldop->i.op == OP_LBU)
			ldop->i.op = OP_LB;
		else
			ldop = NULL;

		if (ldop) {
			if (next->r.rd == ldop->i.rt) {
				to_change->opcode = 0;
			} else if (reg_is_dead(list, idx, ldop->i.rt) &&
				   !reg_is_read_or_written(list, idx2 + 1, idx, next->r.rd)) {
				/* The target register of the SRA is dead after the
				 * LBU/LHU; we can change the target register of the
				 * LBU/LHU to the one of the SRA. */
				v[ldop->i.rt].known = 0;
				v[ldop->i.rt].sign = 0;
				ldop->i.rt = next->r.rd;
				to_change->opcode = 0;
			} else {
				to_change->i.op = OP_META;
				to_change->m.op = OP_META_MOV;
				to_change->m.rd = next->r.rd;
				to_change->m.rs = ldop->i.rt;
			}

			if (to_nop->r.imm == 24)
				pr_debug("Convert LBU+SLL+SRA to LB\n");
			else
				pr_debug("Convert LHU+SLL+SRA to LH\n");

			v[ldop->i.rt].known = 0;
			v[ldop->i.rt].sign = 0xffffff80 << (24 - curr->r.imm);
		}
	}

	if (!ldop) {
		pr_debug("Convert SLL/SRA #%u to EXT%c\n",
			 curr->r.imm, curr->r.imm == 24 ? 'C' : 'S');

		to_change->m.rs = curr->r.rt;
		to_change->m.op = to_nop->r.imm == 24 ? OP_META_EXTC : OP_META_EXTS;
		to_change->i.op = OP_META;
	}

	to_nop->opcode = 0;
}

static void
lightrec_remove_useless_lui(struct block *block, unsigned int offset,
			    const struct constprop_data *v)
{
	struct opcode *list = block->opcode_list,
		      *op = &block->opcode_list[offset];
	int reader;

	if (!op_flag_sync(op->flags) && is_known(v, op->i.rt) &&
	    v[op->i.rt].value == op->i.imm << 16) {
		pr_debug("Converting duplicated LUI to NOP\n");
		op->opcode = 0x0;
		return;
	}

	if (op->i.imm != 0 || op->i.rt == 0 || offset == block->nb_ops - 1)
		return;

	reader = find_next_reader(list, offset + 1, op->i.rt);
	if (reader <= 0)
		return;

	if (opcode_writes_register(list[reader].c, op->i.rt) ||
	    reg_is_dead(list, reader, op->i.rt)) {
		pr_debug("Removing useless LUI 0x0\n");

		if (list[reader].i.rs == op->i.rt)
			list[reader].i.rs = 0;
		if (list[reader].i.op == OP_SPECIAL &&
		    list[reader].i.rt == op->i.rt)
			list[reader].i.rt = 0;
		op->opcode = 0x0;
	}
}

static void lightrec_modify_lui(struct block *block, unsigned int offset)
{
	union code c, *lui = &block->opcode_list[offset].c;
	bool stop = false, stop_next = false;
	unsigned int i;

	for (i = offset + 1; !stop && i < block->nb_ops; i++) {
		c = block->opcode_list[i].c;
		stop = stop_next;

		if ((opcode_is_store(c) && c.i.rt == lui->i.rt)
		    || (!opcode_is_load(c) && opcode_reads_register(c, lui->i.rt)))
			break;

		if (opcode_writes_register(c, lui->i.rt)) {
			if (c.i.op == OP_LWL || c.i.op == OP_LWR) {
				/* LWL/LWR only partially write their target register;
				 * therefore the LUI should not write a different value. */
				break;
			}

			pr_debug("Convert LUI at offset 0x%x to kuseg\n",
				 (i - 1) << 2);
			lui->i.imm = kunseg(lui->i.imm << 16) >> 16;
			break;
		}

		if (has_delay_slot(c))
			stop_next = true;
	}
}

static int lightrec_transform_branches(struct lightrec_state *state,
				       struct block *block)
{
	struct opcode *op;
	unsigned int i;
	s32 offset;

	for (i = 0; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		switch (op->i.op) {
		case OP_J:
			/* Transform J opcode into BEQ $zero, $zero if possible. */
			offset = (s32)((block->pc & 0xf0000000) >> 2 | op->j.imm)
				- (s32)(block->pc >> 2) - (s32)i - 1;

			if (offset == (s16)offset) {
				pr_debug("Transform J into BEQ $zero, $zero\n");
				op->i.op = OP_BEQ;
				op->i.rs = 0;
				op->i.rt = 0;
				op->i.imm = offset;
			}
			fallthrough;
		default:
			break;
		}
	}

	return 0;
}

static inline bool is_power_of_two(u32 value)
{
	return popcount32(value) == 1;
}

static void lightrec_patch_known_zero(struct opcode *op,
				      const struct constprop_data *v)
{
	switch (op->i.op) {
	case OP_SPECIAL:
		switch (op->r.op) {
		case OP_SPECIAL_JR:
		case OP_SPECIAL_JALR:
		case OP_SPECIAL_MTHI:
		case OP_SPECIAL_MTLO:
			if (is_known_zero(v, op->r.rs))
				op->r.rs = 0;
			break;
		default:
			if (is_known_zero(v, op->r.rs))
				op->r.rs = 0;
			fallthrough;
		case OP_SPECIAL_SLL:
		case OP_SPECIAL_SRL:
		case OP_SPECIAL_SRA:
			if (is_known_zero(v, op->r.rt))
				op->r.rt = 0;
			break;
		case OP_SPECIAL_SYSCALL:
		case OP_SPECIAL_BREAK:
		case OP_SPECIAL_MFHI:
		case OP_SPECIAL_MFLO:
			break;
		}
		break;
	case OP_CP0:
		switch (op->r.rs) {
		case OP_CP0_MTC0:
		case OP_CP0_CTC0:
			if (is_known_zero(v, op->r.rt))
				op->r.rt = 0;
			break;
		default:
			break;
		}
		break;
	case OP_CP2:
		if (op->r.op == OP_CP2_BASIC) {
			switch (op->r.rs) {
			case OP_CP2_BASIC_MTC2:
			case OP_CP2_BASIC_CTC2:
				if (is_known_zero(v, op->r.rt))
					op->r.rt = 0;
				break;
			default:
				break;
			}
		}
		break;
	case OP_BEQ:
	case OP_BNE:
		if (is_known_zero(v, op->i.rt))
			op->i.rt = 0;
		fallthrough;
	case OP_REGIMM:
	case OP_BLEZ:
	case OP_BGTZ:
	case OP_ADDI:
	case OP_ADDIU:
	case OP_SLTI:
	case OP_SLTIU:
	case OP_ANDI:
	case OP_ORI:
	case OP_XORI:
	case OP_META_MULT2:
	case OP_META_MULTU2:
	case OP_META:
		if (is_known_zero(v, op->m.rs))
			op->m.rs = 0;
		break;
	case OP_SB:
	case OP_SH:
	case OP_SWL:
	case OP_SW:
	case OP_SWR:
		if (is_known_zero(v, op->i.rt))
			op->i.rt = 0;
		fallthrough;
	case OP_LB:
	case OP_LH:
	case OP_LWL:
	case OP_LW:
	case OP_LBU:
	case OP_LHU:
	case OP_LWR:
	case OP_LWC2:
	case OP_SWC2:
		if (is_known(v, op->i.rs)
		    && kunseg(v[op->i.rs].value) == 0)
			op->i.rs = 0;
		break;
	default:
		break;
	}
}

static void lightrec_reset_syncs(struct block *block)
{
	struct opcode *op, *list = block->opcode_list;
	unsigned int i;
	s32 offset;

	for (i = 0; i < block->nb_ops; i++)
		list[i].flags &= ~LIGHTREC_SYNC;

	for (i = 0; i < block->nb_ops; i++) {
		op = &list[i];

		if (has_delay_slot(op->c)) {
			if (op_flag_local_branch(op->flags)) {
				offset = i + 1 - op_flag_no_ds(op->flags) + (s16)op->i.imm;
				list[offset].flags |= LIGHTREC_SYNC;
			}

			if (op_flag_emulate_branch(op->flags) && i + 2 < block->nb_ops)
				list[i + 2].flags |= LIGHTREC_SYNC;
		}
	}
}

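/* Main peephole pass. The constant propagation results in v[] drive a set
 * of strength reductions. For instance (a sketch): if some bit is proven
 * to differ between $t0 and $t1, "bne $t0, $t1, label" is always taken and
 * is rewritten as BEQ $zero, $zero; and "andi $t0, $t0, 0xff" where the
 * upper bits of $t0 are already known to be zero becomes a NOP or a MOV. */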
static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
{
	struct opcode *op, *list = block->opcode_list;
	struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
	unsigned int i;
	bool local;
	u8 tmp;

	for (i = 0; i < block->nb_ops; i++) {
		op = &list[i];

		lightrec_consts_propagate(block, i, v);

		lightrec_patch_known_zero(op, v);

		/* Transform all opcodes detected as useless to real NOPs
		 * (0x0: SLL r0, r0, #0) */
		if (op->opcode != 0 && is_nop(op->c)) {
			pr_debug("Converting useless opcode 0x%08x to NOP\n",
				 op->opcode);
			op->opcode = 0x0;
		}

		if (!op->opcode)
			continue;

		switch (op->i.op) {
		case OP_BEQ:
			if (op->i.rs == op->i.rt ||
			    (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
			     v[op->i.rs].value == v[op->i.rt].value)) {
				if (op->i.rs != op->i.rt)
					pr_debug("Found always-taken BEQ\n");

				op->i.rs = 0;
				op->i.rt = 0;
			} else if (v[op->i.rs].known & v[op->i.rt].known &
				   (v[op->i.rs].value ^ v[op->i.rt].value)) {
				pr_debug("Found never-taken BEQ\n");

				local = op_flag_local_branch(op->flags);
				op->opcode = 0;
				op->flags = 0;

				if (local)
					lightrec_reset_syncs(block);
			} else if (op->i.rs == 0) {
				op->i.rs = op->i.rt;
				op->i.rt = 0;
			}
			break;

		case OP_BNE:
			if (v[op->i.rs].known & v[op->i.rt].known &
			    (v[op->i.rs].value ^ v[op->i.rt].value)) {
				pr_debug("Found always-taken BNE\n");

				op->i.op = OP_BEQ;
				op->i.rs = 0;
				op->i.rt = 0;
			} else if (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
				   v[op->i.rs].value == v[op->i.rt].value) {
				pr_debug("Found never-taken BNE\n");

				local = op_flag_local_branch(op->flags);
				op->opcode = 0;
				op->flags = 0;

				if (local)
					lightrec_reset_syncs(block);
			} else if (op->i.rs == 0) {
				op->i.rs = op->i.rt;
				op->i.rt = 0;
			}
			break;

		case OP_BLEZ:
			if (v[op->i.rs].known & BIT(31) &&
			    v[op->i.rs].value & BIT(31)) {
				pr_debug("Found always-taken BLEZ\n");

				op->i.op = OP_BEQ;
				op->i.rs = 0;
				op->i.rt = 0;
			}
			break;

		case OP_BGTZ:
			if (v[op->i.rs].known & BIT(31) &&
			    v[op->i.rs].value & BIT(31)) {
				pr_debug("Found never-taken BGTZ\n");

				local = op_flag_local_branch(op->flags);
				op->opcode = 0;
				op->flags = 0;

				if (local)
					lightrec_reset_syncs(block);
			}
			break;

		case OP_LUI:
			if (i == 0 || !has_delay_slot(list[i - 1].c))
				lightrec_modify_lui(block, i);
			lightrec_remove_useless_lui(block, i, v);
			break;

		/* Transform ORI/ADDI/ADDIU with imm #0 or OR/ADD/ADDU/SUB/SUBU
		 * with register $zero to the MOV meta-opcode */
		case OP_ORI:
		case OP_ADDI:
		case OP_ADDIU:
			if (op->i.imm == 0) {
				pr_debug("Convert ORI/ADDI/ADDIU #0 to MOV\n");
				op->m.rd = op->i.rt;
				op->m.op = OP_META_MOV;
				op->i.op = OP_META;
			}
			break;
		case OP_ANDI:
			if (bits_are_known_zero(v, op->i.rs, ~op->i.imm)) {
				pr_debug("Found useless ANDI 0x%x\n", op->i.imm);

				if (op->i.rs == op->i.rt) {
					op->opcode = 0;
				} else {
					op->m.rd = op->i.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
			}
			break;
		case OP_REGIMM:
			switch (op->r.rt) {
			case OP_REGIMM_BLTZ:
			case OP_REGIMM_BGEZ:
				if (!(v[op->r.rs].known & BIT(31)))
					break;

				if (!!(v[op->r.rs].value & BIT(31))
				    ^ (op->r.rt == OP_REGIMM_BGEZ)) {
					pr_debug("Found always-taken BLTZ/BGEZ\n");
					op->i.op = OP_BEQ;
					op->i.rs = 0;
					op->i.rt = 0;
				} else {
					pr_debug("Found never-taken BLTZ/BGEZ\n");

					local = op_flag_local_branch(op->flags);
					op->opcode = 0;
					op->flags = 0;

					if (local)
						lightrec_reset_syncs(block);
				}
				break;
			case OP_REGIMM_BLTZAL:
			case OP_REGIMM_BGEZAL:
				/* TODO: Detect always-taken and replace with JAL */
				break;
			}
			break;
		case OP_SPECIAL:
			switch (op->r.op) {
			case OP_SPECIAL_SRAV:
				if ((v[op->r.rs].known & 0x1f) != 0x1f)
					break;

				pr_debug("Convert SRAV to SRA\n");
				op->r.imm = v[op->r.rs].value & 0x1f;
				op->r.op = OP_SPECIAL_SRA;

				fallthrough;
			case OP_SPECIAL_SRA:
				if (op->r.imm == 0) {
					pr_debug("Convert SRA #0 to MOV\n");
					op->m.rs = op->r.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
					break;
				}
				break;

			case OP_SPECIAL_SLLV:
				if ((v[op->r.rs].known & 0x1f) != 0x1f)
					break;

				pr_debug("Convert SLLV to SLL\n");
				op->r.imm = v[op->r.rs].value & 0x1f;
				op->r.op = OP_SPECIAL_SLL;

				fallthrough;
			case OP_SPECIAL_SLL:
				if (op->r.imm == 0) {
					pr_debug("Convert SLL #0 to MOV\n");
					op->m.rs = op->r.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}

				lightrec_optimize_sll_sra(block->opcode_list, i, v);
				break;

			case OP_SPECIAL_SRLV:
				if ((v[op->r.rs].known & 0x1f) != 0x1f)
					break;

				pr_debug("Convert SRLV to SRL\n");
				op->r.imm = v[op->r.rs].value & 0x1f;
				op->r.op = OP_SPECIAL_SRL;

				fallthrough;
			case OP_SPECIAL_SRL:
				if (op->r.imm == 0) {
					pr_debug("Convert SRL #0 to MOV\n");
					op->m.rs = op->r.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
				break;

			case OP_SPECIAL_MULT:
			case OP_SPECIAL_MULTU:
				if (is_known(v, op->r.rs) &&
				    is_power_of_two(v[op->r.rs].value)) {
					tmp = op->c.i.rs;
					op->c.i.rs = op->c.i.rt;
					op->c.i.rt = tmp;
				} else if (!is_known(v, op->r.rt) ||
					   !is_power_of_two(v[op->r.rt].value)) {
					break;
				}

				pr_debug("Multiply by power-of-two: %u\n",
					 v[op->r.rt].value);

				if (op->r.op == OP_SPECIAL_MULT)
					op->i.op = OP_META_MULT2;
				else
					op->i.op = OP_META_MULTU2;

				op->r.op = ctz32(v[op->r.rt].value);
				break;
			case OP_SPECIAL_NOR:
				if (op->r.rs == 0 || op->r.rt == 0) {
					pr_debug("Convert NOR $zero to COM\n");
					op->i.op = OP_META;
					op->m.op = OP_META_COM;
					if (!op->m.rs)
						op->m.rs = op->r.rt;
				}
				break;
			case OP_SPECIAL_OR:
			case OP_SPECIAL_ADD:
			case OP_SPECIAL_ADDU:
				if (op->r.rs == 0) {
					pr_debug("Convert OR/ADD $zero to MOV\n");
					op->m.rs = op->r.rt;
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
				fallthrough;
			case OP_SPECIAL_SUB:
			case OP_SPECIAL_SUBU:
				if (op->r.rt == 0) {
					pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
					op->m.op = OP_META_MOV;
					op->i.op = OP_META;
				}
				fallthrough;
			default:
				break;
			}
			fallthrough;
		default:
			break;
		}
	}

	return 0;
}

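/* Decide whether a branch and its delay slot opcode can be swapped. A
 * sketch of the legal case:
 *
 *     beq  $t0, $t1, target        addu $v0, $a0, $a1
 *     addu $v0, $a0, $a1     ->    beq  $t0, $t1, target
 *
 * which is only valid because the ADDU writes neither $t0 nor $t1. */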
static bool lightrec_can_switch_delay_slot(union code op, union code next_op)
{
	switch (op.i.op) {
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_JALR:
			if (opcode_reads_register(next_op, op.r.rd) ||
			    opcode_writes_register(next_op, op.r.rd))
				return false;
			fallthrough;
		case OP_SPECIAL_JR:
			if (opcode_writes_register(next_op, op.r.rs))
				return false;
			fallthrough;
		default:
			break;
		}
		fallthrough;
	case OP_J:
		break;
	case OP_JAL:
		if (opcode_reads_register(next_op, 31) ||
		    opcode_writes_register(next_op, 31))
			return false;

		break;
	case OP_BEQ:
	case OP_BNE:
		if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
			return false;
		fallthrough;
	case OP_BLEZ:
	case OP_BGTZ:
		if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
			return false;
		break;
	case OP_REGIMM:
		switch (op.r.rt) {
		case OP_REGIMM_BLTZAL:
		case OP_REGIMM_BGEZAL:
			if (opcode_reads_register(next_op, 31) ||
			    opcode_writes_register(next_op, 31))
				return false;
			fallthrough;
		case OP_REGIMM_BLTZ:
		case OP_REGIMM_BGEZ:
			if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
				return false;
			break;
		}
		fallthrough;
	default:
		break;
	}

	return true;
}

static int lightrec_switch_delay_slots(struct lightrec_state *state, struct block *block)
{
	struct opcode *list, *next = &block->opcode_list[0];
	unsigned int i;
	union code op, next_op;
	u32 flags;

	for (i = 0; i < block->nb_ops - 1; i++) {
		list = next;
		next = &block->opcode_list[i + 1];
		next_op = next->c;
		op = list->c;

		if (!has_delay_slot(op) || op_flag_no_ds(list->flags) ||
		    op_flag_emulate_branch(list->flags) ||
		    op.opcode == 0 || next_op.opcode == 0)
			continue;

		if (is_delay_slot(block->opcode_list, i))
			continue;

		if (op_flag_sync(next->flags))
			continue;

		if (op_flag_load_delay(next->flags) && opcode_is_load(next_op))
			continue;

		if (!lightrec_can_switch_delay_slot(list->c, next_op))
			continue;

		pr_debug("Swap branch and delay slot opcodes "
			 "at offsets 0x%x / 0x%x\n",
			 i << 2, (i + 1) << 2);

		flags = next->flags | (list->flags & LIGHTREC_SYNC);
		list->c = next_op;
		next->c = op;
		next->flags = (list->flags | LIGHTREC_NO_DS) & ~LIGHTREC_SYNC;
		list->flags = flags | LIGHTREC_NO_DS;
	}

	return 0;
}

static int lightrec_detect_impossible_branches(struct lightrec_state *state,
					       struct block *block)
{
	struct opcode *op, *list = block->opcode_list, *next = &list[0];
	unsigned int i;
	int ret = 0;

	for (i = 0; i < block->nb_ops - 1; i++) {
		op = next;
		next = &list[i + 1];

		if (!has_delay_slot(op->c) ||
		    (!has_delay_slot(next->c) &&
		     !opcode_is_mfc(next->c) &&
		     !(next->i.op == OP_CP0 && next->r.rs == OP_CP0_RFE)))
			continue;

		if (op->c.opcode == next->c.opcode) {
			/* The delay slot is the exact same opcode as the branch
			 * opcode: this is effectively a NOP */
			next->c.opcode = 0;
			continue;
		}

		op->flags |= LIGHTREC_EMULATE_BRANCH;

		if (OPT_LOCAL_BRANCHES && i + 2 < block->nb_ops) {
			/* The interpreter will only emulate the branch, then
			 * return to the compiled code. Add a SYNC after the
			 * branch + delay slot in the case where the branch
			 * was not taken. */
			list[i + 2].flags |= LIGHTREC_SYNC;
		}
	}

	return ret;
}

static bool is_local_branch(const struct block *block, unsigned int idx)
{
	const struct opcode *op = &block->opcode_list[idx];
	s32 offset;

	switch (op->c.i.op) {
	case OP_BEQ:
	case OP_BNE:
	case OP_BLEZ:
	case OP_BGTZ:
	case OP_REGIMM:
		offset = idx + 1 + (s16)op->c.i.imm;
		if (offset >= 0 && offset < block->nb_ops)
			return true;
		fallthrough;
	default:
		return false;
	}
}

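/* MIPS I load delay: the value fetched by a load such as "lw $t0, 0($a0)"
 * is architecturally not visible to the instruction placed right after the
 * load. The two passes below only need to care about loads sitting in
 * branch delay slots (marked with the LIGHTREC_LOAD_DELAY flag) and about
 * straight-line cases that can be fixed by swapping two opcodes. */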
static int lightrec_handle_load_delays(struct lightrec_state *state,
				       struct block *block)
{
	struct opcode *op, *list = block->opcode_list;
	unsigned int i;
	s16 imm;

	for (i = 0; i < block->nb_ops; i++) {
		op = &list[i];

		if (!opcode_is_load(op->c) || !op->c.i.rt || op->c.i.op == OP_LWC2)
			continue;

		if (!is_delay_slot(list, i)) {
			/* Only handle load delays in delay slots.
			 * PSX games never abused load delay slots otherwise. */
			continue;
		}

		if (is_local_branch(block, i - 1)) {
			imm = (s16)list[i - 1].c.i.imm;

			if (!opcode_reads_register(list[i + imm].c, op->c.i.rt)) {
				/* The target opcode of the branch is inside
				 * the block, and it does not read the register
				 * written to by the load opcode; we can ignore
				 * the load delay. */
				continue;
			}
		}

		op->flags |= LIGHTREC_LOAD_DELAY;
	}

	return 0;
}

static int lightrec_swap_load_delays(struct lightrec_state *state,
				     struct block *block)
{
	unsigned int i;
	union code c, next;
	bool in_ds = false, skip_next = false;
	struct opcode op;

	if (block->nb_ops < 2)
		return 0;

	for (i = 0; i < block->nb_ops - 2; i++) {
		c = block->opcode_list[i].c;

		if (skip_next) {
			skip_next = false;
		} else if (!in_ds && opcode_is_load(c) && c.i.op != OP_LWC2) {
			next = block->opcode_list[i + 1].c;

			switch (next.i.op) {
			case OP_LWL:
			case OP_LWR:
			case OP_REGIMM:
			case OP_BEQ:
			case OP_BNE:
			case OP_BLEZ:
			case OP_BGTZ:
				continue;
			}

			if (opcode_reads_register(next, c.i.rt)
			    && !opcode_writes_register(next, c.i.rs)) {
				pr_debug("Swapping opcodes at offset 0x%x to "
					 "respect load delay\n", i << 2);

				op = block->opcode_list[i];
				block->opcode_list[i] = block->opcode_list[i + 1];
				block->opcode_list[i + 1] = op;
				skip_next = true;
			}
		}

		in_ds = has_delay_slot(c);
	}

	return 0;
}

static int lightrec_local_branches(struct lightrec_state *state, struct block *block)
{
	const struct opcode *ds;
	struct opcode *list;
	unsigned int i;
	s32 offset;

	for (i = 0; i < block->nb_ops; i++) {
		list = &block->opcode_list[i];

		if (should_emulate(list) || !is_local_branch(block, i))
			continue;

		offset = i + 1 + (s16)list->c.i.imm;

		pr_debug("Found local branch to offset 0x%x\n", offset << 2);

		ds = get_delay_slot(block->opcode_list, i);
		if (op_flag_load_delay(ds->flags) && opcode_is_load(ds->c)) {
			pr_debug("Branch delay slot has a load delay - skip\n");
			continue;
		}

		if (should_emulate(&block->opcode_list[offset])) {
			pr_debug("Branch target must be emulated - skip\n");
			continue;
		}

		if (offset && has_delay_slot(block->opcode_list[offset - 1].c)) {
			pr_debug("Branch target is a delay slot - skip\n");
			continue;
		}

		list->flags |= LIGHTREC_LOCAL_BRANCH;
	}

	lightrec_reset_syncs(block);

	return 0;
}

bool has_delay_slot(union code op)
{
	switch (op.i.op) {
	case OP_SPECIAL:
		switch (op.r.op) {
		case OP_SPECIAL_JR:
		case OP_SPECIAL_JALR:
			return true;
		default:
			return false;
		}
	case OP_J:
	case OP_JAL:
	case OP_BEQ:
	case OP_BNE:
	case OP_BLEZ:
	case OP_BGTZ:
	case OP_REGIMM:
		return true;
	default:
		return false;
	}
}

bool is_delay_slot(const struct opcode *list, unsigned int offset)
{
	return offset > 0
	   && !op_flag_no_ds(list[offset - 1].flags)
	   && has_delay_slot(list[offset - 1].c);
}

bool should_emulate(const struct opcode *list)
{
	return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c);
}

static bool op_writes_rd(union code c)
{
	switch (c.i.op) {
	case OP_SPECIAL:
	case OP_META:
		return true;
	default:
		return false;
	}
}

static void lightrec_add_reg_op(struct opcode *op, u8 reg, u32 reg_op)
{
	if (op_writes_rd(op->c) && reg == op->r.rd)
		op->flags |= LIGHTREC_REG_RD(reg_op);
	else if (op->i.rs == reg)
		op->flags |= LIGHTREC_REG_RS(reg_op);
	else if (op->i.rt == reg)
		op->flags |= LIGHTREC_REG_RT(reg_op);
	else
		pr_debug("Cannot add unload/clean/discard flag: "
			 "opcode does not touch register %s!\n",
			 lightrec_reg_name(reg));
}

static void lightrec_add_unload(struct opcode *op, u8 reg)
{
	lightrec_add_reg_op(op, reg, LIGHTREC_REG_UNLOAD);
}

static void lightrec_add_discard(struct opcode *op, u8 reg)
{
	lightrec_add_reg_op(op, reg, LIGHTREC_REG_DISCARD);
}

static void lightrec_add_clean(struct opcode *op, u8 reg)
{
	lightrec_add_reg_op(op, reg, LIGHTREC_REG_CLEAN);
}

static void
lightrec_early_unload_sync(struct opcode *list, s16 *last_r, s16 *last_w)
{
	unsigned int reg;
	s16 offset;

	for (reg = 0; reg < 34; reg++) {
		offset = s16_max(last_w[reg], last_r[reg]);

		if (offset >= 0)
			lightrec_add_unload(&list[offset], reg);
	}

	memset(last_r, 0xff, sizeof(*last_r) * 34);
	memset(last_w, 0xff, sizeof(*last_w) * 34);
}

static int lightrec_early_unload(struct lightrec_state *state, struct block *block)
{
	u16 i, offset;
	struct opcode *op;
	s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0;
	u64 mask_r, mask_w, dirty = 0, loaded = 0;
	u8 reg, load_delay_reg = 0;

	memset(last_r, 0xff, sizeof(last_r));
	memset(last_w, 0xff, sizeof(last_w));

	/*
	 * Clean if:
	 * - the register is dirty, and is read again after a branch opcode
	 *
	 * Unload if:
	 * - the register is dirty or loaded, and is not read again
	 * - the register is dirty or loaded, and is written again after a branch opcode
	 * - the next opcode has the SYNC flag set
	 *
	 * Discard if:
	 * - the register is dirty or loaded, and is written again
	 */

	for (i = 0; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		if (OPT_HANDLE_LOAD_DELAYS && load_delay_reg) {
			/* Handle delayed register write from load opcodes in
			 * delay slots */
			last_w[load_delay_reg] = i;
			load_delay_reg = 0;
		}

		if (op_flag_sync(op->flags) || should_emulate(op)) {
			/* The next opcode has the SYNC flag set, or is a branch
			 * that should be emulated: unload all registers. */
			lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
			dirty = 0;
			loaded = 0;
		}

		if (next_sync == i) {
			last_sync = i;
			pr_debug("Last sync: 0x%x\n", last_sync << 2);
		}

		if (has_delay_slot(op->c)) {
			next_sync = i + 1 + !op_flag_no_ds(op->flags);
			pr_debug("Next sync: 0x%x\n", next_sync << 2);
		}

		mask_r = opcode_read_mask(op->c);
		mask_w = opcode_write_mask(op->c);

		if (op_flag_load_delay(op->flags) && opcode_is_load(op->c)) {
			/* If we have a load opcode in a delay slot, its target
			 * register is actually not written there but at a
			 * later point, in the dispatcher. Prevent the algorithm
			 * from discarding its previous value. */
			load_delay_reg = op->c.i.rt;
			mask_w &= ~BIT(op->c.i.rt);
		}

		for (reg = 0; reg < 34; reg++) {
			if (mask_r & BIT(reg)) {
				if (dirty & BIT(reg) && last_w[reg] < last_sync) {
					/* The register is dirty, and is read
					 * again after a branch: clean it */

					lightrec_add_clean(&block->opcode_list[last_w[reg]], reg);
					dirty &= ~BIT(reg);
					loaded |= BIT(reg);
				}

				last_r[reg] = i;
			}

			if (mask_w & BIT(reg)) {
				if ((dirty & BIT(reg) && last_w[reg] < last_sync) ||
				    (loaded & BIT(reg) && last_r[reg] < last_sync)) {
					/* The register is dirty or loaded, and
					 * is written again after a branch:
					 * unload it */

					offset = s16_max(last_w[reg], last_r[reg]);
					lightrec_add_unload(&block->opcode_list[offset], reg);
					dirty &= ~BIT(reg);
					loaded &= ~BIT(reg);
				} else if (!(mask_r & BIT(reg)) &&
					   ((dirty & BIT(reg) && last_w[reg] > last_sync) ||
					    (loaded & BIT(reg) && last_r[reg] > last_sync))) {
					/* The register is dirty or loaded, and
					 * is written again: discard it */

					offset = s16_max(last_w[reg], last_r[reg]);
					lightrec_add_discard(&block->opcode_list[offset], reg);
					dirty &= ~BIT(reg);
					loaded &= ~BIT(reg);
				}

				last_w[reg] = i;
			}
		}

		dirty |= mask_w;
		loaded |= mask_r;
	}

	/* Unload all registers that are dirty or loaded at the end of block. */
	lightrec_early_unload_sync(block->opcode_list, last_r, last_w);

	return 0;
}

static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
{
	struct opcode *list;
	enum psx_map psx_map;
	struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
	unsigned int i;
	u32 val, kunseg_val;
	bool no_mask;

	for (i = 0; i < block->nb_ops; i++) {
		list = &block->opcode_list[i];

		lightrec_consts_propagate(block, i, v);

		switch (list->i.op) {
		case OP_SB:
		case OP_SH:
		case OP_SW:
			/* Mark all store operations that target $sp or $gp
			 * as not requiring code invalidation. This is based
			 * on the heuristic that stores using one of these
			 * registers as address will never hit a code page. */
			if (list->i.rs >= 28 && list->i.rs <= 29 &&
			    !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
				pr_debug("Flagging opcode 0x%08x as not requiring invalidation\n",
					 list->opcode);
				list->flags |= LIGHTREC_NO_INVALIDATE;
			}

			/* Detect writes whose destination address is inside the
			 * current block, using constant propagation. When these
			 * occur, we mark the blocks as not compilable. */
			if (is_known(v, list->i.rs) &&
			    kunseg(v[list->i.rs].value) >= kunseg(block->pc) &&
			    kunseg(v[list->i.rs].value) < (kunseg(block->pc) + block->nb_ops * 4)) {
				pr_debug("Self-modifying block detected\n");
				block_set_flags(block, BLOCK_NEVER_COMPILE);
				list->flags |= LIGHTREC_SMC;
			}
			fallthrough;
		case OP_SWL:
		case OP_SWR:
		case OP_SWC2:
		case OP_LB:
		case OP_LBU:
		case OP_LH:
		case OP_LHU:
		case OP_LW:
		case OP_LWL:
		case OP_LWR:
		case OP_LWC2:
			if (v[list->i.rs].known | v[list->i.rs].sign) {
				psx_map = lightrec_get_constprop_map(state, v,
								     list->i.rs,
								     (s16) list->i.imm);

				if (psx_map != PSX_MAP_UNKNOWN && !is_known(v, list->i.rs))
					pr_debug("Detected map thanks to bit-level const propagation!\n");

				list->flags &= ~LIGHTREC_IO_MASK;

				val = v[list->i.rs].value + (s16) list->i.imm;
				kunseg_val = kunseg(val);

				no_mask = (v[list->i.rs].known & ~v[list->i.rs].value
					   & 0xe0000000) == 0xe0000000;

				switch (psx_map) {
				case PSX_MAP_KERNEL_USER_RAM:
					if (no_mask)
						list->flags |= LIGHTREC_NO_MASK;
					fallthrough;
				case PSX_MAP_MIRROR1:
				case PSX_MAP_MIRROR2:
				case PSX_MAP_MIRROR3:
					pr_debug("Flagging opcode %u as RAM access\n", i);
					list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
					if (no_mask && state->mirrors_mapped)
						list->flags |= LIGHTREC_NO_MASK;
					break;
				case PSX_MAP_BIOS:
					pr_debug("Flagging opcode %u as BIOS access\n", i);
					list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_BIOS);
					if (no_mask)
						list->flags |= LIGHTREC_NO_MASK;
					break;
				case PSX_MAP_SCRATCH_PAD:
					pr_debug("Flagging opcode %u as scratchpad access\n", i);
					list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_SCRATCH);
					if (no_mask)
						list->flags |= LIGHTREC_NO_MASK;

					/* Consider that we're never going to run code from
					 * the scratchpad. */
					list->flags |= LIGHTREC_NO_INVALIDATE;
					break;
				case PSX_MAP_HW_REGISTERS:
					if (state->ops.hw_direct &&
					    state->ops.hw_direct(kunseg_val,
								 opcode_is_store(list->c),
								 opcode_get_io_size(list->c))) {
						pr_debug("Flagging opcode %u as direct I/O access\n",
							 i);
						list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT_HW);

						if (no_mask)
							list->flags |= LIGHTREC_NO_MASK;
					} else {
						pr_debug("Flagging opcode %u as I/O access\n",
							 i);
						list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
					}
					break;
				default:
					break;
				}
			}

			if (!LIGHTREC_FLAGS_GET_IO_MODE(list->flags)
			    && list->i.rs >= 28 && list->i.rs <= 29
			    && !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
				/* Assume that all I/O operations that target
				 * $sp or $gp will always only target a mapped
				 * memory (RAM, BIOS, scratchpad). */
				list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
			}

			fallthrough;
		default:
			break;
		}
	}

	return 0;
}

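/* Scan forward from 'offset' for the MFHI/MFLO that consumes the HI/LO
 * result of a preceding MULT/DIV. Returns the general-purpose register the
 * result could be written to directly, REG_HI/REG_LO when the value must
 * stay in the dedicated register, or 0 when the result is never read and
 * can be dropped entirely. */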
static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset,
			    const struct opcode *last,
			    u32 mask, bool sync, bool mflo, bool another)
{
	const struct opcode *op, *next = &block->opcode_list[offset];
	u32 old_mask;
	u8 reg2, reg = mflo ? REG_LO : REG_HI;
	u16 branch_offset;
	unsigned int i;

	for (i = offset; i < block->nb_ops; i++) {
		op = next;
		next = &block->opcode_list[i + 1];
		old_mask = mask;

		/* If any other opcode reads or writes the register
		 * we'd use, then we cannot use it anymore. */
		mask |= opcode_read_mask(op->c);
		mask |= opcode_write_mask(op->c);

		if (op_flag_sync(op->flags))
			sync = true;

		switch (op->i.op) {
		case OP_BEQ:
		case OP_BNE:
		case OP_BLEZ:
		case OP_BGTZ:
		case OP_REGIMM:
			/* TODO: handle backwards branches too */
			if (!last && op_flag_local_branch(op->flags) &&
			    (s16)op->c.i.imm >= 0) {
				branch_offset = i + 1 + (s16)op->c.i.imm
					- !!op_flag_no_ds(op->flags);

				reg = get_mfhi_mflo_reg(block, branch_offset, NULL,
							mask, sync, mflo, false);
				reg2 = get_mfhi_mflo_reg(block, offset + 1, next,
							 mask, sync, mflo, false);
				if (reg > 0 && reg == reg2)
					return reg;
				if (!reg && !reg2)
					return 0;
			}

			return mflo ? REG_LO : REG_HI;
		case OP_META_MULT2:
		case OP_META_MULTU2:
			return 0;
		case OP_SPECIAL:
			switch (op->r.op) {
			case OP_SPECIAL_MULT:
			case OP_SPECIAL_MULTU:
			case OP_SPECIAL_DIV:
			case OP_SPECIAL_DIVU:
				return 0;
			case OP_SPECIAL_MTHI:
				if (!mflo)
					return 0;
				continue;
			case OP_SPECIAL_MTLO:
				if (mflo)
					return 0;
				continue;
			case OP_SPECIAL_JR:
				if (op->r.rs != 31)
					return reg;

				if (!sync && !op_flag_no_ds(op->flags) &&
				    (next->i.op == OP_SPECIAL) &&
				    ((!mflo && next->r.op == OP_SPECIAL_MFHI) ||
				     (mflo && next->r.op == OP_SPECIAL_MFLO)))
					return next->r.rd;

				return 0;
			case OP_SPECIAL_JALR:
				return reg;
			case OP_SPECIAL_MFHI:
				if (!mflo) {
					if (another)
						return op->r.rd;
					/* Must use REG_HI if there is another MFHI target */
					reg2 = get_mfhi_mflo_reg(block, i + 1, next,
								 0, sync, mflo, true);
					if (reg2 > 0 && reg2 != REG_HI)
						return REG_HI;

					if (!sync && !(old_mask & BIT(op->r.rd)))
						return op->r.rd;
					else
						return REG_HI;
				}
				continue;
			case OP_SPECIAL_MFLO:
				if (mflo) {
					if (another)
						return op->r.rd;
					/* Must use REG_LO if there is another MFLO target */
					reg2 = get_mfhi_mflo_reg(block, i + 1, next,
								 0, sync, mflo, true);
					if (reg2 > 0 && reg2 != REG_LO)
						return REG_LO;

					if (!sync && !(old_mask & BIT(op->r.rd)))
						return op->r.rd;
					else
						return REG_LO;
				}
				continue;
			default:
				break;
			}

			fallthrough;
		default:
			continue;
		}
	}

	return reg;
}

static void lightrec_replace_lo_hi(struct block *block, u16 offset,
				   u16 last, bool lo)
{
	unsigned int i;
	u32 branch_offset;

	/* This function will remove the following MFLO/MFHI. It must be called
	 * only if get_mfhi_mflo_reg() returned a non-zero value. */

	for (i = offset; i < last; i++) {
		struct opcode *op = &block->opcode_list[i];

		switch (op->i.op) {
		case OP_BEQ:
		case OP_BNE:
		case OP_BLEZ:
		case OP_BGTZ:
		case OP_REGIMM:
			/* TODO: handle backwards branches too */
			if (op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) {
				branch_offset = i + 1 + (s16)op->c.i.imm
					- !!op_flag_no_ds(op->flags);

				lightrec_replace_lo_hi(block, branch_offset, last, lo);
				lightrec_replace_lo_hi(block, i + 1, branch_offset, lo);
			}
			break;

		case OP_SPECIAL:
			if (lo && op->r.op == OP_SPECIAL_MFLO) {
				pr_debug("Removing MFLO opcode at offset 0x%x\n",
					 i << 2);
				op->opcode = 0;
				return;
			} else if (!lo && op->r.op == OP_SPECIAL_MFHI) {
				pr_debug("Removing MFHI opcode at offset 0x%x\n",
					 i << 2);
				op->opcode = 0;
				return;
			}

			fallthrough;
		default:
			break;
		}
	}
}

static bool lightrec_always_skip_div_check(void)
{
#ifdef __mips__
	return true;
#else
	return false;
#endif
}

static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block *block)
{
	struct opcode *list = NULL;
	struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
	u8 reg_hi, reg_lo;
	unsigned int i;

	for (i = 0; i < block->nb_ops - 1; i++) {
		list = &block->opcode_list[i];

		lightrec_consts_propagate(block, i, v);

		switch (list->i.op) {
		case OP_SPECIAL:
			switch (list->r.op) {
			case OP_SPECIAL_DIV:
			case OP_SPECIAL_DIVU:
				/* If we are dividing by a non-zero constant, don't
				 * emit the div-by-zero check. */
				if (lightrec_always_skip_div_check() ||
				    (v[list->r.rt].known & v[list->r.rt].value)) {
					list->flags |= LIGHTREC_NO_DIV_CHECK;
				}
				fallthrough;
			case OP_SPECIAL_MULT:
			case OP_SPECIAL_MULTU:
				break;
			default:
				continue;
			}
			fallthrough;
		case OP_META_MULT2:
		case OP_META_MULTU2:
			break;
		default:
			continue;
		}

		/* Don't support opcodes in delay slots */
		if (is_delay_slot(block->opcode_list, i) ||
		    op_flag_no_ds(list->flags)) {
			continue;
		}

		reg_lo = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, true, false);
		if (reg_lo == 0) {
			pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
				 " not writing LO\n", i << 2);
			list->flags |= LIGHTREC_NO_LO;
		}

		reg_hi = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, false, false);
		if (reg_hi == 0) {
			pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
				 " not writing HI\n", i << 2);
			list->flags |= LIGHTREC_NO_HI;
		}

		if (!reg_lo && !reg_hi) {
			pr_debug("Both LO/HI unused in this block, they will "
				 "probably be used in parent block - removing "
				 "flags.\n");
			list->flags &= ~(LIGHTREC_NO_LO | LIGHTREC_NO_HI);
		}

		if (reg_lo > 0 && reg_lo != REG_LO) {
			pr_debug("Found register %s to hold LO (rs = %u, rt = %u)\n",
				 lightrec_reg_name(reg_lo), list->r.rs, list->r.rt);

			lightrec_replace_lo_hi(block, i + 1, block->nb_ops, true);
			list->r.rd = reg_lo;
		} else {
			list->r.rd = 0;
		}

		if (reg_hi > 0 && reg_hi != REG_HI) {
			pr_debug("Found register %s to hold HI (rs = %u, rt = %u)\n",
				 lightrec_reg_name(reg_hi), list->r.rs, list->r.rt);

			lightrec_replace_lo_hi(block, i + 1, block->nb_ops, false);
			list->r.imm = reg_hi;
		} else {
			list->r.imm = 0;
		}
	}

	return 0;
}

static bool remove_div_sequence(struct block *block, unsigned int offset)
{
	struct opcode *op;
	unsigned int i, found = 0;

	/*
	 * Scan for the zero-checking sequence that GCC automatically introduced
	 * after most DIV/DIVU opcodes. This sequence checks the value of the
	 * divisor, and if zero, executes a BREAK opcode, causing the BIOS
	 * handler to crash the PS1.
	 *
	 * For DIV opcodes, this sequence additionally checks that the signed
	 * operation does not overflow.
	 *
	 * With the assumption that the games never crashed the PS1, we can
	 * therefore assume that the games never divided by zero or overflowed,
	 * and these sequences can be removed.
	 */

	for (i = offset; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		if (!found) {
			if (op->i.op == OP_SPECIAL &&
			    (op->r.op == OP_SPECIAL_DIV || op->r.op == OP_SPECIAL_DIVU))
				break;

			if ((op->opcode & 0xfc1fffff) == 0x14000002) {
				/* BNE ???, zero, +8 */
				found++;
			} else {
				offset++;
			}
		} else if (found == 1 && !op->opcode) {
			/* NOP */
			found++;
		} else if (found == 2 && op->opcode == 0x0007000d) {
			/* BREAK 0x1c00 */
			found++;
		} else if (found == 3 && op->opcode == 0x2401ffff) {
			/* LI at, -1 */
			found++;
		} else if (found == 4 && (op->opcode & 0xfc1fffff) == 0x14010004) {
			/* BNE ???, at, +16 */
			found++;
		} else if (found == 5 && op->opcode == 0x3c018000) {
			/* LUI at, 0x8000 */
			found++;
		} else if (found == 6 && (op->opcode & 0xfc1fffff) == 0x14010002) {
			/* BNE ???, at, +8 */
			found++;
		} else if (found == 7 && !op->opcode) {
			/* NOP */
			found++;
		} else if (found == 8 && op->opcode == 0x0006000d) {
			/* BREAK 0x1800 */
			found++;
			break;
		} else {
			break;
		}
	}

	if (found >= 3) {
		if (found != 9)
			found = 3;

		pr_debug("Removing DIV%s sequence at offset 0x%x\n",
			 found == 9 ? "" : "U", offset << 2);

		for (i = 0; i < found; i++)
			block->opcode_list[offset + i].opcode = 0;

		return true;
	}

	return false;
}

static int lightrec_remove_div_by_zero_check_sequence(struct lightrec_state *state,
						      struct block *block)
{
	struct opcode *op;
	unsigned int i;

	for (i = 0; i < block->nb_ops; i++) {
		op = &block->opcode_list[i];

		if (op->i.op == OP_SPECIAL &&
		    (op->r.op == OP_SPECIAL_DIVU || op->r.op == OP_SPECIAL_DIV) &&
		    remove_div_sequence(block, i + 1))
			op->flags |= LIGHTREC_NO_DIV_CHECK;
	}

	return 0;
}

static const u32 memset_code[] = {
	0x10a00006,	// beqz a1, 2f
	0x24a2ffff,	// addiu v0,a1,-1
	0x2403ffff,	// li v1,-1
	0xac800000,	// 1: sw zero,0(a0)
	0x2442ffff,	// addiu v0,v0,-1
	0x1443fffd,	// bne v0,v1, 1b
	0x24840004,	// addiu a0,a0,4
	0x03e00008,	// 2: jr ra
	0x00000000,	// nop
};

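/* The PS1 BIOS memset() is small enough to be matched verbatim: if a block
 * is exactly the 9-opcode sequence above, it is flagged BLOCK_IS_MEMSET and
 * BLOCK_NEVER_COMPILE, so the core can special-case it instead of ever
 * compiling it. */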
static int lightrec_replace_memset(struct lightrec_state *state, struct block *block)
{
	unsigned int i;
	union code c;

	for (i = 0; i < block->nb_ops; i++) {
		c = block->opcode_list[i].c;

		if (c.opcode != memset_code[i])
			return 0;

		if (i == ARRAY_SIZE(memset_code) - 1) {
			/* success! */
			pr_debug("Block at PC 0x%x is a memset\n", block->pc);
			block_set_flags(block,
					BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE);

			/* Return non-zero to skip other optimizers. */
			return 1;
		}
	}

	return 0;
}

static int (*lightrec_optimizers[])(struct lightrec_state *state, struct block *) = {
	IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence),
	IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset),
	IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches),
	IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_handle_load_delays),
	IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_swap_load_delays),
	IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches),
	IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches),
	IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops),
	IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),
	IF_OPT(OPT_FLAG_IO, &lightrec_flag_io),
	IF_OPT(OPT_FLAG_MULT_DIV, &lightrec_flag_mults_divs),
	IF_OPT(OPT_EARLY_UNLOAD, &lightrec_early_unload),
};

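/* Passes run in the order they appear in lightrec_optimizers[]; entries
 * compiled out through IF_OPT() are NULL and simply skipped. A non-zero
 * return from any pass aborts the remaining ones (lightrec_replace_memset
 * uses this to stop once a block has been matched). */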
int lightrec_optimize(struct lightrec_state *state, struct block *block)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(lightrec_optimizers); i++) {
		if (lightrec_optimizers[i]) {
			ret = (*lightrec_optimizers[i])(state, block);
			if (ret)
				return ret;
		}
	}

	return 0;
}