// SPDX-License-Identifier: LGPL-2.1-or-later
* Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
#include "lightrec-config.h"
#include "disassembler.h"
#include "memmanager.h"
#include "optimizer.h"
#define IF_OPT(opt, ptr) ((opt) ? (ptr) : NULL)
struct optimizer_list {
void (**optimizers)(struct opcode *);
unsigned int nb_optimizers;
static bool is_nop(union code op);
bool is_unconditional_jump(union code c)
return c.r.op == OP_SPECIAL_JR || c.r.op == OP_SPECIAL_JALR;
return c.i.rs == c.i.rt;
return (c.r.rt == OP_REGIMM_BGEZ ||
c.r.rt == OP_REGIMM_BGEZAL) && c.i.rs == 0;
bool is_syscall(union code c)
return (c.i.op == OP_SPECIAL && c.r.op == OP_SPECIAL_SYSCALL) ||
(c.i.op == OP_CP0 && (c.r.rs == OP_CP0_MTC0 ||
c.r.rs == OP_CP0_CTC0) &&
(c.r.rd == 12 || c.r.rd == 13));
static u64 opcode_read_mask(union code op)
case OP_SPECIAL_SYSCALL:
case OP_SPECIAL_BREAK:
return BIT(op.r.rs) | BIT(op.r.rt);
if (op.r.op == OP_CP2_BASIC) {
case OP_CP2_BASIC_MTC2:
case OP_CP2_BASIC_CTC2:
if (op.i.rs == op.i.rt)
return BIT(op.i.rs) | BIT(op.i.rt);
static u64 opcode_write_mask(union code op)
case OP_SPECIAL_SYSCALL:
case OP_SPECIAL_BREAK:
case OP_SPECIAL_MULT:
case OP_SPECIAL_MULTU:
case OP_SPECIAL_DIVU:
if (!OPT_FLAG_MULT_DIV)
return BIT(REG_LO) | BIT(REG_HI);
flags = BIT(op.r.rd);
flags |= BIT(op.r.imm);
flags |= BIT(REG_HI);
case OP_SPECIAL_MTHI:
case OP_SPECIAL_MTLO:
if (op.r.op == OP_CP2_BASIC) {
case OP_CP2_BASIC_MFC2:
case OP_CP2_BASIC_CFC2:
case OP_REGIMM_BLTZAL:
case OP_REGIMM_BGEZAL:
bool opcode_reads_register(union code op, u8 reg)
return opcode_read_mask(op) & BIT(reg);
bool opcode_writes_register(union code op, u8 reg)
return opcode_write_mask(op) & BIT(reg);
static int find_prev_writer(const struct opcode *list, unsigned int offset, u8 reg)
if (op_flag_sync(list[offset].flags))
for (i = offset; i > 0; i--) {
if (opcode_writes_register(c, reg)) {
if (i > 1 && has_delay_slot(list[i - 2].c))
if (op_flag_sync(list[i - 1].flags) ||
opcode_reads_register(c, reg))
static int find_next_reader(const struct opcode *list, unsigned int offset, u8 reg)
if (op_flag_sync(list[offset].flags))
for (i = offset; ; i++) {
if (opcode_reads_register(c, reg)) {
if (i > 0 && has_delay_slot(list[i - 1].c))
if (op_flag_sync(list[i].flags) ||
has_delay_slot(c) || opcode_writes_register(c, reg))
static bool reg_is_dead(const struct opcode *list, unsigned int offset, u8 reg)
if (op_flag_sync(list[offset].flags))
for (i = offset + 1; ; i++) {
if (opcode_reads_register(list[i].c, reg))
if (opcode_writes_register(list[i].c, reg))
if (has_delay_slot(list[i].c)) {
if (op_flag_no_ds(list[i].flags) ||
opcode_reads_register(list[i + 1].c, reg))
return opcode_writes_register(list[i + 1].c, reg);
static bool reg_is_read(const struct opcode *list,
unsigned int a, unsigned int b, u8 reg)
/* Return true if reg is read in one of the opcodes of the interval
if (!is_nop(list[a].c) && opcode_reads_register(list[a].c, reg))
static bool reg_is_written(const struct opcode *list,
unsigned int a, unsigned int b, u8 reg)
/* Return true if reg is written in one of the opcodes of the interval
if (!is_nop(list[a].c) && opcode_writes_register(list[a].c, reg))
static bool reg_is_read_or_written(const struct opcode *list,
unsigned int a, unsigned int b, u8 reg)
return reg_is_read(list, a, b, reg) || reg_is_written(list, a, b, reg);
static bool opcode_is_load(union code op)
static bool opcode_is_store(union code op)
bool opcode_is_io(union code op)
return opcode_is_load(op) || opcode_is_store(op);
static bool is_nop(union code op)
if (opcode_writes_register(op, 0)) {
return op.r.rs != OP_CP0_MFC0;
return op.r.rd == op.r.rt && op.r.rd == op.r.rs;
case OP_SPECIAL_ADDU:
return (op.r.rd == op.r.rt && op.r.rs == 0) ||
(op.r.rd == op.r.rs && op.r.rt == 0);
case OP_SPECIAL_SUBU:
return op.r.rd == op.r.rs && op.r.rt == 0;
if (op.r.rd == op.r.rt)
return op.r.rd == op.r.rs || op.r.rs == 0;
return (op.r.rd == op.r.rs) && op.r.rt == 0;
return op.r.rd == op.r.rt && op.r.imm == 0;
case OP_SPECIAL_MFHI:
case OP_SPECIAL_MFLO:
return op.i.rt == op.i.rs && op.i.imm == 0;
return (op.i.rs == 0 || op.i.imm == 1);
return (op.i.op == OP_REGIMM_BLTZ ||
op.i.op == OP_REGIMM_BLTZAL) &&
(op.i.rs == 0 || op.i.imm == 1);
return (op.i.rs == op.i.rt || op.i.imm == 1);
bool load_in_delay_slot(union code op)
if (op.r.op == OP_CP2_BASIC) {
case OP_CP2_BASIC_MFC2:
case OP_CP2_BASIC_CFC2:
static u32 lightrec_propagate_consts(const struct opcode *op,
const struct opcode *prev,
union code c = prev->c;
/* Register $zero is always, well, zero */
if (op_flag_sync(op->flags))
if (known & BIT(c.r.rt)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rt] << c.r.imm;
known &= ~BIT(c.r.rd);
if (known & BIT(c.r.rt)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rt] >> c.r.imm;
known &= ~BIT(c.r.rd);
if (known & BIT(c.r.rt)) {
known |= BIT(c.r.rd);
v[c.r.rd] = (s32)v[c.r.rt] >> c.r.imm;
known &= ~BIT(c.r.rd);
case OP_SPECIAL_SLLV:
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rt] << (v[c.r.rs] & 0x1f);
known &= ~BIT(c.r.rd);
case OP_SPECIAL_SRLV:
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rt] >> (v[c.r.rs] & 0x1f);
known &= ~BIT(c.r.rd);
case OP_SPECIAL_SRAV:
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = (s32)v[c.r.rt]
>> (v[c.r.rs] & 0x1f);
known &= ~BIT(c.r.rd);
case OP_SPECIAL_ADDU:
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = (s32)v[c.r.rt] + (s32)v[c.r.rs];
known &= ~BIT(c.r.rd);
case OP_SPECIAL_SUBU:
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rt] - v[c.r.rs];
known &= ~BIT(c.r.rd);
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rt] & v[c.r.rs];
known &= ~BIT(c.r.rd);
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rt] | v[c.r.rs];
known &= ~BIT(c.r.rd);
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rt] ^ v[c.r.rs];
known &= ~BIT(c.r.rd);
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = ~(v[c.r.rt] | v[c.r.rs]);
known &= ~BIT(c.r.rd);
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = (s32)v[c.r.rs] < (s32)v[c.r.rt];
known &= ~BIT(c.r.rd);
case OP_SPECIAL_SLTU:
if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rs] < v[c.r.rt];
known &= ~BIT(c.r.rd);
if (known & BIT(c.i.rs)) {
known |= BIT(c.i.rt);
v[c.i.rt] = v[c.i.rs] + (s32)(s16)c.i.imm;
known &= ~BIT(c.i.rt);
if (known & BIT(c.i.rs)) {
known |= BIT(c.i.rt);
v[c.i.rt] = (s32)v[c.i.rs] < (s32)(s16)c.i.imm;
known &= ~BIT(c.i.rt);
if (known & BIT(c.i.rs)) {
known |= BIT(c.i.rt);
v[c.i.rt] = v[c.i.rs] < (u32)(s32)(s16)c.i.imm;
known &= ~BIT(c.i.rt);
if (known & BIT(c.i.rs)) {
known |= BIT(c.i.rt);
v[c.i.rt] = v[c.i.rs] & c.i.imm;
known &= ~BIT(c.i.rt);
if (known & BIT(c.i.rs)) {
known |= BIT(c.i.rt);
v[c.i.rt] = v[c.i.rs] | c.i.imm;
known &= ~BIT(c.i.rt);
if (known & BIT(c.i.rs)) {
known |= BIT(c.i.rt);
v[c.i.rt] = v[c.i.rs] ^ c.i.imm;
known &= ~BIT(c.i.rt);
known |= BIT(c.i.rt);
v[c.i.rt] = c.i.imm << 16;
known &= ~BIT(c.r.rt);
if (c.r.op == OP_CP2_BASIC) {
case OP_CP2_BASIC_MFC2:
case OP_CP2_BASIC_CFC2:
known &= ~BIT(c.r.rt);
known &= ~BIT(c.i.rt);
if (known & BIT(c.r.rs)) {
known |= BIT(c.r.rd);
v[c.r.rd] = v[c.r.rs];
known &= ~BIT(c.r.rd);
static void lightrec_optimize_sll_sra(struct opcode *list, unsigned int offset)
struct opcode *prev, *prev2 = NULL, *curr = &list[offset];
struct opcode *to_change, *to_nop;
if (curr->r.imm != 24 && curr->r.imm != 16)
idx = find_prev_writer(list, offset, curr->r.rt);
if (prev->i.op != OP_SPECIAL || prev->r.op != OP_SPECIAL_SLL ||
prev->r.imm != curr->r.imm || prev->r.rd != curr->r.rt)
if (prev->r.rd != prev->r.rt && curr->r.rd != curr->r.rt) {
if (!reg_is_dead(list, offset, curr->r.rt) ||
reg_is_read_or_written(list, idx, offset, curr->r.rd))
/* If rY is dead after the SRA, and rZ is not used after the SLL,
* we can change rY to rZ */
737 pr_debug("Detected SLL/SRA with middle temp register\n");
738 prev->r.rd = curr->r.rd;
739 curr->r.rt = prev->r.rd;
/* We got an SLL/SRA combo. If imm #16, that's a cast to s16.
* If imm #24, that's a cast to s8.
* First of all, make sure that the target register of the SLL is not
* read before the SRA. */
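/* Illustrative example (not from the original source, register names are
 * hypothetical): a guest sequence such as
 *
 *     sll   $v0, $a0, 16
 *     sra   $v0, $v0, 16
 *
 * sign-extends the low 16 bits of $a0 into $v0, so it can be rewritten
 * as the single EXTS meta-opcode (roughly "exts $v0, $a0"); the #24
 * variant maps to EXTC. */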
if (prev->r.rd == prev->r.rt) {
/* rX is used after the SRA - we cannot convert it. */
if (prev->r.rd != curr->r.rd && !reg_is_dead(list, offset, prev->r.rd))
idx2 = find_prev_writer(list, idx, prev->r.rt);
/* Note that PSX games sometimes do casts after
* an LHU or LBU; in this case we can change the
* load opcode to an LH or LB, and the cast can
* be changed to a MOV or a simple NOP. */
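/* Illustrative example (hypothetical registers): the sequence
 *
 *     lbu   $v0, 0($a0)
 *     sll   $v0, $v0, 24
 *     sra   $v0, $v0, 24
 *
 * loads a byte and sign-extends it, which is exactly what LB does; the
 * load becomes an LB and the SLL/SRA pair becomes a MOV or a NOP. */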
if (curr->r.rd != prev2->i.rt &&
!reg_is_dead(list, offset, prev2->i.rt))
else if (curr->r.imm == 16 && prev2->i.op == OP_LHU)
else if (curr->r.imm == 24 && prev2->i.op == OP_LBU)
if (curr->r.rd == prev2->i.rt) {
to_change->opcode = 0;
} else if (reg_is_dead(list, offset, prev2->i.rt) &&
!reg_is_read_or_written(list, idx2 + 1, offset, curr->r.rd)) {
/* The target register of the LBU/LHU is dead after the
* SRA; we can change the target register of the
* LBU/LHU to the one of the SRA. */
prev2->i.rt = curr->r.rd;
to_change->opcode = 0;
to_change->i.op = OP_META_MOV;
to_change->r.rd = curr->r.rd;
to_change->r.rs = prev2->i.rt;
if (to_nop->r.imm == 24)
pr_debug("Convert LBU+SLL+SRA to LB\n");
pr_debug("Convert LHU+SLL+SRA to LH\n");
pr_debug("Convert SLL/SRA #%u to EXT%c\n",
prev->r.imm == 24 ? 'C' : 'S');
if (to_change == prev) {
to_change->i.rs = prev->r.rt;
to_change->i.rt = curr->r.rd;
to_change->i.rt = curr->r.rd;
to_change->i.rs = prev->r.rt;
if (to_nop->r.imm == 24)
to_change->i.op = OP_META_EXTC;
to_change->i.op = OP_META_EXTS;
static void lightrec_remove_useless_lui(struct block *block, unsigned int offset,
u32 known, u32 *values)
struct opcode *list = block->opcode_list,
*op = &block->opcode_list[offset];
if (!op_flag_sync(op->flags) && (known & BIT(op->i.rt)) &&
values[op->i.rt] == op->i.imm << 16) {
pr_debug("Converting duplicated LUI to NOP\n");
if (op->i.imm != 0 || op->i.rt == 0)
reader = find_next_reader(list, offset + 1, op->i.rt);
if (opcode_writes_register(list[reader].c, op->i.rt) ||
reg_is_dead(list, reader, op->i.rt)) {
pr_debug("Removing useless LUI 0x0\n");
if (list[reader].i.rs == op->i.rt)
list[reader].i.rs = 0;
if (list[reader].i.op == OP_SPECIAL &&
list[reader].i.rt == op->i.rt)
list[reader].i.rt = 0;
static void lightrec_modify_lui(struct block *block, unsigned int offset)
union code c, *lui = &block->opcode_list[offset].c;
bool stop = false, stop_next = false;
for (i = offset + 1; !stop && i < block->nb_ops; i++) {
c = block->opcode_list[i].c;
if ((opcode_is_store(c) && c.i.rt == lui->i.rt)
|| (!opcode_is_load(c) && opcode_reads_register(c, lui->i.rt)))
if (opcode_writes_register(c, lui->i.rt)) {
pr_debug("Convert LUI at offset 0x%x to kuseg\n",
lui->i.imm = kunseg(lui->i.imm << 16) >> 16;
if (has_delay_slot(c))
static int lightrec_transform_branches(struct lightrec_state *state,
for (i = 0; i < block->nb_ops; i++) {
op = &block->opcode_list[i];
/* Transform J opcode into BEQ $zero, $zero if possible. */
offset = (s32)((block->pc & 0xf0000000) >> 2 | op->j.imm)
- (s32)(block->pc >> 2) - (s32)i - 1;
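/* The computed offset is the branch displacement in instructions,
 * relative to the instruction that follows the J (i.e. the value that a
 * BEQ would encode in its 16-bit immediate field). */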
if (offset == (s16)offset) {
pr_debug("Transform J into BEQ $zero, $zero\n");
default: /* fall-through */
static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
struct opcode *list = block->opcode_list;
struct opcode *prev, *op = NULL;
u32 values[32] = { 0 };
for (i = 0; i < block->nb_ops; i++) {
known = lightrec_propagate_consts(op, prev, known, values);
/* Transform all opcodes detected as useless to real NOPs
* (0x0: SLL r0, r0, #0) */
if (op->opcode != 0 && is_nop(op->c)) {
pr_debug("Converting useless opcode 0x%08x to NOP\n",
if (op->i.rs == op->i.rt) {
} else if (op->i.rs == 0) {
if (!prev || !has_delay_slot(prev->c))
lightrec_modify_lui(block, i);
lightrec_remove_useless_lui(block, i, known, values);
/* Transform ORI/ADDI/ADDIU with imm #0 or OR/ADD/ADDU/SUB/SUBU
* with register $zero to the MOV meta-opcode */
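/* Illustrative example (hypothetical registers): "ori $t0, $t1, 0" and
 * "addu $t0, $zero, $t1" both simply copy $t1 into $t0, so they can be
 * rewritten as the MOV meta-opcode. */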
if (op->i.imm == 0) {
pr_debug("Convert ORI/ADDI/ADDIU #0 to MOV\n");
op->i.op = OP_META_MOV;
if (op->r.imm == 0) {
pr_debug("Convert SRA #0 to MOV\n");
op->i.op = OP_META_MOV;
lightrec_optimize_sll_sra(block->opcode_list, i);
if (op->r.imm == 0) {
pr_debug("Convert SLL/SRL #0 to MOV\n");
op->i.op = OP_META_MOV;
op->r.rs = op->r.rt;
case OP_SPECIAL_ADD:
case OP_SPECIAL_ADDU:
if (op->r.rs == 0) {
pr_debug("Convert OR/ADD $zero to MOV\n");
op->i.op = OP_META_MOV;
op->r.rs = op->r.rt;
case OP_SPECIAL_SUB:
case OP_SPECIAL_SUBU:
if (op->r.rt == 0) {
pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
op->i.op = OP_META_MOV;
static int lightrec_switch_delay_slots(struct lightrec_state *state, struct block *block)
struct opcode *list, *next = &block->opcode_list[0];
union code op, next_op;
for (i = 0; i < block->nb_ops - 1; i++) {
next = &block->opcode_list[i + 1];
if (!has_delay_slot(op) || op_flag_no_ds(list->flags) ||
op_flag_emulate_branch(list->flags) ||
op.opcode == 0 || next_op.opcode == 0)
if (i && has_delay_slot(block->opcode_list[i - 1].c) &&
!op_flag_no_ds(block->opcode_list[i - 1].flags))
if (op_flag_sync(list->flags) || op_flag_sync(next->flags))
switch (list->i.op) {
case OP_SPECIAL_JALR:
if (opcode_reads_register(next_op, op.r.rd) ||
opcode_writes_register(next_op, op.r.rd))
if (opcode_writes_register(next_op, op.r.rs))
if (opcode_reads_register(next_op, 31) ||
opcode_writes_register(next_op, 31))
if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
case OP_REGIMM_BLTZAL:
case OP_REGIMM_BGEZAL:
if (opcode_reads_register(next_op, 31) ||
opcode_writes_register(next_op, 31))
case OP_REGIMM_BLTZ:
case OP_REGIMM_BGEZ:
opcode_writes_register(next_op, op.i.rs))
pr_debug("Swap branch and delay slot opcodes "
"at offsets 0x%x / 0x%x\n",
i << 2, (i + 1) << 2);
flags = next->flags;
next->flags = list->flags | LIGHTREC_NO_DS;
list->flags = flags | LIGHTREC_NO_DS;
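/* Illustrative example (hypothetical registers): the pair
 *
 *     beq   $t0, $t1, target
 *     addiu $s0, $s0, 1
 *
 * can be emitted as the ADDIU followed by the BEQ, both flagged
 * LIGHTREC_NO_DS, because the delay slot neither reads nor writes the
 * registers tested by the branch. */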
static int shrink_opcode_list(struct lightrec_state *state, struct block *block, u16 new_size)
struct opcode *list;
if (new_size >= block->nb_ops) {
pr_err("Invalid shrink size (%u vs %u)\n",
new_size, block->nb_ops);
list = lightrec_malloc(state, MEM_FOR_IR,
sizeof(*list) * new_size);
pr_err("Unable to allocate memory\n");
memcpy(list, block->opcode_list, sizeof(*list) * new_size);
lightrec_free_opcode_list(state, block);
block->opcode_list = list;
block->nb_ops = new_size;
pr_debug("Shrunk opcode list of block PC 0x%08x to %u opcodes\n",
block->pc, new_size);
static int lightrec_detect_impossible_branches(struct lightrec_state *state,
struct block *block)
struct opcode *op, *list = block->opcode_list, *next = &list[0];
for (i = 0; i < block->nb_ops - 1; i++) {
next = &list[i + 1];
if (!has_delay_slot(op->c) ||
(!load_in_delay_slot(next->c) &&
!has_delay_slot(next->c) &&
!(next->i.op == OP_CP0 && next->r.rs == OP_CP0_RFE)))
if (op->c.opcode == next->c.opcode) {
/* The delay slot is the exact same opcode as the branch
* opcode: this is effectively a NOP */
offset = i + 1 + (s16)op->i.imm;
if (load_in_delay_slot(next->c) &&
(offset >= 0 && offset < block->nb_ops) &&
!opcode_reads_register(list[offset].c, next->c.i.rt)) {
/* The 'impossible' branch is a local branch - we can
* verify here that the first opcode of the target does
* not use the target register of the delay slot */
pr_debug("Branch at offset 0x%x has load delay slot, "
"but is local and dest opcode does not read "
"dest register\n", i << 2);
op->flags |= LIGHTREC_EMULATE_BRANCH;
pr_debug("First opcode of block PC 0x%08x is an impossible branch\n",
/* If the first opcode is an 'impossible' branch, we
* only keep the first two opcodes of the block (the
* branch itself + its delay slot) */
if (block->nb_ops > 2)
ret = shrink_opcode_list(state, block, 2);
static int lightrec_local_branches(struct lightrec_state *state, struct block *block)
struct opcode *list;
for (i = 0; i < block->nb_ops; i++) {
list = &block->opcode_list[i];
if (should_emulate(list))
switch (list->i.op) {
offset = i + 1 + (s16)list->i.imm;
if (offset >= 0 && offset < block->nb_ops)
pr_debug("Found local branch to offset 0x%x\n", offset << 2);
if (should_emulate(&block->opcode_list[offset])) {
pr_debug("Branch target must be emulated - skip\n");
if (offset && has_delay_slot(block->opcode_list[offset - 1].c)) {
pr_debug("Branch target is a delay slot - skip\n");
pr_debug("Adding sync at offset 0x%x\n", offset << 2);
block->opcode_list[offset].flags |= LIGHTREC_SYNC;
list->flags |= LIGHTREC_LOCAL_BRANCH;
bool has_delay_slot(union code op)
case OP_SPECIAL_JALR:
bool should_emulate(const struct opcode *list)
return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c);
static bool op_writes_rd(union code c)
static void lightrec_add_reg_op(struct opcode *op, u8 reg, u32 reg_op)
if (op_writes_rd(op->c) && reg == op->r.rd)
op->flags |= LIGHTREC_REG_RD(reg_op);
else if (op->i.rs == reg)
op->flags |= LIGHTREC_REG_RS(reg_op);
else if (op->i.rt == reg)
op->flags |= LIGHTREC_REG_RT(reg_op);
pr_debug("Cannot add unload/clean/discard flag: "
"opcode does not touch register %s!\n",
lightrec_reg_name(reg));
static void lightrec_add_unload(struct opcode *op, u8 reg)
lightrec_add_reg_op(op, reg, LIGHTREC_REG_UNLOAD);
static void lightrec_add_discard(struct opcode *op, u8 reg)
lightrec_add_reg_op(op, reg, LIGHTREC_REG_DISCARD);
static void lightrec_add_clean(struct opcode *op, u8 reg)
lightrec_add_reg_op(op, reg, LIGHTREC_REG_CLEAN);
lightrec_early_unload_sync(struct opcode *list, s16 *last_r, s16 *last_w)
for (reg = 0; reg < 34; reg++) {
offset = s16_max(last_w[reg], last_r[reg]);
lightrec_add_unload(&list[offset], reg);
memset(last_r, 0xff, sizeof(*last_r) * 34);
memset(last_w, 0xff, sizeof(*last_w) * 34);
static int lightrec_early_unload(struct lightrec_state *state, struct block *block)
s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0;
u64 mask_r, mask_w, dirty = 0, loaded = 0;
memset(last_r, 0xff, sizeof(last_r));
memset(last_w, 0xff, sizeof(last_w));
/* Flag a register for cleaning when:
 * - the register is dirty, and is read again after a branch opcode
 *
 * Flag a register for unloading when:
 * - the register is dirty or loaded, and is not read again
 * - the register is dirty or loaded, and is written again after a branch opcode
 * - the next opcode has the SYNC flag set
 *
 * Flag a register for discarding when:
 * - the register is dirty or loaded, and is written again */
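/* Worked example (assumed, for illustration): if $t0 is written before a
 * local branch and read again after it, the write site is flagged for
 * cleaning; if $t0 is overwritten later with no branch in between, its
 * last access is flagged for discarding; a value that is simply never
 * needed again is flagged for unloading at its last access. */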
for (i = 0; i < block->nb_ops; i++) {
op = &block->opcode_list[i];
if (op_flag_sync(op->flags) || should_emulate(op)) {
/* The next opcode has the SYNC flag set, or is a branch
* that should be emulated: unload all registers. */
lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
if (next_sync == i) {
pr_debug("Last sync: 0x%x\n", last_sync << 2);
if (has_delay_slot(op->c)) {
next_sync = i + 1 + !op_flag_no_ds(op->flags);
pr_debug("Next sync: 0x%x\n", next_sync << 2);
mask_r = opcode_read_mask(op->c);
mask_w = opcode_write_mask(op->c);
for (reg = 0; reg < 34; reg++) {
if (mask_r & BIT(reg)) {
if (dirty & BIT(reg) && last_w[reg] < last_sync) {
/* The register is dirty, and is read
* again after a branch: clean it */
lightrec_add_clean(&block->opcode_list[last_w[reg]], reg);
if (mask_w & BIT(reg)) {
if ((dirty & BIT(reg) && last_w[reg] < last_sync) ||
(loaded & BIT(reg) && last_r[reg] < last_sync)) {
/* The register is dirty or loaded, and
* is written again after a branch: unload it */
offset = s16_max(last_w[reg], last_r[reg]);
lightrec_add_unload(&block->opcode_list[offset], reg);
loaded &= ~BIT(reg);
} else if (!(mask_r & BIT(reg)) &&
((dirty & BIT(reg) && last_w[reg] > last_sync) ||
(loaded & BIT(reg) && last_r[reg] > last_sync))) {
/* The register is dirty or loaded, and
* is written again: discard it */
offset = s16_max(last_w[reg], last_r[reg]);
lightrec_add_discard(&block->opcode_list[offset], reg);
loaded &= ~BIT(reg);
/* Unload all registers that are dirty or loaded at the end of the block. */
lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
struct opcode *prev = NULL, *list = NULL;
enum psx_map psx_map;
u32 values[32] = { 0 };
u32 val, kunseg_val;
for (i = 0; i < block->nb_ops; i++) {
list = &block->opcode_list[i];
known = lightrec_propagate_consts(list, prev, known, values);
switch (list->i.op) {
if (OPT_FLAG_STORES) {
/* Mark all store operations that target $sp or $gp
* as not requiring code invalidation. This is based
* on the heuristic that stores using one of these
* registers as address will never hit a code page. */
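/* Illustrative example (hypothetical operands): a stack access such as
 * "sw $t0, 16($sp)" is assumed never to land in a code page, so it is
 * emitted without the code-invalidation check. */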
if (list->i.rs >= 28 && list->i.rs <= 29 &&
!state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1471 pr_debug("Flaging opcode 0x%08x as not "
1472 "requiring invalidation\n",
list->flags |= LIGHTREC_NO_INVALIDATE;
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
/* Detect writes whose destination address is inside the
* current block, using constant propagation. When these
* occur, we mark the block as not compilable. */
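/* Illustrative example (hypothetical registers): if constant propagation
 * shows that $t0 holds block->pc and the block contains "sw $v0, 0($t0)",
 * the store targets the block itself, so the block is flagged as never
 * compilable. */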
if ((known & BIT(list->i.rs)) &&
kunseg(values[list->i.rs]) >= kunseg(block->pc) &&
kunseg(values[list->i.rs]) < (kunseg(block->pc) +
block->nb_ops * 4)) {
pr_debug("Self-modifying block detected\n");
block->flags |= BLOCK_NEVER_COMPILE;
list->flags |= LIGHTREC_SMC;
if (OPT_FLAG_IO && (known & BIT(list->i.rs))) {
val = values[list->i.rs] + (s16) list->i.imm;
kunseg_val = kunseg(val);
psx_map = lightrec_get_map_idx(state, kunseg_val);
list->flags &= ~LIGHTREC_IO_MASK;
case PSX_MAP_KERNEL_USER_RAM:
if (val == kunseg_val)
list->flags |= LIGHTREC_NO_MASK;
case PSX_MAP_MIRROR1:
case PSX_MAP_MIRROR2:
case PSX_MAP_MIRROR3:
1517 pr_debug("Flaging opcode %u as RAM access\n", i);
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1521 pr_debug("Flaging opcode %u as BIOS access\n", i);
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_BIOS);
case PSX_MAP_SCRATCH_PAD:
1525 pr_debug("Flaging opcode %u as scratchpad access\n", i);
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_SCRATCH);
/* Consider that we're never going to run code from
* the scratchpad. */
list->flags |= LIGHTREC_NO_INVALIDATE;
pr_debug("Flagging opcode %u as I/O access\n",
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset,
const struct opcode *last,
u32 mask, bool sync, bool mflo, bool another)
const struct opcode *op, *next = &block->opcode_list[offset];
u8 reg2, reg = mflo ? REG_LO : REG_HI;
for (i = offset; i < block->nb_ops; i++) {
next = &block->opcode_list[i + 1];
/* If any other opcode reads or writes the register
* we'd use, then we cannot use it anymore. */
mask |= opcode_read_mask(op->c);
mask |= opcode_write_mask(op->c);
if (op_flag_sync(op->flags))
/* TODO: handle backwards branches too */
if (!last && op_flag_local_branch(op->flags) &&
(s16)op->c.i.imm >= 0) {
branch_offset = i + 1 + (s16)op->c.i.imm
- !!op_flag_no_ds(op->flags);
reg = get_mfhi_mflo_reg(block, branch_offset, NULL,
mask, sync, mflo, false);
reg2 = get_mfhi_mflo_reg(block, offset + 1, next,
mask, sync, mflo, false);
if (reg > 0 && reg == reg2)
return mflo ? REG_LO : REG_HI;
case OP_SPECIAL_MULT:
case OP_SPECIAL_MULTU:
case OP_SPECIAL_DIV:
case OP_SPECIAL_DIVU:
case OP_SPECIAL_MTHI:
case OP_SPECIAL_MTLO:
if (!sync && !op_flag_no_ds(op->flags) &&
(next->i.op == OP_SPECIAL) &&
((!mflo && next->r.op == OP_SPECIAL_MFHI) ||
(mflo && next->r.op == OP_SPECIAL_MFLO)))
case OP_SPECIAL_JALR:
case OP_SPECIAL_MFHI:
/* Must use REG_HI if there is another MFHI target */
reg2 = get_mfhi_mflo_reg(block, i + 1, next,
0, sync, mflo, true);
if (reg2 > 0 && reg2 != REG_HI)
if (!sync && !(old_mask & BIT(op->r.rd)))
case OP_SPECIAL_MFLO:
/* Must use REG_LO if there is another MFLO target */
reg2 = get_mfhi_mflo_reg(block, i + 1, next,
0, sync, mflo, true);
if (reg2 > 0 && reg2 != REG_LO)
if (!sync && !(old_mask & BIT(op->r.rd)))
static void lightrec_replace_lo_hi(struct block *block, u16 offset,
/* This function will remove the following MFLO/MFHI. It must be called
* only if get_mfhi_mflo_reg() returned a non-zero value. */
for (i = offset; i < last; i++) {
struct opcode *op = &block->opcode_list[i];
/* TODO: handle backwards branches too */
if (op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) {
branch_offset = i + 1 + (s16)op->c.i.imm
- !!op_flag_no_ds(op->flags);
lightrec_replace_lo_hi(block, branch_offset, last, lo);
lightrec_replace_lo_hi(block, i + 1, branch_offset, lo);
if (lo && op->r.op == OP_SPECIAL_MFLO) {
pr_debug("Removing MFLO opcode at offset 0x%x\n",
} else if (!lo && op->r.op == OP_SPECIAL_MFHI) {
pr_debug("Removing MFHI opcode at offset 0x%x\n",
static bool lightrec_always_skip_div_check(void)
static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block *block)
struct opcode *prev, *list = NULL;
u32 values[32] = { 0 };
for (i = 0; i < block->nb_ops - 1; i++) {
list = &block->opcode_list[i];
known = lightrec_propagate_consts(list, prev, known, values);
if (list->i.op != OP_SPECIAL)
switch (list->r.op) {
case OP_SPECIAL_DIV:
case OP_SPECIAL_DIVU:
/* If we are dividing by a non-zero constant, don't
* emit the div-by-zero check. */
if (lightrec_always_skip_div_check() ||
(known & BIT(list->c.r.rt) && values[list->c.r.rt]))
list->flags |= LIGHTREC_NO_DIV_CHECK;
case OP_SPECIAL_MULT:
case OP_SPECIAL_MULTU:
/* Don't support opcodes in delay slots */
if ((i && has_delay_slot(block->opcode_list[i - 1].c)) ||
op_flag_no_ds(list->flags)) {
reg_lo = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, true, false);
pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
" not writing LO\n", i << 2);
list->flags |= LIGHTREC_NO_LO;
reg_hi = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, false, false);
pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
" not writing HI\n", i << 2);
list->flags |= LIGHTREC_NO_HI;
if (!reg_lo && !reg_hi) {
pr_debug("Both LO/HI unused in this block, they will "
"probably be used in parent block - removing "
list->flags &= ~(LIGHTREC_NO_LO | LIGHTREC_NO_HI);
if (reg_lo > 0 && reg_lo != REG_LO) {
pr_debug("Found register %s to hold LO (rs = %u, rt = %u)\n",
lightrec_reg_name(reg_lo), list->r.rs, list->r.rt);
lightrec_replace_lo_hi(block, i + 1, block->nb_ops, true);
list->r.rd = reg_lo;
if (reg_hi > 0 && reg_hi != REG_HI) {
pr_debug("Found register %s to hold HI (rs = %u, rt = %u)\n",
lightrec_reg_name(reg_hi), list->r.rs, list->r.rt);
lightrec_replace_lo_hi(block, i + 1, block->nb_ops, false);
list->r.imm = reg_hi;
static bool remove_div_sequence(struct block *block, unsigned int offset)
unsigned int i, found = 0;
* Scan for the zero-checking sequence that GCC automatically introduced
* after most DIV/DIVU opcodes. This sequence checks the value of the
* divisor, and if zero, executes a BREAK opcode, causing the BIOS
* handler to crash the PS1.
*
* For DIV opcodes, this sequence additionally checks that the signed
* operation does not overflow.
*
* Since the games presumably never crashed the PS1, we can assume that
* they never divided by zero or overflowed, and these sequences can be
* removed.
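/* A typical DIV check sequence, reconstructed from the opcode patterns
 * matched below (register names depend on the compiled code):
 *
 *     bne   <divisor>, $zero, 1f
 *     nop
 *     break 0x7                      ; division by zero
 * 1:  li    $at, -1
 *     bne   <divisor>, $at, 2f
 *     lui   $at, 0x8000
 *     bne   <dividend>, $at, 2f
 *     nop
 *     break 0x6                      ; signed overflow
 * 2:  ...
 */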
for (i = offset; i < block->nb_ops; i++) {
op = &block->opcode_list[i];
if (op->i.op == OP_SPECIAL &&
(op->r.op == OP_SPECIAL_DIV || op->r.op == OP_SPECIAL_DIVU))
if ((op->opcode & 0xfc1fffff) == 0x14000002) {
/* BNE ???, zero, +8 */
} else if (found == 1 && !op->opcode) {
} else if (found == 2 && op->opcode == 0x0007000d) {
} else if (found == 3 && op->opcode == 0x2401ffff) {
} else if (found == 4 && (op->opcode & 0xfc1fffff) == 0x14010004) {
/* BNE ???, at, +16 */
} else if (found == 5 && op->opcode == 0x3c018000) {
/* LUI at, 0x8000 */
} else if (found == 6 && (op->opcode & 0x141fffff) == 0x14010002) {
/* BNE ???, at, +8 */
} else if (found == 7 && !op->opcode) {
} else if (found == 8 && op->opcode == 0x0006000d) {
pr_debug("Removing DIV%s sequence at offset 0x%x\n",
found == 9 ? "" : "U", offset << 2);
for (i = 0; i < found; i++)
block->opcode_list[offset + i].opcode = 0;
static int lightrec_remove_div_by_zero_check_sequence(struct lightrec_state *state,
struct block *block)
for (i = 0; i < block->nb_ops; i++) {
op = &block->opcode_list[i];
if (op->i.op == OP_SPECIAL &&
(op->r.op == OP_SPECIAL_DIVU || op->r.op == OP_SPECIAL_DIV) &&
remove_div_sequence(block, i + 1))
op->flags |= LIGHTREC_NO_DIV_CHECK;
static const u32 memset_code[] = {
0x10a00006, // beqz a1, 2f
0x24a2ffff, // addiu v0,a1,-1
0x2403ffff, // li v1,-1
0xac800000, // 1: sw zero,0(a0)
0x2442ffff, // addiu v0,v0,-1
0x1443fffd, // bne v0,v1, 1b
0x24840004, // addiu a0,a0,4
0x03e00008, // 2: jr ra
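/* For reference, the sequence above stores a1 zero-valued words starting
 * at address a0 and then returns, i.e. roughly memset(a0, 0, a1 * 4). */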
static int lightrec_replace_memset(struct lightrec_state *state, struct block *block)
for (i = 0; i < block->nb_ops; i++) {
c = block->opcode_list[i].c;
if (c.opcode != memset_code[i])
if (i == ARRAY_SIZE(memset_code) - 1) {
pr_debug("Block at PC 0x%x is a memset\n", block->pc);
block->flags |= BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE;
/* Return non-zero to skip other optimizers. */
static int (*lightrec_optimizers[])(struct lightrec_state *state, struct block *) = {
IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence),
IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset),
IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches),
IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches),
IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches),
IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops),
IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),
IF_OPT(OPT_FLAG_IO || OPT_FLAG_STORES, &lightrec_flag_io),
IF_OPT(OPT_FLAG_MULT_DIV, &lightrec_flag_mults_divs),
IF_OPT(OPT_EARLY_UNLOAD, &lightrec_early_unload),
int lightrec_optimize(struct lightrec_state *state, struct block *block)
for (i = 0; i < ARRAY_SIZE(lightrec_optimizers); i++) {
if (lightrec_optimizers[i]) {
ret = (*lightrec_optimizers[i])(state, block);