1 // SPDX-License-Identifier: LGPL-2.1-or-later
3 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
7 #include "lightrec-config.h"
8 #include "disassembler.h"
10 #include "memmanager.h"
11 #include "optimizer.h"
19 #define IF_OPT(opt, ptr) ((opt) ? (ptr) : NULL)
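/* IF_OPT() yields NULL when the optimization is compiled out; NULL entries
 * are skipped by lightrec_optimize() at the bottom of this file. */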
21 struct optimizer_list {
22 void (**optimizers)(struct opcode *);
23 unsigned int nb_optimizers;
26 static bool is_nop(union code op);
28 bool is_unconditional_jump(union code c)
32 return c.r.op == OP_SPECIAL_JR || c.r.op == OP_SPECIAL_JALR;
38 return c.i.rs == c.i.rt;
40 return (c.r.rt == OP_REGIMM_BGEZ ||
41 c.r.rt == OP_REGIMM_BGEZAL) && c.i.rs == 0;
47 bool is_syscall(union code c)
49 return (c.i.op == OP_SPECIAL && c.r.op == OP_SPECIAL_SYSCALL) ||
50 (c.i.op == OP_CP0 && (c.r.rs == OP_CP0_MTC0 ||
51 c.r.rs == OP_CP0_CTC0) &&
52 (c.r.rd == 12 || c.r.rd == 13)); /* CP0 reg 12 = SR, 13 = CAUSE */
55 static u64 opcode_read_mask(union code op)
60 case OP_SPECIAL_SYSCALL:
61 case OP_SPECIAL_BREAK:
80 return BIT(op.r.rs) | BIT(op.r.rt);
91 if (op.r.op == OP_CP2_BASIC) {
93 case OP_CP2_BASIC_MTC2:
94 case OP_CP2_BASIC_CTC2:
106 if (op.i.rs == op.i.rt)
119 return BIT(op.i.rs) | BIT(op.i.rt);
127 static u64 mult_div_write_mask(union code op)
131 if (!OPT_FLAG_MULT_DIV)
132 return BIT(REG_LO) | BIT(REG_HI);
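/* When the MULT/DIV optimization is enabled, lightrec_flag_mults_divs()
 * (further down in this file) may redirect the results into general-purpose
 * registers: the rd field then names the register receiving LO and the imm
 * field the one receiving HI, which is what the mask below reflects. */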
135 flags = BIT(op.r.rd);
139 flags |= BIT(op.r.imm);
141 flags |= BIT(REG_HI);
146 u64 opcode_write_mask(union code op)
151 return mult_div_write_mask(op);
157 case OP_SPECIAL_SYSCALL:
158 case OP_SPECIAL_BREAK:
160 case OP_SPECIAL_MULT:
161 case OP_SPECIAL_MULTU:
163 case OP_SPECIAL_DIVU:
164 return mult_div_write_mask(op);
165 case OP_SPECIAL_MTHI:
167 case OP_SPECIAL_MTLO:
204 if (op.r.op == OP_CP2_BASIC) {
206 case OP_CP2_BASIC_MFC2:
207 case OP_CP2_BASIC_CFC2:
216 case OP_REGIMM_BLTZAL:
217 case OP_REGIMM_BGEZAL:
227 bool opcode_reads_register(union code op, u8 reg)
229 return opcode_read_mask(op) & BIT(reg);
232 bool opcode_writes_register(union code op, u8 reg)
234 return opcode_write_mask(op) & BIT(reg);
237 static int find_prev_writer(const struct opcode *list, unsigned int offset, u8 reg)
242 if (op_flag_sync(list[offset].flags))
245 for (i = offset; i > 0; i--) {
248 if (opcode_writes_register(c, reg)) {
249 if (i > 1 && has_delay_slot(list[i - 2].c))
255 if (op_flag_sync(list[i - 1].flags) ||
257 opcode_reads_register(c, reg))
264 static int find_next_reader(const struct opcode *list, unsigned int offset, u8 reg)
269 if (op_flag_sync(list[offset].flags))
272 for (i = offset; ; i++) {
275 if (opcode_reads_register(c, reg))
278 if (op_flag_sync(list[i].flags)
279 || (op_flag_no_ds(list[i].flags) && has_delay_slot(c))
280 || is_delay_slot(list, i)
281 || opcode_writes_register(c, reg))
288 static bool reg_is_dead(const struct opcode *list, unsigned int offset, u8 reg)
292 if (op_flag_sync(list[offset].flags) || is_delay_slot(list, offset))
295 for (i = offset + 1; ; i++) {
296 if (opcode_reads_register(list[i].c, reg))
299 if (opcode_writes_register(list[i].c, reg))
302 if (is_syscall(list[i].c))
305 if (has_delay_slot(list[i].c)) {
306 if (op_flag_no_ds(list[i].flags) ||
307 opcode_reads_register(list[i + 1].c, reg))
310 return opcode_writes_register(list[i + 1].c, reg);
315 static bool reg_is_read(const struct opcode *list,
316 unsigned int a, unsigned int b, u8 reg)
318 /* Return true if reg is read in one of the opcodes of the interval
321 if (!is_nop(list[a].c) && opcode_reads_register(list[a].c, reg))
328 static bool reg_is_written(const struct opcode *list,
329 unsigned int a, unsigned int b, u8 reg)
331 /* Return true if reg is written in one of the opcodes of the interval
335 if (!is_nop(list[a].c) && opcode_writes_register(list[a].c, reg))
342 static bool reg_is_read_or_written(const struct opcode *list,
343 unsigned int a, unsigned int b, u8 reg)
345 return reg_is_read(list, a, b, reg) || reg_is_written(list, a, b, reg);
348 bool opcode_is_mfc(union code op)
362 if (op.r.op == OP_CP2_BASIC) {
364 case OP_CP2_BASIC_MFC2:
365 case OP_CP2_BASIC_CFC2:
380 bool opcode_is_load(union code op)
398 static bool opcode_is_store(union code op)
414 static u8 opcode_get_io_size(union code op)
430 bool opcode_is_io(union code op)
432 return opcode_is_load(op) || opcode_is_store(op);
436 static bool is_nop(union code op)
438 if (opcode_writes_register(op, 0)) {
441 return op.r.rs != OP_CP0_MFC0;
460 return op.r.rd == op.r.rt && op.r.rd == op.r.rs;
462 case OP_SPECIAL_ADDU:
463 return (op.r.rd == op.r.rt && op.r.rs == 0) ||
464 (op.r.rd == op.r.rs && op.r.rt == 0);
466 case OP_SPECIAL_SUBU:
467 return op.r.rd == op.r.rs && op.r.rt == 0;
469 if (op.r.rd == op.r.rt)
470 return op.r.rd == op.r.rs || op.r.rs == 0;
472 return (op.r.rd == op.r.rs) && op.r.rt == 0;
476 return op.r.rd == op.r.rt && op.r.imm == 0;
477 case OP_SPECIAL_MFHI:
478 case OP_SPECIAL_MFLO:
486 return op.i.rt == op.i.rs && op.i.imm == 0;
488 return (op.i.rs == 0 || op.i.imm == 1);
490 return (op.r.rt == OP_REGIMM_BLTZ ||
491 op.r.rt == OP_REGIMM_BLTZAL) &&
492 (op.i.rs == 0 || op.i.imm == 1);
494 return (op.i.rs == op.i.rt || op.i.imm == 1);
500 static void lightrec_optimize_sll_sra(struct opcode *list, unsigned int offset,
501 struct constprop_data *v)
503 struct opcode *ldop = NULL, *curr = &list[offset], *next;
504 struct opcode *to_change, *to_nop;
507 if (curr->r.imm != 24 && curr->r.imm != 16)
510 if (is_delay_slot(list, offset))
513 idx = find_next_reader(list, offset + 1, curr->r.rd);
519 if (next->i.op != OP_SPECIAL || next->r.op != OP_SPECIAL_SRA ||
520 next->r.imm != curr->r.imm || next->r.rt != curr->r.rd)
523 if (curr->r.rd != curr->r.rt && next->r.rd != next->r.rt) {
528 if (!reg_is_dead(list, idx, curr->r.rd) ||
529 reg_is_read_or_written(list, offset, idx, next->r.rd))
532 /* If rY is dead after the SRA, and rZ is not used after the SLL,
533 * we can change rY to rZ */
535 pr_debug("Detected SLL/SRA with middle temp register\n");
536 curr->r.rd = next->r.rd;
537 next->r.rt = curr->r.rd;
540 /* We got an SLL/SRA combo. If imm is #16, that's a cast to s16.
541 * If imm is #24, that's a cast to s8.
543 * First of all, make sure that the target register of the SLL is not
544 * read after the SRA. */
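/* A minimal illustration of the pattern (register names are hypothetical):
 *   sll $v0, $a0, 24
 *   sra $v0, $v0, 24
 * sign-extends the low byte of $a0 into $v0, and can be collapsed into the
 * single EXTC meta-opcode further below. */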
546 if (curr->r.rd == curr->r.rt) {
553 /* rX is used after the SRA - we cannot convert it. */
554 if (curr->r.rd != next->r.rd && !reg_is_dead(list, idx, curr->r.rd))
564 idx2 = find_prev_writer(list, offset, curr->r.rt);
566 /* Note that PSX games sometimes do casts after
567 * a LHU or LBU; in this case we can change the
568 * load opcode to a LH or LB, and the cast can
569 * be changed to a MOV or a simple NOP. */
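/* For instance (hypothetical registers):
 *   lbu $t0, 0($a0)
 *   sll $t1, $t0, 24
 *   sra $t1, $t1, 24
 * the LBU can be turned into an LB and the shift pair reduced to a MOV or
 * to NOPs, depending on how the registers are reused afterwards. */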
573 if (next->r.rd != ldop->i.rt &&
574 !reg_is_dead(list, idx, ldop->i.rt))
576 else if (curr->r.imm == 16 && ldop->i.op == OP_LHU)
578 else if (curr->r.imm == 24 && ldop->i.op == OP_LBU)
584 if (next->r.rd == ldop->i.rt) {
585 to_change->opcode = 0;
586 } else if (reg_is_dead(list, idx, ldop->i.rt) &&
587 !reg_is_read_or_written(list, idx2 + 1, idx, next->r.rd)) {
588 /* The target register of the SRA is dead after the
589 * LBU/LHU; we can change the target register of the
590 * LBU/LHU to the one of the SRA. */
591 v[ldop->i.rt].known = 0;
592 v[ldop->i.rt].sign = 0;
593 ldop->i.rt = next->r.rd;
594 to_change->opcode = 0;
596 to_change->i.op = OP_META;
597 to_change->m.op = OP_META_MOV;
598 to_change->m.rd = next->r.rd;
599 to_change->m.rs = ldop->i.rt;
602 if (to_nop->r.imm == 24)
603 pr_debug("Convert LBU+SLL+SRA to LB\n");
605 pr_debug("Convert LHU+SLL+SRA to LH\n");
607 v[ldop->i.rt].known = 0;
608 v[ldop->i.rt].sign = 0xffffff80 << (24 - curr->r.imm);
613 pr_debug("Convert SLL/SRA #%u to EXT%c\n",
614 curr->r.imm, curr->r.imm == 24 ? 'C' : 'S');
616 to_change->m.rs = curr->r.rt;
617 to_change->m.op = to_nop->r.imm == 24 ? OP_META_EXTC : OP_META_EXTS;
618 to_change->i.op = OP_META;
625 lightrec_remove_useless_lui(struct block *block, unsigned int offset,
626 const struct constprop_data *v)
628 struct opcode *list = block->opcode_list,
629 *op = &block->opcode_list[offset];
632 if (!op_flag_sync(op->flags) && is_known(v, op->i.rt) &&
633 v[op->i.rt].value == op->i.imm << 16) {
634 pr_debug("Converting duplicated LUI to NOP\n");
639 if (op->i.imm != 0 || op->i.rt == 0 || offset == block->nb_ops - 1)
642 reader = find_next_reader(list, offset + 1, op->i.rt);
646 if (opcode_writes_register(list[reader].c, op->i.rt) ||
647 reg_is_dead(list, reader, op->i.rt)) {
648 pr_debug("Removing useless LUI 0x0\n");
650 if (list[reader].i.rs == op->i.rt)
651 list[reader].i.rs = 0;
652 if (list[reader].i.op == OP_SPECIAL &&
653 list[reader].i.rt == op->i.rt)
654 list[reader].i.rt = 0;
659 static void lightrec_lui_to_movi(struct block *block, unsigned int offset)
661 struct opcode *ori, *lui = &block->opcode_list[offset];
664 if (lui->i.op != OP_LUI)
667 next = find_next_reader(block->opcode_list, offset + 1, lui->i.rt);
669 ori = &block->opcode_list[next];
675 if (ori->i.rs == ori->i.rt && ori->i.imm) {
676 ori->flags |= LIGHTREC_MOVI;
677 lui->flags |= LIGHTREC_MOVI;
684 static void lightrec_modify_lui(struct block *block, unsigned int offset)
686 union code c, *lui = &block->opcode_list[offset].c;
687 bool stop = false, stop_next = false;
690 for (i = offset + 1; !stop && i < block->nb_ops; i++) {
691 c = block->opcode_list[i].c;
694 if ((opcode_is_store(c) && c.i.rt == lui->i.rt)
695 || (!opcode_is_load(c) && opcode_reads_register(c, lui->i.rt)))
698 if (opcode_writes_register(c, lui->i.rt)) {
699 if (c.i.op == OP_LWL || c.i.op == OP_LWR) {
700 /* LWL/LWR only partially write their target register;
701 * therefore the LUI should not write a different value. */
705 pr_debug("Convert LUI at offset 0x%x to kuseg\n",
707 lui->i.imm = kunseg(lui->i.imm << 16) >> 16;
711 if (has_delay_slot(c))
716 static int lightrec_transform_branches(struct lightrec_state *state,
723 for (i = 0; i < block->nb_ops; i++) {
724 op = &block->opcode_list[i];
728 /* Transform J opcode into BEQ $zero, $zero if possible. */
729 offset = (s32)((block->pc & 0xf0000000) >> 2 | op->j.imm)
730 - (s32)(block->pc >> 2) - (s32)i - 1;
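/* The offset is counted in instruction words, relative to the delay slot
 * of the J; the transformation is only possible if it fits in the signed
 * 16-bit immediate of a BEQ, hence the check below. */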
732 if (offset == (s16)offset) {
733 pr_debug("Transform J into BEQ $zero, $zero\n");
749 static inline bool is_power_of_two(u32 value)
751 return popcount32(value) == 1;
754 static void lightrec_patch_known_zero(struct opcode *op,
755 const struct constprop_data *v)
761 case OP_SPECIAL_JALR:
762 case OP_SPECIAL_MTHI:
763 case OP_SPECIAL_MTLO:
764 if (is_known_zero(v, op->r.rs))
768 if (is_known_zero(v, op->r.rs))
774 if (is_known_zero(v, op->r.rt))
777 case OP_SPECIAL_SYSCALL:
778 case OP_SPECIAL_BREAK:
779 case OP_SPECIAL_MFHI:
780 case OP_SPECIAL_MFLO:
788 if (is_known_zero(v, op->r.rt))
796 if (op->r.op == OP_CP2_BASIC) {
798 case OP_CP2_BASIC_MTC2:
799 case OP_CP2_BASIC_CTC2:
800 if (is_known_zero(v, op->r.rt))
810 if (is_known_zero(v, op->i.rt))
826 if (is_known_zero(v, op->m.rs))
835 if (is_known_zero(v, op->i.rt))
848 if (is_known(v, op->i.rs)
849 && kunseg(v[op->i.rs].value) == 0)
857 static void lightrec_reset_syncs(struct block *block)
859 struct opcode *op, *list = block->opcode_list;
863 for (i = 0; i < block->nb_ops; i++)
864 list[i].flags &= ~LIGHTREC_SYNC;
866 for (i = 0; i < block->nb_ops; i++) {
869 if (has_delay_slot(op->c)) {
870 if (op_flag_local_branch(op->flags)) {
871 offset = i + 1 - op_flag_no_ds(op->flags) + (s16)op->i.imm;
872 list[offset].flags |= LIGHTREC_SYNC;
875 if (op_flag_emulate_branch(op->flags) && i + 2 < block->nb_ops)
876 list[i + 2].flags |= LIGHTREC_SYNC;
881 static void maybe_remove_load_delay(struct opcode *op)
883 if (op_flag_load_delay(op->flags) && opcode_is_load(op->c))
884 op->flags &= ~LIGHTREC_LOAD_DELAY;
887 static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
889 struct opcode *op, *list = block->opcode_list;
890 struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
896 for (i = 0; i < block->nb_ops; i++) {
899 lightrec_consts_propagate(block, i, v);
901 lightrec_patch_known_zero(op, v);
903 /* Transform all opcodes detected as useless to real NOPs
904 * (0x0: SLL r0, r0, #0) */
905 if (op->opcode != 0 && is_nop(op->c)) {
906 pr_debug("Converting useless opcode 0x%08x to NOP\n",
916 if (op->i.rs == op->i.rt ||
917 (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
918 v[op->i.rs].value == v[op->i.rt].value)) {
919 if (op->i.rs != op->i.rt)
920 pr_debug("Found always-taken BEQ\n");
924 } else if (v[op->i.rs].known & v[op->i.rt].known &
925 (v[op->i.rs].value ^ v[op->i.rt].value)) {
926 pr_debug("Found never-taken BEQ\n");
928 if (!op_flag_no_ds(op->flags))
929 maybe_remove_load_delay(&list[i + 1]);
931 local = op_flag_local_branch(op->flags);
936 lightrec_reset_syncs(block);
937 } else if (op->i.rs == 0) {
944 if (v[op->i.rs].known & v[op->i.rt].known &
945 (v[op->i.rs].value ^ v[op->i.rt].value)) {
946 pr_debug("Found always-taken BNE\n");
951 } else if (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
952 v[op->i.rs].value == v[op->i.rt].value) {
953 pr_debug("Found never-taken BNE\n");
955 if (!op_flag_no_ds(op->flags))
956 maybe_remove_load_delay(&list[i + 1]);
958 local = op_flag_local_branch(op->flags);
963 lightrec_reset_syncs(block);
964 } else if (op->i.rs == 0) {
971 if (v[op->i.rs].known & BIT(31) &&
972 v[op->i.rs].value & BIT(31)) {
973 pr_debug("Found always-taken BLEZ\n");
982 if (v[op->i.rs].known & BIT(31) &&
983 v[op->i.rs].value & BIT(31)) {
984 pr_debug("Found never-taken BGTZ\n");
986 if (!op_flag_no_ds(op->flags))
987 maybe_remove_load_delay(&list[i + 1]);
989 local = op_flag_local_branch(op->flags);
994 lightrec_reset_syncs(block);
999 if (i == 0 || !has_delay_slot(list[i - 1].c))
1000 lightrec_modify_lui(block, i);
1001 lightrec_remove_useless_lui(block, i, v);
1002 if (i == 0 || !has_delay_slot(list[i - 1].c))
1003 lightrec_lui_to_movi(block, i);
1006 /* Transform ORI/ADDI/ADDIU with imm #0 or OR/ADD/ADDU/SUB/SUBU
1007 * with register $zero to the MOV meta-opcode */
1011 if (op->i.imm == 0) {
1012 pr_debug("Convert ORI/ADDI/ADDIU #0 to MOV\n");
1013 op->m.rd = op->i.rt;
1014 op->m.op = OP_META_MOV;
1019 if (bits_are_known_zero(v, op->i.rs, ~op->i.imm)) {
1020 pr_debug("Found useless ANDI 0x%x\n", op->i.imm);
1022 if (op->i.rs == op->i.rt) {
1025 op->m.rd = op->i.rt;
1026 op->m.op = OP_META_MOV;
1033 if (i == 0 || !has_delay_slot(list[i - 1].c)) {
1034 idx = find_next_reader(list, i + 1, op->i.rt);
1035 if (idx > 0 && list[idx].i.op == (op->i.op ^ 0x4)
1036 && list[idx].i.rs == op->i.rs
1037 && list[idx].i.rt == op->i.rt
1038 && abs((s16)op->i.imm - (s16)list[idx].i.imm) == 3) {
1039 /* Replace a LWL/LWR combo with a META_LWU */
1040 if (op->i.op == OP_LWL)
1042 op->i.op = OP_META_LWU;
1043 list[idx].opcode = 0;
1044 pr_debug("Convert LWL/LWR to LWU\n");
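/* Illustrative pattern (hypothetical registers, offsets differing by 3):
 *   lwl $t0, 3($a0)
 *   lwr $t0, 0($a0)
 * reads one unaligned 32-bit word; the pair is merged into a single
 * META_LWU opcode and the second load is turned into a NOP. */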
1050 if (i == 0 || !has_delay_slot(list[i - 1].c)) {
1051 idx = find_next_reader(list, i + 1, op->i.rt);
1052 if (idx > 0 && list[idx].i.op == (op->i.op ^ 0x4)
1053 && list[idx].i.rs == op->i.rs
1054 && list[idx].i.rt == op->i.rt
1055 && abs((s16)op->i.imm - (s16)list[idx].i.imm) == 3) {
1056 /* Replace a SWL/SWR combo with a META_SWU */
1057 if (op->i.op == OP_SWL)
1059 op->i.op = OP_META_SWU;
1060 list[idx].opcode = 0;
1061 pr_debug("Convert SWL/SWR to SWU\n");
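/* Same idea as the LWL/LWR case above, but for stores: e.g.
 * "swl $t0, 3($a0); swr $t0, 0($a0)" performs one unaligned word store
 * and is collapsed into META_SWU, with the second opcode NOP'd out. */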
1067 case OP_REGIMM_BLTZ:
1068 case OP_REGIMM_BGEZ:
1069 if (!(v[op->r.rs].known & BIT(31)))
1072 if (!!(v[op->r.rs].value & BIT(31))
1073 ^ (op->r.rt == OP_REGIMM_BGEZ)) {
1074 pr_debug("Found always-taken BLTZ/BGEZ\n");
1079 pr_debug("Found never-taken BLTZ/BGEZ\n");
1081 if (!op_flag_no_ds(op->flags))
1082 maybe_remove_load_delay(&list[i + 1]);
1084 local = op_flag_local_branch(op->flags);
1089 lightrec_reset_syncs(block);
1092 case OP_REGIMM_BLTZAL:
1093 case OP_REGIMM_BGEZAL:
1094 /* TODO: Detect always-taken and replace with JAL */
1100 case OP_SPECIAL_SRAV:
1101 if ((v[op->r.rs].known & 0x1f) != 0x1f)
1104 pr_debug("Convert SRAV to SRA\n");
1105 op->r.imm = v[op->r.rs].value & 0x1f;
1106 op->r.op = OP_SPECIAL_SRA;
1109 case OP_SPECIAL_SRA:
1110 if (op->r.imm == 0) {
1111 pr_debug("Convert SRA #0 to MOV\n");
1112 op->m.rs = op->r.rt;
1113 op->m.op = OP_META_MOV;
1119 case OP_SPECIAL_SLLV:
1120 if ((v[op->r.rs].known & 0x1f) != 0x1f)
1123 pr_debug("Convert SLLV to SLL\n");
1124 op->r.imm = v[op->r.rs].value & 0x1f;
1125 op->r.op = OP_SPECIAL_SLL;
1128 case OP_SPECIAL_SLL:
1129 if (op->r.imm == 0) {
1130 pr_debug("Convert SLL #0 to MOV\n");
1131 op->m.rs = op->r.rt;
1132 op->m.op = OP_META_MOV;
1136 lightrec_optimize_sll_sra(block->opcode_list, i, v);
1139 case OP_SPECIAL_SRLV:
1140 if ((v[op->r.rs].known & 0x1f) != 0x1f)
1143 pr_debug("Convert SRLV to SRL\n");
1144 op->r.imm = v[op->r.rs].value & 0x1f;
1145 op->r.op = OP_SPECIAL_SRL;
1148 case OP_SPECIAL_SRL:
1149 if (op->r.imm == 0) {
1150 pr_debug("Convert SRL #0 to MOV\n");
1151 op->m.rs = op->r.rt;
1152 op->m.op = OP_META_MOV;
1157 case OP_SPECIAL_MULT:
1158 case OP_SPECIAL_MULTU:
1159 if (is_known(v, op->r.rs) &&
1160 is_power_of_two(v[op->r.rs].value)) {
1162 op->c.i.rs = op->c.i.rt;
1164 } else if (!is_known(v, op->r.rt) ||
1165 !is_power_of_two(v[op->r.rt].value)) {
1169 pr_debug("Multiply by power-of-two: %u\n",
1172 if (op->r.op == OP_SPECIAL_MULT)
1173 op->i.op = OP_META_MULT2;
1175 op->i.op = OP_META_MULTU2;
1177 op->r.op = ctz32(v[op->r.rt].value);
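/* Hypothetical example: "mult $a0, $a1" where $a1 is known to hold 8 becomes
 * META_MULT2 with a shift amount of ctz32(8) = 3, i.e. a left shift by 3
 * instead of a full multiplication. */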
1179 case OP_SPECIAL_NOR:
1180 if (op->r.rs == 0 || op->r.rt == 0) {
1181 pr_debug("Convert NOR $zero to COM\n");
1183 op->m.op = OP_META_COM;
1185 op->m.rs = op->r.rt;
1189 case OP_SPECIAL_ADD:
1190 case OP_SPECIAL_ADDU:
1191 if (op->r.rs == 0) {
1192 pr_debug("Convert OR/ADD $zero to MOV\n");
1193 op->m.rs = op->r.rt;
1194 op->m.op = OP_META_MOV;
1198 case OP_SPECIAL_SUB:
1199 case OP_SPECIAL_SUBU:
1200 if (op->r.rt == 0) {
1201 pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
1202 op->m.op = OP_META_MOV;
1218 static bool lightrec_can_switch_delay_slot(union code op, union code next_op)
1223 case OP_SPECIAL_JALR:
1224 if (opcode_reads_register(next_op, op.r.rd) ||
1225 opcode_writes_register(next_op, op.r.rd))
1229 if (opcode_writes_register(next_op, op.r.rs))
1239 if (opcode_reads_register(next_op, 31) ||
1240 opcode_writes_register(next_op, 31))
1246 if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
1251 if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
1256 case OP_REGIMM_BLTZAL:
1257 case OP_REGIMM_BGEZAL:
1258 if (opcode_reads_register(next_op, 31) ||
1259 opcode_writes_register(next_op, 31))
1262 case OP_REGIMM_BLTZ:
1263 case OP_REGIMM_BGEZ:
1264 if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
1276 static int lightrec_switch_delay_slots(struct lightrec_state *state, struct block *block)
1278 struct opcode *list, *next = &block->opcode_list[0];
1280 union code op, next_op;
1283 for (i = 0; i < block->nb_ops - 1; i++) {
1285 next = &block->opcode_list[i + 1];
1289 if (!has_delay_slot(op) || op_flag_no_ds(list->flags) ||
1290 op_flag_emulate_branch(list->flags) ||
1291 op.opcode == 0 || next_op.opcode == 0)
1294 if (is_delay_slot(block->opcode_list, i))
1297 if (op_flag_sync(next->flags))
1300 if (op_flag_load_delay(next->flags) && opcode_is_load(next_op))
1303 if (!lightrec_can_switch_delay_slot(list->c, next_op))
1306 pr_debug("Swap branch and delay slot opcodes "
1307 "at offsets 0x%x / 0x%x\n",
1308 i << 2, (i + 1) << 2);
1310 flags = next->flags | (list->flags & LIGHTREC_SYNC);
1313 next->flags = (list->flags | LIGHTREC_NO_DS) & ~LIGHTREC_SYNC;
1314 list->flags = flags | LIGHTREC_NO_DS;
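/* Illustrative effect (hypothetical opcodes): "beq $a0, $zero, L" followed by
 * "addiu $v0, $v0, 1" in its delay slot becomes "addiu; beq" with the branch
 * flagged NO_DS, so no delay-slot handling is needed when compiling it. */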
1320 static int lightrec_detect_impossible_branches(struct lightrec_state *state,
1321 struct block *block)
1323 struct opcode *op, *list = block->opcode_list, *next = &list[0];
1327 for (i = 0; i < block->nb_ops - 1; i++) {
1329 next = &list[i + 1];
1331 if (!has_delay_slot(op->c) ||
1332 (!has_delay_slot(next->c) &&
1333 !opcode_is_mfc(next->c) &&
1334 !(next->i.op == OP_CP0 && next->r.rs == OP_CP0_RFE)))
1337 if (op->c.opcode == next->c.opcode) {
1338 /* The delay slot is the exact same opcode as the branch
1339 * opcode: this is effectively a NOP */
1344 op->flags |= LIGHTREC_EMULATE_BRANCH;
1346 if (OPT_LOCAL_BRANCHES && i + 2 < block->nb_ops) {
1347 /* The interpreter will only emulate the branch, then
1348 * return to the compiled code. Add a SYNC after the
1349 * branch + delay slot in the case where the branch
1351 list[i + 2].flags |= LIGHTREC_SYNC;
1358 static bool is_local_branch(const struct block *block, unsigned int idx)
1360 const struct opcode *op = &block->opcode_list[idx];
1363 switch (op->c.i.op) {
1369 offset = idx + 1 + (s16)op->c.i.imm;
1370 if (offset >= 0 && offset < block->nb_ops)
1378 static int lightrec_handle_load_delays(struct lightrec_state *state,
1379 struct block *block)
1381 struct opcode *op, *list = block->opcode_list;
1385 for (i = 0; i < block->nb_ops; i++) {
1388 if (!opcode_is_load(op->c) || !op->c.i.rt || op->c.i.op == OP_LWC2)
1391 if (!is_delay_slot(list, i)) {
1392 /* Only handle load delays in delay slots.
1393 * PSX games never abused load delay slots otherwise. */
1397 if (is_local_branch(block, i - 1)) {
1398 imm = (s16)list[i - 1].c.i.imm;
1400 if (!opcode_reads_register(list[i + imm].c, op->c.i.rt)) {
1401 /* The target opcode of the branch is inside
1402 * the block, and it does not read the register
1403 * written to by the load opcode; we can ignore
1404 * the load delay. */
1409 op->flags |= LIGHTREC_LOAD_DELAY;
1415 static int lightrec_swap_load_delays(struct lightrec_state *state,
1416 struct block *block)
1420 bool in_ds = false, skip_next = false;
1423 if (block->nb_ops < 2)
1426 for (i = 0; i < block->nb_ops - 2; i++) {
1427 c = block->opcode_list[i].c;
1431 } else if (!in_ds && opcode_is_load(c) && c.i.op != OP_LWC2) {
1432 next = block->opcode_list[i + 1].c;
1434 switch (next.i.op) {
1445 if (opcode_reads_register(next, c.i.rt)
1446 && !opcode_writes_register(next, c.i.rs)) {
1447 pr_debug("Swapping opcodes at offset 0x%x to "
1448 "respect load delay\n", i << 2);
1450 op = block->opcode_list[i];
1451 block->opcode_list[i] = block->opcode_list[i + 1];
1452 block->opcode_list[i + 1] = op;
1457 in_ds = has_delay_slot(c);
1463 static int lightrec_local_branches(struct lightrec_state *state, struct block *block)
1465 const struct opcode *ds;
1466 struct opcode *list;
1470 for (i = 0; i < block->nb_ops; i++) {
1471 list = &block->opcode_list[i];
1473 if (should_emulate(list) || !is_local_branch(block, i))
1476 offset = i + 1 + (s16)list->c.i.imm;
1478 pr_debug("Found local branch to offset 0x%x\n", offset << 2);
1480 ds = get_delay_slot(block->opcode_list, i);
1481 if (op_flag_load_delay(ds->flags) && opcode_is_load(ds->c)) {
1482 pr_debug("Branch delay slot has a load delay - skip\n");
1486 if (should_emulate(&block->opcode_list[offset])) {
1487 pr_debug("Branch target must be emulated - skip\n");
1491 if (offset && has_delay_slot(block->opcode_list[offset - 1].c)) {
1492 pr_debug("Branch target is a delay slot - skip\n");
1496 list->flags |= LIGHTREC_LOCAL_BRANCH;
1499 lightrec_reset_syncs(block);
1504 bool has_delay_slot(union code op)
1510 case OP_SPECIAL_JALR:
1528 bool is_delay_slot(const struct opcode *list, unsigned int offset)
1531 && !op_flag_no_ds(list[offset - 1].flags)
1532 && has_delay_slot(list[offset - 1].c);
1535 bool should_emulate(const struct opcode *list)
1537 return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c);
1540 static bool op_writes_rd(union code c)
1551 static void lightrec_add_reg_op(struct opcode *op, u8 reg, u32 reg_op)
1553 if (op_writes_rd(op->c) && reg == op->r.rd)
1554 op->flags |= LIGHTREC_REG_RD(reg_op);
1555 else if (op->i.rs == reg)
1556 op->flags |= LIGHTREC_REG_RS(reg_op);
1557 else if (op->i.rt == reg)
1558 op->flags |= LIGHTREC_REG_RT(reg_op);
1560 pr_debug("Cannot add unload/clean/discard flag: "
1561 "opcode does not touch register %s!\n",
1562 lightrec_reg_name(reg));
1565 static void lightrec_add_unload(struct opcode *op, u8 reg)
1567 lightrec_add_reg_op(op, reg, LIGHTREC_REG_UNLOAD);
1570 static void lightrec_add_discard(struct opcode *op, u8 reg)
1572 lightrec_add_reg_op(op, reg, LIGHTREC_REG_DISCARD);
1575 static void lightrec_add_clean(struct opcode *op, u8 reg)
1577 lightrec_add_reg_op(op, reg, LIGHTREC_REG_CLEAN);
1581 lightrec_early_unload_sync(struct opcode *list, s16 *last_r, s16 *last_w)
1586 for (reg = 0; reg < 34; reg++) {
1587 offset = s16_max(last_w[reg], last_r[reg]);
1590 lightrec_add_unload(&list[offset], reg);
1593 memset(last_r, 0xff, sizeof(*last_r) * 34);
1594 memset(last_w, 0xff, sizeof(*last_w) * 34);
1597 static int lightrec_early_unload(struct lightrec_state *state, struct block *block)
1601 s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0;
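/* 34 entries: one slot per MIPS general-purpose register, plus LO and HI. */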
1602 u64 mask_r, mask_w, dirty = 0, loaded = 0;
1603 u8 reg, load_delay_reg = 0;
1605 memset(last_r, 0xff, sizeof(last_r));
1606 memset(last_w, 0xff, sizeof(last_w));
1610 * - the register is dirty, and is read again after a branch opcode
1613 * - the register is dirty or loaded, and is not read again
1614 * - the register is dirty or loaded, and is written again after a branch opcode
1615 * - the next opcode has the SYNC flag set
1618 * - the register is dirty or loaded, and is written again
1621 for (i = 0; i < block->nb_ops; i++) {
1622 op = &block->opcode_list[i];
1624 if (OPT_HANDLE_LOAD_DELAYS && load_delay_reg) {
1625 /* Handle delayed register write from load opcodes in
1627 last_w[load_delay_reg] = i;
1631 if (op_flag_sync(op->flags) || should_emulate(op)) {
1632 /* The next opcode has the SYNC flag set, or is a branch
1633 * that should be emulated: unload all registers. */
1634 lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
1639 if (next_sync == i) {
1641 pr_debug("Last sync: 0x%x\n", last_sync << 2);
1644 if (has_delay_slot(op->c)) {
1645 next_sync = i + 1 + !op_flag_no_ds(op->flags);
1646 pr_debug("Next sync: 0x%x\n", next_sync << 2);
1649 mask_r = opcode_read_mask(op->c);
1650 mask_w = opcode_write_mask(op->c);
1652 if (op_flag_load_delay(op->flags) && opcode_is_load(op->c)) {
1653 /* If we have a load opcode in a delay slot, its target
1654 * register is actually not written there but at a
1655 * later point, in the dispatcher. Prevent the algorithm
1656 * from discarding its previous value. */
1657 load_delay_reg = op->c.i.rt;
1658 mask_w &= ~BIT(op->c.i.rt);
1661 for (reg = 0; reg < 34; reg++) {
1662 if (mask_r & BIT(reg)) {
1663 if (dirty & BIT(reg) && last_w[reg] < last_sync) {
1664 /* The register is dirty, and is read
1665 * again after a branch: clean it */
1667 lightrec_add_clean(&block->opcode_list[last_w[reg]], reg);
1675 if (mask_w & BIT(reg)) {
1676 if ((dirty & BIT(reg) && last_w[reg] < last_sync) ||
1677 (loaded & BIT(reg) && last_r[reg] < last_sync)) {
1678 /* The register is dirty or loaded, and
1679 * is written again after a branch:
1682 offset = s16_max(last_w[reg], last_r[reg]);
1683 lightrec_add_unload(&block->opcode_list[offset], reg);
1685 loaded &= ~BIT(reg);
1686 } else if (!(mask_r & BIT(reg)) &&
1687 ((dirty & BIT(reg) && last_w[reg] > last_sync) ||
1688 (loaded & BIT(reg) && last_r[reg] > last_sync))) {
1689 /* The register is dirty or loaded, and
1690 * is written again: discard it */
1692 offset = s16_max(last_w[reg], last_r[reg]);
1693 lightrec_add_discard(&block->opcode_list[offset], reg);
1695 loaded &= ~BIT(reg);
1707 /* Unload all registers that are dirty or loaded at the end of the block. */
1708 lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
1713 static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
1715 struct opcode *list;
1716 enum psx_map psx_map;
1717 struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
1719 u32 val, kunseg_val;
1722 for (i = 0; i < block->nb_ops; i++) {
1723 list = &block->opcode_list[i];
1725 lightrec_consts_propagate(block, i, v);
1727 switch (list->i.op) {
1731 /* Mark all store operations that target $sp or $gp
1732 * as not requiring code invalidation. This is based
1733 * on the heuristic that stores using one of these
1734 * registers as the address will never hit a code page. */
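/* ($gp and $sp are MIPS registers 28 and 29, hence the rs range check below.) */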
1735 if (list->i.rs >= 28 && list->i.rs <= 29 &&
1736 !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1737 pr_debug("Flaging opcode 0x%08x as not requiring invalidation\n",
1739 list->flags |= LIGHTREC_NO_INVALIDATE;
1742 /* Detect writes whose destination address is inside the
1743 * current block, using constant propagation. When these
1744 * occur, we mark the blocks as not compilable. */
1745 if (is_known(v, list->i.rs) &&
1746 kunseg(v[list->i.rs].value) >= kunseg(block->pc) &&
1747 kunseg(v[list->i.rs].value) < (kunseg(block->pc) + block->nb_ops * 4)) {
1748 pr_debug("Self-modifying block detected\n");
1749 block_set_flags(block, BLOCK_NEVER_COMPILE);
1750 list->flags |= LIGHTREC_SMC;
1764 if (v[list->i.rs].known | v[list->i.rs].sign) {
1765 psx_map = lightrec_get_constprop_map(state, v,
1769 if (psx_map != PSX_MAP_UNKNOWN && !is_known(v, list->i.rs))
1770 pr_debug("Detected map thanks to bit-level const propagation!\n");
1772 list->flags &= ~LIGHTREC_IO_MASK;
1774 val = v[list->i.rs].value + (s16) list->i.imm;
1775 kunseg_val = kunseg(val);
1777 no_mask = (v[list->i.rs].known & ~v[list->i.rs].value
1778 & 0xe0000000) == 0xe0000000;
1781 case PSX_MAP_KERNEL_USER_RAM:
1783 list->flags |= LIGHTREC_NO_MASK;
1785 case PSX_MAP_MIRROR1:
1786 case PSX_MAP_MIRROR2:
1787 case PSX_MAP_MIRROR3:
1788 pr_debug("Flaging opcode %u as RAM access\n", i);
1789 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1790 if (no_mask && state->mirrors_mapped)
1791 list->flags |= LIGHTREC_NO_MASK;
1794 pr_debug("Flaging opcode %u as BIOS access\n", i);
1795 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_BIOS);
1797 list->flags |= LIGHTREC_NO_MASK;
1799 case PSX_MAP_SCRATCH_PAD:
1800 pr_debug("Flaging opcode %u as scratchpad access\n", i);
1801 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_SCRATCH);
1803 list->flags |= LIGHTREC_NO_MASK;
1805 /* Consider that we're never going to run code from
1806 * the scratchpad. */
1807 list->flags |= LIGHTREC_NO_INVALIDATE;
1809 case PSX_MAP_HW_REGISTERS:
1810 if (state->ops.hw_direct &&
1811 state->ops.hw_direct(kunseg_val,
1812 opcode_is_store(list->c),
1813 opcode_get_io_size(list->c))) {
1814 pr_debug("Flagging opcode %u as direct I/O access\n",
1816 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT_HW);
1819 list->flags |= LIGHTREC_NO_MASK;
1821 pr_debug("Flagging opcode %u as I/O access\n",
1823 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
1831 if (!LIGHTREC_FLAGS_GET_IO_MODE(list->flags)
1832 && list->i.rs >= 28 && list->i.rs <= 29
1833 && !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
1834 /* Assume that all I/O operations that target
1835 * $sp or $gp will only ever target mapped
1836 * memory (RAM, BIOS, scratchpad). */
1837 if (state->opt_flags & LIGHTREC_OPT_SP_GP_HIT_RAM)
1838 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
1840 list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
1852 static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset,
1853 const struct opcode *last,
1854 u32 mask, bool sync, bool mflo, bool another)
1856 const struct opcode *op, *next = &block->opcode_list[offset];
1858 u8 reg2, reg = mflo ? REG_LO : REG_HI;
1862 for (i = offset; i < block->nb_ops; i++) {
1864 next = &block->opcode_list[i + 1];
1867 /* If any other opcode reads or writes the register
1868 * we'd use, then we cannot use it anymore. */
1869 mask |= opcode_read_mask(op->c);
1870 mask |= opcode_write_mask(op->c);
1872 if (op_flag_sync(op->flags))
1881 /* TODO: handle backwards branches too */
1882 if (!last && op_flag_local_branch(op->flags) &&
1883 (s16)op->c.i.imm >= 0) {
1884 branch_offset = i + 1 + (s16)op->c.i.imm
1885 - !!op_flag_no_ds(op->flags);
1887 reg = get_mfhi_mflo_reg(block, branch_offset, NULL,
1888 mask, sync, mflo, false);
1889 reg2 = get_mfhi_mflo_reg(block, offset + 1, next,
1890 mask, sync, mflo, false);
1891 if (reg > 0 && reg == reg2)
1897 return mflo ? REG_LO : REG_HI;
1899 case OP_META_MULTU2:
1903 case OP_SPECIAL_MULT:
1904 case OP_SPECIAL_MULTU:
1905 case OP_SPECIAL_DIV:
1906 case OP_SPECIAL_DIVU:
1908 case OP_SPECIAL_MTHI:
1912 case OP_SPECIAL_MTLO:
1920 if (!sync && !op_flag_no_ds(op->flags) &&
1921 (next->i.op == OP_SPECIAL) &&
1922 ((!mflo && next->r.op == OP_SPECIAL_MFHI) ||
1923 (mflo && next->r.op == OP_SPECIAL_MFLO)))
1927 case OP_SPECIAL_JALR:
1929 case OP_SPECIAL_MFHI:
1933 /* Must use REG_HI if there is another MFHI target */
1934 reg2 = get_mfhi_mflo_reg(block, i + 1, next,
1935 0, sync, mflo, true);
1936 if (reg2 > 0 && reg2 != REG_HI)
1939 if (!sync && !(old_mask & BIT(op->r.rd)))
1945 case OP_SPECIAL_MFLO:
1949 /* Must use REG_LO if there is another MFLO target */
1950 reg2 = get_mfhi_mflo_reg(block, i + 1, next,
1951 0, sync, mflo, true);
1952 if (reg2 > 0 && reg2 != REG_LO)
1955 if (!sync && !(old_mask & BIT(op->r.rd)))
1974 static void lightrec_replace_lo_hi(struct block *block, u16 offset,
1980 /* This function will remove the following MFLO/MFHI. It must be called
1981 * only if get_mfhi_mflo_reg() returned a non-zero value. */
1983 for (i = offset; i < last; i++) {
1984 struct opcode *op = &block->opcode_list[i];
1992 /* TODO: handle backwards branches too */
1993 if (op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) {
1994 branch_offset = i + 1 + (s16)op->c.i.imm
1995 - !!op_flag_no_ds(op->flags);
1997 lightrec_replace_lo_hi(block, branch_offset, last, lo);
1998 lightrec_replace_lo_hi(block, i + 1, branch_offset, lo);
2003 if (lo && op->r.op == OP_SPECIAL_MFLO) {
2004 pr_debug("Removing MFLO opcode at offset 0x%x\n",
2008 } else if (!lo && op->r.op == OP_SPECIAL_MFHI) {
2009 pr_debug("Removing MFHI opcode at offset 0x%x\n",
2022 static bool lightrec_always_skip_div_check(void)
2031 static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block *block)
2033 struct opcode *list = NULL;
2034 struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
2038 for (i = 0; i < block->nb_ops - 1; i++) {
2039 list = &block->opcode_list[i];
2041 lightrec_consts_propagate(block, i, v);
2043 switch (list->i.op) {
2045 switch (list->r.op) {
2046 case OP_SPECIAL_DIV:
2047 case OP_SPECIAL_DIVU:
2048 /* If we are dividing by a non-zero constant, don't
2049 * emit the div-by-zero check. */
2050 if (lightrec_always_skip_div_check() ||
2051 (v[list->r.rt].known & v[list->r.rt].value)) {
2052 list->flags |= LIGHTREC_NO_DIV_CHECK;
2055 case OP_SPECIAL_MULT:
2056 case OP_SPECIAL_MULTU:
2063 case OP_META_MULTU2:
2069 /* Don't support opcodes in delay slots */
2070 if (is_delay_slot(block->opcode_list, i) ||
2071 op_flag_no_ds(list->flags)) {
2075 reg_lo = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, true, false);
2077 pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
2078 " not writing LO\n", i << 2);
2079 list->flags |= LIGHTREC_NO_LO;
2082 reg_hi = get_mfhi_mflo_reg(block, i + 1, NULL, 0, false, false, false);
2084 pr_debug("Mark MULT(U)/DIV(U) opcode at offset 0x%x as"
2085 " not writing HI\n", i << 2);
2086 list->flags |= LIGHTREC_NO_HI;
2089 if (!reg_lo && !reg_hi) {
2090 pr_debug("Both LO/HI unused in this block, they will "
2091 "probably be used in parent block - removing "
2093 list->flags &= ~(LIGHTREC_NO_LO | LIGHTREC_NO_HI);
2096 if (reg_lo > 0 && reg_lo != REG_LO) {
2097 pr_debug("Found register %s to hold LO (rs = %u, rt = %u)\n",
2098 lightrec_reg_name(reg_lo), list->r.rs, list->r.rt);
2100 lightrec_replace_lo_hi(block, i + 1, block->nb_ops, true);
2101 list->r.rd = reg_lo;
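/* Illustrative (hypothetical registers): "mult $a0, $a1; ...; mflo $v0"
 * becomes a MULT whose LO result is written straight to $v0, and
 * lightrec_replace_lo_hi() drops the now-redundant MFLO. */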
2106 if (reg_hi > 0 && reg_hi != REG_HI) {
2107 pr_debug("Found register %s to hold HI (rs = %u, rt = %u)\n",
2108 lightrec_reg_name(reg_hi), list->r.rs, list->r.rt);
2110 lightrec_replace_lo_hi(block, i + 1, block->nb_ops, false);
2111 list->r.imm = reg_hi;
2120 static bool remove_div_sequence(struct block *block, unsigned int offset)
2123 unsigned int i, found = 0;
2126 * Scan for the zero-checking sequence that GCC automatically introduced
2127 * after most DIV/DIVU opcodes. This sequence checks the value of the
2128 * divisor, and if zero, executes a BREAK opcode, causing the BIOS
2129 * handler to crash the PS1.
2131 * For DIV opcodes, this sequence additionally checks that the signed
2132 * operation does not overflow.
2134 * With the assumption that the games never crashed the PS1, we can
2135 * therefore assume that the games never divided by zero or overflowed,
2136 * and these sequences can be removed.
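*
* Roughly, reconstructed from the opcode patterns matched below (register
* names are illustrative), the zero check looks like:
*   bne   divisor, $zero, 1f
*   nop
*   break 0x1c00
* 1:
* and the signed DIV variant follows it with the overflow check matched
* below (li at, -1 / bne / lui at, 0x8000 / bne / nop / break 0x1800).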
2139 for (i = offset; i < block->nb_ops; i++) {
2140 op = &block->opcode_list[i];
2143 if (op->i.op == OP_SPECIAL &&
2144 (op->r.op == OP_SPECIAL_DIV || op->r.op == OP_SPECIAL_DIVU))
2147 if ((op->opcode & 0xfc1fffff) == 0x14000002) {
2148 /* BNE ???, zero, +8 */
2153 } else if (found == 1 && !op->opcode) {
2156 } else if (found == 2 && op->opcode == 0x0007000d) {
2159 } else if (found == 3 && op->opcode == 0x2401ffff) {
2162 } else if (found == 4 && (op->opcode & 0xfc1fffff) == 0x14010004) {
2163 /* BNE ???, at, +16 */
2165 } else if (found == 5 && op->opcode == 0x3c018000) {
2166 /* LUI at, 0x8000 */
2168 } else if (found == 6 && (op->opcode & 0xfc1fffff) == 0x14010002) {
2169 /* BNE ???, at, +8 */
2171 } else if (found == 7 && !op->opcode) {
2174 } else if (found == 8 && op->opcode == 0x0006000d) {
2187 pr_debug("Removing DIV%s sequence at offset 0x%x\n",
2188 found == 9 ? "" : "U", offset << 2);
2190 for (i = 0; i < found; i++)
2191 block->opcode_list[offset + i].opcode = 0;
2199 static int lightrec_remove_div_by_zero_check_sequence(struct lightrec_state *state,
2200 struct block *block)
2205 for (i = 0; i < block->nb_ops; i++) {
2206 op = &block->opcode_list[i];
2208 if (op->i.op == OP_SPECIAL &&
2209 (op->r.op == OP_SPECIAL_DIVU || op->r.op == OP_SPECIAL_DIV) &&
2210 remove_div_sequence(block, i + 1))
2211 op->flags |= LIGHTREC_NO_DIV_CHECK;
2217 static const u32 memset_code[] = {
2218 0x10a00006, // beqz a1, 2f
2219 0x24a2ffff, // addiu v0,a1,-1
2220 0x2403ffff, // li v1,-1
2221 0xac800000, // 1: sw zero,0(a0)
2222 0x2442ffff, // addiu v0,v0,-1
2223 0x1443fffd, // bne v0,v1, 1b
2224 0x24840004, // addiu a0,a0,4
2225 0x03e00008, // 2: jr ra
2229 static int lightrec_replace_memset(struct lightrec_state *state, struct block *block)
2234 for (i = 0; i < block->nb_ops; i++) {
2235 c = block->opcode_list[i].c;
2237 if (c.opcode != memset_code[i])
2240 if (i == ARRAY_SIZE(memset_code) - 1) {
2242 pr_debug("Block at PC 0x%x is a memset\n", block->pc);
2243 block_set_flags(block,
2244 BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE);
2246 /* Return non-zero to skip other optimizers. */
2254 static int lightrec_test_preload_pc(struct lightrec_state *state, struct block *block)
2260 for (i = 0; i < block->nb_ops; i++) {
2261 c = block->opcode_list[i].c;
2262 flags = block->opcode_list[i].flags;
2264 if (op_flag_sync(flags))
2270 block->flags |= BLOCK_PRELOAD_PC;
2275 case OP_REGIMM_BLTZAL:
2276 case OP_REGIMM_BGEZAL:
2277 block->flags |= BLOCK_PRELOAD_PC;
2287 if (!op_flag_local_branch(flags)) {
2288 block->flags |= BLOCK_PRELOAD_PC;
2294 case OP_SPECIAL_JALR:
2296 block->flags |= BLOCK_PRELOAD_PC;
2300 case OP_SPECIAL_SYSCALL:
2301 case OP_SPECIAL_BREAK:
2302 block->flags |= BLOCK_PRELOAD_PC;
2314 static int (*lightrec_optimizers[])(struct lightrec_state *state, struct block *) = {
2315 IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence),
2316 IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset),
2317 IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches),
2318 IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_handle_load_delays),
2319 IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_swap_load_delays),
2320 IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches),
2321 IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches),
2322 IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops),
2323 IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),
2324 IF_OPT(OPT_FLAG_IO, &lightrec_flag_io),
2325 IF_OPT(OPT_FLAG_MULT_DIV, &lightrec_flag_mults_divs),
2326 IF_OPT(OPT_EARLY_UNLOAD, &lightrec_early_unload),
2327 IF_OPT(OPT_PRELOAD_PC, &lightrec_test_preload_pc),
2330 int lightrec_optimize(struct lightrec_state *state, struct block *block)
2335 for (i = 0; i < ARRAY_SIZE(lightrec_optimizers); i++) {
2336 if (lightrec_optimizers[i]) {
2337 ret = (*lightrec_optimizers[i])(state, block);