* Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
*/
+#include "constprop.h"
#include "lightrec-config.h"
#include "disassembler.h"
#include "lightrec.h"
case OP_SW:
case OP_SWR:
return BIT(op.i.rs) | BIT(op.i.rt);
+ case OP_META:
+ return BIT(op.m.rs);
default:
return BIT(op.i.rs);
}
return flags;
}
-static u64 opcode_write_mask(union code op)
+u64 opcode_write_mask(union code op)
{
switch (op.i.op) {
case OP_META_MULT2:
case OP_META_MULTU2:
return mult_div_write_mask(op);
+ case OP_META:
+ return BIT(op.m.rd);
case OP_SPECIAL:
switch (op.r.op) {
case OP_SPECIAL_JR:
default:
return 0;
}
- case OP_META_MOV:
- return BIT(op.r.rd);
default:
return 0;
}
for (i = offset; ; i++) {
c = list[i].c;
- if (opcode_reads_register(c, reg)) {
- if (i > 0 && has_delay_slot(list[i - 1].c))
- break;
-
+ if (opcode_reads_register(c, reg))
return i;
- }
- if (op_flag_sync(list[i].flags) ||
- has_delay_slot(c) || opcode_writes_register(c, reg))
+ if (op_flag_sync(list[i].flags)
+ || (op_flag_no_ds(list[i].flags) && has_delay_slot(c))
+ || is_delay_slot(list, i)
+ || opcode_writes_register(c, reg))
break;
}
{
unsigned int i;
- if (op_flag_sync(list[offset].flags))
+ if (op_flag_sync(list[offset].flags) || is_delay_slot(list, offset))
return false;
for (i = offset + 1; ; i++) {
return reg_is_read(list, a, b, reg) || reg_is_written(list, a, b, reg);
}
-static bool opcode_is_load(union code op)
+bool opcode_is_mfc(union code op)
+{
+ switch (op.i.op) {
+ case OP_CP0:
+ switch (op.r.rs) {
+ case OP_CP0_MFC0:
+ case OP_CP0_CFC0:
+ return true;
+ default:
+ break;
+ }
+
+ break;
+ case OP_CP2:
+ if (op.r.op == OP_CP2_BASIC) {
+ switch (op.r.rs) {
+ case OP_CP2_BASIC_MFC2:
+ case OP_CP2_BASIC_CFC2:
+ return true;
+ default:
+ break;
+ }
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+bool opcode_is_load(union code op)
{
switch (op.i.op) {
case OP_LB:
}
}
-bool load_in_delay_slot(union code op)
+static void lightrec_optimize_sll_sra(struct opcode *list, unsigned int offset,
+ struct constprop_data *v)
{
- switch (op.i.op) {
- case OP_CP0:
- switch (op.r.rs) {
- case OP_CP0_MFC0:
- case OP_CP0_CFC0:
- return true;
- default:
- break;
- }
-
- break;
- case OP_CP2:
- if (op.r.op == OP_CP2_BASIC) {
- switch (op.r.rs) {
- case OP_CP2_BASIC_MFC2:
- case OP_CP2_BASIC_CFC2:
- return true;
- default:
- break;
- }
- }
-
- break;
- case OP_LB:
- case OP_LH:
- case OP_LW:
- case OP_LWL:
- case OP_LWR:
- case OP_LBU:
- case OP_LHU:
- return true;
- default:
- break;
- }
-
- return false;
-}
-
-static u32 lightrec_propagate_consts(const struct opcode *op,
- const struct opcode *prev,
- u32 known, u32 *v)
-{
- union code c = prev->c;
-
- /* Register $zero is always, well, zero */
- known |= BIT(0);
- v[0] = 0;
-
- if (op_flag_sync(op->flags))
- return BIT(0);
-
- switch (c.i.op) {
- case OP_SPECIAL:
- switch (c.r.op) {
- case OP_SPECIAL_SLL:
- if (known & BIT(c.r.rt)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rt] << c.r.imm;
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_SRL:
- if (known & BIT(c.r.rt)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rt] >> c.r.imm;
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_SRA:
- if (known & BIT(c.r.rt)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = (s32)v[c.r.rt] >> c.r.imm;
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_SLLV:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rt] << (v[c.r.rs] & 0x1f);
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_SRLV:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rt] >> (v[c.r.rs] & 0x1f);
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_SRAV:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = (s32)v[c.r.rt]
- >> (v[c.r.rs] & 0x1f);
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_ADD:
- case OP_SPECIAL_ADDU:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = (s32)v[c.r.rt] + (s32)v[c.r.rs];
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_SUB:
- case OP_SPECIAL_SUBU:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rt] - v[c.r.rs];
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_AND:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rt] & v[c.r.rs];
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_OR:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rt] | v[c.r.rs];
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_XOR:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rt] ^ v[c.r.rs];
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_NOR:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = ~(v[c.r.rt] | v[c.r.rs]);
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_SLT:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = (s32)v[c.r.rs] < (s32)v[c.r.rt];
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_SLTU:
- if (known & BIT(c.r.rt) && known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rs] < v[c.r.rt];
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- case OP_SPECIAL_MULT:
- case OP_SPECIAL_MULTU:
- case OP_SPECIAL_DIV:
- case OP_SPECIAL_DIVU:
- if (OPT_FLAG_MULT_DIV && c.r.rd)
- known &= ~BIT(c.r.rd);
- if (OPT_FLAG_MULT_DIV && c.r.imm)
- known &= ~BIT(c.r.imm);
- break;
- default:
- break;
- }
- break;
- case OP_META_MULT2:
- case OP_META_MULTU2:
- if (OPT_FLAG_MULT_DIV && (known & BIT(c.r.rs))) {
- if (c.r.rd) {
- known |= BIT(c.r.rd);
-
- if (c.r.op < 32)
- v[c.r.rd] = v[c.r.rs] << c.r.op;
- else
- v[c.r.rd] = 0;
- }
-
- if (c.r.imm) {
- known |= BIT(c.r.imm);
-
- if (c.r.op >= 32)
- v[c.r.imm] = v[c.r.rs] << (c.r.op - 32);
- else if (c.i.op == OP_META_MULT2)
- v[c.r.imm] = (s32) v[c.r.rs] >> (32 - c.r.op);
- else
- v[c.r.imm] = v[c.r.rs] >> (32 - c.r.op);
- }
- } else {
- if (OPT_FLAG_MULT_DIV && c.r.rd)
- known &= ~BIT(c.r.rd);
- if (OPT_FLAG_MULT_DIV && c.r.imm)
- known &= ~BIT(c.r.imm);
- }
- break;
- case OP_REGIMM:
- break;
- case OP_ADDI:
- case OP_ADDIU:
- if (known & BIT(c.i.rs)) {
- known |= BIT(c.i.rt);
- v[c.i.rt] = v[c.i.rs] + (s32)(s16)c.i.imm;
- } else {
- known &= ~BIT(c.i.rt);
- }
- break;
- case OP_SLTI:
- if (known & BIT(c.i.rs)) {
- known |= BIT(c.i.rt);
- v[c.i.rt] = (s32)v[c.i.rs] < (s32)(s16)c.i.imm;
- } else {
- known &= ~BIT(c.i.rt);
- }
- break;
- case OP_SLTIU:
- if (known & BIT(c.i.rs)) {
- known |= BIT(c.i.rt);
- v[c.i.rt] = v[c.i.rs] < (u32)(s32)(s16)c.i.imm;
- } else {
- known &= ~BIT(c.i.rt);
- }
- break;
- case OP_ANDI:
- if (known & BIT(c.i.rs)) {
- known |= BIT(c.i.rt);
- v[c.i.rt] = v[c.i.rs] & c.i.imm;
- } else {
- known &= ~BIT(c.i.rt);
- }
- break;
- case OP_ORI:
- if (known & BIT(c.i.rs)) {
- known |= BIT(c.i.rt);
- v[c.i.rt] = v[c.i.rs] | c.i.imm;
- } else {
- known &= ~BIT(c.i.rt);
- }
- break;
- case OP_XORI:
- if (known & BIT(c.i.rs)) {
- known |= BIT(c.i.rt);
- v[c.i.rt] = v[c.i.rs] ^ c.i.imm;
- } else {
- known &= ~BIT(c.i.rt);
- }
- break;
- case OP_LUI:
- known |= BIT(c.i.rt);
- v[c.i.rt] = c.i.imm << 16;
- break;
- case OP_CP0:
- switch (c.r.rs) {
- case OP_CP0_MFC0:
- case OP_CP0_CFC0:
- known &= ~BIT(c.r.rt);
- break;
- }
- break;
- case OP_CP2:
- if (c.r.op == OP_CP2_BASIC) {
- switch (c.r.rs) {
- case OP_CP2_BASIC_MFC2:
- case OP_CP2_BASIC_CFC2:
- known &= ~BIT(c.r.rt);
- break;
- }
- }
- break;
- case OP_LB:
- case OP_LH:
- case OP_LWL:
- case OP_LW:
- case OP_LBU:
- case OP_LHU:
- case OP_LWR:
- case OP_LWC2:
- known &= ~BIT(c.i.rt);
- break;
- case OP_META_MOV:
- if (known & BIT(c.r.rs)) {
- known |= BIT(c.r.rd);
- v[c.r.rd] = v[c.r.rs];
- } else {
- known &= ~BIT(c.r.rd);
- }
- break;
- default:
- break;
- }
-
- return known;
-}
-
-static void lightrec_optimize_sll_sra(struct opcode *list, unsigned int offset)
-{
- struct opcode *prev, *prev2 = NULL, *curr = &list[offset];
+ struct opcode *ldop = NULL, *curr = &list[offset], *next;
struct opcode *to_change, *to_nop;
int idx, idx2;
if (curr->r.imm != 24 && curr->r.imm != 16)
return;
- idx = find_prev_writer(list, offset, curr->r.rt);
+ if (is_delay_slot(list, offset))
+ return;
+
+ idx = find_next_reader(list, offset + 1, curr->r.rd);
if (idx < 0)
return;
- prev = &list[idx];
+ next = &list[idx];
- if (prev->i.op != OP_SPECIAL || prev->r.op != OP_SPECIAL_SLL ||
- prev->r.imm != curr->r.imm || prev->r.rd != curr->r.rt)
+ if (next->i.op != OP_SPECIAL || next->r.op != OP_SPECIAL_SRA ||
+ next->r.imm != curr->r.imm || next->r.rt != curr->r.rd)
return;
- if (prev->r.rd != prev->r.rt && curr->r.rd != curr->r.rt) {
+ if (curr->r.rd != curr->r.rt && next->r.rd != next->r.rt) {
/* sll rY, rX, 16
* ...
- * srl rZ, rY, 16 */
+ * sra rZ, rY, 16 */
- if (!reg_is_dead(list, offset, curr->r.rt) ||
- reg_is_read_or_written(list, idx, offset, curr->r.rd))
+ if (!reg_is_dead(list, idx, curr->r.rd) ||
+ reg_is_read_or_written(list, offset, idx, next->r.rd))
return;
/* If rY is dead after the SRA, and rZ is not used after the SLL,
 * we can change rY to rZ */
pr_debug("Detected SLL/SRA with middle temp register\n");
- prev->r.rd = curr->r.rd;
- curr->r.rt = prev->r.rd;
+ curr->r.rd = next->r.rd;
+ next->r.rt = curr->r.rd;
}
- /* We got a SLL/SRA combo. If imm #16, that's a cast to u16.
- * If imm #24 that's a cast to u8.
+ /* We got a SLL/SRA combo. If imm #16, that's a cast to s16.
+ * If imm #24 that's a cast to s8.
*
* First of all, make sure that the target register of the SLL is not
- * read before the SRA. */
+ * read after the SRA. */
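+
+ /* e.g. the sequence:
+ *   sll $v0, $a0, 24
+ *   sra $v0, $v0, 24
+ * sign-extends the low byte of $a0 into $v0. */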
- if (prev->r.rd == prev->r.rt) {
+ if (curr->r.rd == curr->r.rt) {
/* sll rX, rX, 16
* ...
- * srl rY, rX, 16 */
- to_change = curr;
- to_nop = prev;
+ * sra rY, rX, 16 */
+ to_change = next;
+ to_nop = curr;
/* rX is used after the SRA - we cannot convert it. */
- if (prev->r.rd != curr->r.rd && !reg_is_dead(list, offset, prev->r.rd))
+ if (curr->r.rd != next->r.rd && !reg_is_dead(list, idx, curr->r.rd))
return;
} else {
/* sll rY, rX, 16
* ...
- * srl rY, rY, 16 */
- to_change = prev;
- to_nop = curr;
+ * sra rY, rY, 16 */
+ to_change = curr;
+ to_nop = next;
}
- idx2 = find_prev_writer(list, idx, prev->r.rt);
+ idx2 = find_prev_writer(list, offset, curr->r.rt);
if (idx2 >= 0) {
/* Note that PSX games sometimes do casts after
* a LHU or LBU; in this case we can change the
* load opcode to a LH or LB, and the cast can
* be changed to a MOV or a simple NOP. */
- prev2 = &list[idx2];
+ ldop = &list[idx2];
- if (curr->r.rd != prev2->i.rt &&
- !reg_is_dead(list, offset, prev2->i.rt))
- prev2 = NULL;
- else if (curr->r.imm == 16 && prev2->i.op == OP_LHU)
- prev2->i.op = OP_LH;
- else if (curr->r.imm == 24 && prev2->i.op == OP_LBU)
- prev2->i.op = OP_LB;
+ if (next->r.rd != ldop->i.rt &&
+ !reg_is_dead(list, idx, ldop->i.rt))
+ ldop = NULL;
+ else if (curr->r.imm == 16 && ldop->i.op == OP_LHU)
+ ldop->i.op = OP_LH;
+ else if (curr->r.imm == 24 && ldop->i.op == OP_LBU)
+ ldop->i.op = OP_LB;
else
- prev2 = NULL;
+ ldop = NULL;
- if (prev2) {
- if (curr->r.rd == prev2->i.rt) {
+ if (ldop) {
+ if (next->r.rd == ldop->i.rt) {
to_change->opcode = 0;
- } else if (reg_is_dead(list, offset, prev2->i.rt) &&
- !reg_is_read_or_written(list, idx2 + 1, offset, curr->r.rd)) {
+ } else if (reg_is_dead(list, idx, ldop->i.rt) &&
+ !reg_is_read_or_written(list, idx2 + 1, idx, next->r.rd)) {
/* The target register of the SRA is dead after the
* LBU/LHU; we can change the target register of the
* LBU/LHU to the one of the SRA. */
- prev2->i.rt = curr->r.rd;
+ v[ldop->i.rt].known = 0;
+ v[ldop->i.rt].sign = 0;
+ ldop->i.rt = next->r.rd;
to_change->opcode = 0;
} else {
- to_change->i.op = OP_META_MOV;
- to_change->r.rd = curr->r.rd;
- to_change->r.rs = prev2->i.rt;
+ to_change->i.op = OP_META;
+ to_change->m.op = OP_META_MOV;
+ to_change->m.rd = next->r.rd;
+ to_change->m.rs = ldop->i.rt;
}
if (to_nop->r.imm == 24)
pr_debug("Convert LBU+SLL+SRA to LB\n");
else
pr_debug("Convert LHU+SLL+SRA to LH\n");
+
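+ /* The load now sign-extends, so every bit above the new sign bit
+ * mirrors it; record this in the constprop data (sign mask
+ * 0xffffff80 for the s8 cast, 0xffff8000 for the s16 cast). */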
+ v[ldop->i.rt].known = 0;
+ v[ldop->i.rt].sign = 0xffffff80 << (24 - curr->r.imm);
}
}
- if (!prev2) {
+ if (!ldop) {
pr_debug("Convert SLL/SRA #%u to EXT%c\n",
- prev->r.imm,
- prev->r.imm == 24 ? 'C' : 'S');
+ curr->r.imm, curr->r.imm == 24 ? 'C' : 'S');
- if (to_change == prev) {
- to_change->i.rs = prev->r.rt;
- to_change->i.rt = curr->r.rd;
- } else {
- to_change->i.rt = curr->r.rd;
- to_change->i.rs = prev->r.rt;
- }
-
- if (to_nop->r.imm == 24)
- to_change->i.op = OP_META_EXTC;
- else
- to_change->i.op = OP_META_EXTS;
+ to_change->m.rs = curr->r.rt;
+ to_change->m.op = to_nop->r.imm == 24 ? OP_META_EXTC : OP_META_EXTS;
+ to_change->i.op = OP_META;
}
to_nop->opcode = 0;
}
-static void lightrec_remove_useless_lui(struct block *block, unsigned int offset,
- u32 known, u32 *values)
+static void
+lightrec_remove_useless_lui(struct block *block, unsigned int offset,
+ const struct constprop_data *v)
{
struct opcode *list = block->opcode_list,
*op = &block->opcode_list[offset];
int reader;
- if (!op_flag_sync(op->flags) && (known & BIT(op->i.rt)) &&
- values[op->i.rt] == op->i.imm << 16) {
+ if (!op_flag_sync(op->flags) && is_known(v, op->i.rt) &&
+ v[op->i.rt].value == op->i.imm << 16) {
pr_debug("Converting duplicated LUI to NOP\n");
op->opcode = 0x0;
return;
}
- if (op->i.imm != 0 || op->i.rt == 0)
+ if (op->i.imm != 0 || op->i.rt == 0 || offset == block->nb_ops - 1)
return;
reader = find_next_reader(list, offset + 1, op->i.rt);
break;
if (opcode_writes_register(c, lui->i.rt)) {
+ if (c.i.op == OP_LWL || c.i.op == OP_LWR) {
+ /* LWL/LWR only partially overwrite their target register,
+ * so the value written by the LUI is still used;
+ * the LUI must be left untouched. */
+ break;
+ }
+
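+ /* e.g. LUI $t0, 0x8001 (a KSEG0 address) becomes
+ * LUI $t0, 0x0001, its kunseg equivalent. */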
pr_debug("Convert LUI at offset 0x%x to kuseg\n",
(i - 1) << 2);
lui->i.imm = kunseg(lui->i.imm << 16) >> 16;
return popcount32(value) == 1;
}
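+
+/* If constant propagation proved that a source register holds zero, rewrite
+ * the operand to use $zero directly; later passes then see the cheaper,
+ * canonical form. */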
+static void lightrec_patch_known_zero(struct opcode *op,
+ const struct constprop_data *v)
+{
+ switch (op->i.op) {
+ case OP_SPECIAL:
+ switch (op->r.op) {
+ case OP_SPECIAL_JR:
+ case OP_SPECIAL_JALR:
+ case OP_SPECIAL_MTHI:
+ case OP_SPECIAL_MTLO:
+ if (is_known_zero(v, op->r.rs))
+ op->r.rs = 0;
+ break;
+ default:
+ if (is_known_zero(v, op->r.rs))
+ op->r.rs = 0;
+ fallthrough;
+ case OP_SPECIAL_SLL:
+ case OP_SPECIAL_SRL:
+ case OP_SPECIAL_SRA:
+ if (is_known_zero(v, op->r.rt))
+ op->r.rt = 0;
+ break;
+ case OP_SPECIAL_SYSCALL:
+ case OP_SPECIAL_BREAK:
+ case OP_SPECIAL_MFHI:
+ case OP_SPECIAL_MFLO:
+ break;
+ }
+ break;
+ case OP_CP0:
+ switch (op->r.rs) {
+ case OP_CP0_MTC0:
+ case OP_CP0_CTC0:
+ if (is_known_zero(v, op->r.rt))
+ op->r.rt = 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case OP_CP2:
+ if (op->r.op == OP_CP2_BASIC) {
+ switch (op->r.rs) {
+ case OP_CP2_BASIC_MTC2:
+ case OP_CP2_BASIC_CTC2:
+ if (is_known_zero(v, op->r.rt))
+ op->r.rt = 0;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case OP_BEQ:
+ case OP_BNE:
+ if (is_known_zero(v, op->i.rt))
+ op->i.rt = 0;
+ fallthrough;
+ case OP_REGIMM:
+ case OP_BLEZ:
+ case OP_BGTZ:
+ case OP_ADDI:
+ case OP_ADDIU:
+ case OP_SLTI:
+ case OP_SLTIU:
+ case OP_ANDI:
+ case OP_ORI:
+ case OP_XORI:
+ case OP_META_MULT2:
+ case OP_META_MULTU2:
+ case OP_META:
+ if (is_known_zero(v, op->m.rs))
+ op->m.rs = 0;
+ break;
+ case OP_SB:
+ case OP_SH:
+ case OP_SWL:
+ case OP_SW:
+ case OP_SWR:
+ if (is_known_zero(v, op->i.rt))
+ op->i.rt = 0;
+ fallthrough;
+ case OP_LB:
+ case OP_LH:
+ case OP_LWL:
+ case OP_LW:
+ case OP_LBU:
+ case OP_LHU:
+ case OP_LWR:
+ case OP_LWC2:
+ case OP_SWC2:
+ if (is_known(v, op->i.rs)
+ && kunseg(v[op->i.rs].value) == 0)
+ op->i.rs = 0;
+ break;
+ default:
+ break;
+ }
+}
+
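+/* Recompute the SYNC flags from scratch: clear them all, then re-flag the
+ * targets of the remaining local branches, and the return points of branches
+ * that must be emulated. Called again whenever a pass NOPs a branch out. */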
+static void lightrec_reset_syncs(struct block *block)
+{
+ struct opcode *op, *list = block->opcode_list;
+ unsigned int i;
+ s32 offset;
+
+ for (i = 0; i < block->nb_ops; i++)
+ list[i].flags &= ~LIGHTREC_SYNC;
+
+ for (i = 0; i < block->nb_ops; i++) {
+ op = &list[i];
+
+ if (has_delay_slot(op->c)) {
+ if (op_flag_local_branch(op->flags)) {
+ offset = i + 1 - op_flag_no_ds(op->flags) + (s16)op->i.imm;
+ list[offset].flags |= LIGHTREC_SYNC;
+ }
+
+ if (op_flag_emulate_branch(op->flags) && i + 2 < block->nb_ops)
+ list[i + 2].flags |= LIGHTREC_SYNC;
+ }
+ }
+}
+
static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
{
- struct opcode *list = block->opcode_list;
- struct opcode *prev, *op = NULL;
- u32 known = BIT(0);
- u32 values[32] = { 0 };
+ struct opcode *op, *list = block->opcode_list;
+ struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
unsigned int i;
+ bool local;
u8 tmp;
for (i = 0; i < block->nb_ops; i++) {
- prev = op;
op = &list[i];
- if (prev)
- known = lightrec_propagate_consts(op, prev, known, values);
+ lightrec_consts_propagate(block, i, v);
+
+ lightrec_patch_known_zero(op, v);
/* Transform all opcodes detected as useless to real NOPs
* (0x0: SLL r0, r0, #0) */
switch (op->i.op) {
case OP_BEQ:
- if (op->i.rs == op->i.rt) {
+ if (op->i.rs == op->i.rt ||
+ (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
+ v[op->i.rs].value == v[op->i.rt].value)) {
+ if (op->i.rs != op->i.rt)
+ pr_debug("Found always-taken BEQ\n");
+
op->i.rs = 0;
op->i.rt = 0;
+ } else if (v[op->i.rs].known & v[op->i.rt].known &
+ (v[op->i.rs].value ^ v[op->i.rt].value)) {
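+ /* At least one bit is known in both rs and rt and differs,
+ * so the two registers can never be equal. */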
+ pr_debug("Found never-taken BEQ\n");
+
+ local = op_flag_local_branch(op->flags);
+ op->opcode = 0;
+ op->flags = 0;
+
+ if (local)
+ lightrec_reset_syncs(block);
} else if (op->i.rs == 0) {
op->i.rs = op->i.rt;
op->i.rt = 0;
break;
case OP_BNE:
- if (op->i.rs == 0) {
+ if (v[op->i.rs].known & v[op->i.rt].known &
+ (v[op->i.rs].value ^ v[op->i.rt].value)) {
+ pr_debug("Found always-taken BNE\n");
+
+ op->i.op = OP_BEQ;
+ op->i.rs = 0;
+ op->i.rt = 0;
+ } else if (is_known(v, op->i.rs) && is_known(v, op->i.rt) &&
+ v[op->i.rs].value == v[op->i.rt].value) {
+ pr_debug("Found never-taken BNE\n");
+
+ local = op_flag_local_branch(op->flags);
+ op->opcode = 0;
+ op->flags = 0;
+
+ if (local)
+ lightrec_reset_syncs(block);
+ } else if (op->i.rs == 0) {
op->i.rs = op->i.rt;
op->i.rt = 0;
}
break;
+ case OP_BLEZ:
+ if (v[op->i.rs].known & BIT(31) &&
+ v[op->i.rs].value & BIT(31)) {
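+ /* The sign bit of rs is known to be set: rs is negative,
+ * so rs <= 0 always holds. */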
+ pr_debug("Found always-taken BLEZ\n");
+
+ op->i.op = OP_BEQ;
+ op->i.rs = 0;
+ op->i.rt = 0;
+ }
+ break;
+
+ case OP_BGTZ:
+ if (v[op->i.rs].known & BIT(31) &&
+ v[op->i.rs].value & BIT(31)) {
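+ /* rs is known to be negative, so rs > 0 never holds. */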
+ pr_debug("Found never-taken BGTZ\n");
+
+ local = op_flag_local_branch(op->flags);
+ op->opcode = 0;
+ op->flags = 0;
+
+ if (local)
+ lightrec_reset_syncs(block);
+ }
+ break;
+
case OP_LUI:
- if (!prev || !has_delay_slot(prev->c))
+ if (i == 0 || !has_delay_slot(list[i - 1].c))
lightrec_modify_lui(block, i);
- lightrec_remove_useless_lui(block, i, known, values);
+ lightrec_remove_useless_lui(block, i, v);
break;
/* Transform ORI/ADDI/ADDIU with imm #0 or ORR/ADD/ADDU/SUB/SUBU
case OP_ADDIU:
if (op->i.imm == 0) {
pr_debug("Convert ORI/ADDI/ADDIU #0 to MOV\n");
- op->i.op = OP_META_MOV;
- op->r.rd = op->i.rt;
+ op->m.rd = op->i.rt;
+ op->m.op = OP_META_MOV;
+ op->i.op = OP_META;
+ }
+ break;
+ case OP_ANDI:
+ if (bits_are_known_zero(v, op->i.rs, ~op->i.imm)) {
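+ /* Every bit the immediate would clear is already known
+ * to be zero in rs, so the AND cannot change the value. */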
+ pr_debug("Found useless ANDI 0x%x\n", op->i.imm);
+
+ if (op->i.rs == op->i.rt) {
+ op->opcode = 0;
+ } else {
+ op->m.rd = op->i.rt;
+ op->m.op = OP_META_MOV;
+ op->i.op = OP_META;
+ }
+ }
+ break;
+ case OP_REGIMM:
+ switch (op->r.rt) {
+ case OP_REGIMM_BLTZ:
+ case OP_REGIMM_BGEZ:
+ if (!(v[op->r.rs].known & BIT(31)))
+ break;
+
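+ /* The sign of rs is known: BLTZ is always taken when rs is
+ * negative, BGEZ when it is non-negative; otherwise the
+ * branch is never taken. */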
+ if (!!(v[op->r.rs].value & BIT(31))
+ ^ (op->r.rt == OP_REGIMM_BGEZ)) {
+ pr_debug("Found always-taken BLTZ/BGEZ\n");
+ op->i.op = OP_BEQ;
+ op->i.rs = 0;
+ op->i.rt = 0;
+ } else {
+ pr_debug("Found never-taken BLTZ/BGEZ\n");
+
+ local = op_flag_local_branch(op->flags);
+ op->opcode = 0;
+ op->flags = 0;
+
+ if (local)
+ lightrec_reset_syncs(block);
+ }
+ break;
+ case OP_REGIMM_BLTZAL:
+ case OP_REGIMM_BGEZAL:
+ /* TODO: Detect always-taken and replace with JAL */
+ break;
}
break;
case OP_SPECIAL:
switch (op->r.op) {
+ case OP_SPECIAL_SRAV:
+ if ((v[op->r.rs].known & 0x1f) != 0x1f)
+ break;
+
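+ /* All five bits of the shift amount in rs are known,
+ * so the variable shift folds to an immediate shift. */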
+ pr_debug("Convert SRAV to SRA\n");
+ op->r.imm = v[op->r.rs].value & 0x1f;
+ op->r.op = OP_SPECIAL_SRA;
+
+ fallthrough;
case OP_SPECIAL_SRA:
if (op->r.imm == 0) {
pr_debug("Convert SRA #0 to MOV\n");
- op->i.op = OP_META_MOV;
- op->r.rs = op->r.rt;
+ op->m.rs = op->r.rt;
+ op->m.op = OP_META_MOV;
+ op->i.op = OP_META;
break;
}
-
- lightrec_optimize_sll_sra(block->opcode_list, i);
break;
+
+ case OP_SPECIAL_SLLV:
+ if ((v[op->r.rs].known & 0x1f) != 0x1f)
+ break;
+
+ pr_debug("Convert SLLV to SLL\n");
+ op->r.imm = v[op->r.rs].value & 0x1f;
+ op->r.op = OP_SPECIAL_SLL;
+
+ fallthrough;
case OP_SPECIAL_SLL:
+ if (op->r.imm == 0) {
+ pr_debug("Convert SLL #0 to MOV\n");
+ op->m.rs = op->r.rt;
+ op->m.op = OP_META_MOV;
+ op->i.op = OP_META;
+ }
+
+ lightrec_optimize_sll_sra(block->opcode_list, i, v);
+ break;
+
+ case OP_SPECIAL_SRLV:
+ if ((v[op->r.rs].known & 0x1f) != 0x1f)
+ break;
+
+ pr_debug("Convert SRLV to SRL\n");
+ op->r.imm = v[op->r.rs].value & 0x1f;
+ op->r.op = OP_SPECIAL_SRL;
+
+ fallthrough;
case OP_SPECIAL_SRL:
if (op->r.imm == 0) {
- pr_debug("Convert SLL/SRL #0 to MOV\n");
- op->i.op = OP_META_MOV;
- op->r.rs = op->r.rt;
+ pr_debug("Convert SRL #0 to MOV\n");
+ op->m.rs = op->r.rt;
+ op->m.op = OP_META_MOV;
+ op->i.op = OP_META;
}
break;
+
case OP_SPECIAL_MULT:
case OP_SPECIAL_MULTU:
- if ((known & BIT(op->r.rs)) &&
- is_power_of_two(values[op->r.rs])) {
+ if (is_known(v, op->r.rs) &&
+ is_power_of_two(v[op->r.rs].value)) {
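+ /* rs holds a known power of two: swap the operands
+ * so the constant lands in rt, handled below. */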
tmp = op->c.i.rs;
op->c.i.rs = op->c.i.rt;
op->c.i.rt = tmp;
- } else if (!(known & BIT(op->r.rt)) ||
- !is_power_of_two(values[op->r.rt])) {
+ } else if (!is_known(v, op->r.rt) ||
+ !is_power_of_two(v[op->r.rt].value)) {
break;
}
pr_debug("Multiply by power-of-two: %u\n",
- values[op->r.rt]);
+ v[op->r.rt].value);
if (op->r.op == OP_SPECIAL_MULT)
op->i.op = OP_META_MULT2;
else
op->i.op = OP_META_MULTU2;
- op->r.op = ffs32(values[op->r.rt]);
+ op->r.op = ctz32(v[op->r.rt].value);
+ break;
+ case OP_SPECIAL_NOR:
+ if (op->r.rs == 0 || op->r.rt == 0) {
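+ /* NOR with $zero as one operand is a plain
+ * bitwise complement of the other operand. */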
+ pr_debug("Convert NOR $zero to COM\n");
+ op->i.op = OP_META;
+ op->m.op = OP_META_COM;
+ if (!op->m.rs)
+ op->m.rs = op->r.rt;
+ }
break;
case OP_SPECIAL_OR:
case OP_SPECIAL_ADD:
case OP_SPECIAL_ADDU:
if (op->r.rs == 0) {
pr_debug("Convert OR/ADD $zero to MOV\n");
- op->i.op = OP_META_MOV;
- op->r.rs = op->r.rt;
+ op->m.rs = op->r.rt;
+ op->m.op = OP_META_MOV;
+ op->i.op = OP_META;
}
fallthrough;
case OP_SPECIAL_SUB:
case OP_SPECIAL_SUBU:
if (op->r.rt == 0) {
pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
- op->i.op = OP_META_MOV;
+ op->m.op = OP_META_MOV;
+ op->i.op = OP_META;
}
fallthrough;
default:
op.opcode == 0 || next_op.opcode == 0)
continue;
- if (i && has_delay_slot(block->opcode_list[i - 1].c) &&
- !op_flag_no_ds(block->opcode_list[i - 1].flags))
+ if (is_delay_slot(block->opcode_list, i))
continue;
if (op_flag_sync(next->flags))
continue;
+ if (op_flag_load_delay(next->flags) && opcode_is_load(next_op))
+ continue;
+
if (!lightrec_can_switch_delay_slot(list->c, next_op))
continue;
return 0;
}
-static int shrink_opcode_list(struct lightrec_state *state, struct block *block, u16 new_size)
-{
- struct opcode_list *list, *old_list;
-
- if (new_size >= block->nb_ops) {
- pr_err("Invalid shrink size (%u vs %u)\n",
- new_size, block->nb_ops);
- return -EINVAL;
- }
-
- list = lightrec_malloc(state, MEM_FOR_IR,
- sizeof(*list) + sizeof(struct opcode) * new_size);
- if (!list) {
- pr_err("Unable to allocate memory\n");
- return -ENOMEM;
- }
-
- old_list = container_of(block->opcode_list, struct opcode_list, ops);
- memcpy(list->ops, old_list->ops, sizeof(struct opcode) * new_size);
-
- lightrec_free_opcode_list(state, block->opcode_list);
- list->nb_ops = new_size;
- block->nb_ops = new_size;
- block->opcode_list = list->ops;
-
- pr_debug("Shrunk opcode list of block PC 0x%08x to %u opcodes\n",
- block->pc, new_size);
-
- return 0;
-}
-
static int lightrec_detect_impossible_branches(struct lightrec_state *state,
struct block *block)
{
struct opcode *op, *list = block->opcode_list, *next = &list[0];
unsigned int i;
int ret = 0;
- s16 offset;
for (i = 0; i < block->nb_ops - 1; i++) {
op = next;
next = &list[i + 1];
if (!has_delay_slot(op->c) ||
- (!load_in_delay_slot(next->c) &&
- !has_delay_slot(next->c) &&
+ (!has_delay_slot(next->c) &&
+ !opcode_is_mfc(next->c) &&
!(next->i.op == OP_CP0 && next->r.rs == OP_CP0_RFE)))
continue;
continue;
}
- offset = i + 1 + (s16)op->i.imm;
- if (load_in_delay_slot(next->c) &&
- (offset >= 0 && offset < block->nb_ops) &&
- !opcode_reads_register(list[offset].c, next->c.i.rt)) {
- /* The 'impossible' branch is a local branch - we can
- * verify here that the first opcode of the target does
- * not use the target register of the delay slot */
-
- pr_debug("Branch at offset 0x%x has load delay slot, "
- "but is local and dest opcode does not read "
- "dest register\n", i << 2);
+ op->flags |= LIGHTREC_EMULATE_BRANCH;
+
+ if (OPT_LOCAL_BRANCHES && i + 2 < block->nb_ops) {
+ /* The interpreter will only emulate the branch, then
+ * return to the compiled code. Add a SYNC after the
+ * branch + delay slot in the case where the branch
+ * was not taken. */
+ list[i + 2].flags |= LIGHTREC_SYNC;
+ }
+ }
+
+ return ret;
+}
+
+static bool is_local_branch(const struct block *block, unsigned int idx)
+{
+ const struct opcode *op = &block->opcode_list[idx];
+ s32 offset;
+
+ switch (op->c.i.op) {
+ case OP_BEQ:
+ case OP_BNE:
+ case OP_BLEZ:
+ case OP_BGTZ:
+ case OP_REGIMM:
+ offset = idx + 1 + (s16)op->c.i.imm;
+ if (offset >= 0 && offset < block->nb_ops)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
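+/* On MIPS I the value fetched by a load is not visible to the instruction
+ * that immediately follows it (the load delay slot). Flag the loads whose
+ * delayed write must really be emulated; only loads sitting in branch delay
+ * slots need it, as the next opcode then depends on where the branch goes. */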
+static int lightrec_handle_load_delays(struct lightrec_state *state,
+ struct block *block)
+{
+ struct opcode *op, *list = block->opcode_list;
+ unsigned int i;
+ s16 imm;
+
+ for (i = 0; i < block->nb_ops; i++) {
+ op = &list[i];
+
+ if (!opcode_is_load(op->c) || !op->c.i.rt || op->c.i.op == OP_LWC2)
+ continue;
+
+ if (!is_delay_slot(list, i)) {
+ /* Only handle load delays in delay slots.
+ * PSX games never abused load delay slots otherwise. */
continue;
}
- op->flags |= LIGHTREC_EMULATE_BRANCH;
+ if (is_local_branch(block, i - 1)) {
+ imm = (s16)list[i - 1].c.i.imm;
- if (op == list) {
- pr_debug("First opcode of block PC 0x%08x is an impossible branch\n",
- block->pc);
+ if (!opcode_reads_register(list[i + imm].c, op->c.i.rt)) {
+ /* The target opcode of the branch is inside
+ * the block, and it does not read the register
+ * written to by the load opcode; we can ignore
+ * the load delay. */
+ continue;
+ }
+ }
- /* If the first opcode is an 'impossible' branch, we
- * only keep the first two opcodes of the block (the
- * branch itself + its delay slot) */
- if (block->nb_ops > 2)
- ret = shrink_opcode_list(state, block, 2);
- break;
+ op->flags |= LIGHTREC_LOAD_DELAY;
+ }
+
+ return 0;
+}
+
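+/* In straight-line code a load delay can be respected without any emulation:
+ * if the opcode right after a load reads the loaded register, swapping the
+ * two opcodes makes the reader see the pre-load value, as long as it does
+ * not write the load's base register. */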
+static int lightrec_swap_load_delays(struct lightrec_state *state,
+ struct block *block)
+{
+ unsigned int i;
+ union code c, next;
+ bool in_ds = false, skip_next = false;
+ struct opcode op;
+
+ if (block->nb_ops < 2)
+ return 0;
+
+ for (i = 0; i < block->nb_ops - 2; i++) {
+ c = block->opcode_list[i].c;
+
+ if (skip_next) {
+ skip_next = false;
+ } else if (!in_ds && opcode_is_load(c) && c.i.op != OP_LWC2) {
+ next = block->opcode_list[i + 1].c;
+
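+ /* An LWL/LWR pair implements one unaligned load; MIPS I
+ * explicitly allows the second opcode to use the target of
+ * the first with no delay, so don't swap them apart. */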
+ if (c.i.op == OP_LWL && next.i.op == OP_LWR)
+ continue;
+
+ if (opcode_reads_register(next, c.i.rt)
+ && !opcode_writes_register(next, c.i.rs)) {
+ pr_debug("Swapping opcodes at offset 0x%x to "
+ "respect load delay\n", i << 2);
+
+ op = block->opcode_list[i];
+ block->opcode_list[i] = block->opcode_list[i + 1];
+ block->opcode_list[i + 1] = op;
+ skip_next = true;
+ }
}
+
+ in_ds = has_delay_slot(c);
}
- return ret;
+ return 0;
}
static int lightrec_local_branches(struct lightrec_state *state, struct block *block)
{
+ const struct opcode *ds;
struct opcode *list;
unsigned int i;
s32 offset;
for (i = 0; i < block->nb_ops; i++) {
list = &block->opcode_list[i];
- if (should_emulate(list))
+ if (should_emulate(list) || !is_local_branch(block, i))
continue;
- switch (list->i.op) {
- case OP_BEQ:
- case OP_BNE:
- case OP_BLEZ:
- case OP_BGTZ:
- case OP_REGIMM:
- offset = i + 1 + (s16)list->i.imm;
- if (offset >= 0 && offset < block->nb_ops)
- break;
- fallthrough;
- default:
- continue;
- }
+ offset = i + 1 + (s16)list->c.i.imm;
pr_debug("Found local branch to offset 0x%x\n", offset << 2);
+ ds = get_delay_slot(block->opcode_list, i);
+ if (op_flag_load_delay(ds->flags) && opcode_is_load(ds->c)) {
+ pr_debug("Branch delay slot has a load delay - skip\n");
+ continue;
+ }
+
if (should_emulate(&block->opcode_list[offset])) {
pr_debug("Branch target must be emulated - skip\n");
continue;
continue;
}
- pr_debug("Adding sync at offset 0x%x\n", offset << 2);
-
- block->opcode_list[offset].flags |= LIGHTREC_SYNC;
list->flags |= LIGHTREC_LOCAL_BRANCH;
}
+ lightrec_reset_syncs(block);
+
return 0;
}
}
}
+bool is_delay_slot(const struct opcode *list, unsigned int offset)
+{
+ return offset > 0
+ && !op_flag_no_ds(list[offset - 1].flags)
+ && has_delay_slot(list[offset - 1].c);
+}
+
bool should_emulate(const struct opcode *list)
{
return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c);
{
switch (c.i.op) {
case OP_SPECIAL:
- case OP_META_MOV:
+ case OP_META:
return true;
default:
return false;
struct opcode *op;
s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0;
u64 mask_r, mask_w, dirty = 0, loaded = 0;
- u8 reg;
+ u8 reg, load_delay_reg = 0;
memset(last_r, 0xff, sizeof(last_r));
memset(last_w, 0xff, sizeof(last_w));
for (i = 0; i < block->nb_ops; i++) {
op = &block->opcode_list[i];
+ if (OPT_HANDLE_LOAD_DELAYS && load_delay_reg) {
+ /* Handle delayed register write from load opcodes in
+ * delay slots */
+ last_w[load_delay_reg] = i;
+ load_delay_reg = 0;
+ }
+
if (op_flag_sync(op->flags) || should_emulate(op)) {
/* The next opcode has the SYNC flag set, or is a branch
* that should be emulated: unload all registers. */
mask_r = opcode_read_mask(op->c);
mask_w = opcode_write_mask(op->c);
+ if (op_flag_load_delay(op->flags) && opcode_is_load(op->c)) {
+ /* If we have a load opcode in a delay slot, its target
+ * register is actually not written there but at a
+ * later point, in the dispatcher. Prevent the algorithm
+ * from discarding its previous value. */
+ load_delay_reg = op->c.i.rt;
+ mask_w &= ~BIT(op->c.i.rt);
+ }
+
for (reg = 0; reg < 34; reg++) {
if (mask_r & BIT(reg)) {
if (dirty & BIT(reg) && last_w[reg] < last_sync) {
static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
{
- struct opcode *prev = NULL, *list = NULL;
+ struct opcode *list;
enum psx_map psx_map;
- u32 known = BIT(0);
- u32 values[32] = { 0 };
+ struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
unsigned int i;
u32 val, kunseg_val;
bool no_mask;
for (i = 0; i < block->nb_ops; i++) {
- prev = list;
list = &block->opcode_list[i];
- if (prev)
- known = lightrec_propagate_consts(list, prev, known, values);
+ lightrec_consts_propagate(block, i, v);
switch (list->i.op) {
case OP_SB:
case OP_SH:
case OP_SW:
- if (OPT_FLAG_STORES) {
- /* Mark all store operations that target $sp or $gp
- * as not requiring code invalidation. This is based
- * on the heuristic that stores using one of these
- * registers as address will never hit a code page. */
- if (list->i.rs >= 28 && list->i.rs <= 29 &&
- !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
- pr_debug("Flaging opcode 0x%08x as not "
- "requiring invalidation\n",
- list->opcode);
- list->flags |= LIGHTREC_NO_INVALIDATE;
- list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
- }
+ /* Mark all store operations that target $sp or $gp
+ * as not requiring code invalidation. This is based
+ * on the heuristic that stores using one of these
+ * registers as address will never hit a code page. */
+ if (list->i.rs >= 28 && list->i.rs <= 29 &&
+ !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
+ pr_debug("Flaging opcode 0x%08x as not requiring invalidation\n",
+ list->opcode);
+ list->flags |= LIGHTREC_NO_INVALIDATE;
+ }
- /* Detect writes whose destination address is inside the
- * current block, using constant propagation. When these
- * occur, we mark the blocks as not compilable. */
- if ((known & BIT(list->i.rs)) &&
- kunseg(values[list->i.rs]) >= kunseg(block->pc) &&
- kunseg(values[list->i.rs]) < (kunseg(block->pc) +
- block->nb_ops * 4)) {
- pr_debug("Self-modifying block detected\n");
- block_set_flags(block, BLOCK_NEVER_COMPILE);
- list->flags |= LIGHTREC_SMC;
- }
+ /* Detect writes whose destination address is inside the
+ * current block, using constant propagation. When these
+ * occur, we mark the blocks as not compilable. */
+ if (is_known(v, list->i.rs) &&
+ kunseg(v[list->i.rs].value) >= kunseg(block->pc) &&
+ kunseg(v[list->i.rs].value) < (kunseg(block->pc) + block->nb_ops * 4)) {
+ pr_debug("Self-modifying block detected\n");
+ block_set_flags(block, BLOCK_NEVER_COMPILE);
+ list->flags |= LIGHTREC_SMC;
}
fallthrough;
case OP_SWL:
case OP_LWL:
case OP_LWR:
case OP_LWC2:
- if (OPT_FLAG_IO && (known & BIT(list->i.rs))) {
- val = values[list->i.rs] + (s16) list->i.imm;
- kunseg_val = kunseg(val);
- psx_map = lightrec_get_map_idx(state, kunseg_val);
+ if (v[list->i.rs].known | v[list->i.rs].sign) {
+ psx_map = lightrec_get_constprop_map(state, v,
+ list->i.rs,
+ (s16) list->i.imm);
+
+ if (psx_map != PSX_MAP_UNKNOWN && !is_known(v, list->i.rs))
+ pr_debug("Detected map thanks to bit-level const propagation!\n");
list->flags &= ~LIGHTREC_IO_MASK;
- no_mask = val == kunseg_val;
+
+ val = v[list->i.rs].value + (s16) list->i.imm;
+ kunseg_val = kunseg(val);
+
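+ /* The three MSBs select the memory segment; when they are
+ * all known to be zero, the address already equals its
+ * kunseg'd form and needs no masking at runtime. */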
+ no_mask = (v[list->i.rs].known & ~v[list->i.rs].value
+ & 0xe0000000) == 0xe0000000;
switch (psx_map) {
case PSX_MAP_KERNEL_USER_RAM:
pr_debug("Flagging opcode %u as direct I/O access\n",
i);
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT_HW);
- break;
+
+ if (no_mask)
+ list->flags |= LIGHTREC_NO_MASK;
+ } else {
+ pr_debug("Flagging opcode %u as I/O access\n",
+ i);
+ list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
}
- fallthrough;
+ break;
default:
- pr_debug("Flagging opcode %u as I/O access\n",
- i);
- list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
break;
}
}
+
+ if (!LIGHTREC_FLAGS_GET_IO_MODE(list->flags)
+ && list->i.rs >= 28 && list->i.rs <= 29
+ && !state->maps[PSX_MAP_KERNEL_USER_RAM].ops) {
+ /* Assume that I/O operations that use $sp or $gp as the
+ * base register only ever target mapped memory
+ * (RAM, BIOS, scratchpad). */
+ list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
+ }
+
fallthrough;
default:
break;
static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block *block)
{
- struct opcode *prev, *list = NULL;
+ struct opcode *list = NULL;
+ struct constprop_data v[32] = LIGHTREC_CONSTPROP_INITIALIZER;
u8 reg_hi, reg_lo;
unsigned int i;
- u32 known = BIT(0);
- u32 values[32] = { 0 };
for (i = 0; i < block->nb_ops - 1; i++) {
- prev = list;
list = &block->opcode_list[i];
- if (prev)
- known = lightrec_propagate_consts(list, prev, known, values);
+ lightrec_consts_propagate(block, i, v);
switch (list->i.op) {
case OP_SPECIAL:
/* If we are dividing by a non-zero constant, don't
* emit the div-by-zero check. */
if (lightrec_always_skip_div_check() ||
- ((known & BIT(list->c.r.rt)) && values[list->c.r.rt]))
+ (v[list->r.rt].known & v[list->r.rt].value)) {
list->flags |= LIGHTREC_NO_DIV_CHECK;
+ }
fallthrough;
case OP_SPECIAL_MULT:
case OP_SPECIAL_MULTU:
}
/* Don't support opcodes in delay slots */
- if ((i && has_delay_slot(block->opcode_list[i - 1].c)) ||
+ if (is_delay_slot(block->opcode_list, i) ||
op_flag_no_ds(list->flags)) {
continue;
}
IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence),
IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset),
IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches),
+ IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_handle_load_delays),
+ IF_OPT(OPT_HANDLE_LOAD_DELAYS, &lightrec_swap_load_delays),
IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches),
IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches),
IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops),
IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),
- IF_OPT(OPT_FLAG_IO || OPT_FLAG_STORES, &lightrec_flag_io),
+ IF_OPT(OPT_FLAG_IO, &lightrec_flag_io),
IF_OPT(OPT_FLAG_MULT_DIV, &lightrec_flag_mults_divs),
IF_OPT(OPT_EARLY_UNLOAD, &lightrec_early_unload),
};