case OP_SPECIAL_MFLO:
return BIT(REG_LO);
case OP_SPECIAL_SLL:
+ if (!op.r.imm)
+ return 0;
+ fallthrough;
case OP_SPECIAL_SRL:
case OP_SPECIAL_SRA:
return BIT(op.r.rt);
case OP_LUI:
return 0;
case OP_BEQ:
+ if (op.i.rs == op.i.rt)
+ return 0;
+ fallthrough;
case OP_BNE:
case OP_LWL:
case OP_LWR:
}
}
-static u64 opcode_write_mask(union code op)
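+/* Compute the write mask of a MULT/MULTU/DIV/DIVU (or META_MULT2/MULTU2)
+ * opcode. Without OPT_FLAG_MULT_DIV the result always goes to LO/HI;
+ * otherwise a non-zero rd field replaces LO and a non-zero imm field
+ * replaces HI as the destination register. */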
+static u64 mult_div_write_mask(union code op)
{
u64 flags;
+ if (!OPT_FLAG_MULT_DIV)
+ return BIT(REG_LO) | BIT(REG_HI);
+
+ if (op.r.rd)
+ flags = BIT(op.r.rd);
+ else
+ flags = BIT(REG_LO);
+ if (op.r.imm)
+ flags |= BIT(op.r.imm);
+ else
+ flags |= BIT(REG_HI);
+
+ return flags;
+}
+
+static u64 opcode_write_mask(union code op)
+{
switch (op.i.op) {
+ case OP_META_MULT2:
+ case OP_META_MULTU2:
+ return mult_div_write_mask(op);
case OP_SPECIAL:
switch (op.r.op) {
case OP_SPECIAL_JR:
case OP_SPECIAL_MULTU:
case OP_SPECIAL_DIV:
case OP_SPECIAL_DIVU:
- if (!OPT_FLAG_MULT_DIV)
- return BIT(REG_LO) | BIT(REG_HI);
-
- if (op.r.rd)
- flags = BIT(op.r.rd);
- else
- flags = BIT(REG_LO);
- if (op.r.imm)
- flags |= BIT(op.r.imm);
- else
- flags |= BIT(REG_HI);
- return flags;
+ return mult_div_write_mask(op);
case OP_SPECIAL_MTHI:
return BIT(REG_HI);
case OP_SPECIAL_MTLO:
return BIT(REG_LO);
+ case OP_SPECIAL_SLL:
+ if (!op.r.imm)
+ return 0;
+ fallthrough;
default:
return BIT(op.r.rd);
}
case OP_LBU:
case OP_LHU:
case OP_LWR:
+ case OP_META_EXTC:
+ case OP_META_EXTS:
return BIT(op.i.rt);
case OP_JAL:
return BIT(31);
union code c;
unsigned int i;
- if (list[offset].flags & LIGHTREC_SYNC)
+ if (op_flag_sync(list[offset].flags))
return -1;
for (i = offset; i > 0; i--) {
return i - 1;
}
- if ((list[i - 1].flags & LIGHTREC_SYNC) ||
+ if (op_flag_sync(list[i - 1].flags) ||
has_delay_slot(c) ||
opcode_reads_register(c, reg))
break;
unsigned int i;
union code c;
- if (list[offset].flags & LIGHTREC_SYNC)
+ if (op_flag_sync(list[offset].flags))
return -1;
for (i = offset; ; i++) {
return i;
}
- if ((list[i].flags & LIGHTREC_SYNC) ||
+ if (op_flag_sync(list[i].flags) ||
has_delay_slot(c) || opcode_writes_register(c, reg))
break;
}
{
unsigned int i;
- if (list[offset].flags & LIGHTREC_SYNC)
+ if (op_flag_sync(list[offset].flags))
return false;
for (i = offset + 1; ; i++) {
return true;
if (has_delay_slot(list[i].c)) {
- if (list[i].flags & LIGHTREC_NO_DS ||
+ if (op_flag_no_ds(list[i].flags) ||
opcode_reads_register(list[i + 1].c, reg))
return false;
}
}
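+/* Width in bits of the memory access performed by a load/store opcode. */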
+static u8 opcode_get_io_size(union code op)
+{
+ switch (op.i.op) {
+ case OP_LB:
+ case OP_LBU:
+ case OP_SB:
+ return 8;
+ case OP_LH:
+ case OP_LHU:
+ case OP_SH:
+ return 16;
+ default:
+ return 32;
+ }
+}
+
bool opcode_is_io(union code op)
{
return opcode_is_load(op) || opcode_is_store(op);
known |= BIT(0);
v[0] = 0;
- if (op->flags & LIGHTREC_SYNC)
+ if (op_flag_sync(op->flags))
return BIT(0);
switch (c.i.op) {
known &= ~BIT(c.r.rd);
}
break;
+ case OP_SPECIAL_MULT:
+ case OP_SPECIAL_MULTU:
+ case OP_SPECIAL_DIV:
+ case OP_SPECIAL_DIVU:
+ if (OPT_FLAG_MULT_DIV && c.r.rd)
+ known &= ~BIT(c.r.rd);
+ if (OPT_FLAG_MULT_DIV && c.r.imm)
+ known &= ~BIT(c.r.imm);
+ break;
+ case OP_SPECIAL_MFLO:
+ case OP_SPECIAL_MFHI:
+ known &= ~BIT(c.r.rd);
+ break;
default:
break;
}
break;
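+ /* META_MULT2/MULTU2 shift rs left by the amount stored in r.op:
+ * rd receives the low 32 bits and r.imm the high 32 bits of the
+ * 64-bit result. */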
+ case OP_META_MULT2:
+ case OP_META_MULTU2:
+ if (OPT_FLAG_MULT_DIV && (known & BIT(c.r.rs))) {
+ if (c.r.rd) {
+ known |= BIT(c.r.rd);
+
+ if (c.r.op < 32)
+ v[c.r.rd] = v[c.r.rs] << c.r.op;
+ else
+ v[c.r.rd] = 0;
+ }
+
+ if (c.r.imm) {
+ known |= BIT(c.r.imm);
+
+ if (c.r.op >= 32)
+ v[c.r.imm] = v[c.r.rs] << (c.r.op - 32);
+ else if (c.i.op == OP_META_MULT2)
+ v[c.r.imm] = (s32) v[c.r.rs] >> (32 - c.r.op);
+ else
+ v[c.r.imm] = v[c.r.rs] >> (32 - c.r.op);
+ }
+ } else {
+ if (OPT_FLAG_MULT_DIV && c.r.rd)
+ known &= ~BIT(c.r.rd);
+ if (OPT_FLAG_MULT_DIV && c.r.imm)
+ known &= ~BIT(c.r.imm);
+ }
+ break;
case OP_REGIMM:
break;
case OP_ADDI:
known &= ~BIT(c.r.rd);
}
break;
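+ /* META_EXTC/EXTS sign-extend the low 8/16 bits of rs into rt. */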
+ case OP_META_EXTC:
+ if (known & BIT(c.i.rs)) {
+ known |= BIT(c.i.rt);
+ v[c.i.rt] = (s32)(s8)v[c.i.rs];
+ } else {
+ known &= ~BIT(c.i.rt);
+ }
+ break;
+ case OP_META_EXTS:
+ if (known & BIT(c.i.rs)) {
+ known |= BIT(c.i.rt);
+ v[c.i.rt] = (s32)(s16)v[c.i.rs];
+ } else {
+ known &= ~BIT(c.i.rt);
+ }
+ break;
default:
break;
}
to_nop->opcode = 0;
}
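+/* Convert a LUI whose value is already present in the target register to a
+ * NOP, and drop a LUI 0x0 whose reader can use $zero instead. */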
+static void lightrec_remove_useless_lui(struct block *block, unsigned int offset,
+ u32 known, u32 *values)
+{
+ struct opcode *list = block->opcode_list,
+ *op = &block->opcode_list[offset];
+ int reader;
+
+ if (!op_flag_sync(op->flags) && (known & BIT(op->i.rt)) &&
+ values[op->i.rt] == op->i.imm << 16) {
+ pr_debug("Converting duplicated LUI to NOP\n");
+ op->opcode = 0x0;
+ return;
+ }
+
+ if (op->i.imm != 0 || op->i.rt == 0)
+ return;
+
+ reader = find_next_reader(list, offset + 1, op->i.rt);
+ if (reader <= 0)
+ return;
+
+ if (opcode_writes_register(list[reader].c, op->i.rt) ||
+ reg_is_dead(list, reader, op->i.rt)) {
+ pr_debug("Removing useless LUI 0x0\n");
+
+ if (list[reader].i.rs == op->i.rt)
+ list[reader].i.rs = 0;
+ if (list[reader].i.op == OP_SPECIAL &&
+ list[reader].i.rt == op->i.rt)
+ list[reader].i.rt = 0;
+ op->opcode = 0x0;
+ }
+}
+
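+/* Rewrite a LUI's immediate to its kunseg equivalent when the register is
+ * only used as a load address base before being overwritten. */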
+static void lightrec_modify_lui(struct block *block, unsigned int offset)
+{
+ union code c, *lui = &block->opcode_list[offset].c;
+ bool stop = false, stop_next = false;
+ unsigned int i;
+
+ for (i = offset + 1; !stop && i < block->nb_ops; i++) {
+ c = block->opcode_list[i].c;
+ stop = stop_next;
+
+ if ((opcode_is_store(c) && c.i.rt == lui->i.rt)
+ || (!opcode_is_load(c) && opcode_reads_register(c, lui->i.rt)))
+ break;
+
+ if (opcode_writes_register(c, lui->i.rt)) {
+ pr_debug("Convert LUI at offset 0x%x to kuseg\n",
+ i - 1 << 2);
+ lui->i.imm = kunseg(lui->i.imm << 16) >> 16;
+ break;
+ }
+
+ if (has_delay_slot(c))
+ stop_next = true;
+ }
+}
+
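+/* Replace unconditional J jumps with a BEQ $zero, $zero when the target
+ * fits in the 16-bit signed branch offset. */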
+static int lightrec_transform_branches(struct lightrec_state *state,
+ struct block *block)
+{
+ struct opcode *op;
+ unsigned int i;
+ s32 offset;
+
+ for (i = 0; i < block->nb_ops; i++) {
+ op = &block->opcode_list[i];
+
+ switch (op->i.op) {
+ case OP_J:
+ /* Transform J opcode into BEQ $zero, $zero if possible. */
+ offset = (s32)((block->pc & 0xf0000000) >> 2 | op->j.imm)
+ - (s32)(block->pc >> 2) - (s32)i - 1;
+
+ if (offset == (s16)offset) {
+ pr_debug("Transform J into BEQ $zero, $zero\n");
+ op->i.op = OP_BEQ;
+ op->i.rs = 0;
+ op->i.rt = 0;
+ op->i.imm = offset;
+
+ }
+ fallthrough;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static inline bool is_power_of_two(u32 value)
+{
+ return popcount32(value) == 1;
+}
+
static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
{
struct opcode *list = block->opcode_list;
u32 known = BIT(0);
u32 values[32] = { 0 };
unsigned int i;
- int reader;
+ u8 tmp;
for (i = 0; i < block->nb_ops; i++) {
prev = op;
break;
case OP_LUI:
- if (!(op->flags & LIGHTREC_SYNC) &&
- (known & BIT(op->i.rt)) &&
- values[op->i.rt] == op->i.imm << 16) {
- pr_debug("Converting duplicated LUI to NOP\n");
- op->opcode = 0x0;
- }
-
- if (op->i.imm != 0 || op->i.rt == 0)
- break;
-
- reader = find_next_reader(list, i + 1, op->i.rt);
- if (reader > 0 &&
- (opcode_writes_register(list[reader].c, op->i.rt) ||
- reg_is_dead(list, reader, op->i.rt))) {
-
- pr_debug("Removing useless LUI 0x0\n");
-
- if (list[reader].i.rs == op->i.rt)
- list[reader].i.rs = 0;
- if (list[reader].i.op == OP_SPECIAL &&
- list[reader].i.rt == op->i.rt)
- list[reader].i.rt = 0;
- op->opcode = 0x0;
- }
+ if (!prev || !has_delay_slot(prev->c))
+ lightrec_modify_lui(block, i);
+ lightrec_remove_useless_lui(block, i, known, values);
break;
/* Transform ORI/ADDI/ADDIU with imm #0 or ORR/ADD/ADDU/SUB/SUBU
op->r.rs = op->r.rt;
}
break;
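+ /* Replace a MULT/MULTU by a known power-of-two constant with a
+ * META_MULT2/MULTU2 meta-opcode; the constant operand is moved to rt
+ * if needed, and the shift amount is stored in r.op. */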
+ case OP_SPECIAL_MULT:
+ case OP_SPECIAL_MULTU:
+ if ((known & BIT(op->r.rs)) &&
+ is_power_of_two(values[op->r.rs])) {
+ tmp = op->c.i.rs;
+ op->c.i.rs = op->c.i.rt;
+ op->c.i.rt = tmp;
+ } else if (!(known & BIT(op->r.rt)) ||
+ !is_power_of_two(values[op->r.rt])) {
+ break;
+ }
+
+ pr_debug("Multiply by power-of-two: %u\n",
+ values[op->r.rt]);
+
+ if (op->r.op == OP_SPECIAL_MULT)
+ op->i.op = OP_META_MULT2;
+ else
+ op->i.op = OP_META_MULTU2;
+
+ op->r.op = ctz32(values[op->r.rt]);
+ break;
case OP_SPECIAL_OR:
case OP_SPECIAL_ADD:
case OP_SPECIAL_ADDU:
op->i.op = OP_META_MOV;
op->r.rs = op->r.rt;
}
- case OP_SPECIAL_SUB: /* fall-through */
+ fallthrough;
+ case OP_SPECIAL_SUB:
case OP_SPECIAL_SUBU:
if (op->r.rt == 0) {
pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
op->i.op = OP_META_MOV;
}
- default: /* fall-through */
+ fallthrough;
+ default:
break;
}
- default: /* fall-through */
+ fallthrough;
+ default:
break;
}
}
return 0;
}
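+/* Check whether a branch opcode and the opcode in its delay slot can be
+ * swapped without changing the program's behaviour. */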
+static bool lightrec_can_switch_delay_slot(union code op, union code next_op)
+{
+ switch (op.i.op) {
+ case OP_SPECIAL:
+ switch (op.r.op) {
+ case OP_SPECIAL_JALR:
+ if (opcode_reads_register(next_op, op.r.rd) ||
+ opcode_writes_register(next_op, op.r.rd))
+ return false;
+ fallthrough;
+ case OP_SPECIAL_JR:
+ if (opcode_writes_register(next_op, op.r.rs))
+ return false;
+ fallthrough;
+ default:
+ break;
+ }
+ fallthrough;
+ case OP_J:
+ break;
+ case OP_JAL:
+ if (opcode_reads_register(next_op, 31) ||
+ opcode_writes_register(next_op, 31))
+ return false;
+
+ break;
+ case OP_BEQ:
+ case OP_BNE:
+ if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
+ return false;
+ fallthrough;
+ case OP_BLEZ:
+ case OP_BGTZ:
+ if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
+ return false;
+ break;
+ case OP_REGIMM:
+ switch (op.r.rt) {
+ case OP_REGIMM_BLTZAL:
+ case OP_REGIMM_BGEZAL:
+ if (opcode_reads_register(next_op, 31) ||
+ opcode_writes_register(next_op, 31))
+ return false;
+ fallthrough;
+ case OP_REGIMM_BLTZ:
+ case OP_REGIMM_BGEZ:
+ if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
+ return false;
+ break;
+ }
+ fallthrough;
+ default:
+ break;
+ }
+
+ return true;
+}
+
static int lightrec_switch_delay_slots(struct lightrec_state *state, struct block *block)
{
struct opcode *list, *next = &block->opcode_list[0];
unsigned int i;
union code op, next_op;
- u8 flags;
+ u32 flags;
for (i = 0; i < block->nb_ops - 1; i++) {
list = next;
next_op = next->c;
op = list->c;
- if (!has_delay_slot(op) ||
- list->flags & (LIGHTREC_NO_DS | LIGHTREC_EMULATE_BRANCH) ||
+ if (!has_delay_slot(op) || op_flag_no_ds(list->flags) ||
+ op_flag_emulate_branch(list->flags) ||
op.opcode == 0 || next_op.opcode == 0)
continue;
if (i && has_delay_slot(block->opcode_list[i - 1].c) &&
- !(block->opcode_list[i - 1].flags & LIGHTREC_NO_DS))
+ !op_flag_no_ds(block->opcode_list[i - 1].flags))
continue;
- if ((list->flags & LIGHTREC_SYNC) ||
- (next->flags & LIGHTREC_SYNC))
+ if (op_flag_sync(next->flags))
continue;
- switch (list->i.op) {
- case OP_SPECIAL:
- switch (op.r.op) {
- case OP_SPECIAL_JALR:
- if (opcode_reads_register(next_op, op.r.rd) ||
- opcode_writes_register(next_op, op.r.rd))
- continue;
- case OP_SPECIAL_JR: /* fall-through */
- if (opcode_writes_register(next_op, op.r.rs))
- continue;
- default: /* fall-through */
- break;
- }
- case OP_J: /* fall-through */
- break;
- case OP_JAL:
- if (opcode_reads_register(next_op, 31) ||
- opcode_writes_register(next_op, 31))
- continue;
- else
- break;
- case OP_BEQ:
- case OP_BNE:
- if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
- continue;
- case OP_BLEZ: /* fall-through */
- case OP_BGTZ:
- if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
- continue;
- break;
- case OP_REGIMM:
- switch (op.r.rt) {
- case OP_REGIMM_BLTZAL:
- case OP_REGIMM_BGEZAL:
- if (opcode_reads_register(next_op, 31) ||
- opcode_writes_register(next_op, 31))
- continue;
- case OP_REGIMM_BLTZ: /* fall-through */
- case OP_REGIMM_BGEZ:
- if (op.i.rs &&
- opcode_writes_register(next_op, op.i.rs))
- continue;
- break;
- }
- default: /* fall-through */
- break;
- }
+ if (!lightrec_can_switch_delay_slot(list->c, next_op))
+ continue;
pr_debug("Swap branch and delay slot opcodes "
"at offsets 0x%x / 0x%x\n",
i << 2, (i + 1) << 2);
- flags = next->flags;
+ flags = next->flags | (list->flags & LIGHTREC_SYNC);
list->c = next_op;
next->c = op;
- next->flags = list->flags | LIGHTREC_NO_DS;
+ next->flags = (list->flags | LIGHTREC_NO_DS) & ~LIGHTREC_SYNC;
list->flags = flags | LIGHTREC_NO_DS;
}
static int shrink_opcode_list(struct lightrec_state *state, struct block *block, u16 new_size)
{
- struct opcode *list;
+ struct opcode_list *list, *old_list;
if (new_size >= block->nb_ops) {
pr_err("Invalid shrink size (%u vs %u)\n",
return -EINVAL;
}
-
list = lightrec_malloc(state, MEM_FOR_IR,
- sizeof(*list) * new_size);
+ sizeof(*list) + sizeof(struct opcode) * new_size);
if (!list) {
pr_err("Unable to allocate memory\n");
return -ENOMEM;
}
- memcpy(list, block->opcode_list, sizeof(*list) * new_size);
+ old_list = container_of(block->opcode_list, struct opcode_list, ops);
+ memcpy(list->ops, old_list->ops, sizeof(struct opcode) * new_size);
- lightrec_free_opcode_list(state, block);
- block->opcode_list = list;
+ lightrec_free_opcode_list(state, block->opcode_list);
+ list->nb_ops = new_size;
block->nb_ops = new_size;
+ block->opcode_list = list->ops;
pr_debug("Shrunk opcode list of block PC 0x%08x to %u opcodes\n",
block->pc, new_size);
static int lightrec_detect_impossible_branches(struct lightrec_state *state,
struct block *block)
{
- struct opcode *op, *next = &block->opcode_list[0];
+ struct opcode *op, *list = block->opcode_list, *next = &list[0];
unsigned int i;
int ret = 0;
+ s16 offset;
for (i = 0; i < block->nb_ops - 1; i++) {
op = next;
- next = &block->opcode_list[i + 1];
+ next = &list[i + 1];
if (!has_delay_slot(op->c) ||
(!load_in_delay_slot(next->c) &&
continue;
}
+ offset = i + 1 + (s16)op->i.imm;
+ if (load_in_delay_slot(next->c) &&
+ (offset >= 0 && offset < block->nb_ops) &&
+ !opcode_reads_register(list[offset].c, next->c.i.rt)) {
+ /* The 'impossible' branch is a local branch - we can
+ * verify here that the first opcode of the target does
+ * not use the target register of the delay slot */
+
+ pr_debug("Branch at offset 0x%x has load delay slot, "
+ "but is local and dest opcode does not read "
+ "dest register\n", i << 2);
+ continue;
+ }
+
op->flags |= LIGHTREC_EMULATE_BRANCH;
- if (op == block->opcode_list) {
+ if (op == list) {
pr_debug("First opcode of block PC 0x%08x is an impossible branch\n",
block->pc);
offset = i + 1 + (s16)list->i.imm;
if (offset >= 0 && offset < block->nb_ops)
break;
- default: /* fall-through */
+ fallthrough;
+ default:
continue;
}
bool should_emulate(const struct opcode *list)
{
- return has_delay_slot(list->c) &&
- (list->flags & LIGHTREC_EMULATE_BRANCH);
+ return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c);
+}
+
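+/* True if the opcode encodes its destination in the r.rd field
+ * (SPECIAL and META_MOV opcodes). */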
+static bool op_writes_rd(union code c)
+{
+ switch (c.i.op) {
+ case OP_SPECIAL:
+ case OP_META_MOV:
+ return true;
+ default:
+ return false;
+ }
+}
+
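+/* Tag the operand of 'op' that matches 'reg' with the given
+ * unload/clean/discard register flag. */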
+static void lightrec_add_reg_op(struct opcode *op, u8 reg, u32 reg_op)
+{
+ if (op_writes_rd(op->c) && reg == op->r.rd)
+ op->flags |= LIGHTREC_REG_RD(reg_op);
+ else if (op->i.rs == reg)
+ op->flags |= LIGHTREC_REG_RS(reg_op);
+ else if (op->i.rt == reg)
+ op->flags |= LIGHTREC_REG_RT(reg_op);
+ else
+ pr_debug("Cannot add unload/clean/discard flag: "
+ "opcode does not touch register %s!\n",
+ lightrec_reg_name(reg));
}
static void lightrec_add_unload(struct opcode *op, u8 reg)
{
- if (op->i.op == OP_SPECIAL && reg == op->r.rd)
- op->flags |= LIGHTREC_UNLOAD_RD;
+ lightrec_add_reg_op(op, reg, LIGHTREC_REG_UNLOAD);
+}
+
+static void lightrec_add_discard(struct opcode *op, u8 reg)
+{
+ lightrec_add_reg_op(op, reg, LIGHTREC_REG_DISCARD);
+}
+
+static void lightrec_add_clean(struct opcode *op, u8 reg)
+{
+ lightrec_add_reg_op(op, reg, LIGHTREC_REG_CLEAN);
+}
+
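+/* Flush pending unloads: flag the last opcode that touched each register
+ * with an unload, then reset the last-read/last-write trackers. */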
+static void
+lightrec_early_unload_sync(struct opcode *list, s16 *last_r, s16 *last_w)
+{
+ unsigned int reg;
+ s16 offset;
+
+ for (reg = 0; reg < 34; reg++) {
+ offset = s16_max(last_w[reg], last_r[reg]);
- if (op->i.rs == reg)
- op->flags |= LIGHTREC_UNLOAD_RS;
- if (op->i.rt == reg)
- op->flags |= LIGHTREC_UNLOAD_RT;
+ if (offset >= 0)
+ lightrec_add_unload(&list[offset], reg);
+ }
+
+ memset(last_r, 0xff, sizeof(*last_r) * 34);
+ memset(last_w, 0xff, sizeof(*last_w) * 34);
}
static int lightrec_early_unload(struct lightrec_state *state, struct block *block)
{
- unsigned int i, offset;
+ u16 i, offset;
struct opcode *op;
+ s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0;
+ u64 mask_r, mask_w, dirty = 0, loaded = 0;
u8 reg;
- for (reg = 1; reg < 34; reg++) {
- int last_r_id = -1, last_w_id = -1;
+ memset(last_r, 0xff, sizeof(last_r));
+ memset(last_w, 0xff, sizeof(last_w));
+
+ /*
+ * Clean if:
+ * - the register is dirty, and is read again after a branch opcode
+ *
+ * Unload if:
+ * - the register is dirty or loaded, and is not read again
+ * - the register is dirty or loaded, and is written again after a branch opcode
+ * - the next opcode has the SYNC flag set
+ *
+ * Discard if:
+ * - the register is dirty or loaded, and is written again
+ */
+
+ for (i = 0; i < block->nb_ops; i++) {
+ op = &block->opcode_list[i];
+
+ if (op_flag_sync(op->flags) || should_emulate(op)) {
+ /* The next opcode has the SYNC flag set, or is a branch
+ * that should be emulated: unload all registers. */
+ lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
+ dirty = 0;
+ loaded = 0;
+ }
- for (i = 0; i < block->nb_ops; i++) {
- union code c = block->opcode_list[i].c;
+ if (next_sync == i) {
+ last_sync = i;
+ pr_debug("Last sync: 0x%x\n", last_sync << 2);
+ }
- if (opcode_reads_register(c, reg))
- last_r_id = i;
- if (opcode_writes_register(c, reg))
- last_w_id = i;
+ if (has_delay_slot(op->c)) {
+ next_sync = i + 1 + !op_flag_no_ds(op->flags);
+ pr_debug("Next sync: 0x%x\n", next_sync << 2);
}
- if (last_w_id > last_r_id)
- offset = (unsigned int)last_w_id;
- else if (last_r_id >= 0)
- offset = (unsigned int)last_r_id;
- else
- continue;
+ mask_r = opcode_read_mask(op->c);
+ mask_w = opcode_write_mask(op->c);
- op = &block->opcode_list[offset];
+ for (reg = 0; reg < 34; reg++) {
+ if (mask_r & BIT(reg)) {
+ if (dirty & BIT(reg) && last_w[reg] < last_sync) {
+ /* The register is dirty, and is read
+ * again after a branch: clean it */
- if (has_delay_slot(op->c) && (op->flags & LIGHTREC_NO_DS))
- offset++;
+ lightrec_add_clean(&block->opcode_list[last_w[reg]], reg);
+ dirty &= ~BIT(reg);
+ loaded |= BIT(reg);
+ }
- if (offset == block->nb_ops)
- continue;
+ last_r[reg] = i;
+ }
- lightrec_add_unload(&block->opcode_list[offset], reg);
+ if (mask_w & BIT(reg)) {
+ if ((dirty & BIT(reg) && last_w[reg] < last_sync) ||
+ (loaded & BIT(reg) && last_r[reg] < last_sync)) {
+ /* The register is dirty or loaded, and
+ * is written again after a branch:
+ * unload it */
+
+ offset = s16_max(last_w[reg], last_r[reg]);
+ lightrec_add_unload(&block->opcode_list[offset], reg);
+ dirty &= ~BIT(reg);
+ loaded &= ~BIT(reg);
+ } else if (!(mask_r & BIT(reg)) &&
+ ((dirty & BIT(reg) && last_w[reg] > last_sync) ||
+ (loaded & BIT(reg) && last_r[reg] > last_sync))) {
+ /* The register is dirty or loaded, and
+ * is written again: discard it */
+
+ offset = s16_max(last_w[reg], last_r[reg]);
+ lightrec_add_discard(&block->opcode_list[offset], reg);
+ dirty &= ~BIT(reg);
+ loaded &= ~BIT(reg);
+ }
+
+ last_w[reg] = i;
+ }
+
+ }
+
+ dirty |= mask_w;
+ loaded |= mask_r;
}
+ /* Unload all registers that are dirty or loaded at the end of block. */
+ lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
+
return 0;
}
static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
{
- const struct lightrec_mem_map *map;
- struct opcode *prev2, *prev = NULL, *list = NULL;
+ struct opcode *prev = NULL, *list = NULL;
+ enum psx_map psx_map;
u32 known = BIT(0);
u32 values[32] = { 0 };
unsigned int i;
- u32 val;
+ u32 val, kunseg_val;
+ bool no_mask;
for (i = 0; i < block->nb_ops; i++) {
- prev2 = prev;
prev = list;
list = &block->opcode_list[i];
"requiring invalidation\n",
list->opcode);
list->flags |= LIGHTREC_NO_INVALIDATE;
+ list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
}
/* Detect writes whose destination address is inside the
kunseg(values[list->i.rs]) < (kunseg(block->pc) +
block->nb_ops * 4)) {
pr_debug("Self-modifying block detected\n");
- block->flags |= BLOCK_NEVER_COMPILE;
+ block_set_flags(block, BLOCK_NEVER_COMPILE);
list->flags |= LIGHTREC_SMC;
}
}
- case OP_SWL: /* fall-through */
+ fallthrough;
+ case OP_SWL:
case OP_SWR:
case OP_SWC2:
case OP_LB:
case OP_LWR:
case OP_LWC2:
if (OPT_FLAG_IO && (known & BIT(list->i.rs))) {
- if (prev && prev->i.op == OP_LUI &&
- !(prev2 && has_delay_slot(prev2->c)) &&
- prev->i.rt == list->i.rs &&
- list->i.rt == list->i.rs &&
- prev->i.imm & 0x8000) {
- pr_debug("Convert LUI at offset 0x%x to kuseg\n",
- i - 1 << 2);
-
- val = kunseg(prev->i.imm << 16);
- prev->i.imm = val >> 16;
- values[list->i.rs] = val;
- }
-
val = values[list->i.rs] + (s16) list->i.imm;
- map = lightrec_get_map(state, NULL, kunseg(val));
-
- if (!map || map->ops ||
- map == &state->maps[PSX_MAP_PARALLEL_PORT]) {
- pr_debug("Flagging opcode %u as I/O access\n",
- i);
- list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
- break;
- }
-
- if (val - map->pc < map->length)
- list->flags |= LIGHTREC_NO_MASK;
-
- if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
+ kunseg_val = kunseg(val);
+ psx_map = lightrec_get_map_idx(state, kunseg_val);
+
+ list->flags &= ~LIGHTREC_IO_MASK;
+ no_mask = val == kunseg_val;
+
+ switch (psx_map) {
+ case PSX_MAP_KERNEL_USER_RAM:
+ if (no_mask)
+ list->flags |= LIGHTREC_NO_MASK;
+ fallthrough;
+ case PSX_MAP_MIRROR1:
+ case PSX_MAP_MIRROR2:
+ case PSX_MAP_MIRROR3:
pr_debug("Flaging opcode %u as RAM access\n", i);
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
- } else if (map == &state->maps[PSX_MAP_BIOS]) {
+ if (no_mask && state->mirrors_mapped)
+ list->flags |= LIGHTREC_NO_MASK;
+ break;
+ case PSX_MAP_BIOS:
pr_debug("Flaging opcode %u as BIOS access\n", i);
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_BIOS);
- } else if (map == &state->maps[PSX_MAP_SCRATCH_PAD]) {
+ if (no_mask)
+ list->flags |= LIGHTREC_NO_MASK;
+ break;
+ case PSX_MAP_SCRATCH_PAD:
pr_debug("Flaging opcode %u as scratchpad access\n", i);
list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_SCRATCH);
+ if (no_mask)
+ list->flags |= LIGHTREC_NO_MASK;
+
+ /* Consider that we're never going to run code from
+ * the scratchpad. */
+ list->flags |= LIGHTREC_NO_INVALIDATE;
+ break;
+ case PSX_MAP_HW_REGISTERS:
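+ /* If the core provides a hw_direct() callback that accepts this
+ * address and access width, the access can be compiled as a
+ * direct hardware access. */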
+ if (state->ops.hw_direct &&
+ state->ops.hw_direct(kunseg_val,
+ opcode_is_store(list->c),
+ opcode_get_io_size(list->c))) {
+ pr_debug("Flagging opcode %u as direct I/O access\n",
+ i);
+ list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT_HW);
+
+ if (no_mask)
+ list->flags |= LIGHTREC_NO_MASK;
+ break;
+ }
+ fallthrough;
+ default:
+ pr_debug("Flagging opcode %u as I/O access\n",
+ i);
+ list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
+ break;
}
}
- default: /* fall-through */
+ fallthrough;
+ default:
break;
}
}
mask |= opcode_read_mask(op->c);
mask |= opcode_write_mask(op->c);
- if (op->flags & LIGHTREC_SYNC)
+ if (op_flag_sync(op->flags))
sync = true;
switch (op->i.op) {
case OP_BGTZ:
case OP_REGIMM:
/* TODO: handle backwards branches too */
- if (!last &&
- (op->flags & LIGHTREC_LOCAL_BRANCH) &&
+ if (!last && op_flag_local_branch(op->flags) &&
(s16)op->c.i.imm >= 0) {
branch_offset = i + 1 + (s16)op->c.i.imm
- - !!(OPT_SWITCH_DELAY_SLOTS && (op->flags & LIGHTREC_NO_DS));
+ - !!op_flag_no_ds(op->flags);
reg = get_mfhi_mflo_reg(block, branch_offset, NULL,
mask, sync, mflo, false);
}
return mflo ? REG_LO : REG_HI;
+ case OP_META_MULT2:
+ case OP_META_MULTU2:
+ return 0;
case OP_SPECIAL:
switch (op->r.op) {
case OP_SPECIAL_MULT:
if (op->r.rs != 31)
return reg;
- if (!sync &&
- !(op->flags & LIGHTREC_NO_DS) &&
+ if (!sync && !op_flag_no_ds(op->flags) &&
(next->i.op == OP_SPECIAL) &&
((!mflo && next->r.op == OP_SPECIAL_MFHI) ||
(mflo && next->r.op == OP_SPECIAL_MFLO)))
break;
}
- /* fall-through */
+ fallthrough;
default:
continue;
}
case OP_BGTZ:
case OP_REGIMM:
/* TODO: handle backwards branches too */
- if ((op->flags & LIGHTREC_LOCAL_BRANCH) &&
- (s16)op->c.i.imm >= 0) {
+ if (op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) {
branch_offset = i + 1 + (s16)op->c.i.imm
- - !!(OPT_SWITCH_DELAY_SLOTS && (op->flags & LIGHTREC_NO_DS));
+ - !!op_flag_no_ds(op->flags);
lightrec_replace_lo_hi(block, branch_offset, last, lo);
lightrec_replace_lo_hi(block, i + 1, branch_offset, lo);
return;
}
- /* fall-through */
+ fallthrough;
default:
break;
}
if (prev)
known = lightrec_propagate_consts(list, prev, known, values);
- if (list->i.op != OP_SPECIAL)
- continue;
-
- switch (list->r.op) {
- case OP_SPECIAL_DIV:
- case OP_SPECIAL_DIVU:
- /* If we are dividing by a non-zero constant, don't
- * emit the div-by-zero check. */
- if (lightrec_always_skip_div_check() ||
- (known & BIT(list->c.r.rt) && values[list->c.r.rt]))
- list->flags |= LIGHTREC_NO_DIV_CHECK;
- case OP_SPECIAL_MULT: /* fall-through */
- case OP_SPECIAL_MULTU:
+ switch (list->i.op) {
+ case OP_SPECIAL:
+ switch (list->r.op) {
+ case OP_SPECIAL_DIV:
+ case OP_SPECIAL_DIVU:
+ /* If we are dividing by a non-zero constant, don't
+ * emit the div-by-zero check. */
+ if (lightrec_always_skip_div_check() ||
+ ((known & BIT(list->c.r.rt)) && values[list->c.r.rt]))
+ list->flags |= LIGHTREC_NO_DIV_CHECK;
+ fallthrough;
+ case OP_SPECIAL_MULT:
+ case OP_SPECIAL_MULTU:
+ break;
+ default:
+ continue;
+ }
+ fallthrough;
+ case OP_META_MULT2:
+ case OP_META_MULTU2:
break;
default:
continue;
/* Don't support opcodes in delay slots */
if ((i && has_delay_slot(block->opcode_list[i - 1].c)) ||
- (list->flags & LIGHTREC_NO_DS)) {
+ op_flag_no_ds(list->flags)) {
continue;
}
if (i == ARRAY_SIZE(memset_code) - 1) {
/* success! */
pr_debug("Block at PC 0x%x is a memset\n", block->pc);
- block->flags |= BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE;
+ block_set_flags(block,
+ BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE);
/* Return non-zero to skip other optimizers. */
return 1;
IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence),
IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset),
IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches),
+ IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches),
IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches),
IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops),
IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),