diff --git a/deps/lightrec/optimizer.c b/deps/lightrec/optimizer.c
index 562f7e00..10067a7d 100644
--- a/deps/lightrec/optimizer.c
+++ b/deps/lightrec/optimizer.c
@@ -69,6 +69,9 @@ static u64 opcode_read_mask(union code op)
 	case OP_SPECIAL_MFLO:
 		return BIT(REG_LO);
 	case OP_SPECIAL_SLL:
+		if (!op.r.imm)
+			return 0;
+		fallthrough;
 	case OP_SPECIAL_SRL:
 	case OP_SPECIAL_SRA:
 		return BIT(op.r.rt);
@@ -99,6 +102,9 @@ static u64 opcode_read_mask(union code op)
 	case OP_LUI:
 		return 0;
 	case OP_BEQ:
+		if (op.i.rs == op.i.rt)
+			return 0;
+		fallthrough;
 	case OP_BNE:
 	case OP_LWL:
 	case OP_LWR:
@@ -113,11 +119,31 @@ static u64 opcode_read_mask(union code op)
 	}
 }
 
-static u64 opcode_write_mask(union code op)
+static u64 mult_div_write_mask(union code op)
 {
 	u64 flags;
 
+	if (!OPT_FLAG_MULT_DIV)
+		return BIT(REG_LO) | BIT(REG_HI);
+
+	if (op.r.rd)
+		flags = BIT(op.r.rd);
+	else
+		flags = BIT(REG_LO);
+	if (op.r.imm)
+		flags |= BIT(op.r.imm);
+	else
+		flags |= BIT(REG_HI);
+
+	return flags;
+}
+
+static u64 opcode_write_mask(union code op)
+{
 	switch (op.i.op) {
+	case OP_META_MULT2:
+	case OP_META_MULTU2:
+		return mult_div_write_mask(op);
 	case OP_SPECIAL:
 		switch (op.r.op) {
 		case OP_SPECIAL_JR:
@@ -128,22 +154,15 @@ static u64 opcode_write_mask(union code op)
 	case OP_SPECIAL_MULTU:
 	case OP_SPECIAL_DIV:
 	case OP_SPECIAL_DIVU:
-		if (!OPT_FLAG_MULT_DIV)
-			return BIT(REG_LO) | BIT(REG_HI);
-
-		if (op.r.rd)
-			flags = BIT(op.r.rd);
-		else
-			flags = BIT(REG_LO);
-		if (op.r.imm)
-			flags |= BIT(op.r.imm);
-		else
-			flags |= BIT(REG_HI);
-		return flags;
+		return mult_div_write_mask(op);
 	case OP_SPECIAL_MTHI:
 		return BIT(REG_HI);
 	case OP_SPECIAL_MTLO:
 		return BIT(REG_LO);
+	case OP_SPECIAL_SLL:
+		if (!op.r.imm)
+			return 0;
+		fallthrough;
 	default:
 		return BIT(op.r.rd);
 	}
@@ -162,6 +181,8 @@ static u64 opcode_write_mask(union code op)
 	case OP_LBU:
 	case OP_LHU:
 	case OP_LWR:
+	case OP_META_EXTC:
+	case OP_META_EXTS:
 		return BIT(op.i.rt);
 	case OP_JAL:
 		return BIT(31);
@@ -214,7 +235,7 @@ static int find_prev_writer(const struct opcode *list, unsigned int offset, u8 r
 	union code c;
 	unsigned int i;
 
-	if (list[offset].flags & LIGHTREC_SYNC)
+	if (op_flag_sync(list[offset].flags))
 		return -1;
 
 	for (i = offset; i > 0; i--) {
@@ -227,7 +248,7 @@ static int find_prev_writer(const struct opcode *list, unsigned int offset, u8 r
 			return i - 1;
 		}
 
-		if ((list[i - 1].flags & LIGHTREC_SYNC) ||
+		if (op_flag_sync(list[i - 1].flags) ||
 		    has_delay_slot(c) ||
 		    opcode_reads_register(c, reg))
 			break;
@@ -241,7 +262,7 @@ static int find_next_reader(const struct opcode *list, unsigned int offset, u8 r
 	unsigned int i;
 	union code c;
 
-	if (list[offset].flags & LIGHTREC_SYNC)
+	if (op_flag_sync(list[offset].flags))
 		return -1;
 
 	for (i = offset; ; i++) {
@@ -254,7 +275,7 @@ static int find_next_reader(const struct opcode *list, unsigned int offset, u8 r
 			return i;
 		}
 
-		if ((list[i].flags & LIGHTREC_SYNC) ||
+		if (op_flag_sync(list[i].flags) ||
 		    has_delay_slot(c) || opcode_writes_register(c, reg))
 			break;
 	}
@@ -266,7 +287,7 @@ static bool reg_is_dead(const struct opcode *list, unsigned int offset, u8 reg)
 {
 	unsigned int i;
 
-	if (list[offset].flags & LIGHTREC_SYNC)
+	if (op_flag_sync(list[offset].flags))
 		return false;
 
 	for (i = offset + 1; ; i++) {
@@ -277,7 +298,7 @@ static bool
reg_is_dead(const struct opcode *list, unsigned int offset, u8 reg) return true; if (has_delay_slot(list[i].c)) { - if (list[i].flags & LIGHTREC_NO_DS || + if (op_flag_no_ds(list[i].flags) || opcode_reads_register(list[i + 1].c, reg)) return false; @@ -351,6 +372,22 @@ static bool opcode_is_store(union code op) } } +static u8 opcode_get_io_size(union code op) +{ + switch (op.i.op) { + case OP_LB: + case OP_LBU: + case OP_SB: + return 8; + case OP_LH: + case OP_LHU: + case OP_SH: + return 16; + default: + return 32; + } +} + bool opcode_is_io(union code op) { return opcode_is_load(op) || opcode_is_store(op); @@ -470,7 +507,7 @@ static u32 lightrec_propagate_consts(const struct opcode *op, known |= BIT(0); v[0] = 0; - if (op->flags & LIGHTREC_SYNC) + if (op_flag_sync(op->flags)) return BIT(0); switch (c.i.op) { @@ -591,10 +628,52 @@ static u32 lightrec_propagate_consts(const struct opcode *op, known &= ~BIT(c.r.rd); } break; + case OP_SPECIAL_MULT: + case OP_SPECIAL_MULTU: + case OP_SPECIAL_DIV: + case OP_SPECIAL_DIVU: + if (OPT_FLAG_MULT_DIV && c.r.rd) + known &= ~BIT(c.r.rd); + if (OPT_FLAG_MULT_DIV && c.r.imm) + known &= ~BIT(c.r.imm); + break; + case OP_SPECIAL_MFLO: + case OP_SPECIAL_MFHI: + known &= ~BIT(c.r.rd); + break; default: break; } break; + case OP_META_MULT2: + case OP_META_MULTU2: + if (OPT_FLAG_MULT_DIV && (known & BIT(c.r.rs))) { + if (c.r.rd) { + known |= BIT(c.r.rd); + + if (c.r.op < 32) + v[c.r.rd] = v[c.r.rs] << c.r.op; + else + v[c.r.rd] = 0; + } + + if (c.r.imm) { + known |= BIT(c.r.imm); + + if (c.r.op >= 32) + v[c.r.imm] = v[c.r.rs] << (c.r.op - 32); + else if (c.i.op == OP_META_MULT2) + v[c.r.imm] = (s32) v[c.r.rs] >> (32 - c.r.op); + else + v[c.r.imm] = v[c.r.rs] >> (32 - c.r.op); + } + } else { + if (OPT_FLAG_MULT_DIV && c.r.rd) + known &= ~BIT(c.r.rd); + if (OPT_FLAG_MULT_DIV && c.r.imm) + known &= ~BIT(c.r.imm); + } + break; case OP_REGIMM: break; case OP_ADDI: @@ -686,6 +765,22 @@ static u32 lightrec_propagate_consts(const struct opcode *op, known &= ~BIT(c.r.rd); } break; + case OP_META_EXTC: + if (known & BIT(c.i.rs)) { + known |= BIT(c.i.rt); + v[c.i.rt] = (s32)(s8)v[c.i.rs]; + } else { + known &= ~BIT(c.i.rt); + } + break; + case OP_META_EXTS: + if (known & BIT(c.i.rs)) { + known |= BIT(c.i.rt); + v[c.i.rt] = (s32)(s16)v[c.i.rs]; + } else { + known &= ~BIT(c.i.rt); + } + break; default: break; } @@ -824,7 +919,7 @@ static void lightrec_remove_useless_lui(struct block *block, unsigned int offset *op = &block->opcode_list[offset]; int reader; - if (!(op->flags & LIGHTREC_SYNC) && (known & BIT(op->i.rt)) && + if (!op_flag_sync(op->flags) && (known & BIT(op->i.rt)) && values[op->i.rt] == op->i.imm << 16) { pr_debug("Converting duplicated LUI to NOP\n"); op->opcode = 0x0; @@ -877,6 +972,44 @@ static void lightrec_modify_lui(struct block *block, unsigned int offset) } } +static int lightrec_transform_branches(struct lightrec_state *state, + struct block *block) +{ + struct opcode *op; + unsigned int i; + s32 offset; + + for (i = 0; i < block->nb_ops; i++) { + op = &block->opcode_list[i]; + + switch (op->i.op) { + case OP_J: + /* Transform J opcode into BEQ $zero, $zero if possible. 
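+			 * BEQ with rs == rt == $zero is always taken, so it
+			 * can replace J; its 16-bit immediate is a signed
+			 * word offset relative to the delay slot, while J
+			 * encodes an absolute target within the current
+			 * 256 MB region, so the rewrite is only possible
+			 * when the computed offset fits in 16 bits.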
+			 */
+			offset = (s32)((block->pc & 0xf0000000) >> 2 | op->j.imm)
+				- (s32)(block->pc >> 2) - (s32)i - 1;
+
+			if (offset == (s16)offset) {
+				pr_debug("Transform J into BEQ $zero, $zero\n");
+				op->i.op = OP_BEQ;
+				op->i.rs = 0;
+				op->i.rt = 0;
+				op->i.imm = offset;
+
+			}
+			fallthrough;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static inline bool is_power_of_two(u32 value)
+{
+	return popcount32(value) == 1;
+}
+
 static int lightrec_transform_ops(struct lightrec_state *state, struct block *block)
 {
 	struct opcode *list = block->opcode_list;
@@ -884,6 +1017,7 @@ static int lightrec_transform_ops(struct lightrec_state *state, struct block *bl
 	u32 known = BIT(0);
 	u32 values[32] = { 0 };
 	unsigned int i;
+	u8 tmp;
 
 	for (i = 0; i < block->nb_ops; i++) {
 		prev = op;
@@ -922,7 +1056,8 @@ static int lightrec_transform_ops(struct lightrec_state *state, struct block *bl
 			break;
 
 		case OP_LUI:
-			lightrec_modify_lui(block, i);
+			if (!prev || !has_delay_slot(prev->c))
+				lightrec_modify_lui(block, i);
 			lightrec_remove_useless_lui(block, i, known, values);
 			break;
 
@@ -957,6 +1092,28 @@ static int lightrec_transform_ops(struct lightrec_state *state, struct block *bl
 				op->r.rs = op->r.rt;
 			}
 			break;
+		case OP_SPECIAL_MULT:
+		case OP_SPECIAL_MULTU:
+			if ((known & BIT(op->r.rs)) &&
+			    is_power_of_two(values[op->r.rs])) {
+				tmp = op->c.i.rs;
+				op->c.i.rs = op->c.i.rt;
+				op->c.i.rt = tmp;
+			} else if (!(known & BIT(op->r.rt)) ||
+				   !is_power_of_two(values[op->r.rt])) {
+				break;
+			}
+
+			pr_debug("Multiply by power-of-two: %u\n",
+				 values[op->r.rt]);
+
+			if (op->r.op == OP_SPECIAL_MULT)
+				op->i.op = OP_META_MULT2;
+			else
+				op->i.op = OP_META_MULTU2;
+
+			op->r.op = ctz32(values[op->r.rt]);
+			break;
 		case OP_SPECIAL_OR:
 		case OP_SPECIAL_ADD:
 		case OP_SPECIAL_ADDU:
@@ -965,16 +1122,19 @@ static int lightrec_transform_ops(struct lightrec_state *state, struct block *bl
 				op->i.op = OP_META_MOV;
 				op->r.rs = op->r.rt;
 			}
-		case OP_SPECIAL_SUB: /* fall-through */
+			fallthrough;
+		case OP_SPECIAL_SUB:
 		case OP_SPECIAL_SUBU:
 			if (op->r.rt == 0) {
 				pr_debug("Convert OR/ADD/SUB $zero to MOV\n");
 				op->i.op = OP_META_MOV;
 			}
-		default: /* fall-through */
+			fallthrough;
+		default:
 			break;
 		}
-	default: /* fall-through */
+		fallthrough;
+	default:
 		break;
 	}
 }
@@ -982,12 +1142,70 @@ static int lightrec_transform_ops(struct lightrec_state *state, struct block *bl
 	return 0;
 }
 
+static bool lightrec_can_switch_delay_slot(union code op, union code next_op)
+{
+	switch (op.i.op) {
+	case OP_SPECIAL:
+		switch (op.r.op) {
+		case OP_SPECIAL_JALR:
+			if (opcode_reads_register(next_op, op.r.rd) ||
+			    opcode_writes_register(next_op, op.r.rd))
+				return false;
+			fallthrough;
+		case OP_SPECIAL_JR:
+			if (opcode_writes_register(next_op, op.r.rs))
+				return false;
+			fallthrough;
+		default:
+			break;
+		}
+		fallthrough;
+	case OP_J:
+		break;
+	case OP_JAL:
+		if (opcode_reads_register(next_op, 31) ||
+		    opcode_writes_register(next_op, 31))
+			return false;
+
+		break;
+	case OP_BEQ:
+	case OP_BNE:
+		if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
+			return false;
+		fallthrough;
+	case OP_BLEZ:
+	case OP_BGTZ:
+		if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
+			return false;
+		break;
+	case OP_REGIMM:
+		switch (op.r.rt) {
+		case OP_REGIMM_BLTZAL:
+		case OP_REGIMM_BGEZAL:
+			if (opcode_reads_register(next_op, 31) ||
+			    opcode_writes_register(next_op, 31))
+				return false;
+			fallthrough;
+		case OP_REGIMM_BLTZ:
+		case OP_REGIMM_BGEZ:
+			if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
+				return false;
+			break;
+		}
+		fallthrough;
+	default:
+		break;
+	}
+
+	return true;
+}
+
 static int lightrec_switch_delay_slots(struct lightrec_state *state, struct block *block)
 {
 	struct opcode *list, *next = &block->opcode_list[0];
 	unsigned int i;
 	union code op, next_op;
-	u8 flags;
+	u32 flags;
 
 	for (i = 0; i < block->nb_ops - 1; i++) {
 		list = next;
@@ -995,75 +1213,29 @@ static int lightrec_switch_delay_slots(struct lightrec_state *state, struct bloc
 		next_op = next->c;
 		op = list->c;
 
-		if (!has_delay_slot(op) ||
-		    list->flags & (LIGHTREC_NO_DS | LIGHTREC_EMULATE_BRANCH) ||
+		if (!has_delay_slot(op) || op_flag_no_ds(list->flags) ||
+		    op_flag_emulate_branch(list->flags) ||
 		    op.opcode == 0 || next_op.opcode == 0)
 			continue;
 
 		if (i && has_delay_slot(block->opcode_list[i - 1].c) &&
-		    !(block->opcode_list[i - 1].flags & LIGHTREC_NO_DS))
+		    !op_flag_no_ds(block->opcode_list[i - 1].flags))
 			continue;
 
-		if ((list->flags & LIGHTREC_SYNC) ||
-		    (next->flags & LIGHTREC_SYNC))
+		if (op_flag_sync(next->flags))
 			continue;
 
-		switch (list->i.op) {
-		case OP_SPECIAL:
-			switch (op.r.op) {
-			case OP_SPECIAL_JALR:
-				if (opcode_reads_register(next_op, op.r.rd) ||
-				    opcode_writes_register(next_op, op.r.rd))
-					continue;
-			case OP_SPECIAL_JR: /* fall-through */
-				if (opcode_writes_register(next_op, op.r.rs))
-					continue;
-			default: /* fall-through */
-				break;
-			}
-		case OP_J: /* fall-through */
-			break;
-		case OP_JAL:
-			if (opcode_reads_register(next_op, 31) ||
-			    opcode_writes_register(next_op, 31))
-				continue;
-			else
-				break;
-		case OP_BEQ:
-		case OP_BNE:
-			if (op.i.rt && opcode_writes_register(next_op, op.i.rt))
-				continue;
-		case OP_BLEZ: /* fall-through */
-		case OP_BGTZ:
-			if (op.i.rs && opcode_writes_register(next_op, op.i.rs))
-				continue;
-			break;
-		case OP_REGIMM:
-			switch (op.r.rt) {
-			case OP_REGIMM_BLTZAL:
-			case OP_REGIMM_BGEZAL:
-				if (opcode_reads_register(next_op, 31) ||
-				    opcode_writes_register(next_op, 31))
-					continue;
-			case OP_REGIMM_BLTZ: /* fall-through */
-			case OP_REGIMM_BGEZ:
-				if (op.i.rs &&
-				    opcode_writes_register(next_op, op.i.rs))
-					continue;
-				break;
-			}
-		default: /* fall-through */
-			break;
-		}
+		if (!lightrec_can_switch_delay_slot(list->c, next_op))
+			continue;
 
 		pr_debug("Swap branch and delay slot opcodes "
 			 "at offsets 0x%x / 0x%x\n",
 			 i << 2, (i + 1) << 2);
 
-		flags = next->flags;
+		flags = next->flags | (list->flags & LIGHTREC_SYNC);
 		list->c = next_op;
 		next->c = op;
-		next->flags = list->flags | LIGHTREC_NO_DS;
+		next->flags = (list->flags | LIGHTREC_NO_DS) & ~LIGHTREC_SYNC;
 		list->flags = flags | LIGHTREC_NO_DS;
 	}
@@ -1072,7 +1244,7 @@ static int lightrec_switch_delay_slots(struct lightrec_state *state, struct bloc
 
 static int shrink_opcode_list(struct lightrec_state *state, struct block *block, u16 new_size)
 {
-	struct opcode *list;
+	struct opcode_list *list, *old_list;
 
 	if (new_size >= block->nb_ops) {
 		pr_err("Invalid shrink size (%u vs %u)\n",
@@ -1080,19 +1252,20 @@ static int shrink_opcode_list(struct lightrec_state *state, struct block *block,
 		return -EINVAL;
 	}
 
 	list = lightrec_malloc(state, MEM_FOR_IR,
-			       sizeof(*list) * new_size);
+			       sizeof(*list) + sizeof(struct opcode) * new_size);
 	if (!list) {
 		pr_err("Unable to allocate memory\n");
 		return -ENOMEM;
 	}
 
-	memcpy(list, block->opcode_list, sizeof(*list) * new_size);
+	old_list = container_of(block->opcode_list, struct opcode_list, ops);
+	memcpy(list->ops, old_list->ops, sizeof(struct opcode) * new_size);
 
-	lightrec_free_opcode_list(state, block);
-	block->opcode_list = list;
+	lightrec_free_opcode_list(state, block->opcode_list);
+	list->nb_ops = new_size;
 	block->nb_ops = new_size;
+	block->opcode_list =
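+		/* ops[] is the opcode array embedded in the new
+		 * struct opcode_list (hence the container_of() above) */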
list->ops; pr_debug("Shrunk opcode list of block PC 0x%08x to %u opcodes\n", block->pc, new_size); @@ -1103,13 +1276,14 @@ static int shrink_opcode_list(struct lightrec_state *state, struct block *block, static int lightrec_detect_impossible_branches(struct lightrec_state *state, struct block *block) { - struct opcode *op, *next = &block->opcode_list[0]; + struct opcode *op, *list = block->opcode_list, *next = &list[0]; unsigned int i; int ret = 0; + s16 offset; for (i = 0; i < block->nb_ops - 1; i++) { op = next; - next = &block->opcode_list[i + 1]; + next = &list[i + 1]; if (!has_delay_slot(op->c) || (!load_in_delay_slot(next->c) && @@ -1124,9 +1298,23 @@ static int lightrec_detect_impossible_branches(struct lightrec_state *state, continue; } + offset = i + 1 + (s16)op->i.imm; + if (load_in_delay_slot(next->c) && + (offset >= 0 && offset < block->nb_ops) && + !opcode_reads_register(list[offset].c, next->c.i.rt)) { + /* The 'impossible' branch is a local branch - we can + * verify here that the first opcode of the target does + * not use the target register of the delay slot */ + + pr_debug("Branch at offset 0x%x has load delay slot, " + "but is local and dest opcode does not read " + "dest register\n", i << 2); + continue; + } + op->flags |= LIGHTREC_EMULATE_BRANCH; - if (op == block->opcode_list) { + if (op == list) { pr_debug("First opcode of block PC 0x%08x is an impossible branch\n", block->pc); @@ -1163,7 +1351,8 @@ static int lightrec_local_branches(struct lightrec_state *state, struct block *b offset = i + 1 + (s16)list->i.imm; if (offset >= 0 && offset < block->nb_ops) break; - default: /* fall-through */ + fallthrough; + default: continue; } @@ -1214,57 +1403,163 @@ bool has_delay_slot(union code op) bool should_emulate(const struct opcode *list) { - return has_delay_slot(list->c) && - (list->flags & LIGHTREC_EMULATE_BRANCH); + return op_flag_emulate_branch(list->flags) && has_delay_slot(list->c); +} + +static bool op_writes_rd(union code c) +{ + switch (c.i.op) { + case OP_SPECIAL: + case OP_META_MOV: + return true; + default: + return false; + } +} + +static void lightrec_add_reg_op(struct opcode *op, u8 reg, u32 reg_op) +{ + if (op_writes_rd(op->c) && reg == op->r.rd) + op->flags |= LIGHTREC_REG_RD(reg_op); + else if (op->i.rs == reg) + op->flags |= LIGHTREC_REG_RS(reg_op); + else if (op->i.rt == reg) + op->flags |= LIGHTREC_REG_RT(reg_op); + else + pr_debug("Cannot add unload/clean/discard flag: " + "opcode does not touch register %s!\n", + lightrec_reg_name(reg)); } static void lightrec_add_unload(struct opcode *op, u8 reg) { - if (op->i.op == OP_SPECIAL && reg == op->r.rd) - op->flags |= LIGHTREC_UNLOAD_RD; + lightrec_add_reg_op(op, reg, LIGHTREC_REG_UNLOAD); +} + +static void lightrec_add_discard(struct opcode *op, u8 reg) +{ + lightrec_add_reg_op(op, reg, LIGHTREC_REG_DISCARD); +} - if (op->i.rs == reg) - op->flags |= LIGHTREC_UNLOAD_RS; - if (op->i.rt == reg) - op->flags |= LIGHTREC_UNLOAD_RT; +static void lightrec_add_clean(struct opcode *op, u8 reg) +{ + lightrec_add_reg_op(op, reg, LIGHTREC_REG_CLEAN); +} + +static void +lightrec_early_unload_sync(struct opcode *list, s16 *last_r, s16 *last_w) +{ + unsigned int reg; + s16 offset; + + for (reg = 0; reg < 34; reg++) { + offset = s16_max(last_w[reg], last_r[reg]); + + if (offset >= 0) + lightrec_add_unload(&list[offset], reg); + } + + memset(last_r, 0xff, sizeof(*last_r) * 34); + memset(last_w, 0xff, sizeof(*last_w) * 34); } static int lightrec_early_unload(struct lightrec_state *state, struct block *block) { - unsigned 
int i, offset; + u16 i, offset; struct opcode *op; + s16 last_r[34], last_w[34], last_sync = 0, next_sync = 0; + u64 mask_r, mask_w, dirty = 0, loaded = 0; u8 reg; - for (reg = 1; reg < 34; reg++) { - int last_r_id = -1, last_w_id = -1; + memset(last_r, 0xff, sizeof(last_r)); + memset(last_w, 0xff, sizeof(last_w)); - for (i = 0; i < block->nb_ops; i++) { - union code c = block->opcode_list[i].c; + /* + * Clean if: + * - the register is dirty, and is read again after a branch opcode + * + * Unload if: + * - the register is dirty or loaded, and is not read again + * - the register is dirty or loaded, and is written again after a branch opcode + * - the next opcode has the SYNC flag set + * + * Discard if: + * - the register is dirty or loaded, and is written again + */ - if (opcode_reads_register(c, reg)) - last_r_id = i; - if (opcode_writes_register(c, reg)) - last_w_id = i; + for (i = 0; i < block->nb_ops; i++) { + op = &block->opcode_list[i]; + + if (op_flag_sync(op->flags) || should_emulate(op)) { + /* The next opcode has the SYNC flag set, or is a branch + * that should be emulated: unload all registers. */ + lightrec_early_unload_sync(block->opcode_list, last_r, last_w); + dirty = 0; + loaded = 0; } - if (last_w_id > last_r_id) - offset = (unsigned int)last_w_id; - else if (last_r_id >= 0) - offset = (unsigned int)last_r_id; - else - continue; + if (next_sync == i) { + last_sync = i; + pr_debug("Last sync: 0x%x\n", last_sync << 2); + } + + if (has_delay_slot(op->c)) { + next_sync = i + 1 + !op_flag_no_ds(op->flags); + pr_debug("Next sync: 0x%x\n", next_sync << 2); + } - op = &block->opcode_list[offset]; + mask_r = opcode_read_mask(op->c); + mask_w = opcode_write_mask(op->c); - if (has_delay_slot(op->c) && (op->flags & LIGHTREC_NO_DS)) - offset++; + for (reg = 0; reg < 34; reg++) { + if (mask_r & BIT(reg)) { + if (dirty & BIT(reg) && last_w[reg] < last_sync) { + /* The register is dirty, and is read + * again after a branch: clean it */ - if (offset == block->nb_ops) - continue; + lightrec_add_clean(&block->opcode_list[last_w[reg]], reg); + dirty &= ~BIT(reg); + loaded |= BIT(reg); + } + + last_r[reg] = i; + } - lightrec_add_unload(&block->opcode_list[offset], reg); + if (mask_w & BIT(reg)) { + if ((dirty & BIT(reg) && last_w[reg] < last_sync) || + (loaded & BIT(reg) && last_r[reg] < last_sync)) { + /* The register is dirty or loaded, and + * is written again after a branch: + * unload it */ + + offset = s16_max(last_w[reg], last_r[reg]); + lightrec_add_unload(&block->opcode_list[offset], reg); + dirty &= ~BIT(reg); + loaded &= ~BIT(reg); + } else if (!(mask_r & BIT(reg)) && + ((dirty & BIT(reg) && last_w[reg] > last_sync) || + (loaded & BIT(reg) && last_r[reg] > last_sync))) { + /* The register is dirty or loaded, and + * is written again: discard it */ + + offset = s16_max(last_w[reg], last_r[reg]); + lightrec_add_discard(&block->opcode_list[offset], reg); + dirty &= ~BIT(reg); + loaded &= ~BIT(reg); + } + + last_w[reg] = i; + } + + } + + dirty |= mask_w; + loaded |= mask_r; } + /* Unload all registers that are dirty or loaded at the end of block. 
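+	 * Nothing is assumed to stay cached in a host register across a
+	 * block boundary, so whatever is still live at this point must be
+	 * written back and unmapped.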
+	 */
+	lightrec_early_unload_sync(block->opcode_list, last_r, last_w);
+
 	return 0;
 }
 
@@ -1276,6 +1571,7 @@ static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
 	u32 values[32] = { 0 };
 	unsigned int i;
 	u32 val, kunseg_val;
+	bool no_mask;
 
 	for (i = 0; i < block->nb_ops; i++) {
 		prev = list;
@@ -1299,6 +1595,7 @@ static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
 					 "requiring invalidation\n",
 					 list->opcode);
 				list->flags |= LIGHTREC_NO_INVALIDATE;
+				list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
 			}
 
 			/* Detect writes whose destination address is inside the
@@ -1309,11 +1606,12 @@ static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
 			    kunseg(values[list->i.rs]) < (kunseg(block->pc) +
 							  block->nb_ops * 4)) {
 				pr_debug("Self-modifying block detected\n");
-				block->flags |= BLOCK_NEVER_COMPILE;
+				block_set_flags(block, BLOCK_NEVER_COMPILE);
 				list->flags |= LIGHTREC_SMC;
 			}
 		}
-	case OP_SWL: /* fall-through */
+		fallthrough;
+	case OP_SWL:
 	case OP_SWR:
 	case OP_SWC2:
 	case OP_LB:
@@ -1329,29 +1627,52 @@ static int lightrec_flag_io(struct lightrec_state *state, struct block *block)
 			kunseg_val = kunseg(val);
 			psx_map = lightrec_get_map_idx(state, kunseg_val);
 
+			list->flags &= ~LIGHTREC_IO_MASK;
+			no_mask = val == kunseg_val;
+
 			switch (psx_map) {
 			case PSX_MAP_KERNEL_USER_RAM:
-				if (val == kunseg_val)
+				if (no_mask)
 					list->flags |= LIGHTREC_NO_MASK;
-				/* fall-through */
+				fallthrough;
 			case PSX_MAP_MIRROR1:
 			case PSX_MAP_MIRROR2:
 			case PSX_MAP_MIRROR3:
 				pr_debug("Flagging opcode %u as RAM access\n", i);
 				list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_RAM);
+				if (no_mask && state->mirrors_mapped)
+					list->flags |= LIGHTREC_NO_MASK;
 				break;
 			case PSX_MAP_BIOS:
				pr_debug("Flagging opcode %u as BIOS access\n", i);
 				list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_BIOS);
+				if (no_mask)
+					list->flags |= LIGHTREC_NO_MASK;
 				break;
 			case PSX_MAP_SCRATCH_PAD:
 				pr_debug("Flagging opcode %u as scratchpad access\n", i);
 				list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_SCRATCH);
+				if (no_mask)
+					list->flags |= LIGHTREC_NO_MASK;
 
 				/* Consider that we're never going to run code from
 				 * the scratchpad.
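 				 * (The 1 KB scratchpad is the R3000A data
 				 * cache remapped as fast RAM; the CPU cannot
 				 * fetch instructions from it.)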
*/ list->flags |= LIGHTREC_NO_INVALIDATE; break; + case PSX_MAP_HW_REGISTERS: + if (state->ops.hw_direct && + state->ops.hw_direct(kunseg_val, + opcode_is_store(list->c), + opcode_get_io_size(list->c))) { + pr_debug("Flagging opcode %u as direct I/O access\n", + i); + list->flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT_HW); + + if (no_mask) + list->flags |= LIGHTREC_NO_MASK; + break; + } + fallthrough; default: pr_debug("Flagging opcode %u as I/O access\n", i); @@ -1359,7 +1680,8 @@ static int lightrec_flag_io(struct lightrec_state *state, struct block *block) break; } } - default: /* fall-through */ + fallthrough; + default: break; } } @@ -1387,7 +1709,7 @@ static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset, mask |= opcode_read_mask(op->c); mask |= opcode_write_mask(op->c); - if (op->flags & LIGHTREC_SYNC) + if (op_flag_sync(op->flags)) sync = true; switch (op->i.op) { @@ -1397,11 +1719,10 @@ static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset, case OP_BGTZ: case OP_REGIMM: /* TODO: handle backwards branches too */ - if (!last && - (op->flags & LIGHTREC_LOCAL_BRANCH) && + if (!last && op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) { branch_offset = i + 1 + (s16)op->c.i.imm - - !!(OPT_SWITCH_DELAY_SLOTS && (op->flags & LIGHTREC_NO_DS)); + - !!op_flag_no_ds(op->flags); reg = get_mfhi_mflo_reg(block, branch_offset, NULL, mask, sync, mflo, false); @@ -1414,6 +1735,9 @@ static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset, } return mflo ? REG_LO : REG_HI; + case OP_META_MULT2: + case OP_META_MULTU2: + return 0; case OP_SPECIAL: switch (op->r.op) { case OP_SPECIAL_MULT: @@ -1433,8 +1757,7 @@ static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset, if (op->r.rs != 31) return reg; - if (!sync && - !(op->flags & LIGHTREC_NO_DS) && + if (!sync && !op_flag_no_ds(op->flags) && (next->i.op == OP_SPECIAL) && ((!mflo && next->r.op == OP_SPECIAL_MFHI) || (mflo && next->r.op == OP_SPECIAL_MFLO))) @@ -1479,7 +1802,7 @@ static u8 get_mfhi_mflo_reg(const struct block *block, u16 offset, break; } - /* fall-through */ + fallthrough; default: continue; } @@ -1507,10 +1830,9 @@ static void lightrec_replace_lo_hi(struct block *block, u16 offset, case OP_BGTZ: case OP_REGIMM: /* TODO: handle backwards branches too */ - if ((op->flags & LIGHTREC_LOCAL_BRANCH) && - (s16)op->c.i.imm >= 0) { + if (op_flag_local_branch(op->flags) && (s16)op->c.i.imm >= 0) { branch_offset = i + 1 + (s16)op->c.i.imm - - !!(OPT_SWITCH_DELAY_SLOTS && (op->flags & LIGHTREC_NO_DS)); + - !!op_flag_no_ds(op->flags); lightrec_replace_lo_hi(block, branch_offset, last, lo); lightrec_replace_lo_hi(block, i + 1, branch_offset, lo); @@ -1530,7 +1852,7 @@ static void lightrec_replace_lo_hi(struct block *block, u16 offset, return; } - /* fall-through */ + fallthrough; default: break; } @@ -1561,19 +1883,26 @@ static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block * if (prev) known = lightrec_propagate_consts(list, prev, known, values); - if (list->i.op != OP_SPECIAL) - continue; - - switch (list->r.op) { - case OP_SPECIAL_DIV: - case OP_SPECIAL_DIVU: - /* If we are dividing by a non-zero constant, don't - * emit the div-by-zero check. 
*/ - if (lightrec_always_skip_div_check() || - (known & BIT(list->c.r.rt) && values[list->c.r.rt])) - list->flags |= LIGHTREC_NO_DIV_CHECK; - case OP_SPECIAL_MULT: /* fall-through */ - case OP_SPECIAL_MULTU: + switch (list->i.op) { + case OP_SPECIAL: + switch (list->r.op) { + case OP_SPECIAL_DIV: + case OP_SPECIAL_DIVU: + /* If we are dividing by a non-zero constant, don't + * emit the div-by-zero check. */ + if (lightrec_always_skip_div_check() || + ((known & BIT(list->c.r.rt)) && values[list->c.r.rt])) + list->flags |= LIGHTREC_NO_DIV_CHECK; + fallthrough; + case OP_SPECIAL_MULT: + case OP_SPECIAL_MULTU: + break; + default: + continue; + } + fallthrough; + case OP_META_MULT2: + case OP_META_MULTU2: break; default: continue; @@ -1581,7 +1910,7 @@ static int lightrec_flag_mults_divs(struct lightrec_state *state, struct block * /* Don't support opcodes in delay slots */ if ((i && has_delay_slot(block->opcode_list[i - 1].c)) || - (list->flags & LIGHTREC_NO_DS)) { + op_flag_no_ds(list->flags)) { continue; } @@ -1753,7 +2082,8 @@ static int lightrec_replace_memset(struct lightrec_state *state, struct block *b if (i == ARRAY_SIZE(memset_code) - 1) { /* success! */ pr_debug("Block at PC 0x%x is a memset\n", block->pc); - block->flags |= BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE; + block_set_flags(block, + BLOCK_IS_MEMSET | BLOCK_NEVER_COMPILE); /* Return non-zero to skip other optimizers. */ return 1; @@ -1767,6 +2097,7 @@ static int (*lightrec_optimizers[])(struct lightrec_state *state, struct block * IF_OPT(OPT_REMOVE_DIV_BY_ZERO_SEQ, &lightrec_remove_div_by_zero_check_sequence), IF_OPT(OPT_REPLACE_MEMSET, &lightrec_replace_memset), IF_OPT(OPT_DETECT_IMPOSSIBLE_BRANCHES, &lightrec_detect_impossible_branches), + IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_branches), IF_OPT(OPT_LOCAL_BRANCHES, &lightrec_local_branches), IF_OPT(OPT_TRANSFORM_OPS, &lightrec_transform_ops), IF_OPT(OPT_SWITCH_DELAY_SLOTS, &lightrec_switch_delay_slots),
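
A minimal standalone sketch (not part of the patch; file and helper names are
illustrative) of the HI/LO arithmetic that the new OP_META_MULT2 constant
propagation performs for shift amounts 0 < n < 32, checked against a plain
64-bit signed multiply:

/* mult2_sketch.c - check that lo = rs << n and hi = (s32)rs >> (32 - n)
 * match what MULT rs, (1 << n) would leave in LO/HI. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void meta_mult2(uint32_t rs, unsigned int n, uint32_t *hi, uint32_t *lo)
{
	*lo = rs << n;                             /* low 32 bits of the product */
	*hi = (uint32_t)((int32_t)rs >> (32 - n)); /* top bits, sign-extended */
}

int main(void)
{
	uint32_t rs = 0xfffffff3; /* -13 */
	unsigned int n = 4;       /* i.e. multiply by 16 */
	uint32_t hi, lo;

	meta_mult2(rs, n, &hi, &lo);

	/* Reference: the full 64-bit signed product, as MULT computes it */
	int64_t ref = (int64_t)(int32_t)rs * ((int64_t)1 << n);

	assert(lo == (uint32_t)ref);
	assert(hi == (uint32_t)((uint64_t)ref >> 32));
	printf("hi=0x%08x lo=0x%08x\n", hi, lo);
	return 0;
}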