OPF_DATA = (1 << 1), /* data processing - writes to dst opr */
OPF_FLAGS = (1 << 2), /* sets flags */
OPF_JMP = (1 << 3), /* branch, call */
- OPF_CJMP = (1 << 4), /* cond. branch (cc or jecxz) */
+ OPF_CJMP = (1 << 4), /* cond. branch (cc or jecxz/loop) */
OPF_CC = (1 << 5), /* uses flags */
OPF_TAIL = (1 << 6), /* ret or tail call */
OPF_RSAVE = (1 << 7), /* push/pop is local reg save/load */
OPF_VAPUSH = (1 << 18), /* vararg ptr push (as call arg) */
OPF_DONE = (1 << 19), /* already fully handled by analysis */
OPF_PPUSH = (1 << 20), /* part of complex push-pop graph */
+ OPF_NOREGS = (1 << 21), /* don't track regs of this op */
};
enum op_op {
OP_MOVSX,
OP_XCHG,
OP_NOT,
+ OP_XLAT,
OP_CDQ,
OP_LODS,
OP_STOS,
OP_SHL,
OP_SHR,
OP_SAR,
+ OP_SHLD,
OP_SHRD,
OP_ROL,
OP_ROR,
OP_CALL,
OP_JMP,
OP_JECXZ,
+ OP_LOOP,
OP_JCC,
OP_SCC,
// x87
OPLM_QWORD,
};
+#define MAX_EXITS 128
+
#define MAX_OPERANDS 3
#define NAMELEN 112
+#define OPR_INIT(type_, lmod_, reg_) \
+ { type_, lmod_, reg_, }
+
struct parsed_opr {
enum opr_type type;
enum opr_lenmod lmod;
+ int reg;
unsigned int is_ptr:1; // pointer in C
unsigned int is_array:1; // array in C
unsigned int type_from_var:1; // .. in header, sometimes wrong
unsigned int size_lt:1; // type override is larger than C
unsigned int had_ds:1; // had ds: prefix
const struct parsed_proto *pp; // for OPT_LABEL
- int reg;
unsigned int val;
char name[NAMELEN];
};
dump_op(op_), ##__VA_ARGS__)
#define ferr_assert(op_, cond) do { \
- if (!(cond)) ferr(op_, "assertion '%s' failed on ln :%d\n", #cond, \
- __LINE__); \
+ if (!(cond)) ferr(op_, "assertion '%s' failed\n", #cond); \
} while (0)
const char *regs_r32[] = {
static int guess_lmod_from_name(struct parsed_opr *opr)
{
- if (!strncmp(opr->name, "dword_", 6)) {
+ if (IS_START(opr->name, "dword_") || IS_START(opr->name, "off_")) {
opr->lmod = OPLM_DWORD;
return 1;
}
- if (!strncmp(opr->name, "word_", 5)) {
+ if (IS_START(opr->name, "word_")) {
opr->lmod = OPLM_WORD;
return 1;
}
- if (!strncmp(opr->name, "byte_", 5)) {
+ if (IS_START(opr->name, "byte_")) {
opr->lmod = OPLM_BYTE;
return 1;
}
- if (!strncmp(opr->name, "qword_", 6)) {
+ if (IS_START(opr->name, "qword_")) {
opr->lmod = OPLM_QWORD;
return 1;
}
{ "movsx",OP_MOVSX, 2, 2, OPF_DATA },
{ "xchg", OP_XCHG, 2, 2, OPF_DATA },
{ "not", OP_NOT, 1, 1, OPF_DATA },
+ { "xlat", OP_XLAT, 0, 0, OPF_DATA },
{ "cdq", OP_CDQ, 0, 0, OPF_DATA },
{ "lodsb",OP_LODS, 0, 0, OPF_DATA },
{ "lodsw",OP_LODS, 0, 0, OPF_DATA },
{ "shr", OP_SHR, 2, 2, OPF_DATA|OPF_FLAGS },
{ "sal", OP_SHL, 2, 2, OPF_DATA|OPF_FLAGS },
{ "sar", OP_SAR, 2, 2, OPF_DATA|OPF_FLAGS },
+ { "shld", OP_SHLD, 3, 3, OPF_DATA|OPF_FLAGS },
{ "shrd", OP_SHRD, 3, 3, OPF_DATA|OPF_FLAGS },
{ "rol", OP_ROL, 2, 2, OPF_DATA|OPF_FLAGS },
{ "ror", OP_ROR, 2, 2, OPF_DATA|OPF_FLAGS },
{ "call", OP_CALL, 1, 1, OPF_JMP|OPF_DATA|OPF_FLAGS },
{ "jmp", OP_JMP, 1, 1, OPF_JMP },
{ "jecxz",OP_JECXZ, 1, 1, OPF_JMP|OPF_CJMP },
+ { "loop", OP_LOOP, 1, 1, OPF_JMP|OPF_CJMP|OPF_DATA },
{ "jo", OP_JCC, 1, 1, OPF_CJMP_CC, PFO_O, 0 }, // 70 OF=1
{ "jno", OP_JCC, 1, 1, OPF_CJMP_CC, PFO_O, 1 }, // 71 OF=0
{ "jc", OP_JCC, 1, 1, OPF_CJMP_CC, PFO_C, 0 }, // 72 CF=1
break;
// ops with implicit arguments
+ case OP_XLAT:
+ op->operand_cnt = 2;
+ setup_reg_opr(&op->operand[0], xAX, OPLM_BYTE, &op->regmask_src);
+ op->regmask_dst = op->regmask_src;
+ setup_reg_opr(&op->operand[1], xDX, OPLM_DWORD, &op->regmask_src);
+ break;
+
case OP_CDQ:
op->operand_cnt = 2;
setup_reg_opr(&op->operand[0], xDX, OPLM_DWORD, &op->regmask_dst);
op->regmask_dst = op->regmask_src;
break;
+ case OP_LOOP:
+ op->regmask_dst = 1 << xCX;
+ // fallthrough
case OP_JECXZ:
- op->operand_cnt = 1;
+ op->operand_cnt = 2;
op->regmask_src = 1 << xCX;
- op->operand[0].type = OPT_REG;
- op->operand[0].reg = xCX;
- op->operand[0].lmod = OPLM_DWORD;
+ op->operand[1].type = OPT_REG;
+ op->operand[1].reg = xCX;
+ op->operand[1].lmod = OPLM_DWORD;
break;
case OP_IMUL:
+ if (op->operand_cnt == 2) {
+ if (op->operand[0].type != OPT_REG)
+ aerr("reg expected\n");
+ op->regmask_src |= 1 << op->operand[0].reg;
+ }
if (op->operand_cnt != 1)
break;
// fallthrough
&& op->operand[0].reg == op->operand[1].reg
&& IS(op->operand[0].name, op->operand[1].name)) // ! ah, al..
{
- op->flags |= OPF_RMD | OPF_DONE;
+ op->flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
op->regmask_src = op->regmask_dst = 0;
}
break;
char buf[16];
snprintf(buf, sizeof(buf), "%s+0", op->operand[0].name);
if (IS(buf, op->operand[1].name))
- op->flags |= OPF_RMD | OPF_DONE;
+ op->flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
}
break;
default:
break;
}
+
+ if (op->operand[0].type == OPT_REG
+ && op->operand[0].lmod == OPLM_DWORD
+ && op->operand[1].type == OPT_CONST)
+ {
+ if ((op->op == OP_AND && op->operand[1].val == 0)
+ || (op->op == OP_OR && op->operand[1].val == ~0))
+ {
+ op->regmask_src = 0;
+ }
+ }
}
static const char *op_name(struct parsed_op *po)
}
}
-static void op_set_clear_flag(struct parsed_op *po,
- enum op_flags flag_set, enum op_flags flag_clear)
-{
- po->flags |= flag_set;
- po->flags &= ~flag_clear;
-}
-
// last op in stream - unconditional branch or ret
#define LAST_OP(_i) ((ops[_i].flags & OPF_TAIL) \
|| ((ops[_i].flags & (OPF_JMP|OPF_CJMP|OPF_RMD)) == OPF_JMP \
if ((i) < 0) \
ferr(po, "bad " #i ": %d\n", i)
-static int scan_for_pop(int i, int opcnt, const char *reg,
- int magic, int depth, int *maxdepth, int do_flags)
+// note: this skips over calls and rm'd stuff assuming they're handled
+// so it's intended for use in one of the final passes
+static int scan_for_pop(int i, int opcnt, int magic, int reg,
+ int depth, int flags_set)
{
- const struct parsed_proto *pp;
struct parsed_op *po;
+ int relevant;
int ret = 0;
int j;
for (; i < opcnt; i++) {
po = &ops[i];
if (po->cc_scratch == magic)
- break; // already checked
+ return ret; // already checked
po->cc_scratch = magic;
if (po->flags & OPF_TAIL) {
if (po->op == OP_CALL) {
- pp = proto_parse(g_fhdr, po->operand[0].name, g_quiet_pp);
- if (pp != NULL && pp->is_noreturn)
- // no stack cleanup for noreturn
- return ret;
+ if (po->pp != NULL && po->pp->is_noreturn)
+ // assume no stack cleanup for noreturn
+ return 1;
}
return -1; // deadend
}
- if ((po->flags & (OPF_RMD|OPF_DONE))
- || (po->op == OP_PUSH && po->p_argnum != 0)) // arg push
+ if (po->flags & (OPF_RMD|OPF_DONE|OPF_FARG))
continue;
if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
// jumptable
for (j = 0; j < po->btj->count; j++) {
check_i(po, po->btj->d[j].bt_i);
- ret |= scan_for_pop(po->btj->d[j].bt_i, opcnt, reg, magic,
- depth, maxdepth, do_flags);
+ ret |= scan_for_pop(po->btj->d[j].bt_i, opcnt, magic, reg,
+ depth, flags_set);
if (ret < 0)
return ret; // dead end
}
check_i(po, po->bt_i);
if (po->flags & OPF_CJMP) {
- ret |= scan_for_pop(po->bt_i, opcnt, reg, magic,
- depth, maxdepth, do_flags);
+ ret |= scan_for_pop(po->bt_i, opcnt, magic, reg,
+ depth, flags_set);
if (ret < 0)
return ret; // dead end
}
continue;
}
+ relevant = 0;
if ((po->op == OP_POP || po->op == OP_PUSH)
- && po->operand[0].type == OPT_REG
- && IS(po->operand[0].name, reg))
+ && po->operand[0].type == OPT_REG && po->operand[0].reg == reg)
{
- if (po->op == OP_PUSH && !(po->flags & OPF_FARGNR)) {
- depth++;
- if (depth > *maxdepth)
- *maxdepth = depth;
- if (do_flags)
- op_set_clear_flag(po, OPF_RSAVE, OPF_RMD);
- }
- else if (po->op == OP_POP) {
- if (depth == 0) {
- if (do_flags)
- op_set_clear_flag(po, OPF_RMD, OPF_RSAVE);
- return 1;
- }
- else {
- depth--;
- if (depth < 0) // should not happen
- ferr(po, "fail with depth\n");
- if (do_flags)
- op_set_clear_flag(po, OPF_RSAVE, OPF_RMD);
- }
+ relevant = 1;
+ }
+
+ if (po->op == OP_PUSH) {
+ depth++;
+ }
+ else if (po->op == OP_POP) {
+ if (relevant && depth == 0) {
+ po->flags |= flags_set;
+ return 1;
}
+ depth--;
}
}
- return ret;
+ return -1;
}
-// scan for pop starting from 'ret' op (all paths)
-static int scan_for_pop_ret(int i, int opcnt, const char *reg,
- int flag_set)
+// scan backwards from op i for a pop of 'reg', following all label
+// refs so that every path which can reach i is covered; or's
+// set_flags into the found pop's flags
+// intended for register restore search, so any other reference to
+// 'reg' along the way is treated as an error
+// returns: 1 - pop found, 0 - nothing on this path, -1 - error
+static int scan_for_rsave_pop_reg(int i, int magic, int reg, int set_flags)
{
-  int found = 0;
+  struct parsed_op *po;
+  struct label_ref *lr;
+  int ret = 0;
+
+  // cc_scratch == magic marks ops already visited in this scan
+  ops[i].cc_scratch = magic;
+
+  while (1)
+  {
+    if (g_labels[i] != NULL) {
+      // also scan backwards from every branch targeting this label
+      lr = &g_label_refs[i];
+      for (; lr != NULL; lr = lr->next) {
+        check_i(&ops[i], lr->i);
+        ret |= scan_for_rsave_pop_reg(lr->i, magic, reg, set_flags);
+        if (ret < 0)
+          return ret;
+      }
+      // if the preceding op can't fall through, all inflow was via
+      // the label refs handled above - done with this path
+      if (i > 0 && LAST_OP(i - 1))
+        return ret;
+    }
+
+    i--;
+    if (i < 0)
+      break;
+
+    if (ops[i].cc_scratch == magic)
+      return ret;
+    ops[i].cc_scratch = magic;
+
+    po = &ops[i];
+    if (po->op == OP_POP && po->operand[0].reg == reg) {
+      if (po->flags & (OPF_RMD|OPF_DONE))
+        return -1;
+
+      po->flags |= set_flags;
+      return 1;
+    }
+
+    // this also covers the case where we reach corresponding push
+    if ((po->regmask_dst | po->regmask_src) & (1 << reg))
+      return -1;
+  }
+
+  // nothing interesting on this path
+  return 0;
+}
+
+static void find_reachable_exits(int i, int opcnt, int magic,
+ int *exits, int *exit_count)
+{
+ struct parsed_op *po;
int j;
- for (; i < opcnt; i++) {
- if (!(ops[i].flags & OPF_TAIL))
- continue;
+ for (; i < opcnt; i++)
+ {
+ po = &ops[i];
+ if (po->cc_scratch == magic)
+ return;
+ po->cc_scratch = magic;
- for (j = i - 1; j >= 0; j--) {
- if (ops[j].flags & (OPF_RMD|OPF_DONE))
+ if (po->flags & OPF_TAIL) {
+ ferr_assert(po, *exit_count < MAX_EXITS);
+ exits[*exit_count] = i;
+ (*exit_count)++;
+ return;
+ }
+
+ if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
+ if (po->flags & OPF_RMD)
continue;
- if (ops[j].flags & OPF_JMP)
- return -1;
- if (ops[j].op == OP_POP && ops[j].datap == NULL
- && ops[j].operand[0].type == OPT_REG
- && IS(ops[j].operand[0].name, reg))
- {
- found = 1;
- ops[j].flags |= flag_set;
- break;
+ if (po->btj != NULL) {
+ for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
+ find_reachable_exits(po->btj->d[j].bt_i, opcnt, magic,
+ exits, exit_count);
+ }
+ return;
}
- if (g_labels[j] != NULL)
- return -1;
+ check_i(po, po->bt_i);
+ if (po->flags & OPF_CJMP)
+ find_reachable_exits(po->bt_i, opcnt, magic, exits, exit_count);
+ else
+ i = po->bt_i - 1;
+ continue;
}
}
+}
+
+// scan for 'reg' pop backwards starting from exits (all paths)
+static int scan_for_pop_ret(int i, int opcnt, int reg, int set_flags)
+{
+ static int exits[MAX_EXITS];
+ static int exit_count;
+ int j, ret;
+
+ if (!set_flags) {
+ exit_count = 0;
+ find_reachable_exits(i, opcnt, i + opcnt * 15, exits,
+ &exit_count);
+ ferr_assert(&ops[i], exit_count > 0);
+ }
- return found ? 0 : -1;
+ for (j = 0; j < exit_count; j++) {
+ ret = scan_for_rsave_pop_reg(exits[j], i + opcnt * 16 + set_flags,
+ reg, set_flags);
+ if (ret == -1)
+ return -1;
+ }
+
+ return 1;
}
-static void scan_for_pop_const(int i, int opcnt, int *regmask_pp)
+// scan for one or more pop of push <const>
+static int scan_for_pop_const_r(int i, int opcnt, int magic,
+ int push_i, int is_probe)
{
struct parsed_op *po;
- int is_multipath = 0;
+ struct label_ref *lr;
+ int ret = 0;
int j;
- for (j = i + 1; j < opcnt; j++) {
- po = &ops[j];
+ for (; i < opcnt; i++)
+ {
+ po = &ops[i];
+ if (po->cc_scratch == magic)
+ return ret; // already checked
+ po->cc_scratch = magic;
+
+ if (po->flags & OPF_JMP) {
+ if (po->flags & OPF_RMD)
+ continue;
+ if (po->op == OP_CALL)
+ return -1;
+
+ if (po->btj != NULL) {
+ for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
+ ret |= scan_for_pop_const_r(po->btj->d[j].bt_i, opcnt, magic,
+ push_i, is_probe);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+ }
- if (po->op == OP_JMP && po->btj == NULL) {
- ferr_assert(po, po->bt_i >= 0);
- j = po->bt_i - 1;
+ check_i(po, po->bt_i);
+ if (po->flags & OPF_CJMP) {
+ ret |= scan_for_pop_const_r(po->bt_i, opcnt, magic, push_i,
+ is_probe);
+ if (ret < 0)
+ return ret;
+ }
+ else {
+ i = po->bt_i - 1;
+ }
continue;
}
- if ((po->flags & (OPF_JMP|OPF_TAIL|OPF_RSAVE))
- || po->op == OP_PUSH)
+ if ((po->flags & (OPF_TAIL|OPF_RSAVE)) || po->op == OP_PUSH)
+ return -1;
+
+ if (g_labels[i] != NULL) {
+ // all refs must be visited
+ lr = &g_label_refs[i];
+ for (; lr != NULL; lr = lr->next) {
+ check_i(po, lr->i);
+ if (ops[lr->i].cc_scratch != magic)
+ return -1;
+ }
+ if (i > 0 && !LAST_OP(i - 1) && ops[i - 1].cc_scratch != magic)
+ return -1;
+ }
+
+ if (po->op == OP_POP)
{
- break;
+ if (po->flags & (OPF_RMD|OPF_DONE))
+ return -1;
+
+ if (!is_probe) {
+ po->flags |= OPF_DONE;
+ po->datap = &ops[push_i];
+ }
+ return 1;
}
+ }
- if (g_labels[j] != NULL)
- is_multipath = 1;
+ return -1;
+}
- if (po->op == OP_POP && !(po->flags & OPF_RMD))
- {
- is_multipath |= !!(po->flags & OPF_PPUSH);
- if (is_multipath) {
- ops[i].flags |= OPF_PPUSH | OPF_DONE;
- ops[i].datap = po;
- po->flags |= OPF_PPUSH | OPF_DONE;
- *regmask_pp |= 1 << po->operand[0].reg;
+// try to eliminate a 'push <const>' at op i: run a probe pass first
+// (is_probe=1), and only if every forward path reaches a matching pop
+// mark the push removed and rerun non-probe to flag/link the pop(s)
+static void scan_for_pop_const(int i, int opcnt, int magic)
+{
+  int ret;
+
+  ret = scan_for_pop_const_r(i + 1, opcnt, magic, i, 1);
+  if (ret == 1) {
+    ops[i].flags |= OPF_RMD | OPF_DONE;
+    // second pass uses a fresh magic so visited marks don't collide
+    scan_for_pop_const_r(i + 1, opcnt, magic + 1, i, 0);
+  }
+}
+
+// check if all branch targets within a marked path are also marked
+// (cc_scratch == magic); used to prove a scanned path is closed under
+// branching before committing a transformation
+// note: the path checked must not be empty or end with a branch
+// returns 1 if closed, 0 if some target escapes the marked set
+static int check_path_branches(int opcnt, int magic)
+{
+  struct parsed_op *po;
+  int i, j;
+
+  for (i = 0; i < opcnt; i++) {
+    po = &ops[i];
+    if (po->cc_scratch != magic)
+      continue;
+
+    if (po->flags & OPF_JMP) {
+      if ((po->flags & OPF_RMD) || po->op == OP_CALL)
+        continue;
+
+      if (po->btj != NULL) {
+        // jumptable: every table entry must be inside the marked set
+        for (j = 0; j < po->btj->count; j++) {
+          check_i(po, po->btj->d[j].bt_i);
+          if (ops[po->btj->d[j].bt_i].cc_scratch != magic)
+            return 0;
+        }
      }
-      else {
-        ops[i].flags |= OPF_RMD | OPF_DONE;
-        po->flags |= OPF_DONE;
-        po->datap = &ops[i];
+
+      // NOTE(review): this is reached for jumptable ops too - confirm
+      // bt_i is always valid when btj != NULL
+      check_i(po, po->bt_i);
+      if (ops[po->bt_i].cc_scratch != magic)
+        return 0;
+      // conditional branch: the fallthrough op must be marked as well
+      if ((po->flags & OPF_CJMP) && ops[i + 1].cc_scratch != magic)
+        return 0;
+    }
+  }
+
+  return 1;
+}
+
+// scan for multiple pushes for given pop
+// walks backwards from the pop (following all incoming label refs) to
+// find the push(es) feeding it; the probe pass (is_probe=1) only
+// checks feasibility, the non-probe pass marks each found push
+// OPF_PPUSH|OPF_DONE and links its datap to the pop
+// returns: 1 - push found, 0 - nothing on this path, -1 - blocked
+static int scan_pushes_for_pop_r(int i, int magic, int pop_i,
+  int is_probe)
+{
+  int reg = ops[pop_i].operand[0].reg;
+  struct parsed_op *po;
+  struct label_ref *lr;
+  int ret = 0;
+
+  // cc_scratch == magic marks ops already visited in this scan
+  ops[i].cc_scratch = magic;
+
+  while (1)
+  {
+    if (g_labels[i] != NULL) {
+      // also scan backwards from every branch targeting this label
+      lr = &g_label_refs[i];
+      for (; lr != NULL; lr = lr->next) {
+        check_i(&ops[i], lr->i);
+        ret |= scan_pushes_for_pop_r(lr->i, magic, pop_i, is_probe);
+        if (ret < 0)
+          return ret;
      }
+      // no fallthrough into the label -> all inflow handled above
+      if (i > 0 && LAST_OP(i - 1))
+        return ret;
+    }
+
+    i--;
+    if (i < 0)
      break;
+
+    if (ops[i].cc_scratch == magic)
+      return ret;
+    ops[i].cc_scratch = magic;
+
+    po = &ops[i];
+    if (po->op == OP_CALL)
+      return -1;
+    if ((po->flags & (OPF_TAIL|OPF_RSAVE)) || po->op == OP_POP)
+      return -1;
+
+    if (po->op == OP_PUSH)
+    {
+      // already claimed by another push/pop pairing
+      if (po->datap != NULL)
+        return -1;
+      if (po->operand[0].type == OPT_REG && po->operand[0].reg == reg)
+        // leave this case for reg save/restore handlers
+        return -1;
+
+      if (!is_probe) {
+        po->flags |= OPF_PPUSH | OPF_DONE;
+        po->datap = &ops[pop_i];
+      }
+      return 1;
+    }
+  }
+
+  return -1;
+}
+
+// try to pair the pop at op i with its (possibly multiple) pushes:
+// probe all backward paths, verify the marked path is closed under
+// branching, then mark pop and pushes for real and record the reg in
+// *regmask_pp
+static void scan_pushes_for_pop(int i, int opcnt, int *regmask_pp)
+{
+  int magic = i + opcnt * 14;
+  int ret;
+
+  ret = scan_pushes_for_pop_r(i, magic, i, 1);
+  if (ret == 1) {
+    ret = check_path_branches(opcnt, magic);
+    if (ret == 1) {
+      ops[i].flags |= OPF_PPUSH | OPF_DONE;
+      *regmask_pp |= 1 << ops[i].operand[0].reg;
+      // marking pass with a fresh magic to re-walk the same paths
+      scan_pushes_for_pop_r(i, magic + 1, i, 0);
    }
  }
}
return;
}
+ if (po->flags & OPF_RMD)
+ continue;
check_i(po, po->bt_i);
if (po->flags & OPF_CJMP)
scan_propagate_df(po->bt_i, opcnt);
static int is_opr_read(const struct parsed_opr *opr,
const struct parsed_op *po)
{
- int mask;
-
if (opr->type == OPT_REG) {
- mask = po->regmask_src;
- if (po->op == OP_CALL)
- // assume worst case
- mask |= (1 << xAX) | (1 << xCX) | (1 << xDX);
- if ((1 << opr->reg) & mask)
+ if (po->regmask_src & (1 << opr->reg))
return 1;
else
return 0;
{
int mask;
- if (!(po->flags & OPF_DATA))
- return 0;
-
if (opr->type == OPT_REG) {
if (po->op == OP_CALL) {
- mask = (1 << xAX) | (1 << xCX) | (1 << xDX);
- if ((1 << opr->reg) & mask)
+ mask = po->regmask_dst;
+ mask |= (1 << xAX) | (1 << xCX) | (1 << xDX); // ?
+ if (mask & (1 << opr->reg))
return 1;
else
return 0;
}
- if (po->operand[0].type == OPT_REG) {
- if (po->regmask_dst & (1 << opr->reg))
- return 1;
- else
- return 0;
- }
+ if (po->regmask_dst & (1 << opr->reg))
+ return 1;
+ else
+ return 0;
}
return IS(po->operand[0].name, opr->name);
while (i >= 0) {
if (ops[i].cc_scratch == magic) {
- ferr(&ops[i], "%s looped\n", __func__);
- return -1;
+ // is this a problem?
+ //ferr(&ops[i], "%s looped\n", __func__);
+ return 0;
}
ops[i].cc_scratch = magic;
if (do_update && *adj >= 0) {
po->flags |= OPF_RMD;
if (!*is_multipath)
- po->flags |= OPF_DONE;
+ po->flags |= OPF_DONE | OPF_NOREGS;
}
*adj += lmod_bytes(po, po->operand[0].lmod);
if (i < 0) {
// reached the top - can only be an arg-reg
- if (opr->type != OPT_REG)
+ if (opr->type != OPT_REG || g_func_pp == NULL)
return;
for (i = 0; i < g_func_pp->argc; i++) {
return pd;
}
-static void clear_labels(int count)
+// free and NULL the first 'count' entries of g_labels
+static void clear_labels(int count)
+{
+  int i;
+
+  for (i = 0; i < count; i++) {
+    if (g_labels[i] != NULL) {
+      free(g_labels[i]);
+      g_labels[i] = NULL;
+    }
+  }
+}
+
+// regmask of registers that carry arguments into a call with
+// prototype pp (fastcall and similar reg-arg conventions);
+// aborts via ferr if an arg names an unknown register
+static int get_pp_arg_regmask_src(const struct parsed_proto *pp)
+{
+  int regmask = 0;
+  int i, reg;
+
+  for (i = 0; i < pp->argc; i++) {
+    if (pp->arg[i].reg != NULL) {
+      reg = char_array_i(regs_r32,
+        ARRAY_SIZE(regs_r32), pp->arg[i].reg);
+      if (reg < 0)
+        ferr(ops, "arg '%s' of func '%s' is not a reg?\n",
+          pp->arg[i].reg, pp->name);
+      regmask |= 1 << reg;
+    }
+  }
+
+  return regmask;
+}
+
+// regmask of registers holding the return value for prototype pp:
+// edx:eax for 64-bit returns, nothing for void, else eax
+// NOTE(review): 'arg' in the name is misleading - this is the return
+// regmask, not argument regs; kept for symmetry with _src variant
+static int get_pp_arg_regmask_dst(const struct parsed_proto *pp)
{
-  int i;
+  if (strstr(pp->ret_type.name, "int64"))
+    return (1 << xAX) | (1 << xDX);
+  if (strcasecmp(pp->ret_type.name, "void") == 0)
+    return 0;
-  for (i = 0; i < count; i++) {
-    if (g_labels[i] != NULL) {
-      free(g_labels[i]);
-      g_labels[i] = NULL;
-    }
-  }
+  return (1 << xAX);
}
static void resolve_branches_parse_calls(int opcnt)
struct parsed_data *pd;
struct parsed_op *po;
const char *tmpname;
- int i, l, ret;
+ int i, l;
+ int ret;
for (i = 0; i < opcnt; i++)
{
&& IS(opr_name(&ops[1], 1), "esp"))
{
g_bp_frame = 1;
- ops[0].flags |= OPF_RMD | OPF_DONE;
- ops[1].flags |= OPF_RMD | OPF_DONE;
+ ops[0].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ ops[1].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
i = 2;
if (ops[2].op == OP_SUB && IS(opr_name(&ops[2], 0), "esp")) {
g_stack_fsz = opr_const(&ops[2], 1);
- ops[2].flags |= OPF_RMD | OPF_DONE;
+ ops[2].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
i++;
}
else {
i = 2;
while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
g_stack_fsz += 4;
- ops[i].flags |= OPF_RMD | OPF_DONE;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
ecx_push++;
i++;
}
&& IS(opr_name(&ops[i + 1], 0), "__alloca_probe"))
{
g_stack_fsz += ops[i].operand[1].val;
- ops[i].flags |= OPF_RMD | OPF_DONE;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
i++;
- ops[i].flags |= OPF_RMD | OPF_DONE;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
i++;
}
}
if ((ops[j].op == OP_POP && IS(opr_name(&ops[j], 0), "ebp"))
|| ops[j].op == OP_LEAVE)
{
- ops[j].flags |= OPF_RMD | OPF_DONE;
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
}
else if (ops[i].op == OP_CALL && ops[i].pp != NULL
&& ops[i].pp->is_noreturn)
&& IS(opr_name(&ops[j - 1], 0), "esp")
&& IS(opr_name(&ops[j - 1], 1), "ebp"))
{
- ops[j - 1].flags |= OPF_RMD | OPF_DONE;
+ ops[j - 1].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
j -= 2;
}
else if (!(g_ida_func_attr & IDAFA_NORETURN))
// non-bp frame
i = 0;
while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
- ops[i].flags |= OPF_RMD | OPF_DONE;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
g_stack_fsz += 4;
ecx_push++;
i++;
&& ops[i].operand[1].type == OPT_CONST)
{
g_stack_fsz = ops[i].operand[1].val;
- ops[i].flags |= OPF_RMD | OPF_DONE;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
esp_sub = 1;
break;
}
while (i > 0 && j > 0) {
i--;
if (ops[i].op == OP_PUSH) {
- ops[i].flags &= ~(OPF_RMD | OPF_DONE);
+ ops[i].flags &= ~(OPF_RMD | OPF_DONE | OPF_NOREGS);
j--;
}
}
else
ferr(&ops[j], "'pop ecx' expected\n");
- ops[j].flags |= OPF_RMD | OPF_DONE;
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
j--;
}
if (l != ecx_push)
|| ops[j].operand[1].val != g_stack_fsz)
ferr(&ops[j], "'add esp' expected\n");
- ops[j].flags |= OPF_RMD | OPF_DONE;
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
ops[j].operand[1].val = 0; // hack for stack arg scanner
found = 1;
}
}
// find next instruction that reads opr
-// if multiple results are found - fail
// *op_i must be set to -1 by the caller
-// returns 1 if found, *op_i is then set to referencer insn
+// on return, *op_i is set to first referencer insn
+// returns 1 if exactly 1 referencer is found
static int find_next_read(int i, int opcnt,
const struct parsed_opr *opr, int magic, int *op_i)
{
for (; i < opcnt; i++)
{
if (ops[i].cc_scratch == magic)
- return 0;
+ return ret;
ops[i].cc_scratch = magic;
po = &ops[i];
continue;
check_i(po, po->bt_i);
if (po->flags & OPF_CJMP) {
- ret = find_next_read(po->bt_i, opcnt, opr, magic, op_i);
+ ret |= find_next_read(po->bt_i, opcnt, opr, magic, op_i);
if (ret < 0)
return ret;
}
-
- i = po->bt_i - 1;
+ else
+ i = po->bt_i - 1;
continue;
}
if (!is_opr_read(opr, po)) {
if (is_opr_modified(opr, po))
// it's overwritten
- return 0;
+ return ret;
if (po->flags & OPF_TAIL)
- return 0;
+ return ret;
continue;
}
&& ops[call_i].operand[1].type == OPT_LABEL)
{
// no other source users?
- ret = resolve_last_ref(i, &po->operand[0], opcnt * 10,
+ ret = resolve_last_ref(i, &po->operand[0], i + opcnt * 10,
&ref_i);
if (ret == 1 && call_i == ref_i) {
// and nothing uses it after us?
ref_i = -1;
- ret = find_next_read(i + 1, opcnt, &po->operand[0],
- opcnt * 11, &ref_i);
- if (ret != 1)
+ find_next_read(i + 1, opcnt, &po->operand[0],
+ i + opcnt * 11, &ref_i);
+ if (ref_i == -1)
// then also don't need the source mov
- ops[call_i].flags |= OPF_RMD;
+ ops[call_i].flags |= OPF_RMD | OPF_NOREGS;
}
}
break;
if (pp->is_unresolved)
break;
- ferr(po, "arg collect %d/%d hit esp adjust of %d\n",
+ fnote(po, "(this call)\n");
+ ferr(&ops[j], "arg collect %d/%d hit esp adjust of %d\n",
arg, pp->argc, ops[j].operand[1].val);
}
else if (ops[j].op == OP_POP && !(ops[j].flags & OPF_DONE))
if (pp->is_unresolved)
break;
- ferr(po, "arg collect %d/%d hit pop\n", arg, pp->argc);
+ fnote(po, "(this call)\n");
+ ferr(&ops[j], "arg collect %d/%d hit pop\n", arg, pp->argc);
}
else if (ops[j].flags & OPF_CJMP)
{
ops[j].flags &= ~OPF_RSAVE;
// check for __VALIST
- if (!pp->is_unresolved && pp->arg[arg].type.is_va_list) {
+ if (!pp->is_unresolved && g_func_pp != NULL
+ && pp->arg[arg].type.is_va_list)
+ {
k = -1;
ret = resolve_origin(j, &ops[j].operand[0],
magic + 1, &k, NULL);
if (!g_func_pp->is_vararg
|| strstr(ops[k].operand[1].name, buf))
{
- ops[k].flags |= OPF_RMD | OPF_DONE;
- ops[j].flags |= OPF_RMD | OPF_VAPUSH;
+ ops[k].flags |= OPF_RMD | OPF_NOREGS | OPF_DONE;
+ ops[j].flags |= OPF_RMD | OPF_NOREGS | OPF_VAPUSH;
save_args &= ~(1 << arg);
reg = -1;
}
return ret;
}
+// forward register-use pass over all reachable code:
+// - tracks which regs hold a value (regmask_now) along each path
+// - detects reg save/restore push/pop pairs and marks them
+// - drops a call's unused eax result so it won't be emitted
+// - reports reads of regs never written on this path
+// cbits: visited bitmap, one bit per op; *regmask: accumulates all
+// used regs; *regmask_save: regs saved on stack while live;
+// *regmask_init: regs that need zero-init; regmask_arg: regs that
+// arrive as function args
+static void reg_use_pass(int i, int opcnt, unsigned char *cbits,
+  int regmask_now, int *regmask,
+  int regmask_save_now, int *regmask_save,
+  int *regmask_init, int regmask_arg)
+{
+  struct parsed_op *po;
+  int already_saved;
+  int regmask_new;
+  int regmask_op;
+  int flags_set;
+  int ret, reg;
+  int j;
+
+  for (; i < opcnt; i++)
+  {
+    po = &ops[i];
+    if (cbits[i >> 3] & (1 << (i & 7)))
+      return;
+    cbits[i >> 3] |= (1 << (i & 7));
+
+    if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
+      if (po->flags & (OPF_RMD|OPF_DONE))
+        continue;
+      if (po->btj != NULL) {
+        // jumptable: recurse into every target with the current state
+        for (j = 0; j < po->btj->count; j++) {
+          check_i(po, po->btj->d[j].bt_i);
+          reg_use_pass(po->btj->d[j].bt_i, opcnt, cbits,
+            regmask_now, regmask, regmask_save_now, regmask_save,
+            regmask_init, regmask_arg);
+        }
+        return;
+      }
+
+      check_i(po, po->bt_i);
+      if (po->flags & OPF_CJMP)
+        // taken path by recursion, fallthrough path by this loop
+        reg_use_pass(po->bt_i, opcnt, cbits,
+          regmask_now, regmask, regmask_save_now, regmask_save,
+          regmask_init, regmask_arg);
+      else
+        i = po->bt_i - 1;
+      continue;
+    }
+
+    // candidate reg-save push: try to pair it with restoring pop(s)
+    if (po->op == OP_PUSH && !(po->flags & (OPF_FARG|OPF_DONE))
+      && !g_func_pp->is_userstack
+      && po->operand[0].type == OPT_REG)
+    {
+      reg = po->operand[0].reg;
+      ferr_assert(po, reg >= 0);
+
+      already_saved = 0;
+      flags_set = OPF_RSAVE | OPF_RMD | OPF_DONE;
+      if (regmask_now & (1 << reg)) {
+        // reg is live here, so the push/pop must stay (no OPF_RMD)
+        already_saved = regmask_save_now & (1 << reg);
+        flags_set = OPF_RSAVE | OPF_DONE;
+      }
+
+      // probe first, then rerun with flags_set to mark the pop(s)
+      ret = scan_for_pop(i + 1, opcnt, i + opcnt * 3, reg, 0, 0);
+      if (ret == 1) {
+        scan_for_pop(i + 1, opcnt, i + opcnt * 4, reg, 0, flags_set);
+      }
+      else {
+        // no in-line pop - look for restores just before the exits
+        ret = scan_for_pop_ret(i + 1, opcnt, po->operand[0].reg, 0);
+        if (ret == 1) {
+          scan_for_pop_ret(i + 1, opcnt, po->operand[0].reg,
+            flags_set);
+        }
+      }
+      if (ret == 1) {
+        ferr_assert(po, !already_saved);
+        po->flags |= flags_set;
+
+        if (regmask_now & (1 << reg)) {
+          regmask_save_now |= (1 << reg);
+          *regmask_save |= regmask_save_now;
+        }
+        continue;
+      }
+    }
+    else if (po->op == OP_POP && (po->flags & OPF_RSAVE)) {
+      // restore point: the reg becomes "not live" again
+      reg = po->operand[0].reg;
+      ferr_assert(po, reg >= 0);
+
+      if (regmask_save_now & (1 << reg))
+        regmask_save_now &= ~(1 << reg);
+      else
+        regmask_now &= ~(1 << reg);
+      continue;
+    }
+    else if (po->op == OP_CALL) {
+      if ((po->regmask_dst & (1 << xAX))
+        && !(po->regmask_dst & (1 << xDX)))
+      {
+        if (po->flags & OPF_TAIL)
+          // don't need eax, will do "return f();" or "f(); return;"
+          po->regmask_dst &= ~(1 << xAX);
+        else {
+          // drop eax result if nothing downstream ever reads it
+          struct parsed_opr opr = OPR_INIT(OPT_REG, OPLM_DWORD, xAX);
+          j = -1;
+          find_next_read(i + 1, opcnt, &opr, i + opcnt * 17, &j);
+          if (j == -1)
+            // not used
+            po->regmask_dst &= ~(1 << xAX);
+        }
+      }
+    }
+
+    if (po->flags & OPF_NOREGS)
+      continue;
+
+    regmask_op = po->regmask_src | po->regmask_dst;
+
+    // regs read here that were never written and aren't args
+    regmask_new = po->regmask_src & ~regmask_now & ~regmask_arg;
+    regmask_new &= ~(1 << xSP);
+    if (g_bp_frame && !(po->flags & OPF_EBP_S))
+      regmask_new &= ~(1 << xBP);
+
+    if (po->op == OP_CALL) {
+      // allow fastcall calls from anywhere, callee may also be
+      // sitting in some fastcall table even when it's not using
+      // reg args
+      if (regmask_new & po->regmask_src & (1 << xCX)) {
+        *regmask_init |= (1 << xCX);
+        regmask_now |= (1 << xCX);
+        regmask_new &= ~(1 << xCX);
+      }
+      if (regmask_new & po->regmask_src & (1 << xDX)) {
+        *regmask_init |= (1 << xDX);
+        regmask_now |= (1 << xDX);
+        regmask_new &= ~(1 << xDX);
+      }
+    }
+
+    if (regmask_new != 0)
+      fnote(po, "uninitialized reg mask: %x\n", regmask_new);
+
+    if (regmask_op & (1 << xBP)) {
+      if (g_bp_frame && !(po->flags & OPF_EBP_S)) {
+        if (po->regmask_dst & (1 << xBP))
+          // compiler decided to drop bp frame and use ebp as scratch
+          scan_fwd_set_flags(i + 1, opcnt, i + opcnt * 5, OPF_EBP_S);
+        else
+          regmask_op &= ~(1 << xBP);
+      }
+    }
+
+    regmask_now |= regmask_op;
+    *regmask |= regmask_now;
+
+    if (po->flags & OPF_TAIL)
+      return;
+  }
+}
+
static void pp_insert_reg_arg(struct parsed_proto *pp, const char *reg)
{
int i;
fprintf(fout, ")");
}
-static int get_pp_arg_regmask(const struct parsed_proto *pp)
-{
- int regmask = 0;
- int i, reg;
-
- for (i = 0; i < pp->argc; i++) {
- if (pp->arg[i].reg != NULL) {
- reg = char_array_i(regs_r32,
- ARRAY_SIZE(regs_r32), pp->arg[i].reg);
- if (reg < 0)
- ferr(ops, "arg '%s' of func '%s' is not a reg?\n",
- pp->arg[i].reg, pp->name);
- regmask |= 1 << reg;
- }
- }
-
- return regmask;
-}
-
static char *saved_arg_name(char *buf, size_t buf_size, int grp, int num)
{
char buf1[16];
struct parsed_data *pd;
unsigned int uval;
int save_arg_vars[MAX_ARG_GRP] = { 0, };
+ unsigned char cbits[MAX_OPS / 8];
int cond_vars = 0;
int need_tmp_var = 0;
int need_tmp64 = 0;
int had_decl = 0;
int label_pending = 0;
int regmask_save = 0; // regs saved/restored in this func
- int regmask_arg = 0; // regs carrying function args (fastcall, etc)
+ int regmask_arg; // regs from this function args (fastcall, etc)
+ int regmask_ret; // regs needed on ret
int regmask_now; // temp
int regmask_init = 0; // regs that need zero initialization
int regmask_pp = 0; // regs used in complex push-pop graph
int regmask = 0; // used regs
int pfomask = 0;
int found = 0;
- int depth = 0;
int no_output;
int i, j, l;
int arg;
if (g_func_pp == NULL)
ferr(ops, "proto_parse failed for '%s'\n", funcn);
- regmask_arg = get_pp_arg_regmask(g_func_pp);
+ regmask_arg = get_pp_arg_regmask_src(g_func_pp);
+ regmask_ret = get_pp_arg_regmask_dst(g_func_pp);
+
+ if (g_func_pp->has_retreg) {
+ for (arg = 0; arg < g_func_pp->argc; arg++) {
+ if (g_func_pp->arg[arg].type.is_retreg) {
+ reg = char_array_i(regs_r32,
+ ARRAY_SIZE(regs_r32), g_func_pp->arg[arg].reg);
+ ferr_assert(ops, reg >= 0);
+ regmask_ret |= 1 << reg;
+ }
+ }
+ }
// pass1:
// - resolve all branches
// pass3:
// - remove dead labels
+ // - set regs needed at ret
for (i = 0; i < opcnt; i++)
{
if (g_labels[i] != NULL && g_label_refs[i].i == -1) {
free(g_labels[i]);
g_labels[i] = NULL;
}
+
+ if (ops[i].op == OP_RET)
+ ops[i].regmask_src |= regmask_ret;
}
// pass4:
patch_esp_adjust(&ops[j], pp->argc_stack * 4);
else {
for (l = 0; l < pp->argc_stack; l++)
- ops[j + l].flags |= OPF_DONE | OPF_RMD;
+ ops[j + l].flags |= OPF_DONE | OPF_RMD | OPF_NOREGS;
}
}
}
// pass5:
- // - process calls
- // - handle push <const>/pop pairs
+ // - process calls, stage 2
+ // - handle some push/pop pairs
+ // - scan for STD/CLD, propagate DF
for (i = 0; i < opcnt; i++)
{
po = &ops[i];
- if (po->flags & (OPF_RMD|OPF_DONE))
+ if (po->flags & OPF_RMD)
continue;
- if (po->op == OP_CALL && !(po->flags & OPF_DONE))
+ if (po->op == OP_CALL)
{
- pp = process_call(i, opcnt);
+ if (!(po->flags & OPF_DONE)) {
+ pp = process_call(i, opcnt);
- if (!pp->is_unresolved && !(po->flags & OPF_ATAIL)) {
- // since we know the args, collect them
- collect_call_args(po, i, pp, ®mask, save_arg_vars,
- i + opcnt * 2);
+ if (!pp->is_unresolved && !(po->flags & OPF_ATAIL)) {
+ // since we know the args, collect them
+ collect_call_args(po, i, pp, ®mask, save_arg_vars,
+ i + opcnt * 2);
+ }
+ // for unresolved, collect after other passes
}
+ pp = po->pp;
+ ferr_assert(po, pp != NULL);
+
+ po->regmask_src |= get_pp_arg_regmask_src(pp);
+ po->regmask_dst |= get_pp_arg_regmask_dst(pp);
+
if (strstr(pp->ret_type.name, "int64"))
need_tmp64 = 1;
+
+ continue;
}
- else if (po->op == OP_PUSH && !(po->flags & OPF_FARG)
+
+ if (po->flags & OPF_DONE)
+ continue;
+
+ if (po->op == OP_PUSH && !(po->flags & OPF_FARG)
&& !(po->flags & OPF_RSAVE) && po->operand[0].type == OPT_CONST)
- scan_for_pop_const(i, opcnt, ®mask_pp);
+ {
+ scan_for_pop_const(i, opcnt, i + opcnt * 12);
+ }
+ else if (po->op == OP_POP)
+ scan_pushes_for_pop(i, opcnt, ®mask_pp);
+ else if (po->op == OP_STD) {
+ po->flags |= OPF_DF | OPF_RMD | OPF_DONE;
+ scan_propagate_df(i + 1, opcnt);
+ }
}
// pass6:
// - find POPs for PUSHes, rm both
- // - scan for STD/CLD, propagate DF
// - scan for all used registers
+ memset(cbits, 0, sizeof(cbits));
+ reg_use_pass(0, opcnt, cbits, 0, ®mask,
+ 0, ®mask_save, ®mask_init, regmask_arg);
+
+ // pass7:
// - find flag set ops for their users
- // - do unreselved calls
+ // - do unresolved calls
// - declare indirect functions
for (i = 0; i < opcnt; i++)
{
if (po->flags & (OPF_RMD|OPF_DONE))
continue;
- if (po->op == OP_PUSH && (po->flags & OPF_RSAVE)) {
- reg = po->operand[0].reg;
- if (!(regmask & (1 << reg)))
- // not a reg save after all, rerun scan_for_pop
- po->flags &= ~OPF_RSAVE;
- else
- regmask_save |= 1 << reg;
- }
-
- if (po->op == OP_PUSH && !(po->flags & OPF_FARG)
- && !(po->flags & OPF_RSAVE) && !g_func_pp->is_userstack)
- {
- if (po->operand[0].type == OPT_REG)
- {
- reg = po->operand[0].reg;
- if (reg < 0)
- ferr(po, "reg not set for push?\n");
-
- depth = 0;
- ret = scan_for_pop(i + 1, opcnt,
- po->operand[0].name, i + opcnt * 3, 0, &depth, 0);
- if (ret == 1) {
- if (depth > 1)
- ferr(po, "too much depth: %d\n", depth);
-
- po->flags |= OPF_RMD;
- scan_for_pop(i + 1, opcnt, po->operand[0].name,
- i + opcnt * 4, 0, &depth, 1);
- continue;
- }
- ret = scan_for_pop_ret(i + 1, opcnt, po->operand[0].name, 0);
- if (ret == 0) {
- arg = OPF_RMD;
- if (regmask & (1 << reg)) {
- if (regmask_save & (1 << reg))
- ferr(po, "%s already saved?\n", po->operand[0].name);
- arg = OPF_RSAVE;
- }
- po->flags |= arg;
- scan_for_pop_ret(i + 1, opcnt, po->operand[0].name, arg);
- continue;
- }
- }
- }
-
- if (po->op == OP_STD) {
- po->flags |= OPF_DF | OPF_RMD | OPF_DONE;
- scan_propagate_df(i + 1, opcnt);
- }
-
- regmask_now = po->regmask_src | po->regmask_dst;
- if (regmask_now & (1 << xBP)) {
- if (g_bp_frame && !(po->flags & OPF_EBP_S)) {
- if (po->regmask_dst & (1 << xBP))
- // compiler decided to drop bp frame and use ebp as scratch
- scan_fwd_set_flags(i + 1, opcnt, i + opcnt * 5, OPF_EBP_S);
- else
- regmask_now &= ~(1 << xBP);
- }
- }
-
- regmask |= regmask_now;
-
if (po->flags & OPF_CC)
{
int setters[16], cnt = 0, branched = 0;
else if (po->op == OP_CALL) {
// note: resolved non-reg calls are OPF_DONE already
pp = po->pp;
- if (pp == NULL)
- ferr(po, "NULL pp\n");
+ ferr_assert(po, pp != NULL);
if (pp->is_unresolved) {
int regmask_stack = 0;
if (pp->argc_stack > 0)
pp->is_stdcall = 1;
}
-
- for (arg = 0; arg < pp->argc; arg++) {
- if (pp->arg[arg].reg != NULL) {
- reg = char_array_i(regs_r32,
- ARRAY_SIZE(regs_r32), pp->arg[arg].reg);
- if (reg < 0)
- ferr(ops, "arg '%s' is not a reg?\n", pp->arg[arg].reg);
- if (!(regmask & (1 << reg))) {
- regmask_init |= 1 << reg;
- regmask |= 1 << reg;
- }
- }
- }
}
else if (po->op == OP_MOV && po->operand[0].pp != NULL
&& po->operand[1].pp != NULL)
}
}
}
- else if (po->op == OP_RET && !IS(g_func_pp->ret_type.name, "void"))
- regmask |= 1 << xAX;
else if (po->op == OP_DIV || po->op == OP_IDIV) {
// 32bit division is common, look for it
if (po->op == OP_DIV)
}
}
- // pass7:
- // - confirm regmask_save, it might have been reduced
- if (regmask_save != 0)
- {
- regmask_save = 0;
- for (i = 0; i < opcnt; i++) {
- po = &ops[i];
- if (po->flags & OPF_RMD)
- continue;
-
- if (po->op == OP_PUSH && (po->flags & OPF_RSAVE))
- regmask_save |= 1 << po->operand[0].reg;
- }
- }
-
// output starts here
// define userstack size
pfomask = po->pfomask;
if (po->flags & (OPF_REPZ|OPF_REPNZ)) {
- struct parsed_opr opr = {0,};
- opr.type = OPT_REG;
- opr.reg = xCX;
- opr.lmod = OPLM_DWORD;
+ struct parsed_opr opr = OPR_INIT(OPT_REG, OPLM_DWORD, xCX);
ret = try_resolve_const(i, &opr, opcnt * 7 + i, &uval);
if (ret != 1 || uval == 0) {
fprintf(fout, " %s = ~%s;", buf1, buf1);
break;
+ case OP_XLAT:
+ assert_operand_cnt(2);
+ out_dst_opr(buf1, sizeof(buf1), po, &po->operand[0]);
+ out_src_opr_u32(buf2, sizeof(buf2), po, &po->operand[1]);
+ fprintf(fout, " %s = *(u8 *)(%s + %s);", buf1, buf2, buf1);
+ strcpy(g_comment, "xlat");
+ break;
+
case OP_CDQ:
assert_operand_cnt(2);
fprintf(fout, " %s = (s32)%s >> 31;",
// arithmetic w/flags
case OP_AND:
- if (po->operand[1].type == OPT_CONST && !po->operand[1].val) {
- // deal with complex dst clear
- assert_operand_cnt(2);
- fprintf(fout, " %s = %s;",
- out_dst_opr(buf1, sizeof(buf1), po, &po->operand[0]),
- out_src_opr(buf2, sizeof(buf2), po, &po->operand[1],
- default_cast_to(buf3, sizeof(buf3), &po->operand[0]), 0));
- output_std_flags(fout, po, &pfomask, buf1);
- last_arith_dst = &po->operand[0];
- delayed_flag_op = NULL;
- break;
- }
- // fallthrough
+ if (po->operand[1].type == OPT_CONST && !po->operand[1].val)
+ goto dualop_arith_const;
+ propagate_lmod(po, &po->operand[0], &po->operand[1]);
+ goto dualop_arith;
+
case OP_OR:
propagate_lmod(po, &po->operand[0], &po->operand[1]);
- // fallthrough
+ if (po->operand[1].type == OPT_CONST) {
+ j = lmod_bytes(po, po->operand[0].lmod);
+ if (((1ull << j * 8) - 1) == po->operand[1].val)
+ goto dualop_arith_const;
+ }
+ goto dualop_arith;
+
dualop_arith:
assert_operand_cnt(2);
fprintf(fout, " %s %s= %s;",
delayed_flag_op = NULL;
break;
+ dualop_arith_const:
+ // and 0, or ~0 used instead of mov
+ assert_operand_cnt(2);
+ fprintf(fout, " %s = %s;",
+ out_dst_opr(buf1, sizeof(buf1), po, &po->operand[0]),
+ out_src_opr(buf2, sizeof(buf2), po, &po->operand[1],
+ default_cast_to(buf3, sizeof(buf3), &po->operand[0]), 0));
+ output_std_flags(fout, po, &pfomask, buf1);
+ last_arith_dst = &po->operand[0];
+ delayed_flag_op = NULL;
+ break;
+
case OP_SHL:
case OP_SHR:
assert_operand_cnt(2);
ferr(po, "TODO\n");
pfomask &= ~(1 << PFO_C);
}
- fprintf(fout, " %s %s= %s;", buf1, op_to_c(po),
+ fprintf(fout, " %s %s= %s", buf1, op_to_c(po),
out_src_opr_u32(buf2, sizeof(buf2), po, &po->operand[1]));
+ if (po->operand[1].type != OPT_CONST)
+ fprintf(fout, " & 0x1f");
+ fprintf(fout, ";");
output_std_flags(fout, po, &pfomask, buf1);
last_arith_dst = &po->operand[0];
delayed_flag_op = NULL;
delayed_flag_op = NULL;
break;
+ case OP_SHLD:
case OP_SHRD:
assert_operand_cnt(3);
propagate_lmod(po, &po->operand[0], &po->operand[1]);
out_dst_opr(buf1, sizeof(buf1), po, &po->operand[0]);
out_src_opr_u32(buf2, sizeof(buf2), po, &po->operand[1]);
out_src_opr_u32(buf3, sizeof(buf3), po, &po->operand[2]);
- fprintf(fout, " %s >>= %s; %s |= %s << (%d - %s);",
- buf1, buf3, buf1, buf2, l, buf3);
- strcpy(g_comment, "shrd");
+ if (po->operand[2].type != OPT_CONST)
+ ferr(po, "TODO: masking\n");
+ if (po->op == OP_SHLD) {
+ fprintf(fout, " %s <<= %s; %s |= %s >> (%d - %s);",
+ buf1, buf3, buf1, buf2, l, buf3);
+ strcpy(g_comment, "shld");
+ }
+ else {
+ fprintf(fout, " %s >>= %s; %s |= %s << (%d - %s);",
+ buf1, buf3, buf1, buf2, l, buf3);
+ strcpy(g_comment, "shrd");
+ }
output_std_flags(fout, po, &pfomask, buf1);
last_arith_dst = &po->operand[0];
delayed_flag_op = NULL;
strcat(g_comment, "jecxz");
break;
+ case OP_LOOP: /* x86 LOOP: dec ecx, then branch if ecx is non-zero */
+ fprintf(fout, " if (--ecx != 0)\n");
+ fprintf(fout, " goto %s;", po->operand[0].name);
+ strcat(g_comment, "loop");
+ break;
+
case OP_JMP:
assert_operand_cnt(1);
last_arith_dst = NULL;
}
else if (!IS(pp->ret_type.name, "void")) {
if (po->flags & OPF_TAIL) {
- if (!IS(g_func_pp->ret_type.name, "void")) {
+ if (regmask_ret & (1 << xAX)) {
fprintf(fout, "return ");
if (g_func_pp->ret_type.is_ptr != pp->ret_type.is_ptr)
fprintf(fout, "(%s)", g_func_pp->ret_type.name);
}
}
- else if (regmask & (1 << xAX)) {
+ else if (po->regmask_dst & (1 << xAX)) {
fprintf(fout, "eax = ");
if (pp->ret_type.is_ptr)
fprintf(fout, "(u32)");
ret = 0;
else if (IS(pp->ret_type.name, "void"))
ret = 1;
- else if (IS(g_func_pp->ret_type.name, "void"))
+ else if (!(regmask_ret & (1 << xAX)))
ret = 1;
// else already handled as 'return f()'
if (ret) {
- if (!IS(g_func_pp->ret_type.name, "void")) {
+ if (regmask_ret & (1 << xAX)) {
ferr(po, "int func -> void func tailcall?\n");
}
else {
g_func_pp->arg[arg].reg, g_func_pp->arg[arg].reg);
}
- if (IS(g_func_pp->ret_type.name, "void")) {
+ if (!(regmask_ret & (1 << xAX))) {
if (i != opcnt - 1 || label_pending)
fprintf(fout, " return;");
}
break;
}
else if (po->flags & OPF_PPUSH) {
- // push/pop graph
+ // push/pop graph / non-const
ferr_assert(po, po->datap == NULL);
fprintf(fout, " %s = pp_%s;", buf1, buf1);
break;
struct func_proto_dep *dep;
struct parsed_op *po;
int from_caller = 0;
- int depth;
int j, l;
int reg;
int ret;
po = &ops[i];
if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
+ if (po->flags & OPF_RMD)
+ continue;
+
if (po->btj != NULL) {
// jumptable
for (j = 0; j < po->btj->count; j++) {
else if (po->op == OP_PUSH && po->operand[0].type == OPT_REG)
{
reg = po->operand[0].reg;
- if (reg < 0)
- ferr(po, "reg not set for push?\n");
+ ferr_assert(po, reg >= 0);
if (po->flags & OPF_RSAVE) {
regmask_save |= 1 << reg;
if (po->flags & OPF_DONE)
continue;
- depth = 0;
- ret = scan_for_pop(i + 1, opcnt,
- po->operand[0].name, i + opcnt * 2, 0, &depth, 0);
+ ret = scan_for_pop(i + 1, opcnt, i + opcnt * 2, reg, 0, 0);
if (ret == 1) {
regmask_save |= 1 << reg;
po->flags |= OPF_RMD;
- scan_for_pop(i + 1, opcnt,
- po->operand[0].name, i + opcnt * 3, 0, &depth, 1);
+ scan_for_pop(i + 1, opcnt, i + opcnt * 3, reg, 0, OPF_RMD);
continue;
}
}
ret = 1;
}
else {
- struct parsed_opr opr = { 0, };
- opr.type = OPT_REG;
- opr.reg = xAX;
+ struct parsed_opr opr = OPR_INIT(OPT_REG, OPLM_DWORD, xAX);
j = -1;
from_caller = 0;
ret = resolve_origin(i, &opr, i + opcnt * 4, &j, &from_caller);
continue;
if (po->op == OP_PUSH && po->operand[0].type == OPT_CONST)
- scan_for_pop_const(i, opcnt, ®mask_dummy);
+ scan_for_pop_const(i, opcnt, i + opcnt * 13);
}
// pass5:
patch_esp_adjust(&ops[j], pp->argc_stack * 4);
else {
for (l = 0; l < pp->argc_stack; l++)
- ops[j + l].flags |= OPF_DONE | OPF_RMD;
+ ops[j + l].flags |= OPF_DONE | OPF_RMD | OPF_NOREGS;
}
}
if (po->flags & (OPF_RMD|OPF_DONE))
continue;
- if (po->op == OP_PUSH && po->operand[0].type == OPT_REG)
+ if (po->op == OP_PUSH && po->operand[0].type == OPT_REG
+ && po->operand[0].reg != xCX)
{
- ret = scan_for_pop_ret(i + 1, opcnt, po->operand[0].name, 0);
- if (ret == 0) {
+ ret = scan_for_pop_ret(i + 1, opcnt, po->operand[0].reg, 0);
+ if (ret == 1) {
// regmask_save |= 1 << po->operand[0].reg; // do it later
po->flags |= OPF_RSAVE | OPF_RMD | OPF_DONE;
- scan_for_pop_ret(i + 1, opcnt, po->operand[0].name, OPF_RMD);
+ scan_for_pop_ret(i + 1, opcnt, po->operand[0].reg, OPF_RMD);
}
}
- else if (po->op == OP_CALL && !(po->flags & OPF_DONE))
+ else if (po->op == OP_CALL)
{
pp = process_call(i, opcnt);
if (cbits[i >> 3] & (1 << (i & 7)))
continue;
+ if (g_labels[i] == NULL && i > 0 && ops[i - 1].op == OP_CALL
+ && ops[i - 1].pp != NULL && ops[i - 1].pp->is_osinc)
+ {
+ // the compiler sometimes still generates code after
+ // noreturn OS functions
+ break;
+ }
if (ops[i].op != OP_NOP)
ferr(&ops[i], "unreachable code\n");
}
fp->pp = &pp_cache[i];
fp->argc_stack = fp->pp->argc_stack;
fp->is_stdcall = fp->pp->is_stdcall;
- fp->regmask_dep = get_pp_arg_regmask(fp->pp);
+ fp->regmask_dep = get_pp_arg_regmask_src(fp->pp);
fp->has_ret = !IS(fp->pp->ret_type.name, "void");
}
unsigned long addr = strtoul(p, NULL, 16);
unsigned long f_addr = strtoul(g_func + 4, NULL, 16);
if (addr > f_addr && !scanned_ahead) {
- anote("scan_ahead caused by '%s', addr %lx\n",
- g_func, addr);
+ //anote("scan_ahead caused by '%s', addr %lx\n",
+ // g_func, addr);
scan_ahead(fasm);
scanned_ahead = 1;
func_chunks_sorted = 0;