// Resolve a data reference 'name' against the parsed header protos.
// Aborts via ferr() if the name has no prototype; function protos get
// an extra sanity check. Returns the canonical name from the proto.
static const char *check_label_read_ref(struct parsed_op *po,
  const char *name)
{
  const struct parsed_proto *pp;

  pp = proto_parse(g_fhdr, name, 0);
  if (pp == NULL)
    ferr(po, "proto_parse failed for ref '%s'\n", name);

  if (pp->is_func)
    check_func_pp(po, pp, "ref");

  return pp->name;
}
+
// Render source operand 'popr' of op 'po' as a C expression into buf.
// 'cast' (NULL treated as "") is prepended and simplified against the
// cast implied by the operand's lmod; 'is_lea' requests the address of
// the operand instead of its value. Returns buf.
static char *out_src_opr(char *buf, size_t buf_size,
  struct parsed_op *po, struct parsed_opr *popr, const char *cast,
  int is_lea)
{
  char tmp1[256], tmp2[256];
  char expr[256];
  const char *name;
  char *p;
  int ret;

  if (cast == NULL)
    cast = "";

  switch (popr->type) {
  case OPT_REG:
    if (is_lea)
      ferr(po, "lea from reg?\n");

    switch (popr->lmod) {
    case OPLM_QWORD:
      snprintf(buf, buf_size, "%s%s.q", cast, opr_reg_p(po, popr));
      break;
    case OPLM_DWORD:
      snprintf(buf, buf_size, "%s%s", cast, opr_reg_p(po, popr));
      break;
    case OPLM_WORD:
      snprintf(buf, buf_size, "%s%s",
        simplify_cast(cast, "(u16)"), opr_reg_p(po, popr));
      break;
    case OPLM_BYTE:
      // high byte regs (ah/bh/..) need a shift to extract
      if (popr->name[1] == 'h') // XXX..
        snprintf(buf, buf_size, "%s(%s >> 8)",
          simplify_cast(cast, "(u8)"), opr_reg_p(po, popr));
      else
        snprintf(buf, buf_size, "%s%s",
          simplify_cast(cast, "(u8)"), opr_reg_p(po, popr));
      break;
    default:
      ferr(po, "invalid src lmod: %d\n", popr->lmod);
    }
    break;

  case OPT_REGMEM:
    // ebp/esp-relative accesses are handled by the stack frame code
    if (is_stack_access(po, popr)) {
      stack_frame_access(po, popr, buf, buf_size,
        popr->name, cast, 1, is_lea);
      break;
    }

    strcpy(expr, popr->name);
    if (strchr(expr, '[')) {
      // special case: '[' can only be left for label[reg] form
      ret = sscanf(expr, "%[^[][%[^]]]", tmp1, tmp2);
      if (ret != 2)
        ferr(po, "parse failure for '%s'\n", expr);
      if (tmp1[0] == '(') {
        // (off_4FFF50+3)[eax]
        p = strchr(tmp1 + 1, ')');
        if (p == NULL || p[1] != 0)
          ferr(po, "parse failure (2) for '%s'\n", expr);
        *p = 0;
        // strip the surrounding parens
        memmove(tmp1, tmp1 + 1, strlen(tmp1));
      }
      snprintf(expr, sizeof(expr), "(u32)&%s + %s", tmp1, tmp2);
    }

    // XXX: do we need more parsing?
    if (is_lea) {
      snprintf(buf, buf_size, "%s", expr);
      break;
    }

    snprintf(buf, buf_size, "%s(%s)",
      simplify_cast(cast, lmod_cast_u_ptr(po, popr->lmod)), expr);
    break;

  case OPT_LABEL:
    name = check_label_read_ref(po, popr->name);
    if (cast[0] == 0 && popr->is_ptr)
      cast = "(u32)";

    if (is_lea)
      snprintf(buf, buf_size, "(u32)&%s", name);
    else if (popr->size_lt)
      // access is narrower than the var - reinterpret through a cast
      snprintf(buf, buf_size, "%s%s%s%s", cast,
        lmod_cast_u_ptr(po, popr->lmod),
        popr->is_array ? "" : "&", name);
    else
      snprintf(buf, buf_size, "%s%s%s", cast, name,
        popr->is_array ? "[0]" : "");
    break;

  case OPT_OFFSET:
    name = check_label_read_ref(po, popr->name);
    if (cast[0] == 0)
      cast = "(u32)";
    if (is_lea)
      ferr(po, "lea an offset?\n");
    snprintf(buf, buf_size, "%s&%s", cast, name);
    break;

  case OPT_CONST:
    if (is_lea)
      ferr(po, "lea from const?\n");

    printf_number(tmp1, sizeof(tmp1), popr->val);
    // 0 cast to a pointer type reads better as NULL
    if (popr->val == 0 && strchr(cast, '*'))
      snprintf(buf, buf_size, "NULL");
    else
      snprintf(buf, buf_size, "%s%s",
        simplify_cast_num(cast, popr->val), tmp1);
    break;

  default:
    ferr(po, "invalid src type: %d\n", popr->type);
  }

  return buf;
}
+
// Render destination operand 'popr' of 'po' as an lvalue C expression
// into buf. Returns buf.
// note: may set is_ptr (we find that out late for ebp frame..)
static char *out_dst_opr(char *buf, size_t buf_size,
  struct parsed_op *po, struct parsed_opr *popr)
{
  switch (popr->type) {
  case OPT_REG:
    switch (popr->lmod) {
    case OPLM_QWORD:
      snprintf(buf, buf_size, "%s.q", opr_reg_p(po, popr));
      break;
    case OPLM_DWORD:
      snprintf(buf, buf_size, "%s", opr_reg_p(po, popr));
      break;
    case OPLM_WORD:
      // ugh.. (partial register write via helper macro)
      snprintf(buf, buf_size, "LOWORD(%s)", opr_reg_p(po, popr));
      break;
    case OPLM_BYTE:
      // ugh..
      if (popr->name[1] == 'h') // XXX..
        snprintf(buf, buf_size, "BYTE1(%s)", opr_reg_p(po, popr));
      else
        snprintf(buf, buf_size, "LOBYTE(%s)", opr_reg_p(po, popr));
      break;
    default:
      ferr(po, "invalid dst lmod: %d\n", popr->lmod);
    }
    break;

  case OPT_REGMEM:
    if (is_stack_access(po, popr)) {
      stack_frame_access(po, popr, buf, buf_size,
        popr->name, "", 0, 0);
      break;
    }

    // non-stack memory dst renders the same as a src operand
    return out_src_opr(buf, buf_size, po, popr, NULL, 0);

  case OPT_LABEL:
    if (popr->size_mismatch)
      // access width differs from the var's declared width
      snprintf(buf, buf_size, "%s%s%s",
        lmod_cast_u_ptr(po, popr->lmod),
        popr->is_array ? "" : "&", popr->name);
    else
      snprintf(buf, buf_size, "%s%s", popr->name,
        popr->is_array ? "[0]" : "");
    break;

  default:
    ferr(po, "invalid dst type: %d\n", popr->type);
  }

  return buf;
}
+
// convenience wrapper: render src operand with default (u32) handling
static char *out_src_opr_u32(char *buf, size_t buf_size,
  struct parsed_op *po, struct parsed_opr *popr)
{
  return out_src_opr(buf, buf_size, po, popr, NULL, 0);
}
+
// Render a floating point source operand: x87 stack regs become the
// f_stN locals, memory operands are dereferenced through a float/double
// pointer chosen by lmod. Returns buf.
static char *out_src_opr_float(char *buf, size_t buf_size,
  struct parsed_op *po, struct parsed_opr *popr)
{
  const char *cast = NULL;
  char tmp[256];

  switch (popr->type) {
  case OPT_REG:
    if (popr->reg < xST0 || popr->reg > xST7)
      ferr(po, "bad reg: %d\n", popr->reg);

    snprintf(buf, buf_size, "f_st%d", popr->reg - xST0);
    break;

  case OPT_REGMEM:
  case OPT_LABEL:
  case OPT_OFFSET:
    switch (popr->lmod) {
    case OPLM_QWORD:
      cast = "double";
      break;
    case OPLM_DWORD:
      cast = "float";
      break;
    default:
      ferr(po, "unhandled lmod: %d\n", popr->lmod);
      break;
    }
    // take the address (is_lea=1), then reinterpret as fp type
    out_src_opr(tmp, sizeof(tmp), po, popr, "", 1);
    snprintf(buf, buf_size, "*((%s *)%s)", cast, tmp);
    break;

  default:
    ferr(po, "invalid float type: %d\n", popr->type);
  }

  return buf;
}
+
// fp destination operands are expressed exactly like fp sources
static char *out_dst_opr_float(char *buf, size_t buf_size,
  struct parsed_op *po, struct parsed_opr *popr)
{
  // same?
  return out_src_opr_float(buf, buf_size, po, popr);
}
+
// Emit the C condition corresponding to flag-op 'pfo' (optionally
// inverted) after a flag-setting op whose result is 'expr'.
// Only the pfo cases derivable from a single result value are handled.
static void out_test_for_cc(char *buf, size_t buf_size,
  struct parsed_op *po, enum parsed_flag_op pfo, int is_inv,
  enum opr_lenmod lmod, const char *expr)
{
  const char *cast, *scast;

  cast = lmod_cast_u(po, lmod);
  scast = lmod_cast_s(po, lmod);

  switch (pfo) {
  case PFO_Z:
  case PFO_BE: // CF=1||ZF=1; CF=0
    snprintf(buf, buf_size, "(%s%s %s 0)",
      cast, expr, is_inv ? "!=" : "==");
    break;

  case PFO_S:
  case PFO_L: // SF!=OF; OF=0
    snprintf(buf, buf_size, "(%s%s %s 0)",
      scast, expr, is_inv ? ">=" : "<");
    break;

  case PFO_LE: // ZF=1||SF!=OF; OF=0
    snprintf(buf, buf_size, "(%s%s %s 0)",
      scast, expr, is_inv ? ">" : "<=");
    break;

  default:
    ferr(po, "%s: unhandled parsed_flag_op: %d\n", __func__, pfo);
  }
}
+
// Emit the C condition for flag-op 'pfo' following a cmp (or dec,
// treated as cmp with 1). Chooses signed/unsigned casts per pfo.
static void out_cmp_for_cc(char *buf, size_t buf_size,
  struct parsed_op *po, enum parsed_flag_op pfo, int is_inv)
{
  const char *cast, *scast, *cast_use;
  char buf1[256], buf2[256];
  enum opr_lenmod lmod;

  // operands must agree on width (dec has an implicit const operand)
  if (po->op != OP_DEC && po->operand[0].lmod != po->operand[1].lmod)
    ferr(po, "%s: lmod mismatch: %d %d\n", __func__,
      po->operand[0].lmod, po->operand[1].lmod);
  lmod = po->operand[0].lmod;

  cast = lmod_cast_u(po, lmod);
  scast = lmod_cast_s(po, lmod);

  switch (pfo) {
  case PFO_C:
  case PFO_Z:
  case PFO_BE: // !a
    cast_use = cast;
    break;

  case PFO_S:
  case PFO_L: // !ge
  case PFO_LE:
    cast_use = scast;
    break;

  default:
    ferr(po, "%s: unhandled parsed_flag_op: %d\n", __func__, pfo);
  }

  out_src_opr(buf1, sizeof(buf1), po, &po->operand[0], cast_use, 0);
  if (po->op == OP_DEC)
    snprintf(buf2, sizeof(buf2), "1");
  else
    out_src_opr(buf2, sizeof(buf2), po, &po->operand[1], cast_use, 0);

  switch (pfo) {
  case PFO_C:
    // note: must be unsigned compare
    snprintf(buf, buf_size, "(%s %s %s)",
      buf1, is_inv ? ">=" : "<", buf2);
    break;

  case PFO_Z:
    snprintf(buf, buf_size, "(%s %s %s)",
      buf1, is_inv ? "!=" : "==", buf2);
    break;

  case PFO_BE: // !a
    // note: must be unsigned compare
    snprintf(buf, buf_size, "(%s %s %s)",
      buf1, is_inv ? ">" : "<=", buf2);

    // annoying case: u8 > 0xff can never be true - emit constant 0
    if (is_inv && lmod == OPLM_BYTE
      && po->operand[1].type == OPT_CONST
      && po->operand[1].val == 0xff)
    {
      snprintf(g_comment, sizeof(g_comment), "if %s", buf);
      snprintf(buf, buf_size, "(0)");
    }
    break;

  // note: must be signed compare
  case PFO_S:
    snprintf(buf, buf_size, "(%s(%s - %s) %s 0)",
      scast, buf1, buf2, is_inv ? ">=" : "<");
    break;

  case PFO_L: // !ge
    snprintf(buf, buf_size, "(%s %s %s)",
      buf1, is_inv ? ">=" : "<", buf2);
    break;

  case PFO_LE: // !g
    snprintf(buf, buf_size, "(%s %s %s)",
      buf1, is_inv ? ">" : "<=", buf2);
    break;

  default:
    // unreachable - first switch already rejected other pfo values
    break;
  }
}
+
+static void out_cmp_test(char *buf, size_t buf_size,
+ struct parsed_op *po, enum parsed_flag_op pfo, int is_inv)
+{
+ char buf1[256], buf2[256], buf3[256];
+
+ if (po->op == OP_TEST) {
+ if (IS(opr_name(po, 0), opr_name(po, 1))) {
+ out_src_opr_u32(buf3, sizeof(buf3), po, &po->operand[0]);
+ }
+ else {
+ out_src_opr_u32(buf1, sizeof(buf1), po, &po->operand[0]);
+ out_src_opr_u32(buf2, sizeof(buf2), po, &po->operand[1]);
+ snprintf(buf3, sizeof(buf3), "(%s & %s)", buf1, buf2);
+ }
+ out_test_for_cc(buf, buf_size, po, pfo, is_inv,
+ po->operand[0].lmod, buf3);
+ }
+ else if (po->op == OP_CMP) {
+ out_cmp_for_cc(buf, buf_size, po, pfo, is_inv);
+ }
+ else
+ ferr(po, "%s: unhandled op: %d\n", __func__, po->op);
+}
+
// Make the two operands of 'po' agree on access width (lmod).
// An unspecified side inherits the other's lmod; on a real conflict the
// operand whose lmod came from a variable's declared type yields, and is
// marked size_mismatch (+ size_lt if it's the narrower one).
static void propagate_lmod(struct parsed_op *po, struct parsed_opr *popr1,
  struct parsed_opr *popr2)
{
  if (popr1->lmod == OPLM_UNSPEC && popr2->lmod == OPLM_UNSPEC)
    ferr(po, "missing lmod for both operands\n");

  if (popr1->lmod == OPLM_UNSPEC)
    popr1->lmod = popr2->lmod;
  else if (popr2->lmod == OPLM_UNSPEC)
    popr2->lmod = popr1->lmod;
  else if (popr1->lmod != popr2->lmod) {
    if (popr1->type_from_var) {
      popr1->size_mismatch = 1;
      if (popr1->lmod < popr2->lmod)
        popr1->size_lt = 1;
      popr1->lmod = popr2->lmod;
    }
    else if (popr2->type_from_var) {
      popr2->size_mismatch = 1;
      if (popr2->lmod < popr1->lmod)
        popr2->size_lt = 1;
      popr2->lmod = popr1->lmod;
    }
    else
      ferr(po, "conflicting lmods: %d vs %d\n",
        popr1->lmod, popr2->lmod);
  }
}
+
+static const char *op_to_c(struct parsed_op *po)
+{
+ switch (po->op)
+ {
+ case OP_ADD:
+ case OP_ADC:
+ return "+";
+ case OP_SUB:
+ case OP_SBB:
+ return "-";
+ case OP_AND:
+ return "&";
+ case OP_OR:
+ return "|";
+ case OP_XOR:
+ return "^";
+ case OP_SHL:
+ return "<<";
+ case OP_SHR:
+ return ">>";
+ case OP_MUL:
+ case OP_IMUL:
+ return "*";
+ default:
+ ferr(po, "op_to_c was supplied with %d\n", po->op);
+ }
+}
+
// last op in stream - unconditional branch or ret
#define LAST_OP(_i) ((ops[_i].flags & OPF_TAIL) \
  || ((ops[_i].flags & (OPF_JMP|OPF_CJMP|OPF_RMD)) == OPF_JMP \
      && ops[_i].op != OP_CALL))

// abort if an op index is invalid (unresolved branch target etc.)
// wrapped in do { } while (0) so it acts as a single statement and is
// safe in unbraced if/else bodies (the bare-if form was a dangling-else
// hazard)
#define check_i(po, i) \
  do { \
    if ((i) < 0) \
      ferr(po, "bad " #i ": %d\n", i); \
  } while (0)
+
+// note: this skips over calls and rm'd stuff assuming they're handled
+// so it's intended to use at one of final passes
+static int scan_for_pop(int i, int opcnt, int magic, int reg,
+ int depth, int flags_set)
+{
+ struct parsed_op *po;
+ int relevant;
+ int ret = 0;
+ int j;
+
+ for (; i < opcnt; i++) {
+ po = &ops[i];
+ if (po->cc_scratch == magic)
+ return ret; // already checked
+ po->cc_scratch = magic;
+
+ if (po->flags & OPF_TAIL) {
+ if (po->op == OP_CALL) {
+ if (po->pp != NULL && po->pp->is_noreturn)
+ // assume no stack cleanup for noreturn
+ return 1;
+ }
+ return -1; // deadend
+ }
+
+ if (po->flags & (OPF_RMD|OPF_DONE|OPF_FARG))
+ continue;
+
+ if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
+ if (po->btj != NULL) {
+ // jumptable
+ for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
+ ret |= scan_for_pop(po->btj->d[j].bt_i, opcnt, magic, reg,
+ depth, flags_set);
+ if (ret < 0)
+ return ret; // dead end
+ }
+ return ret;
+ }
+
+ check_i(po, po->bt_i);
+ if (po->flags & OPF_CJMP) {
+ ret |= scan_for_pop(po->bt_i, opcnt, magic, reg,
+ depth, flags_set);
+ if (ret < 0)
+ return ret; // dead end
+ }
+ else {
+ i = po->bt_i - 1;
+ }
+ continue;
+ }
+
+ relevant = 0;
+ if ((po->op == OP_POP || po->op == OP_PUSH)
+ && po->operand[0].type == OPT_REG && po->operand[0].reg == reg)
+ {
+ relevant = 1;
+ }
+
+ if (po->op == OP_PUSH) {
+ depth++;
+ }
+ else if (po->op == OP_POP) {
+ if (relevant && depth == 0) {
+ po->flags |= flags_set;
+ return 1;
+ }
+ depth--;
+ }
+ }
+
+ return -1;
+}
+
+// scan for 'reg' pop backwards starting from i
+// intended to use for register restore search, so other reg
+// references are considered an error
+static int scan_for_rsave_pop_reg(int i, int magic, int reg, int set_flags)
+{
+ struct parsed_op *po;
+ struct label_ref *lr;
+ int ret = 0;
+
+ ops[i].cc_scratch = magic;
+
+ while (1)
+ {
+ if (g_labels[i] != NULL) {
+ lr = &g_label_refs[i];
+ for (; lr != NULL; lr = lr->next) {
+ check_i(&ops[i], lr->i);
+ ret |= scan_for_rsave_pop_reg(lr->i, magic, reg, set_flags);
+ if (ret < 0)
+ return ret;
+ }
+ if (i > 0 && LAST_OP(i - 1))
+ return ret;
+ }
+
+ i--;
+ if (i < 0)
+ break;
+
+ if (ops[i].cc_scratch == magic)
+ return ret;
+ ops[i].cc_scratch = magic;
+
+ po = &ops[i];
+ if (po->op == OP_POP && po->operand[0].reg == reg) {
+ if (po->flags & (OPF_RMD|OPF_DONE))
+ return -1;
+
+ po->flags |= set_flags;
+ return 1;
+ }
+
+ // this also covers the case where we reach corresponding push
+ if ((po->regmask_dst | po->regmask_src) & (1 << reg))
+ return -1;
+ }
+
+ // nothing interesting on this path
+ return 0;
+}
+
// Collect (into exits[]) the indices of all OPF_TAIL ops reachable
// from op 'i', following jumps and jumptables; 'magic' guards against
// revisiting. Asserts if more than MAX_EXITS are found.
static void find_reachable_exits(int i, int opcnt, int magic,
  int *exits, int *exit_count)
{
  struct parsed_op *po;
  int j;

  for (; i < opcnt; i++)
  {
    po = &ops[i];
    if (po->cc_scratch == magic)
      return;
    po->cc_scratch = magic;

    if (po->flags & OPF_TAIL) {
      ferr_assert(po, *exit_count < MAX_EXITS);
      exits[*exit_count] = i;
      (*exit_count)++;
      return;
    }

    if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
      if (po->flags & OPF_RMD)
        continue;

      if (po->btj != NULL) {
        // jumptable: recurse into every target
        for (j = 0; j < po->btj->count; j++) {
          check_i(po, po->btj->d[j].bt_i);
          find_reachable_exits(po->btj->d[j].bt_i, opcnt, magic,
            exits, exit_count);
        }
        return;
      }

      check_i(po, po->bt_i);
      if (po->flags & OPF_CJMP)
        find_reachable_exits(po->bt_i, opcnt, magic, exits, exit_count);
      else
        i = po->bt_i - 1;
      continue;
    }
  }
}
+
+// scan for 'reg' pop backwards starting from exits (all paths)
+static int scan_for_pop_ret(int i, int opcnt, int reg, int set_flags)
+{
+ static int exits[MAX_EXITS];
+ static int exit_count;
+ int j, ret;
+
+ if (!set_flags) {
+ exit_count = 0;
+ find_reachable_exits(i, opcnt, i + opcnt * 15, exits,
+ &exit_count);
+ ferr_assert(&ops[i], exit_count > 0);
+ }
+
+ for (j = 0; j < exit_count; j++) {
+ ret = scan_for_rsave_pop_reg(exits[j], i + opcnt * 16 + set_flags,
+ reg, set_flags);
+ if (ret == -1)
+ return -1;
+ }
+
+ return 1;
+}
+
+// scan for one or more pop of push <const>
+static int scan_for_pop_const_r(int i, int opcnt, int magic,
+ int push_i, int is_probe)
+{
+ struct parsed_op *po;
+ struct label_ref *lr;
+ int ret = 0;
+ int j;
+
+ for (; i < opcnt; i++)
+ {
+ po = &ops[i];
+ if (po->cc_scratch == magic)
+ return ret; // already checked
+ po->cc_scratch = magic;
+
+ if (po->flags & OPF_JMP) {
+ if (po->flags & OPF_RMD)
+ continue;
+ if (po->op == OP_CALL)
+ return -1;
+
+ if (po->btj != NULL) {
+ for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
+ ret |= scan_for_pop_const_r(po->btj->d[j].bt_i, opcnt, magic,
+ push_i, is_probe);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+ }
+
+ check_i(po, po->bt_i);
+ if (po->flags & OPF_CJMP) {
+ ret |= scan_for_pop_const_r(po->bt_i, opcnt, magic, push_i,
+ is_probe);
+ if (ret < 0)
+ return ret;
+ }
+ else {
+ i = po->bt_i - 1;
+ }
+ continue;
+ }
+
+ if ((po->flags & (OPF_TAIL|OPF_RSAVE)) || po->op == OP_PUSH)
+ return -1;
+
+ if (g_labels[i] != NULL) {
+ // all refs must be visited
+ lr = &g_label_refs[i];
+ for (; lr != NULL; lr = lr->next) {
+ check_i(po, lr->i);
+ if (ops[lr->i].cc_scratch != magic)
+ return -1;
+ }
+ if (i > 0 && !LAST_OP(i - 1) && ops[i - 1].cc_scratch != magic)
+ return -1;
+ }
+
+ if (po->op == OP_POP)
+ {
+ if (po->flags & (OPF_RMD|OPF_DONE))
+ return -1;
+
+ if (!is_probe) {
+ po->flags |= OPF_DONE;
+ po->datap = &ops[push_i];
+ }
+ return 1;
+ }
+ }
+
+ return -1;
+}
+
+static void scan_for_pop_const(int i, int opcnt, int magic)
+{
+ int ret;
+
+ ret = scan_for_pop_const_r(i + 1, opcnt, magic, i, 1);
+ if (ret == 1) {
+ ops[i].flags |= OPF_RMD | OPF_DONE;
+ scan_for_pop_const_r(i + 1, opcnt, magic + 1, i, 0);
+ }
+}
+
+// check if all branch targets within a marked path are also marked
+// note: the path checked must not be empty or end with a branch
+static int check_path_branches(int opcnt, int magic)
+{
+ struct parsed_op *po;
+ int i, j;
+
+ for (i = 0; i < opcnt; i++) {
+ po = &ops[i];
+ if (po->cc_scratch != magic)
+ continue;
+
+ if (po->flags & OPF_JMP) {
+ if ((po->flags & OPF_RMD) || po->op == OP_CALL)
+ continue;
+
+ if (po->btj != NULL) {
+ for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
+ if (ops[po->btj->d[j].bt_i].cc_scratch != magic)
+ return 0;
+ }
+ }
+
+ check_i(po, po->bt_i);
+ if (ops[po->bt_i].cc_scratch != magic)
+ return 0;
+ if ((po->flags & OPF_CJMP) && ops[i + 1].cc_scratch != magic)
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+// scan for multiple pushes for given pop
+static int scan_pushes_for_pop_r(int i, int magic, int pop_i,
+ int is_probe)
+{
+ int reg = ops[pop_i].operand[0].reg;
+ struct parsed_op *po;
+ struct label_ref *lr;
+ int ret = 0;
+
+ ops[i].cc_scratch = magic;
+
+ while (1)
+ {
+ if (g_labels[i] != NULL) {
+ lr = &g_label_refs[i];
+ for (; lr != NULL; lr = lr->next) {
+ check_i(&ops[i], lr->i);
+ ret |= scan_pushes_for_pop_r(lr->i, magic, pop_i, is_probe);
+ if (ret < 0)
+ return ret;
+ }
+ if (i > 0 && LAST_OP(i - 1))
+ return ret;
+ }
+
+ i--;
+ if (i < 0)
+ break;
+
+ if (ops[i].cc_scratch == magic)
+ return ret;
+ ops[i].cc_scratch = magic;
+
+ po = &ops[i];
+ if (po->op == OP_CALL)
+ return -1;
+ if ((po->flags & (OPF_TAIL|OPF_RSAVE)) || po->op == OP_POP)
+ return -1;
+
+ if (po->op == OP_PUSH)
+ {
+ if (po->datap != NULL)
+ return -1;
+ if (po->operand[0].type == OPT_REG && po->operand[0].reg == reg)
+ // leave this case for reg save/restore handlers
+ return -1;
+
+ if (!is_probe) {
+ po->flags |= OPF_PPUSH | OPF_DONE;
+ po->datap = &ops[pop_i];
+ }
+ return 1;
+ }
+ }
+
+ return -1;
+}
+
+static void scan_pushes_for_pop(int i, int opcnt, int *regmask_pp)
+{
+ int magic = i + opcnt * 14;
+ int ret;
+
+ ret = scan_pushes_for_pop_r(i, magic, i, 1);
+ if (ret == 1) {
+ ret = check_path_branches(opcnt, magic);
+ if (ret == 1) {
+ ops[i].flags |= OPF_PPUSH | OPF_DONE;
+ *regmask_pp |= 1 << ops[i].operand[0].reg;
+ scan_pushes_for_pop_r(i, magic + 1, i, 0);
+ }
+ }
+}
+
// Propagate the "direction flag set" state (after std) forward from op
// 'i', marking every op OPF_DF until the matching cld is found (which
// is then removed). Aborts if DF could leak into a call or past a tail.
static void scan_propagate_df(int i, int opcnt)
{
  struct parsed_op *po = &ops[i];
  int j;

  for (; i < opcnt; i++) {
    po = &ops[i];
    if (po->flags & OPF_DF)
      return; // already resolved
    po->flags |= OPF_DF;

    if (po->op == OP_CALL)
      ferr(po, "call with DF set?\n");

    if (po->flags & OPF_JMP) {
      if (po->btj != NULL) {
        // jumptable
        for (j = 0; j < po->btj->count; j++) {
          check_i(po, po->btj->d[j].bt_i);
          scan_propagate_df(po->btj->d[j].bt_i, opcnt);
        }
        return;
      }

      if (po->flags & OPF_RMD)
        continue;
      check_i(po, po->bt_i);
      if (po->flags & OPF_CJMP)
        scan_propagate_df(po->bt_i, opcnt);
      else
        i = po->bt_i - 1;
      continue;
    }

    if (po->flags & OPF_TAIL)
      break;

    if (po->op == OP_CLD) {
      // found the clear; it's now redundant in the C output
      po->flags |= OPF_RMD | OPF_DONE;
      return;
    }
  }

  ferr(po, "missing DF clear?\n");
}
+
+// is operand 'opr' referenced by parsed_op 'po'?
+static int is_opr_referenced(const struct parsed_opr *opr,
+ const struct parsed_op *po)
+{
+ int i, mask;
+
+ if (opr->type == OPT_REG) {
+ mask = po->regmask_dst | po->regmask_src;
+ if (po->op == OP_CALL)
+ mask |= (1 << xAX) | (1 << xCX) | (1 << xDX);
+ if ((1 << opr->reg) & mask)
+ return 1;
+ else
+ return 0;
+ }
+
+ for (i = 0; i < po->operand_cnt; i++)
+ if (IS(po->operand[0].name, opr->name))
+ return 1;
+
+ return 0;
+}
+
+// is operand 'opr' read by parsed_op 'po'?
+static int is_opr_read(const struct parsed_opr *opr,
+ const struct parsed_op *po)
+{
+ if (opr->type == OPT_REG) {
+ if (po->regmask_src & (1 << opr->reg))
+ return 1;
+ else
+ return 0;
+ }
+
+ // yes I'm lazy
+ return 0;
+}
+
+// is operand 'opr' modified by parsed_op 'po'?
+static int is_opr_modified(const struct parsed_opr *opr,
+ const struct parsed_op *po)
+{
+ int mask;
+
+ if (opr->type == OPT_REG) {
+ if (po->op == OP_CALL) {
+ mask = po->regmask_dst;
+ mask |= (1 << xAX) | (1 << xCX) | (1 << xDX); // ?
+ if (mask & (1 << opr->reg))
+ return 1;
+ else
+ return 0;
+ }
+
+ if (po->regmask_dst & (1 << opr->reg))
+ return 1;
+ else
+ return 0;
+ }
+
+ return IS(po->operand[0].name, opr->name);
+}
+
// is any operand of parsed_op 'po_test' modified by parsed_op 'po'?
// Checks register overlap, call clobbers and name equality; 'c_mode'
// narrows the call-clobber set to what the decompiled C can touch.
static int is_any_opr_modified(const struct parsed_op *po_test,
  const struct parsed_op *po, int c_mode)
{
  int mask;
  int i;

  if ((po->flags & OPF_RMD) || !(po->flags & OPF_DATA))
    return 0;

  if (po_test->operand_cnt == 1 && po_test->operand[0].type == OPT_CONST)
    return 0;

  if ((po_test->regmask_src | po_test->regmask_dst) & po->regmask_dst)
    return 1;

  // in reality, it can wreck any register, but in decompiled C
  // version it can only overwrite eax or edx:eax
  mask = (1 << xAX) | (1 << xDX);
  if (!c_mode)
    mask |= 1 << xCX;

  if (po->op == OP_CALL
    && ((po_test->regmask_src | po_test->regmask_dst) & mask))
    return 1;

  // po only ever writes its operand[0]
  for (i = 0; i < po_test->operand_cnt; i++)
    if (IS(po_test->operand[i].name, po->operand[0].name))
      return 1;

  return 0;
}
+
+// scan for any po_test operand modification in range given
+static int scan_for_mod(struct parsed_op *po_test, int i, int opcnt,
+ int c_mode)
+{
+ if (po_test->operand_cnt == 1 && po_test->operand[0].type == OPT_CONST)
+ return -1;
+
+ for (; i < opcnt; i++) {
+ if (is_any_opr_modified(po_test, &ops[i], c_mode))
+ return i;
+ }
+
+ return -1;
+}
+
+// scan for po_test operand[0] modification in range given
+static int scan_for_mod_opr0(struct parsed_op *po_test,
+ int i, int opcnt)
+{
+ for (; i < opcnt; i++) {
+ if (is_opr_modified(&po_test->operand[0], &ops[i]))
+ return i;
+ }
+
+ return -1;
+}
+
+static int scan_for_flag_set(int i, int magic, int *branched,
+ int *setters, int *setter_cnt)
+{
+ struct label_ref *lr;
+ int ret;
+
+ while (i >= 0) {
+ if (ops[i].cc_scratch == magic) {
+ // is this a problem?
+ //ferr(&ops[i], "%s looped\n", __func__);
+ return 0;
+ }
+ ops[i].cc_scratch = magic;
+
+ if (g_labels[i] != NULL) {
+ *branched = 1;
+
+ lr = &g_label_refs[i];
+ for (; lr->next; lr = lr->next) {
+ check_i(&ops[i], lr->i);
+ ret = scan_for_flag_set(lr->i, magic,
+ branched, setters, setter_cnt);
+ if (ret < 0)
+ return ret;
+ }
+
+ check_i(&ops[i], lr->i);
+ if (i > 0 && LAST_OP(i - 1)) {
+ i = lr->i;
+ continue;
+ }
+ ret = scan_for_flag_set(lr->i, magic,
+ branched, setters, setter_cnt);
+ if (ret < 0)
+ return ret;
+ }
+ i--;
+
+ if (ops[i].flags & OPF_FLAGS) {
+ setters[*setter_cnt] = i;
+ (*setter_cnt)++;
+ return 0;
+ }
+
+ if ((ops[i].flags & (OPF_JMP|OPF_CJMP)) == OPF_JMP)
+ return -1;
+ }
+
+ return -1;
+}
+
+// scan back for cdq, if anything modifies edx, fail
+static int scan_for_cdq_edx(int i)
+{
+ while (i >= 0) {
+ if (g_labels[i] != NULL) {
+ if (g_label_refs[i].next != NULL)
+ return -1;
+ if (i > 0 && LAST_OP(i - 1)) {
+ i = g_label_refs[i].i;
+ continue;
+ }
+ return -1;
+ }
+ i--;
+
+ if (ops[i].op == OP_CDQ)
+ return i;
+
+ if (ops[i].regmask_dst & (1 << xDX))
+ return -1;
+ }
+
+ return -1;
+}
+
+static int scan_for_reg_clear(int i, int reg)
+{
+ while (i >= 0) {
+ if (g_labels[i] != NULL) {
+ if (g_label_refs[i].next != NULL)
+ return -1;
+ if (i > 0 && LAST_OP(i - 1)) {
+ i = g_label_refs[i].i;
+ continue;
+ }
+ return -1;
+ }
+ i--;
+
+ if (ops[i].op == OP_XOR
+ && ops[i].operand[0].lmod == OPLM_DWORD
+ && ops[i].operand[0].reg == ops[i].operand[1].reg
+ && ops[i].operand[0].reg == reg)
+ return i;
+
+ if (ops[i].regmask_dst & (1 << reg))
+ return -1;
+ }
+
+ return -1;
+}
+
// Subtract 'adj' from an 'add esp, const' op and mark it removed;
// fully consumed adjusts (val reaches 0) are also marked done.
static void patch_esp_adjust(struct parsed_op *po, int adj)
{
  ferr_assert(po, po->op == OP_ADD);
  ferr_assert(po, IS(opr_name(po, 0), "esp"));
  ferr_assert(po, po->operand[1].type == OPT_CONST);

  // this is a bit of a hack, but deals with use of
  // single adj for multiple calls
  po->operand[1].val -= adj;
  po->flags |= OPF_RMD;
  if (po->operand[1].val == 0)
    po->flags |= OPF_DONE;
  ferr_assert(po, (int)po->operand[1].val >= 0);
}
+
// scan for positive, constant esp adjust
// multipath case is preliminary
// Walk forward from op 'i' summing esp effects (push/pop/add esp,N)
// until 'adj_expect' bytes are accounted for (-1 = unknown amount).
// Reports the total in *adj, sets *is_multipath when a label is
// crossed, and with do_update patches/removes the adjusting ops.
// Returns the index of the adjusting op (or first 'pop ecx'), -1 on
// failure.
static int scan_for_esp_adjust(int i, int opcnt,
  int adj_expect, int *adj, int *is_multipath, int do_update)
{
  int adj_expect_unknown = 0;
  struct parsed_op *po;
  int first_pop = -1;
  int adj_best = 0;

  *adj = *is_multipath = 0;
  if (adj_expect < 0) {
    adj_expect_unknown = 1;
    adj_expect = 32 * 4; // enough?
  }

  for (; i < opcnt && *adj < adj_expect; i++) {
    if (g_labels[i] != NULL)
      *is_multipath = 1;

    po = &ops[i];
    if (po->flags & OPF_DONE)
      continue;

    if (po->op == OP_ADD && po->operand[0].reg == xSP) {
      if (po->operand[1].type != OPT_CONST)
        ferr(&ops[i], "non-const esp adjust?\n");
      *adj += po->operand[1].val;
      if (*adj & 3)
        ferr(&ops[i], "unaligned esp adjust: %x\n", *adj);
      if (do_update) {
        if (!*is_multipath)
          patch_esp_adjust(po, adj_expect);
        else
          po->flags |= OPF_RMD;
      }
      return i;
    }
    else if (po->op == OP_PUSH) {
      //if (first_pop == -1)
      //  first_pop = -2; // none
      *adj -= lmod_bytes(po, po->operand[0].lmod);
    }
    else if (po->op == OP_POP) {
      if (!(po->flags & OPF_DONE)) {
        // seems like msvc only uses 'pop ecx' for stack realignment..
        if (po->operand[0].type != OPT_REG || po->operand[0].reg != xCX)
          break;
        if (first_pop == -1 && *adj >= 0)
          first_pop = i;
      }
      if (do_update && *adj >= 0) {
        po->flags |= OPF_RMD;
        if (!*is_multipath)
          po->flags |= OPF_DONE | OPF_NOREGS;
      }

      *adj += lmod_bytes(po, po->operand[0].lmod);
      if (*adj > adj_best)
        adj_best = *adj;
    }
    else if (po->flags & (OPF_JMP|OPF_TAIL)) {
      if (po->op == OP_JMP && po->btj == NULL) {
        // follow forward jumps only
        if (po->bt_i <= i)
          break;
        i = po->bt_i - 1;
        continue;
      }
      if (po->op != OP_CALL)
        break;
      if (po->operand[0].type != OPT_LABEL)
        break;
      if (po->pp != NULL && po->pp->is_stdcall)
        break;
      if (adj_expect_unknown && first_pop >= 0)
        break;
      // assume it's another cdecl call
    }
  }

  if (first_pop >= 0) {
    // probably only 'pop ecx' was used
    *adj = adj_best;
    return first_pop;
  }

  return -1;
}
+
// OR 'flags' into every op reachable forward from 'i', following
// jumps and jumptables ('magic' guards against revisits).
static void scan_fwd_set_flags(int i, int opcnt, int magic, int flags)
{
  struct parsed_op *po;
  int j;

  if (i < 0)
    ferr(ops, "%s: followed bad branch?\n", __func__);

  for (; i < opcnt; i++) {
    po = &ops[i];
    if (po->cc_scratch == magic)
      return;
    po->cc_scratch = magic;
    po->flags |= flags;

    if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
      if (po->btj != NULL) {
        // jumptable
        for (j = 0; j < po->btj->count; j++)
          scan_fwd_set_flags(po->btj->d[j].bt_i, opcnt, magic, flags);
        return;
      }

      scan_fwd_set_flags(po->bt_i, opcnt, magic, flags);
      // unconditional jumps don't fall through
      if (!(po->flags & OPF_CJMP))
        return;
    }
    if (po->flags & OPF_TAIL)
      return;
  }
}
+
// Try to recover the function prototype behind an indirect call
// operand 'opr': a stack arg of the current function, a label[index]
// jumptable entry, or a plain label/offset. May set *search_instead
// (when non-NULL) to request a scan for the stack var's writer.
// Returns the proto or NULL.
static const struct parsed_proto *try_recover_pp(
  struct parsed_op *po, const struct parsed_opr *opr, int *search_instead)
{
  const struct parsed_proto *pp = NULL;
  char buf[256];
  char *p;

  // maybe an arg of g_func?
  if (opr->type == OPT_REGMEM && is_stack_access(po, opr))
  {
    char ofs_reg[16] = { 0, };
    int arg, arg_s, arg_i;
    int stack_ra = 0;
    int offset = 0;

    if (g_header_mode)
      return NULL;

    parse_stack_access(po, opr->name, ofs_reg,
      &offset, &stack_ra, NULL, 0);
    if (ofs_reg[0] != 0)
      ferr(po, "offset reg on arg access?\n");
    if (offset <= stack_ra) {
      // search who set the stack var instead
      if (search_instead != NULL)
        *search_instead = 1;
      return NULL;
    }

    // map the stack offset to the n-th non-register argument
    arg_i = (offset - stack_ra - 4) / 4;
    for (arg = arg_s = 0; arg < g_func_pp->argc; arg++) {
      if (g_func_pp->arg[arg].reg != NULL)
        continue;
      if (arg_s == arg_i)
        break;
      arg_s++;
    }
    if (arg == g_func_pp->argc)
      ferr(po, "stack arg %d not in prototype?\n", arg_i);

    pp = g_func_pp->arg[arg].fptr;
    if (pp == NULL)
      ferr(po, "icall sa: arg%d is not a fptr?\n", arg + 1);
    check_func_pp(po, pp, "icall arg");
  }
  else if (opr->type == OPT_REGMEM && strchr(opr->name + 1, '[')) {
    // label[index]
    p = strchr(opr->name + 1, '[');
    memcpy(buf, opr->name, p - opr->name);
    buf[p - opr->name] = 0;
    pp = proto_parse(g_fhdr, buf, g_quiet_pp);
  }
  else if (opr->type == OPT_OFFSET || opr->type == OPT_LABEL) {
    pp = proto_parse(g_fhdr, opr->name, g_quiet_pp);
    if (pp == NULL) {
      if (!g_header_mode)
        ferr(po, "proto_parse failed for icall to '%s'\n", opr->name);
    }
    else
      check_func_pp(po, pp, "reg-fptr ref");
  }

  return pp;
}
+
// Scan backwards from op 'i' to find where indirect-call operand 'opr'
// got its value, recovering its function prototype. Tracks the value
// through mov/lea register copies; if the scan reaches function entry,
// tries to match a register argument of g_func_pp. Results go to
// *pp_found / *pp_i; *multi is set when several (compatible) protos
// are found on different paths.
static void scan_for_call_type(int i, const struct parsed_opr *opr,
  int magic, const struct parsed_proto **pp_found, int *pp_i,
  int *multi)
{
  const struct parsed_proto *pp = NULL;
  struct parsed_op *po;
  struct label_ref *lr;

  ops[i].cc_scratch = magic;

  while (1) {
    if (g_labels[i] != NULL) {
      // fork the backwards scan into every jump targeting this label
      lr = &g_label_refs[i];
      for (; lr != NULL; lr = lr->next) {
        check_i(&ops[i], lr->i);
        scan_for_call_type(lr->i, opr, magic, pp_found, pp_i, multi);
      }
      if (i > 0 && LAST_OP(i - 1))
        return;
    }

    i--;
    if (i < 0)
      break;

    if (ops[i].cc_scratch == magic)
      return;
    ops[i].cc_scratch = magic;

    if (!(ops[i].flags & OPF_DATA))
      continue;
    if (!is_opr_modified(opr, &ops[i]))
      continue;
    if (ops[i].op != OP_MOV && ops[i].op != OP_LEA) {
      // most probably trashed by some processing
      *pp_found = NULL;
      return;
    }

    // follow the copy chain: now track the mov/lea source
    opr = &ops[i].operand[1];
    if (opr->type != OPT_REG)
      break;
  }

  po = (i >= 0) ? &ops[i] : ops;

  if (i < 0) {
    // reached the top - can only be an arg-reg
    if (opr->type != OPT_REG || g_func_pp == NULL)
      return;

    for (i = 0; i < g_func_pp->argc; i++) {
      if (g_func_pp->arg[i].reg == NULL)
        continue;
      if (IS(opr->name, g_func_pp->arg[i].reg))
        break;
    }
    if (i == g_func_pp->argc)
      return;
    pp = g_func_pp->arg[i].fptr;
    if (pp == NULL)
      ferr(po, "icall: arg%d (%s) is not a fptr?\n",
        i + 1, g_func_pp->arg[i].reg);
    check_func_pp(po, pp, "icall reg-arg");
  }
  else
    pp = try_recover_pp(po, opr, NULL);

  if (*pp_found != NULL && pp != NULL && *pp_found != pp) {
    // multiple protos found - they must be call-compatible
    if (!IS((*pp_found)->ret_type.name, pp->ret_type.name)
      || (*pp_found)->is_stdcall != pp->is_stdcall
      || (*pp_found)->is_fptr != pp->is_fptr
      || (*pp_found)->argc != pp->argc
      || (*pp_found)->argc_reg != pp->argc_reg
      || (*pp_found)->argc_stack != pp->argc_stack)
    {
      ferr(po, "icall: parsed_proto mismatch\n");
    }
    *multi = 1;
  }
  if (pp != NULL) {
    *pp_found = pp;
    *pp_i = po - ops;
  }
}
+
+static void add_label_ref(struct label_ref *lr, int op_i)
+{
+ struct label_ref *lr_new;
+
+ if (lr->i == -1) {
+ lr->i = op_i;
+ return;
+ }
+
+ lr_new = calloc(1, sizeof(*lr_new));
+ lr_new->i = op_i;
+ lr_new->next = lr->next;
+ lr->next = lr_new;
+}
+
// Resolve an indirect jump at op 'i' of the form label[index] to its
// parsed data table: finds the table by label, requires it to hold
// offsets, then links each table entry to its target op (bt_i) and
// registers op 'i' as a ref of each target label.
// Returns the table, or NULL if the operand isn't a jumptable.
static struct parsed_data *try_resolve_jumptab(int i, int opcnt)
{
  struct parsed_op *po = &ops[i];
  struct parsed_data *pd;
  char label[NAMELEN], *p;
  int len, j, l;

  p = strchr(po->operand[0].name, '[');
  if (p == NULL)
    return NULL;

  // extract the table label (part before '[')
  len = p - po->operand[0].name;
  strncpy(label, po->operand[0].name, len);
  label[len] = 0;

  for (j = 0, pd = NULL; j < g_func_pd_cnt; j++) {
    if (IS(g_func_pd[j].label, label)) {
      pd = &g_func_pd[j];
      break;
    }
  }
  if (pd == NULL)
    //ferr(po, "label '%s' not parsed?\n", label);
    return NULL;

  if (pd->type != OPT_OFFSET)
    ferr(po, "label '%s' with non-offset data?\n", label);

  // find all labels, link
  for (j = 0; j < pd->count; j++) {
    for (l = 0; l < opcnt; l++) {
      if (g_labels[l] != NULL && IS(g_labels[l], pd->d[j].u.label)) {
        add_label_ref(&g_label_refs[l], i);
        pd->d[j].bt_i = l;
        break;
      }
    }
  }

  return pd;
}
+
+// Free all recorded label strings for ops [0, count) and reset the
+// slots to NULL so the table can be reused for the next function.
+static void clear_labels(int count)
+{
+  int i;
+
+  for (i = 0; i < count; i++) {
+    // free(NULL) is a no-op, so the old NULL guard was redundant
+    free(g_labels[i]);
+    g_labels[i] = NULL;
+  }
+}
+
+// Build a bitmask of the 32-bit registers used to pass args to 'pp'.
+// Aborts via ferr() if a declared reg-arg name is not a known register.
+static int get_pp_arg_regmask_src(const struct parsed_proto *pp)
+{
+  int mask = 0;
+  int a, r;
+
+  for (a = 0; a < pp->argc; a++) {
+    if (pp->arg[a].reg == NULL)
+      continue; // stack arg - nothing to record
+
+    r = char_array_i(regs_r32, ARRAY_SIZE(regs_r32), pp->arg[a].reg);
+    if (r < 0)
+      ferr(ops, "arg '%s' of func '%s' is not a reg?\n",
+        pp->arg[a].reg, pp->name);
+    mask |= 1 << r;
+  }
+
+  return mask;
+}
+
+// Bitmask of the registers written by a call returning pp's ret type.
+static int get_pp_arg_regmask_dst(const struct parsed_proto *pp)
+{
+  const char *rt = pp->ret_type.name;
+
+  if (strcasecmp(rt, "void") == 0)
+    return 0;
+  if (strstr(rt, "int64") != NULL)
+    return (1 << xAX) | (1 << xDX); // 64bit result in edx:eax
+  if (IS(rt, "float") || IS(rt, "double"))
+    return mxST0; // FP result on top of the x87 stack
+
+  return mxAX; // everything else comes back in eax
+}
+
+// One linear pass over all ops that resolves control flow and protos:
+//  - converts asm-comment proto hints (po->datap strings) into pp
+//  - for direct calls: rejects calls into code labels, rewrites known
+//    runtime helpers (__ftol) into pseudo-ops, and looks the callee up
+//    in the header via proto_parse()
+//  - for jumps: links to the target op (bt_i) or jumptable (btj);
+//    anything unresolvable becomes a tail call and is reprocessed
+static void resolve_branches_parse_calls(int opcnt)
+{
+ static const struct {
+ const char *name;
+ enum op_op op;
+ unsigned int flags;
+ unsigned int regmask_src;
+ unsigned int regmask_dst;
+ } pseudo_ops[] = {
+ { "__ftol", OPP_FTOL, OPF_FPOP, mxST0, mxAX | mxDX },
+ };
+ const struct parsed_proto *pp_c;
+ struct parsed_proto *pp;
+ struct parsed_data *pd;
+ struct parsed_op *po;
+ const char *tmpname;
+ int i, l;
+ int ret;
+
+ for (i = 0; i < opcnt; i++)
+ {
+ po = &ops[i];
+ po->bt_i = -1;
+ po->btj = NULL;
+
+ // proto hint attached by the asm parser? parse and take ownership
+ if (po->datap != NULL) {
+ pp = calloc(1, sizeof(*pp));
+ my_assert_not(pp, NULL);
+
+ ret = parse_protostr(po->datap, pp);
+ if (ret < 0)
+ ferr(po, "bad protostr supplied: %s\n", (char *)po->datap);
+ free(po->datap);
+ po->datap = NULL;
+ po->pp = pp;
+ }
+
+ if (po->op == OP_CALL) {
+ pp = NULL;
+
+ if (po->pp != NULL)
+ pp = po->pp;
+ else if (po->operand[0].type == OPT_LABEL)
+ {
+ tmpname = opr_name(po, 0);
+ if (IS_START(tmpname, "loc_"))
+ ferr(po, "call to loc_*\n");
+
+ // convert some calls to pseudo-ops
+ for (l = 0; l < ARRAY_SIZE(pseudo_ops); l++) {
+ if (!IS(tmpname, pseudo_ops[l].name))
+ continue;
+
+ po->op = pseudo_ops[l].op;
+ po->operand_cnt = 0;
+ po->regmask_src = pseudo_ops[l].regmask_src;
+ po->regmask_dst = pseudo_ops[l].regmask_dst;
+ po->flags = pseudo_ops[l].flags;
+ po->flags |= po->regmask_dst ? OPF_DATA : 0;
+ break;
+ }
+ // matched a pseudo-op - no proto lookup needed
+ if (l < ARRAY_SIZE(pseudo_ops))
+ continue;
+
+ pp_c = proto_parse(g_fhdr, tmpname, g_header_mode);
+ if (!g_header_mode && pp_c == NULL)
+ ferr(po, "proto_parse failed for call '%s'\n", tmpname);
+
+ if (pp_c != NULL) {
+ pp = proto_clone(pp_c);
+ my_assert_not(pp, NULL);
+ }
+ }
+
+ if (pp != NULL) {
+ if (pp->is_fptr)
+ check_func_pp(po, pp, "fptr var call");
+ if (pp->is_noreturn)
+ po->flags |= OPF_TAIL;
+ }
+ po->pp = pp;
+ continue;
+ }
+
+ // below: only branch ops (calls and rets already handled/skipped)
+ if (!(po->flags & OPF_JMP) || po->op == OP_RET)
+ continue;
+
+ if (po->operand[0].type == OPT_REGMEM) {
+ pd = try_resolve_jumptab(i, opcnt);
+ if (pd == NULL)
+ goto tailcall;
+
+ po->btj = pd;
+ continue;
+ }
+
+ for (l = 0; l < opcnt; l++) {
+ if (g_labels[l] != NULL
+ && IS(po->operand[0].name, g_labels[l]))
+ {
+ if (l == i + 1 && po->op == OP_JMP) {
+ // yet another alignment type..
+ po->flags |= OPF_RMD|OPF_DONE;
+ break;
+ }
+ add_label_ref(&g_label_refs[l], i);
+ po->bt_i = l;
+ break;
+ }
+ }
+
+ if (po->bt_i != -1 || (po->flags & OPF_RMD))
+ continue;
+
+ if (po->operand[0].type == OPT_LABEL)
+ // assume tail call
+ goto tailcall;
+
+ ferr(po, "unhandled branch\n");
+
+tailcall:
+ po->op = OP_CALL;
+ po->flags |= OPF_TAIL;
+ // NOTE(review): pop right before the jump presumably means the
+ // stack was already adjusted - OPF_ATAIL marks that; confirm
+ if (i > 0 && ops[i - 1].op == OP_POP)
+ po->flags |= OPF_ATAIL;
+ i--; // reprocess
+ }
+}
+
+// Detect the function prologue (classic ebp frame, 'push ecx' local
+// slots, 'sub esp, N', or the __alloca_probe pattern) and every
+// matching epilogue, marking all of them removed
+// (OPF_RMD|OPF_DONE|OPF_NOREGS). Records the frame size in
+// g_stack_fsz and sets g_bp_frame / g_sp_frame accordingly.
+static void scan_prologue_epilogue(int opcnt)
+{
+ int ecx_push = 0, esp_sub = 0;
+ int found;
+ int i, j, l;
+
+ // classic "push ebp; mov ebp, esp" frame?
+ if (ops[0].op == OP_PUSH && IS(opr_name(&ops[0], 0), "ebp")
+ && ops[1].op == OP_MOV
+ && IS(opr_name(&ops[1], 0), "ebp")
+ && IS(opr_name(&ops[1], 1), "esp"))
+ {
+ g_bp_frame = 1;
+ ops[0].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ ops[1].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ i = 2;
+
+ if (ops[2].op == OP_SUB && IS(opr_name(&ops[2], 0), "esp")) {
+ g_stack_fsz = opr_const(&ops[2], 1);
+ ops[2].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ i++;
+ }
+ else {
+ // another way msvc builds stack frame..
+ i = 2;
+ while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
+ g_stack_fsz += 4;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ ecx_push++;
+ i++;
+ }
+ // and another way..
+ if (i == 2 && ops[i].op == OP_MOV && ops[i].operand[0].reg == xAX
+ && ops[i].operand[1].type == OPT_CONST
+ && ops[i + 1].op == OP_CALL
+ && IS(opr_name(&ops[i + 1], 0), "__alloca_probe"))
+ {
+ // frame size passed to the probe helper in eax
+ g_stack_fsz += ops[i].operand[1].val;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ i++;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ i++;
+ }
+ }
+
+ // locate every function exit (OPF_TAIL) and strip its epilogue
+ found = 0;
+ do {
+ for (; i < opcnt; i++)
+ if (ops[i].flags & OPF_TAIL)
+ break;
+ j = i - 1;
+ if (i == opcnt && (ops[j].flags & OPF_JMP)) {
+ // resolved jump at the end: no epilogue to find here
+ if (ops[j].bt_i != -1 || ops[j].btj != NULL)
+ break;
+ i--;
+ j--;
+ }
+
+ if ((ops[j].op == OP_POP && IS(opr_name(&ops[j], 0), "ebp"))
+ || ops[j].op == OP_LEAVE)
+ {
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ }
+ else if (ops[i].op == OP_CALL && ops[i].pp != NULL
+ && ops[i].pp->is_noreturn)
+ {
+ // on noreturn, msvc sometimes cleans stack, sometimes not
+ i++;
+ found = 1;
+ continue;
+ }
+ else if (!(g_ida_func_attr & IDAFA_NORETURN))
+ ferr(&ops[j], "'pop ebp' expected\n");
+
+ if (g_stack_fsz != 0) {
+ // expect the esp restore right before the ebp pop
+ if (ops[j].op == OP_LEAVE)
+ j--;
+ else if (ops[j].op == OP_POP
+ && ops[j - 1].op == OP_MOV
+ && IS(opr_name(&ops[j - 1], 0), "esp")
+ && IS(opr_name(&ops[j - 1], 1), "ebp"))
+ {
+ ops[j - 1].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ j -= 2;
+ }
+ else if (!(g_ida_func_attr & IDAFA_NORETURN))
+ {
+ ferr(&ops[j], "esp restore expected\n");
+ }
+
+ // NOTE(review): ecx pushed as locals should not be popped back
+ if (ecx_push && j >= 0 && ops[j].op == OP_POP
+ && IS(opr_name(&ops[j], 0), "ecx"))
+ {
+ ferr(&ops[j], "unexpected ecx pop\n");
+ }
+ }
+
+ found = 1;
+ i++;
+ } while (i < opcnt);
+
+ if (!found)
+ ferr(ops, "missing ebp epilogue\n");
+ return;
+ }
+
+ // non-bp frame
+ i = 0;
+ while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ g_stack_fsz += 4;
+ ecx_push++;
+ i++;
+ }
+
+ for (; i < opcnt; i++) {
+ if (ops[i].op == OP_PUSH || (ops[i].flags & (OPF_JMP|OPF_TAIL)))
+ break;
+ if (ops[i].op == OP_SUB && ops[i].operand[0].reg == xSP
+ && ops[i].operand[1].type == OPT_CONST)
+ {
+ g_stack_fsz = ops[i].operand[1].val;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ esp_sub = 1;
+ break;
+ }
+ }
+
+ if (ecx_push && !esp_sub) {
+ // could actually be args for a call..
+ for (; i < opcnt; i++)
+ if (ops[i].op != OP_PUSH)
+ break;
+
+ if (ops[i].op == OP_CALL && ops[i].operand[0].type == OPT_LABEL) {
+ const struct parsed_proto *pp;
+ pp = proto_parse(g_fhdr, opr_name(&ops[i], 0), 1);
+ j = pp ? pp->argc_stack : 0;
+ // give the pushes back to the call as args, newest first
+ while (i > 0 && j > 0) {
+ i--;
+ if (ops[i].op == OP_PUSH) {
+ ops[i].flags &= ~(OPF_RMD | OPF_DONE | OPF_NOREGS);
+ j--;
+ }
+ }
+ if (j != 0)
+ ferr(&ops[i], "unhandled prologue\n");
+
+ // recheck
+ i = g_stack_fsz = ecx_push = 0;
+ while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
+ if (!(ops[i].flags & OPF_RMD))
+ break;
+ g_stack_fsz += 4;
+ ecx_push++;
+ i++;
+ }
+ }
+ }
+
+ found = 0;
+ if (ecx_push || esp_sub)
+ {
+ g_sp_frame = 1;
+
+ i++;
+ do {
+ for (; i < opcnt; i++)
+ if (ops[i].flags & OPF_TAIL)
+ break;
+ j = i - 1;
+ if (i == opcnt && (ops[j].flags & OPF_JMP)) {
+ if (ops[j].bt_i != -1 || ops[j].btj != NULL)
+ break;
+ i--;
+ j--;
+ }
+
+ if (ecx_push > 0) {
+ // expect the frame to be released by ecx pops or 'add esp, N'
+ for (l = 0; l < ecx_push; l++) {
+ if (ops[j].op == OP_POP && IS(opr_name(&ops[j], 0), "ecx"))
+ /* pop ecx */;
+ else if (ops[j].op == OP_ADD
+ && IS(opr_name(&ops[j], 0), "esp")
+ && ops[j].operand[1].type == OPT_CONST)
+ {
+ /* add esp, N */
+ l += ops[j].operand[1].val / 4 - 1;
+ }
+ else
+ ferr(&ops[j], "'pop ecx' expected\n");
+
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ j--;
+ }
+ if (l != ecx_push)
+ ferr(&ops[j], "epilogue scan failed\n");
+
+ found = 1;
+ }
+
+ if (esp_sub) {
+ if (ops[j].op != OP_ADD
+ || !IS(opr_name(&ops[j], 0), "esp")
+ || ops[j].operand[1].type != OPT_CONST
+ || ops[j].operand[1].val != g_stack_fsz)
+ ferr(&ops[j], "'add esp' expected\n");
+
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ ops[j].operand[1].val = 0; // hack for stack arg scanner
+ found = 1;
+ }
+
+ i++;
+ } while (i < opcnt);
+
+ if (!found)
+ ferr(ops, "missing esp epilogue\n");
+ }
+}
+
+// find an instruction that changed opr before i op
+// *op_i must be set to -1 by the caller
+// *is_caller is set to 1 if one source is determined to be g_func arg
+// returns 1 if found, *op_i is then set to origin
+// returns -1 if multiple origins are found
+// (walks backwards; at labels it recurses into every jump source,
+// using cc_scratch == magic as the visited marker)
+static int resolve_origin(int i, const struct parsed_opr *opr,
+ int magic, int *op_i, int *is_caller)
+{
+ struct label_ref *lr;
+ int ret = 0;
+
+ if (ops[i].cc_scratch == magic)
+ return 0;
+ ops[i].cc_scratch = magic;
+
+ while (1) {
+ if (g_labels[i] != NULL) {
+ // fork into every op that jumps to this label
+ lr = &g_label_refs[i];
+ for (; lr != NULL; lr = lr->next) {
+ check_i(&ops[i], lr->i);
+ ret |= resolve_origin(lr->i, opr, magic, op_i, is_caller);
+ }
+ // if the preceding op can't fall through, only jumps reach here
+ if (i > 0 && LAST_OP(i - 1))
+ return ret;
+ }
+
+ i--;
+ if (i < 0) {
+ // fell off the function start: value comes from the caller
+ if (is_caller != NULL)
+ *is_caller = 1;
+ return -1;
+ }
+
+ if (ops[i].cc_scratch == magic)
+ return ret;
+ ops[i].cc_scratch = magic;
+
+ if (!(ops[i].flags & OPF_DATA))
+ continue;
+ if (!is_opr_modified(opr, &ops[i]))
+ continue;
+
+ if (*op_i >= 0) {
+ if (*op_i == i)
+ return ret | 1;
+
+ // XXX: could check if the other op does the same
+ return -1;
+ }
+
+ *op_i = i;
+ return ret | 1;
+ }
+}
+
+// find an instruction that previously referenced opr
+// if multiple results are found - fail
+// *op_i must be set to -1 by the caller
+// returns 1 if found, *op_i is then set to referencer insn
+// (same backwards walk over label refs as resolve_origin, with
+// cc_scratch == magic marking visited ops)
+static int resolve_last_ref(int i, const struct parsed_opr *opr,
+ int magic, int *op_i)
+{
+ struct label_ref *lr;
+ int ret = 0;
+
+ if (ops[i].cc_scratch == magic)
+ return 0;
+ ops[i].cc_scratch = magic;
+
+ while (1) {
+ if (g_labels[i] != NULL) {
+ // fork into every op that jumps to this label
+ lr = &g_label_refs[i];
+ for (; lr != NULL; lr = lr->next) {
+ check_i(&ops[i], lr->i);
+ ret |= resolve_last_ref(lr->i, opr, magic, op_i);
+ }
+ if (i > 0 && LAST_OP(i - 1))
+ return ret;
+ }
+
+ i--;
+ if (i < 0)
+ return -1;
+
+ if (ops[i].cc_scratch == magic)
+ return 0;
+ ops[i].cc_scratch = magic;
+
+ if (!is_opr_referenced(opr, &ops[i]))
+ continue;
+
+ // a second distinct referencer makes the result ambiguous
+ if (*op_i >= 0)
+ return -1;
+
+ *op_i = i;
+ return 1;
+ }
+}
+
+// find next instruction that reads opr
+// *op_i must be set to -1 by the caller
+// on return, *op_i is set to first referencer insn
+// returns 1 if exactly 1 referencer is found
+// (walks forward, following unconditional jumps and forking at
+// conditional jumps / jumptables; stops where opr is overwritten)
+static int find_next_read(int i, int opcnt,
+ const struct parsed_opr *opr, int magic, int *op_i)
+{
+ struct parsed_op *po;
+ int j, ret = 0;
+
+ for (; i < opcnt; i++)
+ {
+ if (ops[i].cc_scratch == magic)
+ return ret;
+ ops[i].cc_scratch = magic;
+
+ po = &ops[i];
+ if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
+ if (po->btj != NULL) {
+ // jumptable
+ for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
+ ret |= find_next_read(po->btj->d[j].bt_i, opcnt, opr,
+ magic, op_i);
+ }
+ return ret;
+ }
+
+ if (po->flags & OPF_RMD)
+ continue;
+ check_i(po, po->bt_i);
+ if (po->flags & OPF_CJMP) {
+ // conditional: explore the taken branch, then fall through
+ ret |= find_next_read(po->bt_i, opcnt, opr, magic, op_i);
+ if (ret < 0)
+ return ret;
+ }
+ else
+ // unconditional: continue scanning at the target
+ i = po->bt_i - 1;
+ continue;
+ }
+
+ if (!is_opr_read(opr, po)) {
+ if (is_opr_modified(opr, po)
+ && (po->op == OP_CALL
+ || ((po->flags & OPF_DATA)
+ && po->operand[0].lmod == OPLM_DWORD)))
+ {
+ // it's overwritten
+ return ret;
+ }
+ if (po->flags & OPF_TAIL)
+ return ret;
+ continue;
+ }
+
+ // a second distinct reader makes the result ambiguous
+ if (*op_i >= 0)
+ return -1;
+
+ *op_i = i;
+ return 1;
+ }
+
+ return 0;
+}
+
+// If 'opr' has a single origin before op i and that origin is a
+// 'mov opr, <const>', return 1 and store the constant in *val;
+// otherwise return -1.
+static int try_resolve_const(int i, const struct parsed_opr *opr,
+  int magic, unsigned int *val)
+{
+  int s_i = -1;
+  int ret;
+
+  ret = resolve_origin(i, opr, magic, &s_i, NULL);
+  if (ret == 1) {
+    i = s_i;
+    // the origin must be a mov AND its source an immediate;
+    // the old '&&' let e.g. 'mov reg, reg2' through and then read
+    // a garbage .val from a non-const operand
+    if (ops[i].op != OP_MOV || ops[i].operand[1].type != OPT_CONST)
+      return -1;
+
+    *val = ops[i].operand[1].val;
+    return 1;
+  }
+
+  return -1;
+}
+
+// Try to determine the prototype of the indirect call at ops[i]:
+//  - "reg+offset" operands are treated as struct-member calls and
+//    resolved through proto_lookup_struct()
+//  - label/offset operands go through try_recover_pp()
+//  - otherwise the backward scanner looks for what was assigned to
+//    the call target
+// *pp_i receives the op index the proto came from (-1 if none);
+// *multi_src is set when several assignment sources were seen.
+static const struct parsed_proto *resolve_icall(int i, int opcnt,
+ int *pp_i, int *multi_src)
+{
+ const struct parsed_proto *pp = NULL;
+ int search_advice = 0;
+ int offset = -1;
+ char name[256];
+ char s_reg[4];
+ int reg, len;
+ int ret;
+
+ *multi_src = 0;
+ *pp_i = -1;
+
+ switch (ops[i].operand[0].type) {
+ case OPT_REGMEM:
+ // try to resolve struct member calls
+ ret = sscanf(ops[i].operand[0].name, "%3s+%x%n",
+ s_reg, &offset, &len);
+ // %n must have consumed the whole operand string
+ if (ret == 2 && len == strlen(ops[i].operand[0].name))
+ {
+ reg = char_array_i(regs_r32, ARRAY_SIZE(regs_r32), s_reg);
+ if (reg >= 0) {
+ // find where the base register got its value
+ struct parsed_opr opr = OPR_INIT(OPT_REG, OPLM_DWORD, reg);
+ int j = -1;
+ ret = resolve_origin(i, &opr, i + opcnt * 19, &j, NULL);
+ if (ret != 1)
+ break;
+ if (ops[j].op == OP_MOV && ops[j].operand[1].type == OPT_REGMEM
+ && ops[j].operand[0].lmod == OPLM_DWORD
+ && ops[j].pp == NULL) // no hint
+ {
+ // allow one simple dereference (directx)
+ reg = char_array_i(regs_r32, ARRAY_SIZE(regs_r32),
+ ops[j].operand[1].name);
+ if (reg < 0)
+ break;
+ struct parsed_opr opr2 = OPR_INIT(OPT_REG, OPLM_DWORD, reg);
+ int k = -1;
+ ret = resolve_origin(j, &opr2, j + opcnt * 19, &k, NULL);
+ if (ret != 1)
+ break;
+ j = k;
+ }
+ if (ops[j].op != OP_MOV)
+ break;
+ if (ops[j].operand[0].lmod != OPLM_DWORD)
+ break;
+ if (ops[j].pp != NULL) {
+ // type hint in asm
+ pp = ops[j].pp;
+ }
+ else if (ops[j].operand[1].type == OPT_REGMEM) {
+ // allow 'hello[ecx]' - assume array of same type items
+ ret = sscanf(ops[j].operand[1].name, "%[^[][e%2s]",
+ name, s_reg);
+ if (ret != 2)
+ break;
+ pp = proto_parse(g_fhdr, name, g_quiet_pp);
+ }
+ else if (ops[j].operand[1].type == OPT_LABEL)
+ pp = proto_parse(g_fhdr, ops[j].operand[1].name, g_quiet_pp);
+ else
+ break;
+ if (pp == NULL)
+ break;
+ // base must be a struct pointer, not a function
+ if (pp->is_func || pp->is_fptr || !pp->type.is_struct) {
+ pp = NULL;
+ break;
+ }
+ pp = proto_lookup_struct(g_fhdr, pp->type.name, offset);
+ }
+ break;
+ }
+ // fallthrough
+ case OPT_LABEL:
+ case OPT_OFFSET:
+ pp = try_recover_pp(&ops[i], &ops[i].operand[0], &search_advice);
+ if (!search_advice)
+ break;
+ // fallthrough
+ default:
+ scan_for_call_type(i, &ops[i].operand[0], i + opcnt * 9, &pp,
+ pp_i, multi_src);
+ break;
+ }
+
+ return pp;
+}
+
+// Cheap first-chance handling of the call at ops[i]: only succeeds for
+// already-known, non-vararg prototypes with no register args whose
+// esp adjust (or equivalent 'pop ecx' chain) is unambiguous.
+// Returns the proto on success (with *adj_i set to the adjust op's
+// index, or -1), or NULL to defer to the full process_call() pass.
+static struct parsed_proto *process_call_early(int i, int opcnt,
+ int *adj_i)
+{
+ struct parsed_op *po = &ops[i];
+ struct parsed_proto *pp;
+ int multipath = 0;
+ int adj = 0;
+ int j, ret;
+
+ pp = po->pp;
+ if (pp == NULL || pp->is_vararg || pp->argc_reg != 0)
+ // leave for later
+ return NULL;
+
+ // look for and make use of esp adjust
+ *adj_i = ret = -1;
+ if (!pp->is_stdcall && pp->argc_stack > 0)
+ ret = scan_for_esp_adjust(i + 1, opcnt,
+ pp->argc_stack * 4, &adj, &multipath, 0);
+ if (ret >= 0) {
+ if (pp->argc_stack > adj / 4)
+ return NULL;
+ if (multipath)
+ return NULL;
+ if (ops[ret].op == OP_POP) {
+ // a pop-chain cleanup must be all 'pop ecx'
+ for (j = 1; j < adj / 4; j++) {
+ if (ops[ret + j].op != OP_POP
+ || ops[ret + j].operand[0].reg != xCX)
+ {
+ return NULL;
+ }
+ }
+ }
+ }
+
+ *adj_i = ret;
+ return pp;
+}
+
+// Determine the prototype of the call at ops[i]: use the attached one,
+// resolve an indirect call, or synthesize a generic "int f(int,...)"
+// proto from the observed esp adjust. Then validate/apply the callee's
+// stack cleanup, widening vararg protos to cover the pushed args.
+static struct parsed_proto *process_call(int i, int opcnt)
+{
+  struct parsed_op *po = &ops[i];
+  const struct parsed_proto *pp_c;
+  struct parsed_proto *pp;
+  const char *tmpname;
+  int call_i = -1, ref_i = -1;
+  int adj = 0, multipath = 0;
+  int ret, arg;
+
+  tmpname = opr_name(po, 0);
+  pp = po->pp;
+  if (pp == NULL)
+  {
+    // indirect call
+    pp_c = resolve_icall(i, opcnt, &call_i, &multipath);
+    if (pp_c != NULL) {
+      if (!pp_c->is_func && !pp_c->is_fptr)
+        ferr(po, "call to non-func: %s\n", pp_c->name);
+      pp = proto_clone(pp_c);
+      my_assert_not(pp, NULL);
+      if (multipath)
+        // not resolved just to single func
+        pp->is_fptr = 1;
+
+      switch (po->operand[0].type) {
+      case OPT_REG:
+        // we resolved this call and no longer need the register
+        po->regmask_src &= ~(1 << po->operand[0].reg);
+
+        if (!multipath && i != call_i && ops[call_i].op == OP_MOV
+          && ops[call_i].operand[1].type == OPT_LABEL)
+        {
+          // no other source users?
+          ret = resolve_last_ref(i, &po->operand[0], i + opcnt * 10,
+                  &ref_i);
+          if (ret == 1 && call_i == ref_i) {
+            // and nothing uses it after us?
+            ref_i = -1;
+            find_next_read(i + 1, opcnt, &po->operand[0],
+              i + opcnt * 11, &ref_i);
+            if (ref_i == -1)
+              // then also don't need the source mov
+              ops[call_i].flags |= OPF_RMD | OPF_NOREGS;
+          }
+        }
+        break;
+      case OPT_REGMEM:
+        pp->is_fptr = 1;
+        break;
+      default:
+        break;
+      }
+    }
+    if (pp == NULL) {
+      // unresolved: invent a proto based on the esp adjust
+      pp = calloc(1, sizeof(*pp));
+      my_assert_not(pp, NULL);
+
+      pp->is_fptr = 1;
+      ret = scan_for_esp_adjust(i + 1, opcnt,
+              -1, &adj, &multipath, 0);
+      if (ret < 0 || adj < 0) {
+        if (!g_allow_regfunc)
+          ferr(po, "non-__cdecl indirect call unhandled yet\n");
+        pp->is_unresolved = 1;
+        adj = 0;
+      }
+      adj /= 4;
+      if (adj > ARRAY_SIZE(pp->arg))
+        ferr(po, "esp adjust too large: %d\n", adj);
+      pp->ret_type.name = strdup("int");
+      pp->argc = pp->argc_stack = adj;
+      for (arg = 0; arg < pp->argc; arg++)
+        pp->arg[arg].type.name = strdup("int");
+    }
+    po->pp = pp;
+  }
+
+  // look for and make use of esp adjust
+  multipath = 0;
+  ret = -1;
+  if (!pp->is_stdcall && pp->argc_stack > 0) {
+    int adj_expect = pp->is_vararg ? -1 : pp->argc_stack * 4;
+    ret = scan_for_esp_adjust(i + 1, opcnt,
+            adj_expect, &adj, &multipath, 0);
+  }
+  if (ret >= 0) {
+    if (pp->is_vararg) {
+      if (adj / 4 < pp->argc_stack) {
+        fnote(po, "(this call)\n");
+        ferr(&ops[ret], "esp adjust is too small: %x < %x\n",
+          adj, pp->argc_stack * 4);
+      }
+      // modify pp to make it have varargs as normal args;
+      // bounds-check BEFORE writing - the check used to run after
+      // the fill loop, i.e. after pp->arg[] could already be overrun
+      arg = pp->argc;
+      pp->argc += adj / 4 - pp->argc_stack;
+      if (pp->argc > ARRAY_SIZE(pp->arg))
+        ferr(po, "too many args for '%s'\n", tmpname);
+      for (; arg < pp->argc; arg++) {
+        pp->arg[arg].type.name = strdup("int");
+        pp->argc_stack++;
+      }
+    }
+    if (pp->argc_stack > adj / 4) {
+      fnote(po, "(this call)\n");
+      ferr(&ops[ret], "stack tracking failed for '%s': %x %x\n",
+        tmpname, pp->argc_stack * 4, adj);
+    }
+
+    // second scan actually applies/marks the adjust ops
+    scan_for_esp_adjust(i + 1, opcnt,
+      pp->argc_stack * 4, &adj, &multipath, 1);
+  }
+  else if (pp->is_vararg)
+    ferr(po, "missing esp_adjust for vararg func '%s'\n",
+      pp->name);
+
+  return pp;
+}
+
+// Fast-path arg collection for the call 'po' at index i: scan straight
+// backwards and only succeed when every stack arg maps to a plain PUSH
+// with nothing in between (no labels, calls, pops, esp adds or
+// conditional jumps) that could complicate matters.
+// Phase 1 verifies the pattern, phase 2 claims the pushes.
+// Returns 0 on success, -1 to fall back to collect_call_args().
+static int collect_call_args_early(struct parsed_op *po, int i,
+ struct parsed_proto *pp, int *regmask)
+{
+ int arg, ret;
+ int j;
+
+ // position 'arg' at the first stack (non-reg) argument
+ for (arg = 0; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+
+ // first see if it can be easily done
+ for (j = i; j > 0 && arg < pp->argc; )
+ {
+ if (g_labels[j] != NULL)
+ return -1;
+ j--;
+
+ if (ops[j].op == OP_CALL)
+ return -1;
+ else if (ops[j].op == OP_ADD && ops[j].operand[0].reg == xSP)
+ return -1;
+ else if (ops[j].op == OP_POP)
+ return -1;
+ else if (ops[j].flags & OPF_CJMP)
+ return -1;
+ else if (ops[j].op == OP_PUSH) {
+ if (ops[j].flags & (OPF_FARG|OPF_FARGNR))
+ return -1;
+ // pushed value must not be modified before the call
+ ret = scan_for_mod(&ops[j], j + 1, i, 1);
+ if (ret >= 0)
+ return -1;
+
+ if (pp->arg[arg].type.is_va_list)
+ return -1;
+
+ // next arg
+ for (arg++; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+ }
+ }
+
+ if (arg < pp->argc)
+ return -1;
+
+ // now do it
+ for (arg = 0; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+
+ for (j = i; j > 0 && arg < pp->argc; )
+ {
+ j--;
+
+ if (ops[j].op == OP_PUSH)
+ {
+ ops[j].p_argnext = -1;
+ ferr_assert(&ops[j], pp->arg[arg].datap == NULL);
+ pp->arg[arg].datap = &ops[j];
+
+ if (ops[j].operand[0].type == OPT_REG)
+ *regmask |= 1 << ops[j].operand[0].reg;
+
+ // push folds into the call; no longer a standalone op
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_FARGNR | OPF_FARG;
+ ops[j].flags &= ~OPF_RSAVE;
+
+ // next arg
+ for (arg++; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+// Full (recursive) arg collection for call 'po': walk backwards from
+// op i matching PUSHes to stack args, recursing into every jump source
+// when a label is crossed. cc_scratch carries a magic with the current
+// arg index in its top byte, so re-visiting the same path with a
+// different arg count is detected as an error. Pushes whose value may
+// change before the call get marked for operand saving (p_argnum),
+// grouped into arg groups when the same save slot would be reused.
+// Returns the number of args collected, or -1 on failure.
+static int collect_call_args_r(struct parsed_op *po, int i,
+ struct parsed_proto *pp, int *regmask, int *save_arg_vars,
+ int *arg_grp, int arg, int magic, int need_op_saving, int may_reuse)
+{
+ struct parsed_proto *pp_tmp;
+ struct parsed_op *po_tmp;
+ struct label_ref *lr;
+ int need_to_save_current;
+ int arg_grp_current = 0;
+ int save_args_seen = 0;
+ int save_args;
+ int ret = 0;
+ int reg;
+ char buf[32];
+ int j, k;
+
+ if (i < 0) {
+ ferr(po, "dead label encountered\n");
+ return -1;
+ }
+
+ // position 'arg' at the first stack (non-reg) argument
+ for (; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+ // encode the current arg index into the visited-marker
+ magic = (magic & 0xffffff) | (arg << 24);
+
+ for (j = i; j >= 0 && (arg < pp->argc || pp->is_unresolved); )
+ {
+ if (((ops[j].cc_scratch ^ magic) & 0xffffff) == 0) {
+ if (ops[j].cc_scratch != magic) {
+ ferr(&ops[j], "arg collect hit same path with diff args for %s\n",
+ pp->name);
+ return -1;
+ }
+ // ok: have already been here
+ return 0;
+ }
+ ops[j].cc_scratch = magic;
+
+ if (g_labels[j] != NULL && g_label_refs[j].i != -1) {
+ // crossed a label: recurse into each jump source
+ lr = &g_label_refs[j];
+ if (lr->next != NULL)
+ need_op_saving = 1;
+ for (; lr->next; lr = lr->next) {
+ check_i(&ops[j], lr->i);
+ if ((ops[lr->i].flags & (OPF_JMP|OPF_CJMP)) != OPF_JMP)
+ may_reuse = 1;
+ ret = collect_call_args_r(po, lr->i, pp, regmask, save_arg_vars,
+ arg_grp, arg, magic, need_op_saving, may_reuse);
+ if (ret < 0)
+ return ret;
+ }
+
+ check_i(&ops[j], lr->i);
+ if ((ops[lr->i].flags & (OPF_JMP|OPF_CJMP)) != OPF_JMP)
+ may_reuse = 1;
+ if (j > 0 && LAST_OP(j - 1)) {
+ // follow last branch in reverse
+ j = lr->i;
+ continue;
+ }
+ need_op_saving = 1;
+ ret = collect_call_args_r(po, lr->i, pp, regmask, save_arg_vars,
+ arg_grp, arg, magic, need_op_saving, may_reuse);
+ if (ret < 0)
+ return ret;
+ }
+ j--;
+
+ if (ops[j].op == OP_CALL)
+ {
+ if (pp->is_unresolved)
+ break;
+
+ pp_tmp = ops[j].pp;
+ if (pp_tmp == NULL)
+ ferr(po, "arg collect hit unparsed call '%s'\n",
+ ops[j].operand[0].name);
+ if (may_reuse && pp_tmp->argc_stack > 0)
+ ferr(po, "arg collect %d/%d hit '%s' with %d stack args\n",
+ arg, pp->argc, opr_name(&ops[j], 0), pp_tmp->argc_stack);
+ }
+ // esp adjust of 0 means we collected it before
+ else if (ops[j].op == OP_ADD && ops[j].operand[0].reg == xSP
+ && (ops[j].operand[1].type != OPT_CONST
+ || ops[j].operand[1].val != 0))
+ {
+ if (pp->is_unresolved)
+ break;
+
+ fnote(po, "(this call)\n");
+ ferr(&ops[j], "arg collect %d/%d hit esp adjust of %d\n",
+ arg, pp->argc, ops[j].operand[1].val);
+ }
+ else if (ops[j].op == OP_POP && !(ops[j].flags & OPF_DONE))
+ {
+ if (pp->is_unresolved)
+ break;
+
+ fnote(po, "(this call)\n");
+ ferr(&ops[j], "arg collect %d/%d hit pop\n", arg, pp->argc);
+ }
+ else if (ops[j].flags & OPF_CJMP)
+ {
+ if (pp->is_unresolved)
+ break;
+
+ may_reuse = 1;
+ }
+ else if (ops[j].op == OP_PUSH
+ && !(ops[j].flags & (OPF_FARGNR|OPF_DONE)))
+ {
+ if (pp->is_unresolved && (ops[j].flags & OPF_RMD))
+ break;
+
+ // claim this push for the current arg (chain via p_argnext
+ // when several pushes feed the same arg on different paths)
+ ops[j].p_argnext = -1;
+ po_tmp = pp->arg[arg].datap;
+ if (po_tmp != NULL)
+ ops[j].p_argnext = po_tmp - ops;
+ pp->arg[arg].datap = &ops[j];
+
+ need_to_save_current = 0;
+ save_args = 0;
+ reg = -1;
+ if (ops[j].operand[0].type == OPT_REG)
+ reg = ops[j].operand[0].reg;
+
+ if (!need_op_saving) {
+ ret = scan_for_mod(&ops[j], j + 1, i, 1);
+ need_to_save_current = (ret >= 0);
+ }
+ if (need_op_saving || need_to_save_current) {
+ // mark this push as one that needs operand saving
+ ops[j].flags &= ~OPF_RMD;
+ if (ops[j].p_argnum == 0) {
+ ops[j].p_argnum = arg + 1;
+ save_args |= 1 << arg;
+ }
+ else if (ops[j].p_argnum < arg + 1) {
+ // XXX: might kill valid var..
+ //*save_arg_vars &= ~(1 << (ops[j].p_argnum - 1));
+ ops[j].p_argnum = arg + 1;
+ save_args |= 1 << arg;
+ }
+
+ // same save slot seen twice: start a new arg group
+ if (save_args_seen & (1 << (ops[j].p_argnum - 1))) {
+ save_args_seen = 0;
+ arg_grp_current++;
+ if (arg_grp_current >= MAX_ARG_GRP)
+ ferr(&ops[j], "out of arg groups (arg%d), f %s\n",
+ ops[j].p_argnum, pp->name);
+ }
+ }
+ else if (ops[j].p_argnum == 0)
+ ops[j].flags |= OPF_RMD;
+
+ // some PUSHes are reused by different calls on other branches,
+ // but that can't happen if we didn't branch, so they
+ // can be removed from future searches (handles nested calls)
+ if (!may_reuse)
+ ops[j].flags |= OPF_FARGNR;
+
+ ops[j].flags |= OPF_FARG;
+ ops[j].flags &= ~OPF_RSAVE;
+
+ // check for __VALIST
+ if (!pp->is_unresolved && g_func_pp != NULL
+ && pp->arg[arg].type.is_va_list)
+ {
+ k = -1;
+ ret = resolve_origin(j, &ops[j].operand[0],
+ magic + 1, &k, NULL);
+ if (ret == 1 && k >= 0)
+ {
+ if (ops[k].op == OP_LEA) {
+ if (!g_func_pp->is_vararg)
+ ferr(&ops[k], "lea <arg> used, but %s is not vararg?\n",
+ g_func_pp->name);
+
+ // lea of the first vararg slot (or "arglist")
+ snprintf(buf, sizeof(buf), "arg_%X",
+ g_func_pp->argc_stack * 4);
+ if (strstr(ops[k].operand[1].name, buf)
+ || strstr(ops[k].operand[1].name, "arglist"))
+ {
+ ops[k].flags |= OPF_RMD | OPF_NOREGS | OPF_DONE;
+ ops[j].flags |= OPF_RMD | OPF_NOREGS | OPF_VAPUSH;
+ save_args &= ~(1 << arg);
+ reg = -1;
+ }
+ else
+ ferr(&ops[k], "va_list arg detection failed\n");
+ }
+ // check for va_list from g_func_pp arg too
+ else if (ops[k].op == OP_MOV
+ && is_stack_access(&ops[k], &ops[k].operand[1]))
+ {
+ ret = stack_frame_access(&ops[k], &ops[k].operand[1],
+ buf, sizeof(buf), ops[k].operand[1].name, "", 1, 0);
+ if (ret >= 0) {
+ ops[k].flags |= OPF_RMD | OPF_DONE;
+ ops[j].flags |= OPF_RMD;
+ ops[j].p_argpass = ret + 1;
+ save_args &= ~(1 << arg);
+ reg = -1;
+ }
+ }
+ }
+ }
+
+ *save_arg_vars |= save_args;
+
+ // tracking reg usage
+ if (reg >= 0)
+ *regmask |= 1 << reg;
+
+ arg++;
+ if (!pp->is_unresolved) {
+ // next arg
+ for (; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+ }
+ magic = (magic & 0xffffff) | (arg << 24);
+ }
+
+ if (ops[j].p_arggrp > arg_grp_current) {
+ save_args_seen = 0;
+ arg_grp_current = ops[j].p_arggrp;
+ }
+ if (ops[j].p_argnum > 0)
+ save_args_seen |= 1 << (ops[j].p_argnum - 1);
+ }
+
+ if (arg < pp->argc) {
+ ferr(po, "arg collect failed for '%s': %d/%d\n",
+ pp->name, arg, pp->argc);
+ return -1;
+ }
+
+ if (arg_grp_current > *arg_grp)
+ *arg_grp = arg_grp_current;
+
+ return arg;
+}
+
+// Wrapper around collect_call_args_r(): runs the recursive collection,
+// propagates the resulting arg group to every claimed push, merges the
+// save-var mask into the right group slot, and for unresolved protos
+// grows argc/argc_stack by the number of pushes actually found.
+static int collect_call_args(struct parsed_op *po, int i,
+ struct parsed_proto *pp, int *regmask, int *save_arg_vars,
+ int magic)
+{
+ // arg group is for cases when pushes for
+ // multiple funcs are going on
+ struct parsed_op *po_tmp;
+ int save_arg_vars_current = 0;
+ int arg_grp = 0;
+ int ret;
+ int a;
+
+ ret = collect_call_args_r(po, i, pp, regmask,
+ &save_arg_vars_current, &arg_grp, 0, magic, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ if (arg_grp != 0) {
+ // propagate arg_grp
+ for (a = 0; a < pp->argc; a++) {
+ if (pp->arg[a].reg != NULL)
+ continue;
+
+ // follow the p_argnext chain of pushes feeding this arg
+ po_tmp = pp->arg[a].datap;
+ while (po_tmp != NULL) {
+ po_tmp->p_arggrp = arg_grp;
+ if (po_tmp->p_argnext > 0)
+ po_tmp = &ops[po_tmp->p_argnext];
+ else
+ po_tmp = NULL;
+ }
+ }
+ }
+ save_arg_vars[arg_grp] |= save_arg_vars_current;
+
+ if (pp->is_unresolved) {
+ // the scan discovered how many args there actually are
+ pp->argc += ret;
+ pp->argc_stack += ret;
+ for (a = 0; a < pp->argc; a++)
+ if (pp->arg[a].type.name == NULL)
+ pp->arg[a].type.name = strdup("int");
+ }
+
+ return ret;
+}
+
+// Flow-sensitive register-usage pass starting at op i:
+//  - follows jumps/jumptables (cbits is the visited bitmap)
+//  - converts push/pop pairs into register saves (OPF_RSAVE)
+//  - drops unused eax results of calls
+//  - models the x87 stack top (mxST0/mxST1) for FPUSH/FPOP ops
+//  - accumulates into *regmask (all regs used), *regmask_save (regs
+//    preserved via push/pop) and *regmask_init (regs needing explicit
+//    zero-init); warns on reads of uninitialized registers
+static void reg_use_pass(int i, int opcnt, unsigned char *cbits,
+ int regmask_now, int *regmask,
+ int regmask_save_now, int *regmask_save,
+ int *regmask_init, int regmask_arg)
+{
+ struct parsed_op *po;
+ unsigned int mask;
+ int already_saved;
+ int regmask_new;
+ int regmask_op;
+ int flags_set;
+ int ret, reg;
+ int j;
+
+ for (; i < opcnt; i++)
+ {
+ po = &ops[i];
+ // visited-bitmap check (one bit per op)
+ if (cbits[i >> 3] & (1 << (i & 7)))
+ return;
+ cbits[i >> 3] |= (1 << (i & 7));
+
+ if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
+ if (po->flags & (OPF_RMD|OPF_DONE))
+ continue;
+ if (po->btj != NULL) {
+ // fork into every jumptable target
+ for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
+ reg_use_pass(po->btj->d[j].bt_i, opcnt, cbits,
+ regmask_now, regmask, regmask_save_now, regmask_save,
+ regmask_init, regmask_arg);
+ }
+ return;
+ }
+
+ check_i(po, po->bt_i);
+ if (po->flags & OPF_CJMP)
+ // conditional: fork into taken branch, then fall through
+ reg_use_pass(po->bt_i, opcnt, cbits,
+ regmask_now, regmask, regmask_save_now, regmask_save,
+ regmask_init, regmask_arg);
+ else
+ i = po->bt_i - 1;
+ continue;
+ }
+
+ if (po->op == OP_PUSH && !(po->flags & (OPF_FARG|OPF_DONE))
+ && !g_func_pp->is_userstack
+ && po->operand[0].type == OPT_REG)
+ {
+ // push reg not belonging to a call: candidate register save
+ reg = po->operand[0].reg;
+ ferr_assert(po, reg >= 0);
+
+ already_saved = 0;
+ flags_set = OPF_RSAVE | OPF_RMD | OPF_DONE;
+ if (regmask_now & (1 << reg)) {
+ already_saved = regmask_save_now & (1 << reg);
+ flags_set = OPF_RSAVE | OPF_DONE;
+ }
+
+ // dry-run scan for the matching pop, then apply flags
+ ret = scan_for_pop(i + 1, opcnt, i + opcnt * 3, reg, 0, 0);
+ if (ret == 1) {
+ scan_for_pop(i + 1, opcnt, i + opcnt * 4, reg, 0, flags_set);
+ }
+ else {
+ ret = scan_for_pop_ret(i + 1, opcnt, po->operand[0].reg, 0);
+ if (ret == 1) {
+ scan_for_pop_ret(i + 1, opcnt, po->operand[0].reg,
+ flags_set);
+ }
+ }
+ if (ret == 1) {
+ ferr_assert(po, !already_saved);
+ po->flags |= flags_set;
+
+ if (regmask_now & (1 << reg)) {
+ regmask_save_now |= (1 << reg);
+ *regmask_save |= regmask_save_now;
+ }
+ continue;
+ }
+ }
+ else if (po->op == OP_POP && (po->flags & OPF_RSAVE)) {
+ // matching pop of a saved register: value is live again
+ reg = po->operand[0].reg;
+ ferr_assert(po, reg >= 0);
+
+ if (regmask_save_now & (1 << reg))
+ regmask_save_now &= ~(1 << reg);
+ else
+ regmask_now &= ~(1 << reg);
+ continue;
+ }
+ else if (po->op == OP_CALL) {
+ if ((po->regmask_dst & (1 << xAX))
+ && !(po->regmask_dst & (1 << xDX)))
+ {
+ if (po->flags & OPF_TAIL)
+ // don't need eax, will do "return f();" or "f(); return;"
+ po->regmask_dst &= ~(1 << xAX);
+ else {
+ // drop the eax result if nothing ever reads it
+ struct parsed_opr opr = OPR_INIT(OPT_REG, OPLM_DWORD, xAX);
+ j = -1;
+ find_next_read(i + 1, opcnt, &opr, i + opcnt * 17, &j);
+ if (j == -1)
+ // not used
+ po->regmask_dst &= ~(1 << xAX);
+ }
+ }
+ }
+
+ if (po->flags & OPF_NOREGS)
+ continue;
+
+ if (po->flags & OPF_FPUSH) {
+ // model x87 push: ST0 shifts into ST1
+ if (regmask_now & mxST1)
+ ferr(po, "TODO: FPUSH on active ST1\n");
+ if (regmask_now & mxST0)
+ po->flags |= OPF_FSHIFT;
+ mask = mxST0 | mxST1;
+ regmask_now = (regmask_now & ~mask) | ((regmask_now & mxST0) << 1);
+ }
+
+ // if incomplete register is used, clear it on init to avoid
+ // later use of uninitialized upper part in some situations
+ if ((po->flags & OPF_DATA) && po->operand[0].type == OPT_REG
+ && po->operand[0].lmod != OPLM_DWORD)
+ {
+ reg = po->operand[0].reg;
+ ferr_assert(po, reg >= 0);
+
+ if (!(regmask_now & (1 << reg)))
+ *regmask_init |= 1 << reg;
+ }
+
+ regmask_op = po->regmask_src | po->regmask_dst;
+
+ // regs read before ever being written or passed in
+ regmask_new = po->regmask_src & ~regmask_now & ~regmask_arg;
+ regmask_new &= ~(1 << xSP);
+ if (g_bp_frame && !(po->flags & OPF_EBP_S))
+ regmask_new &= ~(1 << xBP);
+
+ if (po->op == OP_CALL) {
+ // allow fastcall calls from anywhere, calee may be also sitting
+ // in some fastcall table even when it's not using reg args
+ if (regmask_new & po->regmask_src & (1 << xCX)) {
+ *regmask_init |= (1 << xCX);
+ regmask_now |= (1 << xCX);
+ regmask_new &= ~(1 << xCX);
+ }
+ if (regmask_new & po->regmask_src & (1 << xDX)) {
+ *regmask_init |= (1 << xDX);
+ regmask_now |= (1 << xDX);
+ regmask_new &= ~(1 << xDX);
+ }
+ }
+
+ if (regmask_new != 0)
+ fnote(po, "uninitialized reg mask: %x\n", regmask_new);
+
+ if (regmask_op & (1 << xBP)) {
+ if (g_bp_frame && !(po->flags & OPF_EBP_S)) {
+ if (po->regmask_dst & (1 << xBP))
+ // compiler decided to drop bp frame and use ebp as scratch
+ scan_fwd_set_flags(i + 1, opcnt, i + opcnt * 5, OPF_EBP_S);
+ else
+ regmask_op &= ~(1 << xBP);
+ }
+ }
+
+ regmask_now |= regmask_op;
+ *regmask |= regmask_now;
+
+ // released regs
+ if (po->flags & OPF_FPOP) {
+ // model x87 pop: ST1 shifts back into ST0
+ mask = mxST0 | mxST1;
+ if (!(regmask_now & mask))
+ ferr(po, "float pop on empty stack?\n");
+ if (regmask_now & mxST1)
+ po->flags |= OPF_FSHIFT;
+ regmask_now = (regmask_now & ~mask) | ((regmask_now & mxST1) >> 1);
+ }
+
+ if (po->flags & OPF_TAIL) {
+ if (regmask_now & (mxST0 | mxST1))
+ ferr(po, "float regs on tail: %x\n", regmask_now);
+ return;
+ }
+ }
+}
+
+// Insert a new register argument 'reg' into 'pp': it goes after the
+// existing reg args and before the stack args, which are shifted up
+// by one slot.
+static void pp_insert_reg_arg(struct parsed_proto *pp, const char *reg)
+{
+  int i;
+
+  // guard against overrunning the fixed-size arg array
+  // (the old code shifted/wrote without any bound check)
+  if (pp->argc >= ARRAY_SIZE(pp->arg))
+    ferr(ops, "too many args when adding reg arg '%s' to '%s'\n",
+      reg, pp->name);
+
+  // find the first stack (non-reg) arg; reg args stay in front
+  for (i = 0; i < pp->argc; i++)
+    if (pp->arg[i].reg == NULL)
+      break;
+
+  if (pp->argc_stack)
+    // entries [i, argc) are exactly the argc_stack stack args
+    memmove(&pp->arg[i + 1], &pp->arg[i],
+      sizeof(pp->arg[0]) * pp->argc_stack);
+  memset(&pp->arg[i], 0, sizeof(pp->arg[i]));
+  pp->arg[i].reg = strdup(reg);
+  pp->arg[i].type.name = strdup("int");
+  pp->argc++;
+  pp->argc_reg++;
+}
+
+static void output_std_flags(FILE *fout, struct parsed_op *po,
+ int *pfomask, const char *dst_opr_text)
+{
+ if (*pfomask & (1 << PFO_Z)) {
+ fprintf(fout, "\n cond_z = (%s%s == 0);",
+ lmod_cast_u(po, po->operand[0].lmod), dst_opr_text);
+ *pfomask &= ~(1 << PFO_Z);
+ }
+ if (*pfomask & (1 << PFO_S)) {
+ fprintf(fout, "\n cond_s = (%s%s < 0);",
+ lmod_cast_s(po, po->operand[0].lmod), dst_opr_text);
+ *pfomask &= ~(1 << PFO_S);
+ }
+}
+
// flag bits for output_pp() / output_pp_attrs()
enum {
  OPP_FORCE_NORETURN = (1 << 0), // emit "noreturn" even if pp itself isn't
  OPP_SIMPLE_ARGS = (1 << 1), // don't recurse into function-pointer args
  OPP_ALIGN = (1 << 2), // pad cconv/ret-type fields for column alignment
};
+
+static void output_pp_attrs(FILE *fout, const struct parsed_proto *pp,
+ int flags)
+{
+ const char *cconv = "";
+
+ if (pp->is_fastcall)
+ cconv = "__fastcall ";
+ else if (pp->is_stdcall && pp->argc_reg == 0)
+ cconv = "__stdcall ";
+
+ fprintf(fout, (flags & OPP_ALIGN) ? "%-16s" : "%s", cconv);
+
+ if (pp->is_noreturn || (flags & OPP_FORCE_NORETURN))
+ fprintf(fout, "noreturn ");
+}
+
+static void output_pp(FILE *fout, const struct parsed_proto *pp,
+ int flags)
+{
+ int i;
+
+ fprintf(fout, (flags & OPP_ALIGN) ? "%-5s" : "%s ",
+ pp->ret_type.name);
+ if (pp->is_fptr)
+ fprintf(fout, "(");
+ output_pp_attrs(fout, pp, flags);
+ if (pp->is_fptr)
+ fprintf(fout, "*");
+ fprintf(fout, "%s", pp->name);
+ if (pp->is_fptr)
+ fprintf(fout, ")");
+
+ fprintf(fout, "(");
+ for (i = 0; i < pp->argc; i++) {
+ if (i > 0)
+ fprintf(fout, ", ");
+ if (pp->arg[i].fptr != NULL && !(flags & OPP_SIMPLE_ARGS)) {
+ // func pointer
+ output_pp(fout, pp->arg[i].fptr, 0);
+ }
+ else if (pp->arg[i].type.is_retreg) {
+ fprintf(fout, "u32 *r_%s", pp->arg[i].reg);
+ }
+ else {
+ fprintf(fout, "%s", pp->arg[i].type.name);
+ if (!pp->is_fptr)
+ fprintf(fout, " a%d", i + 1);
+ }
+ }
+ if (pp->is_vararg) {
+ if (i > 0)
+ fprintf(fout, ", ");
+ fprintf(fout, "...");
+ }
+ fprintf(fout, ")");
+}
+
// Build the name of a saved-argument temporary into 'buf' and return it.
// Group 0 yields "s_a<num>", any other group yields "s<grp>_a<num>".
static char *saved_arg_name(char *buf, size_t buf_size, int grp, int num)
{
  char grp_str[16];

  if (grp > 0)
    snprintf(grp_str, sizeof(grp_str), "%d", grp);
  else
    grp_str[0] = 0;

  snprintf(buf, buf_size, "s%s_a%d", grp_str, num);
  return buf;
}
+
+static void gen_x_cleanup(int opcnt);
+
+static void gen_func(FILE *fout, FILE *fhdr, const char *funcn, int opcnt)
+{
+ struct parsed_op *po, *delayed_flag_op = NULL, *tmp_op;
+ struct parsed_opr *last_arith_dst = NULL;
+ char buf1[256], buf2[256], buf3[256], cast[64];
+ struct parsed_proto *pp, *pp_tmp;
+ struct parsed_data *pd;
+ unsigned int uval;
+ int save_arg_vars[MAX_ARG_GRP] = { 0, };
+ unsigned char cbits[MAX_OPS / 8];
+ int cond_vars = 0;
+ int need_tmp_var = 0;
+ int need_tmp64 = 0;
+ int had_decl = 0;
+ int label_pending = 0;
+ int regmask_save = 0; // regs saved/restored in this func
+ int regmask_arg; // regs from this function args (fastcall, etc)
+ int regmask_ret; // regs needed on ret
+ int regmask_now; // temp
+ int regmask_init = 0; // regs that need zero initialization
+ int regmask_pp = 0; // regs used in complex push-pop graph
+ int regmask = 0; // used regs
+ int pfomask = 0;
+ int found = 0;
+ int no_output;
+ int i, j, l;
+ int arg;
+ int reg;
+ int ret;
+
+ g_bp_frame = g_sp_frame = g_stack_fsz = 0;
+ g_stack_frame_used = 0;
+
+ g_func_pp = proto_parse(fhdr, funcn, 0);
+ if (g_func_pp == NULL)
+ ferr(ops, "proto_parse failed for '%s'\n", funcn);
+
+ regmask_arg = get_pp_arg_regmask_src(g_func_pp);
+ regmask_ret = get_pp_arg_regmask_dst(g_func_pp);
+
+ if (g_func_pp->has_retreg) {
+ for (arg = 0; arg < g_func_pp->argc; arg++) {
+ if (g_func_pp->arg[arg].type.is_retreg) {
+ reg = char_array_i(regs_r32,
+ ARRAY_SIZE(regs_r32), g_func_pp->arg[arg].reg);
+ ferr_assert(ops, reg >= 0);
+ regmask_ret |= 1 << reg;
+ }
+ }
+ }
+
+ // pass1:
+ // - resolve all branches
+ // - parse calls with labels
+ resolve_branches_parse_calls(opcnt);
+
+ // pass2:
+ // - handle ebp/esp frame, remove ops related to it
+ scan_prologue_epilogue(opcnt);
+
+ // pass3:
+ // - remove dead labels
+ // - set regs needed at ret
+ for (i = 0; i < opcnt; i++)
+ {
+ if (g_labels[i] != NULL && g_label_refs[i].i == -1) {
+ free(g_labels[i]);
+ g_labels[i] = NULL;
+ }
+
+ if (ops[i].op == OP_RET)
+ ops[i].regmask_src |= regmask_ret;
+ }
+
+ // pass4:
+ // - process trivial calls
+ for (i = 0; i < opcnt; i++)
+ {
+ po = &ops[i];
+ if (po->flags & (OPF_RMD|OPF_DONE))
+ continue;
+
+ if (po->op == OP_CALL)
+ {
+ pp = process_call_early(i, opcnt, &j);
+ if (pp != NULL) {
+ if (!(po->flags & OPF_ATAIL))
+ // since we know the args, try to collect them
+ if (collect_call_args_early(po, i, pp, ®mask) != 0)
+ pp = NULL;
+ }
+
+ if (pp != NULL) {
+ if (j >= 0) {
+ // commit esp adjust
+ if (ops[j].op != OP_POP)
+ patch_esp_adjust(&ops[j], pp->argc_stack * 4);
+ else {
+ for (l = 0; l < pp->argc_stack; l++)
+ ops[j + l].flags |= OPF_DONE | OPF_RMD | OPF_NOREGS;
+ }
+ }
+
+ if (strstr(pp->ret_type.name, "int64"))
+ need_tmp64 = 1;
+
+ po->flags |= OPF_DONE;
+ }
+ }
+ }
+
+ // pass5:
+ // - process calls, stage 2
+ // - handle some push/pop pairs
+ // - scan for STD/CLD, propagate DF
+ for (i = 0; i < opcnt; i++)
+ {
+ po = &ops[i];
+ if (po->flags & OPF_RMD)
+ continue;
+
+ if (po->op == OP_CALL)
+ {
+ if (!(po->flags & OPF_DONE)) {
+ pp = process_call(i, opcnt);
+
+ if (!pp->is_unresolved && !(po->flags & OPF_ATAIL)) {
+ // since we know the args, collect them
+ collect_call_args(po, i, pp, ®mask, save_arg_vars,
+ i + opcnt * 2);
+ }
+ // for unresolved, collect after other passes
+ }
+
+ pp = po->pp;
+ ferr_assert(po, pp != NULL);
+
+ po->regmask_src |= get_pp_arg_regmask_src(pp);
+ po->regmask_dst |= get_pp_arg_regmask_dst(pp);
+
+ if (po->regmask_dst & mxST0)
+ po->flags |= OPF_FPUSH;
+
+ if (strstr(pp->ret_type.name, "int64"))
+ need_tmp64 = 1;
+
+ continue;
+ }
+
+ if (po->flags & OPF_DONE)
+ continue;
+
+ if (po->op == OP_PUSH && !(po->flags & OPF_FARG)
+ && !(po->flags & OPF_RSAVE) && po->operand[0].type == OPT_CONST)
+ {
+ scan_for_pop_const(i, opcnt, i + opcnt * 12);
+ }
+ else if (po->op == OP_POP)
+ scan_pushes_for_pop(i, opcnt, ®mask_pp);
+ else if (po->op == OP_STD) {
+ po->flags |= OPF_DF | OPF_RMD | OPF_DONE;
+ scan_propagate_df(i + 1, opcnt);
+ }
+ }
+
+ // pass6:
+ // - find POPs for PUSHes, rm both
+ // - scan for all used registers
+ memset(cbits, 0, sizeof(cbits));
+ reg_use_pass(0, opcnt, cbits, 0, ®mask,
+ 0, ®mask_save, ®mask_init, regmask_arg);
+
+ // pass7:
+ // - find flag set ops for their users
+ // - do unresolved calls
+ // - declare indirect functions
+ for (i = 0; i < opcnt; i++)
+ {
+ po = &ops[i];
+ if (po->flags & (OPF_RMD|OPF_DONE))
+ continue;
+
+ if (po->flags & OPF_CC)
+ {
+ int setters[16], cnt = 0, branched = 0;
+
+ ret = scan_for_flag_set(i, i + opcnt * 6,
+ &branched, setters, &cnt);
+ if (ret < 0 || cnt <= 0)
+ ferr(po, "unable to trace flag setter(s)\n");
+ if (cnt > ARRAY_SIZE(setters))
+ ferr(po, "too many flag setters\n");
+
+ for (j = 0; j < cnt; j++)
+ {
+ tmp_op = &ops[setters[j]]; // flag setter
+ pfomask = 0;
+
+ // to get nicer code, we try to delay test and cmp;
+ // if we can't because of operand modification, or if we
+ // have arith op, or branch, make it calculate flags explicitly
+ if (tmp_op->op == OP_TEST || tmp_op->op == OP_CMP)
+ {
+ if (branched || scan_for_mod(tmp_op, setters[j] + 1, i, 0) >= 0)
+ pfomask = 1 << po->pfo;
+ }
+ else if (tmp_op->op == OP_CMPS || tmp_op->op == OP_SCAS) {
+ pfomask = 1 << po->pfo;
+ }
+ else {
+ // see if we'll be able to handle based on op result
+ if ((tmp_op->op != OP_AND && tmp_op->op != OP_OR
+ && po->pfo != PFO_Z && po->pfo != PFO_S
+ && po->pfo != PFO_P)
+ || branched
+ || scan_for_mod_opr0(tmp_op, setters[j] + 1, i) >= 0)
+ {
+ pfomask = 1 << po->pfo;
+ }
+
+ if (tmp_op->op == OP_ADD && po->pfo == PFO_C) {
+ propagate_lmod(tmp_op, &tmp_op->operand[0],
+ &tmp_op->operand[1]);
+ if (tmp_op->operand[0].lmod == OPLM_DWORD)
+ need_tmp64 = 1;
+ }
+ }
+ if (pfomask) {
+ tmp_op->pfomask |= pfomask;
+ cond_vars |= pfomask;
+ }
+ // note: may overwrite, currently not a problem
+ po->datap = tmp_op;
+ }
+
+ if (po->op == OP_RCL || po->op == OP_RCR
+ || po->op == OP_ADC || po->op == OP_SBB)
+ cond_vars |= 1 << PFO_C;
+ }
+
+ if (po->op == OP_CMPS || po->op == OP_SCAS) {
+ cond_vars |= 1 << PFO_Z;
+ }
+ else if (po->op == OP_MUL
+ || (po->op == OP_IMUL && po->operand_cnt == 1))
+ {
+ if (po->operand[0].lmod == OPLM_DWORD)
+ need_tmp64 = 1;
+ }
+ else if (po->op == OP_CALL) {
+ // note: resolved non-reg calls are OPF_DONE already
+ pp = po->pp;
+ ferr_assert(po, pp != NULL);
+
+ if (pp->is_unresolved) {
+ int regmask_stack = 0;
+ collect_call_args(po, i, pp, ®mask, save_arg_vars,
+ i + opcnt * 2);
+
+ // this is pretty rough guess:
+ // check whether ecx and edx were pushed (and not their saved versions)
+ for (arg = 0; arg < pp->argc; arg++) {
+ if (pp->arg[arg].reg != NULL)
+ continue;
+
+ tmp_op = pp->arg[arg].datap;
+ if (tmp_op == NULL)
+ ferr(po, "parsed_op missing for arg%d\n", arg);
+ if (tmp_op->p_argnum == 0 && tmp_op->operand[0].type == OPT_REG)
+ regmask_stack |= 1 << tmp_op->operand[0].reg;
+ }
+
+ if (!((regmask_stack & (1 << xCX))
+ && (regmask_stack & (1 << xDX))))
+ {
+ if (pp->argc_stack != 0
+ || ((regmask | regmask_arg) & ((1 << xCX)|(1 << xDX))))
+ {
+ pp_insert_reg_arg(pp, "ecx");
+ pp->is_fastcall = 1;
+ regmask_init |= 1 << xCX;
+ regmask |= 1 << xCX;
+ }
+ if (pp->argc_stack != 0
+ || ((regmask | regmask_arg) & (1 << xDX)))
+ {
+ pp_insert_reg_arg(pp, "edx");
+ regmask_init |= 1 << xDX;
+ regmask |= 1 << xDX;
+ }
+ }
+
+ // note: __cdecl doesn't fall into is_unresolved category
+ if (pp->argc_stack > 0)
+ pp->is_stdcall = 1;
+ }
+ }
+ else if (po->op == OP_MOV && po->operand[0].pp != NULL
+ && po->operand[1].pp != NULL)
+ {
+ // <var> = offset <something>
+ if ((po->operand[1].pp->is_func || po->operand[1].pp->is_fptr)
+ && !IS_START(po->operand[1].name, "off_"))
+ {
+ if (!po->operand[0].pp->is_fptr)
+ ferr(po, "%s not declared as fptr when it should be\n",
+ po->operand[0].name);
+ if (pp_cmp_func(po->operand[0].pp, po->operand[1].pp)) {
+ pp_print(buf1, sizeof(buf1), po->operand[0].pp);
+ pp_print(buf2, sizeof(buf2), po->operand[1].pp);
+ fnote(po, "var: %s\n", buf1);
+ fnote(po, "func: %s\n", buf2);
+ ferr(po, "^ mismatch\n");
+ }
+ }
+ }
+ else if (po->op == OP_DIV || po->op == OP_IDIV) {
+ if (po->operand[0].lmod == OPLM_DWORD) {
+ // 32bit division is common, look for it
+ if (po->op == OP_DIV)
+ ret = scan_for_reg_clear(i, xDX);
+ else
+ ret = scan_for_cdq_edx(i);
+ if (ret >= 0)
+ po->flags |= OPF_32BIT;
+ else
+ need_tmp64 = 1;
+ }
+ else
+ need_tmp_var = 1;
+ }
+ else if (po->op == OP_CLD)
+ po->flags |= OPF_RMD | OPF_DONE;
+ else if (po->op == OPP_FTOL) {
+ struct parsed_opr opr = OPR_INIT(OPT_REG, OPLM_DWORD, xDX);
+ j = -1;
+ find_next_read(i + 1, opcnt, &opr, i + opcnt * 18, &j);
+ if (j == -1)
+ po->flags |= OPF_32BIT;
+ }
+
+ if (po->op == OP_RCL || po->op == OP_RCR || po->op == OP_XCHG)
+ need_tmp_var = 1;
+ }
+
+ // output starts here
+
+ // define userstack size
+ if (g_func_pp->is_userstack) {
+ fprintf(fout, "#ifndef US_SZ_%s\n", g_func_pp->name);
+ fprintf(fout, "#define US_SZ_%s USERSTACK_SIZE\n", g_func_pp->name);
+ fprintf(fout, "#endif\n");
+ }
+
+ // the function itself
+ ferr_assert(ops, !g_func_pp->is_fptr);
+ output_pp(fout, g_func_pp,
+ (g_ida_func_attr & IDAFA_NORETURN) ? OPP_FORCE_NORETURN : 0);
+ fprintf(fout, "\n{\n");
+
+ // declare indirect functions
+ for (i = 0; i < opcnt; i++) {
+ po = &ops[i];
+ if (po->flags & OPF_RMD)
+ continue;
+
+ if (po->op == OP_CALL) {
+ pp = po->pp;
+ if (pp == NULL)
+ ferr(po, "NULL pp\n");
+
+ if (pp->is_fptr && !(pp->name[0] != 0 && pp->is_arg)) {
+ if (pp->name[0] != 0) {
+ memmove(pp->name + 2, pp->name, strlen(pp->name) + 1);
+ memcpy(pp->name, "i_", 2);
+
+ // might be declared already
+ found = 0;
+ for (j = 0; j < i; j++) {
+ if (ops[j].op == OP_CALL && (pp_tmp = ops[j].pp)) {
+ if (pp_tmp->is_fptr && IS(pp->name, pp_tmp->name)) {
+ found = 1;
+ break;
+ }
+ }
+ }
+ if (found)
+ continue;
+ }
+ else
+ snprintf(pp->name, sizeof(pp->name), "icall%d", i);
+
+ fprintf(fout, " ");
+ output_pp(fout, pp, OPP_SIMPLE_ARGS);
+ fprintf(fout, ";\n");
+ }
+ }
+ }
+
+ // output LUTs/jumptables
+ for (i = 0; i < g_func_pd_cnt; i++) {
+ pd = &g_func_pd[i];
+ fprintf(fout, " static const ");
+ if (pd->type == OPT_OFFSET) {
+ fprintf(fout, "void *jt_%s[] =\n { ", pd->label);
+
+ for (j = 0; j < pd->count; j++) {
+ if (j > 0)
+ fprintf(fout, ", ");
+ fprintf(fout, "&&%s", pd->d[j].u.label);
+ }
+ }
+ else {
+ fprintf(fout, "%s %s[] =\n { ",
+ lmod_type_u(ops, pd->lmod), pd->label);
+
+ for (j = 0; j < pd->count; j++) {
+ if (j > 0)
+ fprintf(fout, ", ");
+ fprintf(fout, "%u", pd->d[j].u.val);
+ }
+ }
+ fprintf(fout, " };\n");
+ had_decl = 1;
+ }
+
+ // declare stack frame, va_arg
+ if (g_stack_fsz) {
+ fprintf(fout, " union { u32 d[%d];", (g_stack_fsz + 3) / 4);
+ if (g_func_lmods & (1 << OPLM_WORD))
+ fprintf(fout, " u16 w[%d];", (g_stack_fsz + 1) / 2);
+ if (g_func_lmods & (1 << OPLM_BYTE))
+ fprintf(fout, " u8 b[%d];", g_stack_fsz);
+ if (g_func_lmods & (1 << OPLM_QWORD))
+ fprintf(fout, " double q[%d];", (g_stack_fsz + 7) / 8);
+ fprintf(fout, " } sf;\n");
+ had_decl = 1;
+ }
+
+ if (g_func_pp->is_userstack) {
+ fprintf(fout, " u32 fake_sf[US_SZ_%s / 4];\n", g_func_pp->name);
+ fprintf(fout, " u32 *esp = &fake_sf[sizeof(fake_sf) / 4];\n");
+ had_decl = 1;
+ }
+
+ if (g_func_pp->is_vararg) {
+ fprintf(fout, " va_list ap;\n");
+ had_decl = 1;
+ }
+
+ // declare arg-registers
+ for (i = 0; i < g_func_pp->argc; i++) {
+ if (g_func_pp->arg[i].reg != NULL) {
+ reg = char_array_i(regs_r32,
+ ARRAY_SIZE(regs_r32), g_func_pp->arg[i].reg);
+ if (regmask & (1 << reg)) {
+ if (g_func_pp->arg[i].type.is_retreg)
+ fprintf(fout, " u32 %s = *r_%s;\n",
+ g_func_pp->arg[i].reg, g_func_pp->arg[i].reg);
+ else
+ fprintf(fout, " u32 %s = (u32)a%d;\n",
+ g_func_pp->arg[i].reg, i + 1);
+ }
+ else {
+ if (g_func_pp->arg[i].type.is_retreg)
+ ferr(ops, "retreg '%s' is unused?\n",
+ g_func_pp->arg[i].reg);
+ fprintf(fout, " // %s = a%d; // unused\n",
+ g_func_pp->arg[i].reg, i + 1);
+ }
+ had_decl = 1;
+ }
+ }
+
+ // declare normal registers
+ regmask_now = regmask & ~regmask_arg;
+ regmask_now &= ~(1 << xSP);
+ if (regmask_now & 0x00ff) {
+ for (reg = 0; reg < 8; reg++) {
+ if (regmask_now & (1 << reg)) {
+ fprintf(fout, " u32 %s", regs_r32[reg]);
+ if (regmask_init & (1 << reg))
+ fprintf(fout, " = 0");
+ fprintf(fout, ";\n");
+ had_decl = 1;
+ }
+ }
+ }
+ // ... mmx
+ if (regmask_now & 0xff00) {
+ for (reg = 8; reg < 16; reg++) {
+ if (regmask_now & (1 << reg)) {
+ fprintf(fout, " mmxr %s", regs_r32[reg]);
+ if (regmask_init & (1 << reg))
+ fprintf(fout, " = { 0, }");
+ fprintf(fout, ";\n");
+ had_decl = 1;
+ }
+ }
+ }
+ // ... x87
+ if (regmask_now & 0xff0000) {
+ for (reg = 16; reg < 24; reg++) {
+ if (regmask_now & (1 << reg)) {
+ fprintf(fout, " double f_st%d", reg - 16);
+ if (regmask_init & (1 << reg))
+ fprintf(fout, " = 0");
+ fprintf(fout, ";\n");
+ had_decl = 1;
+ }
+ }
+ }
+
+ if (regmask_save) {
+ for (reg = 0; reg < 8; reg++) {
+ if (regmask_save & (1 << reg)) {
+ fprintf(fout, " u32 s_%s;\n", regs_r32[reg]);
+ had_decl = 1;
+ }
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(save_arg_vars); i++) {
+ if (save_arg_vars[i] == 0)
+ continue;
+ for (reg = 0; reg < 32; reg++) {
+ if (save_arg_vars[i] & (1 << reg)) {
+ fprintf(fout, " u32 %s;\n",
+ saved_arg_name(buf1, sizeof(buf1), i, reg + 1));
+ had_decl = 1;
+ }
+ }
+ }
+
+ // declare push-pop temporaries
+ if (regmask_pp) {
+ for (reg = 0; reg < 8; reg++) {
+ if (regmask_pp & (1 << reg)) {
+ fprintf(fout, " u32 pp_%s;\n", regs_r32[reg]);
+ had_decl = 1;
+ }
+ }
+ }
+
+ if (cond_vars) {
+ for (i = 0; i < 8; i++) {
+ if (cond_vars & (1 << i)) {
+ fprintf(fout, " u32 cond_%s;\n", parsed_flag_op_names[i]);
+ had_decl = 1;
+ }
+ }
+ }
+
+ if (need_tmp_var) {
+ fprintf(fout, " u32 tmp;\n");
+ had_decl = 1;
+ }
+
+ if (need_tmp64) {
+ fprintf(fout, " u64 tmp64;\n");
+ had_decl = 1;
+ }