+ }
+ }
+
+ if (po->bt_i != -1 || (po->flags & OPF_RMD))
+ continue;
+
+ if (po->operand[0].type == OPT_LABEL)
+ // assume tail call
+ goto tailcall;
+
+ ferr(po, "unhandled branch\n");
+
+tailcall:
+ po->op = OP_CALL;
+ po->flags |= OPF_TAIL;
+ if (i > 0 && ops[i - 1].op == OP_POP)
+ po->flags |= OPF_ATAIL;
+ i--; // reprocess
+ }
+}
+
// Detect the function's stack-frame prologue and the matching epilogue(s),
// mark those instructions as handled (OPF_RMD|OPF_DONE|OPF_NOREGS) so later
// passes skip them, and record the frame kind/size in the globals
// g_bp_frame / g_sp_frame / g_stack_fsz.  Aborts via ferr() on any frame
// shape it does not recognize.
static void scan_prologue_epilogue(int opcnt)
{
  int ecx_push = 0, esp_sub = 0;
  int found;
  int i, j, l;

  // classic ebp frame: push ebp / mov ebp, esp
  if (ops[0].op == OP_PUSH && IS(opr_name(&ops[0], 0), "ebp")
    && ops[1].op == OP_MOV
    && IS(opr_name(&ops[1], 0), "ebp")
    && IS(opr_name(&ops[1], 1), "esp"))
  {
    g_bp_frame = 1;
    ops[0].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
    ops[1].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
    i = 2;

    // usual local-space reservation: sub esp, N
    if (ops[2].op == OP_SUB && IS(opr_name(&ops[2], 0), "esp")) {
      g_stack_fsz = opr_const(&ops[2], 1);
      ops[2].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
      i++;
    }
    else {
      // another way msvc builds stack frame..
      // a run of 'push ecx' reserving 4 bytes each
      i = 2;
      while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
        g_stack_fsz += 4;
        ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
        ecx_push++;
        i++;
      }
      // and another way..
      // large frames: mov eax, N / call __alloca_probe
      if (i == 2 && ops[i].op == OP_MOV && ops[i].operand[0].reg == xAX
        && ops[i].operand[1].type == OPT_CONST
        && ops[i + 1].op == OP_CALL
        && IS(opr_name(&ops[i + 1], 0), "__alloca_probe"))
      {
        g_stack_fsz += ops[i].operand[1].val;
        ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
        i++;
        ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
        i++;
      }
    }

    // walk every exit point (OPF_TAIL) and verify/mark its epilogue
    found = 0;
    do {
      for (; i < opcnt; i++)
        if (ops[i].flags & OPF_TAIL)
          break;
      j = i - 1;
      // function may end in a jmp with no tail insn; back up onto it
      // unless it's a resolved branch (bt_i/btj set)
      if (i == opcnt && (ops[j].flags & OPF_JMP)) {
        if (ops[j].bt_i != -1 || ops[j].btj != NULL)
          break;
        i--;
        j--;
      }

      if ((ops[j].op == OP_POP && IS(opr_name(&ops[j], 0), "ebp"))
          || ops[j].op == OP_LEAVE)
      {
        ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
      }
      else if (ops[i].op == OP_CALL && ops[i].pp != NULL
        && ops[i].pp->is_noreturn)
      {
        // on noreturn, msvc sometimes cleans stack, sometimes not
        i++;
        found = 1;
        continue;
      }
      else if (!(g_ida_func_attr & IDAFA_NORETURN))
        ferr(&ops[j], "'pop ebp' expected\n");

      if (g_stack_fsz != 0) {
        // expect the esp restore just before the ebp pop:
        // either 'leave' (does both) or 'mov esp, ebp' / 'pop ebp'
        if (ops[j].op == OP_LEAVE)
          j--;
        else if (ops[j].op == OP_POP
          && ops[j - 1].op == OP_MOV
          && IS(opr_name(&ops[j - 1], 0), "esp")
          && IS(opr_name(&ops[j - 1], 1), "ebp"))
        {
          ops[j - 1].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
          j -= 2;
        }
        else if (!(g_ida_func_attr & IDAFA_NORETURN))
        {
          ferr(&ops[j], "esp restore expected\n");
        }

        // NOTE(review): with an ebp frame the 'push ecx' space is
        // reclaimed by the esp restore, so a trailing 'pop ecx' here
        // is treated as an error rather than part of the epilogue
        if (ecx_push && j >= 0 && ops[j].op == OP_POP
          && IS(opr_name(&ops[j], 0), "ecx"))
        {
          ferr(&ops[j], "unexpected ecx pop\n");
        }
      }

      found = 1;
      i++;
    } while (i < opcnt);

    if (!found)
      ferr(ops, "missing ebp epilogue\n");
    return;
  }

  // non-bp frame
  // frame may be built purely with 'push ecx' repetitions..
  i = 0;
  while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
    ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
    g_stack_fsz += 4;
    ecx_push++;
    i++;
  }

  // ..or with a single 'sub esp, N' appearing before any push/branch
  for (; i < opcnt; i++) {
    if (ops[i].op == OP_PUSH || (ops[i].flags & (OPF_JMP|OPF_TAIL)))
      break;
    if (ops[i].op == OP_SUB && ops[i].operand[0].reg == xSP
      && ops[i].operand[1].type == OPT_CONST)
    {
      g_stack_fsz = ops[i].operand[1].val;
      ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
      esp_sub = 1;
      break;
    }
  }

  if (ecx_push && !esp_sub) {
    // could actually be args for a call..
    for (; i < opcnt; i++)
      if (ops[i].op != OP_PUSH)
        break;

    if (ops[i].op == OP_CALL && ops[i].operand[0].type == OPT_LABEL) {
      const struct parsed_proto *pp;
      pp = proto_parse(g_fhdr, opr_name(&ops[i], 0), 1);
      j = pp ? pp->argc_stack : 0;
      // give the callee's stack args back: un-mark that many pushes,
      // scanning backwards from the call
      while (i > 0 && j > 0) {
        i--;
        if (ops[i].op == OP_PUSH) {
          ops[i].flags &= ~(OPF_RMD | OPF_DONE | OPF_NOREGS);
          j--;
        }
      }
      if (j != 0)
        ferr(&ops[i], "unhandled prologue\n");

      // recheck: recount only the pushes that are still marked removed
      i = g_stack_fsz = ecx_push = 0;
      while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
        if (!(ops[i].flags & OPF_RMD))
          break;
        g_stack_fsz += 4;
        ecx_push++;
        i++;
      }
    }
  }

  found = 0;
  if (ecx_push || esp_sub)
  {
    g_sp_frame = 1;

    i++;
    do {
      // locate the next exit point, same approach as the ebp case
      for (; i < opcnt; i++)
        if (ops[i].flags & OPF_TAIL)
          break;
      j = i - 1;
      if (i == opcnt && (ops[j].flags & OPF_JMP)) {
        if (ops[j].bt_i != -1 || ops[j].btj != NULL)
          break;
        i--;
        j--;
      }

      if (ecx_push > 0) {
        // expect ecx_push worth of 'pop ecx' (or one combined 'add esp, N')
        for (l = 0; l < ecx_push; l++) {
          if (ops[j].op == OP_POP && IS(opr_name(&ops[j], 0), "ecx"))
            /* pop ecx */;
          else if (ops[j].op == OP_ADD
               && IS(opr_name(&ops[j], 0), "esp")
               && ops[j].operand[1].type == OPT_CONST)
          {
            /* add esp, N */
            // one insn covers N/4 pops; -1 compensates the loop's l++
            l += ops[j].operand[1].val / 4 - 1;
          }
          else
            ferr(&ops[j], "'pop ecx' expected\n");

          ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
          j--;
        }
        if (l != ecx_push)
          ferr(&ops[j], "epilogue scan failed\n");

        found = 1;
      }

      if (esp_sub) {
        // must find the exact matching 'add esp, g_stack_fsz'
        if (ops[j].op != OP_ADD
            || !IS(opr_name(&ops[j], 0), "esp")
            || ops[j].operand[1].type != OPT_CONST
            || ops[j].operand[1].val != g_stack_fsz)
          ferr(&ops[j], "'add esp' expected\n");

        ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
        ops[j].operand[1].val = 0; // hack for stack arg scanner
        found = 1;
      }

      i++;
    } while (i < opcnt);

    if (!found)
      ferr(ops, "missing esp epilogue\n");
  }
}
+
+static const struct parsed_proto *resolve_icall(int i, int opcnt,
+ int *pp_i, int *multi_src)
+{
+ const struct parsed_proto *pp = NULL;
+ int search_advice = 0;
+
+ *multi_src = 0;
+ *pp_i = -1;
+
+ switch (ops[i].operand[0].type) {
+ case OPT_REGMEM:
+ case OPT_LABEL:
+ case OPT_OFFSET:
+ pp = try_recover_pp(&ops[i], &ops[i].operand[0], &search_advice);
+ if (!search_advice)
+ break;
+ // fallthrough
+ default:
+ scan_for_call_type(i, &ops[i].operand[0], i + opcnt * 9, &pp,
+ pp_i, multi_src);
+ break;
+ }
+
+ return pp;
+}
+
// find an instruction that changed opr before i op
// *op_i must be set to -1 by the caller
// *is_caller is set to 1 if one source is determined to be the caller
//   (i.e. the backward walk ran off the start of the function)
// returns 1 if found, *op_i is then set to origin;
// returns -1 on ambiguity (multiple distinct origins) or caller origin,
// 0 if this path was already visited
static int resolve_origin(int i, const struct parsed_opr *opr,
  int magic, int *op_i, int *is_caller)
{
  struct label_ref *lr;
  int ret = 0;

  // cc_scratch doubles as a visit mark keyed by 'magic'
  if (ops[i].cc_scratch == magic)
    return 0;
  ops[i].cc_scratch = magic;

  while (1) {
    // at a label, also follow every incoming branch backwards
    if (g_labels[i] != NULL) {
      lr = &g_label_refs[i];
      for (; lr != NULL; lr = lr->next) {
        check_i(&ops[i], lr->i);
        ret |= resolve_origin(lr->i, opr, magic, op_i, is_caller);
      }
      // if the preceding insn never falls through, branches were the
      // only way here - stop the linear walk
      if (i > 0 && LAST_OP(i - 1))
        return ret;
    }

    i--;
    if (i < 0) {
      // walked past the function entry: value comes from the caller
      if (is_caller != NULL)
        *is_caller = 1;
      return -1;
    }

    if (ops[i].cc_scratch == magic)
      return ret;
    ops[i].cc_scratch = magic;

    if (!(ops[i].flags & OPF_DATA))
      continue;
    if (!is_opr_modified(opr, &ops[i]))
      continue;

    // found a writer; fail if a different one was already recorded
    if (*op_i >= 0) {
      if (*op_i == i)
        return ret | 1;

      // XXX: could check if the other op does the same
      return -1;
    }

    *op_i = i;
    return ret | 1;
  }
}
+
// find an instruction that previously referenced opr
// if multiple results are found - fail (returns -1)
// *op_i must be set to -1 by the caller
// returns 1 if found, *op_i is then set to referencer insn;
// 0 if the path was already visited or nothing referenced opr
static int resolve_last_ref(int i, const struct parsed_opr *opr,
  int magic, int *op_i)
{
  struct label_ref *lr;
  int ret = 0;

  // cc_scratch is the visit mark for this search ('magic' keys it)
  if (ops[i].cc_scratch == magic)
    return 0;
  ops[i].cc_scratch = magic;

  while (1) {
    // at a label, follow all incoming branches backwards too
    if (g_labels[i] != NULL) {
      lr = &g_label_refs[i];
      for (; lr != NULL; lr = lr->next) {
        check_i(&ops[i], lr->i);
        ret |= resolve_last_ref(lr->i, opr, magic, op_i);
      }
      // preceding insn doesn't fall through => branches were the only
      // way to reach this point
      if (i > 0 && LAST_OP(i - 1))
        return ret;
    }

    i--;
    if (i < 0)
      return -1;

    if (ops[i].cc_scratch == magic)
      return 0;
    ops[i].cc_scratch = magic;

    if (!is_opr_referenced(opr, &ops[i]))
      continue;

    // a second, different referencer makes the result ambiguous
    if (*op_i >= 0)
      return -1;

    *op_i = i;
    return 1;
  }
}
+
// find next instruction that reads opr
// *op_i must be set to -1 by the caller
// on return, *op_i is set to first referencer insn
// returns 1 if exactly 1 referencer is found, -1 if more than one,
// 0 if none (value overwritten, function tail reached, or path visited)
static int find_next_read(int i, int opcnt,
  const struct parsed_opr *opr, int magic, int *op_i)
{
  struct parsed_op *po;
  int j, ret = 0;

  for (; i < opcnt; i++)
  {
    // cc_scratch visit mark avoids rescanning shared paths
    if (ops[i].cc_scratch == magic)
      return ret;
    ops[i].cc_scratch = magic;

    po = &ops[i];
    if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
      if (po->btj != NULL) {
        // jumptable: recurse into every target
        for (j = 0; j < po->btj->count; j++) {
          check_i(po, po->btj->d[j].bt_i);
          ret |= find_next_read(po->btj->d[j].bt_i, opcnt, opr,
                   magic, op_i);
        }
        return ret;
      }

      if (po->flags & OPF_RMD)
        continue;
      check_i(po, po->bt_i);
      if (po->flags & OPF_CJMP) {
        // conditional: recurse into the taken branch, keep walking the
        // fall-through path in this loop
        ret |= find_next_read(po->bt_i, opcnt, opr, magic, op_i);
        if (ret < 0)
          return ret;
      }
      else
        // unconditional: continue the walk at the branch target
        i = po->bt_i - 1;
      continue;
    }

    if (!is_opr_read(opr, po)) {
      // a full-width write (or any call) kills the value before any read
      if (is_opr_modified(opr, po)
        && (po->op == OP_CALL
          || ((po->flags & OPF_DATA)
            && po->operand[0].lmod == OPLM_DWORD)))
      {
        // it's overwritten
        return ret;
      }
      if (po->flags & OPF_TAIL)
        return ret;
      continue;
    }

    // a second reader makes the result ambiguous
    if (*op_i >= 0)
      return -1;

    *op_i = i;
    return 1;
  }

  return 0;
}
+
+static int try_resolve_const(int i, const struct parsed_opr *opr,
+ int magic, unsigned int *val)
+{
+ int s_i = -1;
+ int ret;
+
+ ret = resolve_origin(i, opr, magic, &s_i, NULL);
+ if (ret == 1) {
+ i = s_i;
+ if (ops[i].op != OP_MOV && ops[i].operand[1].type != OPT_CONST)
+ return -1;
+
+ *val = ops[i].operand[1].val;
+ return 1;
+ }
+
+ return -1;
+}
+
+static struct parsed_proto *process_call_early(int i, int opcnt,
+ int *adj_i)
+{
+ struct parsed_op *po = &ops[i];
+ struct parsed_proto *pp;
+ int multipath = 0;
+ int adj = 0;
+ int j, ret;
+
+ pp = po->pp;
+ if (pp == NULL || pp->is_vararg || pp->argc_reg != 0)
+ // leave for later
+ return NULL;
+
+ // look for and make use of esp adjust
+ *adj_i = ret = -1;
+ if (!pp->is_stdcall && pp->argc_stack > 0)
+ ret = scan_for_esp_adjust(i + 1, opcnt,
+ pp->argc_stack * 4, &adj, &multipath, 0);
+ if (ret >= 0) {
+ if (pp->argc_stack > adj / 4)
+ return NULL;
+ if (multipath)
+ return NULL;
+ if (ops[ret].op == OP_POP) {
+ for (j = 1; j < adj / 4; j++) {
+ if (ops[ret + j].op != OP_POP
+ || ops[ret + j].operand[0].reg != xCX)
+ {
+ return NULL;
+ }
+ }
+ }
+ }
+
+ *adj_i = ret;
+ return pp;
+}
+
// Full handling of the call at ops[i]: resolve indirect-call targets,
// synthesize a fallback prototype when resolution fails, then locate and
// account for the caller-side esp adjust (turning recognized varargs
// into explicit stack args).  Returns the (possibly newly created)
// prototype; also stores it in po->pp.
static struct parsed_proto *process_call(int i, int opcnt)
{
  struct parsed_op *po = &ops[i];
  const struct parsed_proto *pp_c;
  struct parsed_proto *pp;
  const char *tmpname;
  int call_i = -1, ref_i = -1;
  int adj = 0, multipath = 0;
  int ret, arg;

  tmpname = opr_name(po, 0);
  pp = po->pp;
  if (pp == NULL)
  {
    // indirect call
    pp_c = resolve_icall(i, opcnt, &call_i, &multipath);
    if (pp_c != NULL) {
      if (!pp_c->is_func && !pp_c->is_fptr)
        ferr(po, "call to non-func: %s\n", pp_c->name);
      // clone so our modifications don't affect the shared proto
      pp = proto_clone(pp_c);
      my_assert_not(pp, NULL);
      if (multipath)
        // not resolved just to single func
        pp->is_fptr = 1;

      switch (po->operand[0].type) {
      case OPT_REG:
        // we resolved this call and no longer need the register
        po->regmask_src &= ~(1 << po->operand[0].reg);

        // single source that's a 'mov reg, label'?
        if (!multipath && i != call_i && ops[call_i].op == OP_MOV
          && ops[call_i].operand[1].type == OPT_LABEL)
        {
          // no other source users?
          ret = resolve_last_ref(i, &po->operand[0], i + opcnt * 10,
                  &ref_i);
          if (ret == 1 && call_i == ref_i) {
            // and nothing uses it after us?
            ref_i = -1;
            find_next_read(i + 1, opcnt, &po->operand[0],
              i + opcnt * 11, &ref_i);
            if (ref_i == -1)
              // then also don't need the source mov
              ops[call_i].flags |= OPF_RMD | OPF_NOREGS;
          }
        }
        break;
      case OPT_REGMEM:
        pp->is_fptr = 1;
        break;
      default:
        break;
      }
    }
    if (pp == NULL) {
      // unresolved: fabricate an int-returning __cdecl fptr proto,
      // deriving the arg count from the observed esp adjust
      pp = calloc(1, sizeof(*pp));
      my_assert_not(pp, NULL);

      pp->is_fptr = 1;
      ret = scan_for_esp_adjust(i + 1, opcnt,
              -1, &adj, &multipath, 0);
      if (ret < 0 || adj < 0) {
        if (!g_allow_regfunc)
          ferr(po, "non-__cdecl indirect call unhandled yet\n");
        pp->is_unresolved = 1;
        adj = 0;
      }
      adj /= 4;
      if (adj > ARRAY_SIZE(pp->arg))
        ferr(po, "esp adjust too large: %d\n", adj);
      pp->ret_type.name = strdup("int");
      pp->argc = pp->argc_stack = adj;
      for (arg = 0; arg < pp->argc; arg++)
        pp->arg[arg].type.name = strdup("int");
    }
    po->pp = pp;
  }

  // look for and make use of esp adjust
  multipath = 0;
  ret = -1;
  if (!pp->is_stdcall && pp->argc_stack > 0) {
    // vararg calls may clean more than argc_stack*4 bytes
    int adj_expect = pp->is_vararg ? -1 : pp->argc_stack * 4;
    ret = scan_for_esp_adjust(i + 1, opcnt,
            adj_expect, &adj, &multipath, 0);
  }
  if (ret >= 0) {
    if (pp->is_vararg) {
      if (adj / 4 < pp->argc_stack) {
        fnote(po, "(this call)\n");
        ferr(&ops[ret], "esp adjust is too small: %x < %x\n",
          adj, pp->argc_stack * 4);
      }
      // modify pp to make it have varargs as normal args
      arg = pp->argc;
      pp->argc += adj / 4 - pp->argc_stack;
      for (; arg < pp->argc; arg++) {
        pp->arg[arg].type.name = strdup("int");
        pp->argc_stack++;
      }
      if (pp->argc > ARRAY_SIZE(pp->arg))
        ferr(po, "too many args for '%s'\n", tmpname);
    }
    if (pp->argc_stack > adj / 4) {
      fnote(po, "(this call)\n");
      ferr(&ops[ret], "stack tracking failed for '%s': %x %x\n",
        tmpname, pp->argc_stack * 4, adj);
    }

    // second pass with do_update set: actually consume the adjust
    scan_for_esp_adjust(i + 1, opcnt,
      pp->argc_stack * 4, &adj, &multipath, 1);
  }
  else if (pp->is_vararg)
    ferr(po, "missing esp_adjust for vararg func '%s'\n",
      pp->name);

  return pp;
}
+
// Simple backward scan that tries to match the stack args of the call
// 'po' (at index i) to the preceding push insns.  Returns 0 and marks
// the pushes (OPF_RMD|OPF_DONE|OPF_FARGNR|OPF_FARG, datap -> push) on
// success; returns -1 if anything complicates the picture (labels,
// intervening calls/pops/esp adjusts, conditional jumps, modified push
// sources, va_list args) so the full collector must be used instead.
static int collect_call_args_early(struct parsed_op *po, int i,
  struct parsed_proto *pp, int *regmask)
{
  int arg, ret;
  int j;

  // skip register args - only stack args are collected here
  for (arg = 0; arg < pp->argc; arg++)
    if (pp->arg[arg].reg == NULL)
      break;

  // first see if it can be easily done
  for (j = i; j > 0 && arg < pp->argc; )
  {
    if (g_labels[j] != NULL)
      return -1;
    j--;

    if (ops[j].op == OP_CALL)
      return -1;
    else if (ops[j].op == OP_ADD && ops[j].operand[0].reg == xSP)
      return -1;
    else if (ops[j].op == OP_POP)
      return -1;
    else if (ops[j].flags & OPF_CJMP)
      return -1;
    else if (ops[j].op == OP_PUSH) {
      // already claimed as another call's arg?
      if (ops[j].flags & (OPF_FARG|OPF_FARGNR))
        return -1;
      // pushed value must not be modified between push and call
      ret = scan_for_mod(&ops[j], j + 1, i, 1);
      if (ret >= 0)
        return -1;

      if (pp->arg[arg].type.is_va_list)
        return -1;

      // next arg
      for (arg++; arg < pp->argc; arg++)
        if (pp->arg[arg].reg == NULL)
          break;
    }
  }

  if (arg < pp->argc)
    return -1;

  // now do it
  for (arg = 0; arg < pp->argc; arg++)
    if (pp->arg[arg].reg == NULL)
      break;

  // second pass: same walk, this time recording each push as an arg
  for (j = i; j > 0 && arg < pp->argc; )
  {
    j--;

    if (ops[j].op == OP_PUSH)
    {
      ops[j].p_argnext = -1;
      ferr_assert(&ops[j], pp->arg[arg].datap == NULL);
      pp->arg[arg].datap = &ops[j];

      if (ops[j].operand[0].type == OPT_REG)
        *regmask |= 1 << ops[j].operand[0].reg;

      ops[j].flags |= OPF_RMD | OPF_DONE | OPF_FARGNR | OPF_FARG;
      ops[j].flags &= ~OPF_RSAVE;

      // next arg
      for (arg++; arg < pp->argc; arg++)
        if (pp->arg[arg].reg == NULL)
          break;
    }
  }

  return 0;
}
+
+static int collect_call_args_r(struct parsed_op *po, int i,
+ struct parsed_proto *pp, int *regmask, int *save_arg_vars,
+ int *arg_grp, int arg, int magic, int need_op_saving, int may_reuse)
+{
+ struct parsed_proto *pp_tmp;
+ struct parsed_op *po_tmp;
+ struct label_ref *lr;
+ int need_to_save_current;
+ int arg_grp_current = 0;
+ int save_args_seen = 0;
+ int save_args;
+ int ret = 0;
+ int reg;
+ char buf[32];
+ int j, k;
+
+ if (i < 0) {
+ ferr(po, "dead label encountered\n");
+ return -1;
+ }
+
+ for (; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+ magic = (magic & 0xffffff) | (arg << 24);
+
+ for (j = i; j >= 0 && (arg < pp->argc || pp->is_unresolved); )
+ {
+ if (((ops[j].cc_scratch ^ magic) & 0xffffff) == 0) {
+ if (ops[j].cc_scratch != magic) {
+ ferr(&ops[j], "arg collect hit same path with diff args for %s\n",
+ pp->name);
+ return -1;
+ }
+ // ok: have already been here
+ return 0;
+ }
+ ops[j].cc_scratch = magic;
+
+ if (g_labels[j] != NULL && g_label_refs[j].i != -1) {
+ lr = &g_label_refs[j];
+ if (lr->next != NULL)
+ need_op_saving = 1;
+ for (; lr->next; lr = lr->next) {
+ check_i(&ops[j], lr->i);
+ if ((ops[lr->i].flags & (OPF_JMP|OPF_CJMP)) != OPF_JMP)
+ may_reuse = 1;
+ ret = collect_call_args_r(po, lr->i, pp, regmask, save_arg_vars,
+ arg_grp, arg, magic, need_op_saving, may_reuse);
+ if (ret < 0)
+ return ret;
+ }
+
+ check_i(&ops[j], lr->i);
+ if ((ops[lr->i].flags & (OPF_JMP|OPF_CJMP)) != OPF_JMP)
+ may_reuse = 1;
+ if (j > 0 && LAST_OP(j - 1)) {
+ // follow last branch in reverse
+ j = lr->i;
+ continue;
+ }
+ need_op_saving = 1;
+ ret = collect_call_args_r(po, lr->i, pp, regmask, save_arg_vars,
+ arg_grp, arg, magic, need_op_saving, may_reuse);
+ if (ret < 0)
+ return ret;
+ }
+ j--;
+
+ if (ops[j].op == OP_CALL)
+ {
+ if (pp->is_unresolved)
+ break;
+
+ pp_tmp = ops[j].pp;
+ if (pp_tmp == NULL)
+ ferr(po, "arg collect hit unparsed call '%s'\n",
+ ops[j].operand[0].name);
+ if (may_reuse && pp_tmp->argc_stack > 0)
+ ferr(po, "arg collect %d/%d hit '%s' with %d stack args\n",
+ arg, pp->argc, opr_name(&ops[j], 0), pp_tmp->argc_stack);
+ }
+ // esp adjust of 0 means we collected it before
+ else if (ops[j].op == OP_ADD && ops[j].operand[0].reg == xSP
+ && (ops[j].operand[1].type != OPT_CONST
+ || ops[j].operand[1].val != 0))
+ {
+ if (pp->is_unresolved)
+ break;
+
+ fnote(po, "(this call)\n");
+ ferr(&ops[j], "arg collect %d/%d hit esp adjust of %d\n",
+ arg, pp->argc, ops[j].operand[1].val);
+ }
+ else if (ops[j].op == OP_POP && !(ops[j].flags & OPF_DONE))
+ {
+ if (pp->is_unresolved)
+ break;
+
+ fnote(po, "(this call)\n");
+ ferr(&ops[j], "arg collect %d/%d hit pop\n", arg, pp->argc);
+ }
+ else if (ops[j].flags & OPF_CJMP)
+ {
+ if (pp->is_unresolved)
+ break;
+