+ if (pp_c != NULL) {
+ pp = proto_clone(pp_c);
+ my_assert_not(pp, NULL);
+ }
+ }
+
+ if (pp != NULL) {
+ if (pp->is_fptr)
+ check_func_pp(po, pp, "fptr var call");
+ if (pp->is_noreturn) {
+ po->flags |= OPF_TAIL;
+ po->flags &= ~OPF_ATAIL; // most likely...
+ }
+ }
+ po->pp = pp;
+ continue;
+ }
+
+ if (!(po->flags & OPF_JMP) || po->op == OP_RET)
+ continue;
+
+ if (po->operand[0].type == OPT_REGMEM) {
+ pd = try_resolve_jumptab(i, opcnt);
+ if (pd == NULL)
+ goto tailcall;
+
+ po->btj = pd;
+ continue;
+ }
+
+ for (l = 0; l < opcnt; l++) {
+ if (g_labels[l] != NULL
+ && IS(po->operand[0].name, g_labels[l]))
+ {
+ if (l == i + 1 && po->op == OP_JMP) {
+ // yet another alignment type..
+ po->flags |= OPF_RMD|OPF_DONE;
+ break;
+ }
+ add_label_ref(&g_label_refs[l], i);
+ po->bt_i = l;
+ break;
+ }
+ }
+
+ if (po->bt_i != -1 || (po->flags & OPF_RMD))
+ continue;
+
+ if (po->operand[0].type == OPT_LABEL)
+ // assume tail call
+ goto tailcall;
+
+ ferr(po, "unhandled branch\n");
+
+tailcall:
+ po->op = OP_CALL;
+ po->flags |= OPF_TAIL;
+ prev_op = i > 0 ? ops[i - 1].op : OP_UD2;
+ if (prev_op == OP_POP)
+ po->flags |= OPF_ATAIL;
+ if (g_stack_fsz + g_bp_frame == 0 && prev_op != OP_PUSH
+ && (g_func_pp == NULL || g_func_pp->argc_stack > 0))
+ {
+ po->flags |= OPF_ATAIL;
+ }
+ i--; // reprocess
+ }
+}
+
+static int resolve_origin(int i, const struct parsed_opr *opr,
+ int magic, int *op_i, int *is_caller);
+
+static void eliminate_seh_writes(int opcnt)
+{
+ const struct parsed_opr *opr;
+ char ofs_reg[16];
+ int offset;
+ int i;
+
+ // assume all sf writes above g_seh_size to be seh related
+ // (probably unsafe but oh well)
+ for (i = 0; i < opcnt; i++) {
+ if (ops[i].op != OP_MOV)
+ continue;
+ opr = &ops[i].operand[0];
+ if (opr->type != OPT_REGMEM)
+ continue;
+ if (!is_stack_access(&ops[i], opr))
+ continue;
+
+ offset = 0;
+ parse_stack_access(&ops[i], opr->name, ofs_reg, &offset,
+ NULL, NULL, 0);
+ if (offset < 0 && offset >= -g_seh_size)
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ }
+}
+
// remove inline SEH frame setup/teardown ("mov fs:0, ...") and the
// insns that built the SEH record, so they don't reach the output
static void eliminate_seh(int opcnt)
{
  int i, j, k, ret;

  for (i = 0; i < opcnt; i++) {
    // look for the handler registration: mov fs:0, <src>
    if (ops[i].op != OP_MOV)
      continue;
    if (ops[i].operand[0].segment != SEG_FS)
      continue;
    if (!IS(opr_name(&ops[i], 0), "0"))
      continue;

    ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
    if (ops[i].operand[1].reg == xSP) {
      // "mov fs:0, esp" form: the record was built with pushes;
      // walk back removing them until the "push -1" terminator
      for (j = i - 1; j >= 0; j--) {
        if (ops[j].op != OP_PUSH)
          continue;
        ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
        g_seh_size += 4;
        if (ops[j].operand[0].val == ~0)
          break;
        if (ops[j].operand[0].type == OPT_REG) {
          // also remove the insn that computed the pushed register
          // (magic makes the cc_scratch visit-marks unique per scan)
          k = -1;
          ret = resolve_origin(j, &ops[j].operand[0],
                  j + opcnt * 22, &k, NULL);
          if (ret == 1)
            ops[k].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
        }
      }
      if (j < 0)
        ferr(ops, "missing seh terminator\n");
    }
    else {
      // registered through some other reg: drop that reg's origin insn
      k = -1;
      ret = resolve_origin(i, &ops[i].operand[1],
              i + opcnt * 23, &k, NULL);
      if (ret == 1)
        ops[k].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
    }
  }

  eliminate_seh_writes(opcnt);
}
+
// handle functions using the compiler's __SEH_prolog/__SEH_epilog
// helpers: remove the helper calls and their setup pushes
static void eliminate_seh_calls(int opcnt)
{
  int epilog_found = 0;
  int i;

  // __SEH_prolog implies an ebp frame and a 0x10 byte SEH record
  g_bp_frame = 1;
  g_seh_size = 0x10;

  // expected sequence: push <local size>
  i = 0;
  ferr_assert(&ops[i], ops[i].op == OP_PUSH
    && ops[i].operand[0].type == OPT_CONST);
  g_stack_fsz = g_seh_size + ops[i].operand[0].val;
  ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;

  // push offset <handler>
  i++;
  ferr_assert(&ops[i], ops[i].op == OP_PUSH
    && ops[i].operand[0].type == OPT_OFFSET);
  ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;

  // call __SEH_prolog
  i++;
  ferr_assert(&ops[i], ops[i].op == OP_CALL
    && IS(opr_name(&ops[i], 0), "__SEH_prolog"));
  ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;

  // drop every __SEH_epilog call (there may be several exit points)
  for (i++; i < opcnt; i++) {
    if (ops[i].op != OP_CALL)
      continue;
    if (!IS(opr_name(&ops[i], 0), "__SEH_epilog"))
      continue;

    ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
    epilog_found = 1;
  }
  ferr_assert(ops, epilog_found);

  eliminate_seh_writes(opcnt);
}
+
+static int scan_prologue(int i, int opcnt, int *ecx_push, int *esp_sub)
+{
+ int j;
+
+ for (; i < opcnt; i++)
+ if (!(ops[i].flags & OPF_DONE))
+ break;
+
+ while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ g_stack_fsz += 4;
+ (*ecx_push)++;
+ i++;
+ }
+
+ for (; i < opcnt; i++) {
+ if (i > 0 && g_labels[i] != NULL)
+ break;
+ if (ops[i].op == OP_PUSH || (ops[i].flags & (OPF_JMP|OPF_TAIL)))
+ break;
+ if (ops[i].op == OP_SUB && ops[i].operand[0].reg == xSP
+ && ops[i].operand[1].type == OPT_CONST)
+ {
+ g_stack_fsz += opr_const(&ops[i], 1);
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ i++;
+ *esp_sub = 1;
+ break;
+ }
+ if (ops[i].op == OP_MOV && ops[i].operand[0].reg == xAX
+ && ops[i].operand[1].type == OPT_CONST)
+ {
+ for (j = i + 1; j < opcnt; j++)
+ if (!(ops[j].flags & OPF_DONE))
+ break;
+ if (ops[j].op == OP_CALL
+ && IS(opr_name(&ops[j], 0), "__alloca_probe"))
+ {
+ g_stack_fsz += opr_const(&ops[i], 1);
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ i = j + 1;
+ *esp_sub = 1;
+ }
+ break;
+ }
+ }
+
+ return i;
+}
+
+static void scan_prologue_epilogue(int opcnt, int *stack_align)
+{
+ int ecx_push = 0, esp_sub = 0, pusha = 0;
+ int sandard_epilogue;
+ int found;
+ int i, j, l;
+
+ if (g_seh_found == 2) {
+ eliminate_seh_calls(opcnt);
+ return;
+ }
+ if (g_seh_found) {
+ eliminate_seh(opcnt);
+ // ida treats seh as part of sf
+ g_stack_fsz = g_seh_size;
+ esp_sub = 1;
+ }
+
+ if (ops[0].op == OP_PUSH && IS(opr_name(&ops[0], 0), "ebp")
+ && ops[1].op == OP_MOV
+ && IS(opr_name(&ops[1], 0), "ebp")
+ && IS(opr_name(&ops[1], 1), "esp"))
+ {
+ g_bp_frame = 1;
+ ops[0].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ ops[1].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+
+ for (i = 2; i < opcnt; i++)
+ if (!(ops[i].flags & OPF_DONE))
+ break;
+
+ if (ops[i].op == OP_PUSHA) {
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ pusha = 1;
+ i++;
+ }
+
+ if (ops[i].op == OP_AND && ops[i].operand[0].reg == xSP
+ && ops[i].operand[1].type == OPT_CONST)
+ {
+ l = ops[i].operand[1].val;
+ j = ffs(l) - 1;
+ if (j == -1 || (l >> j) != -1)
+ ferr(&ops[i], "unhandled esp align: %x\n", l);
+ if (stack_align != NULL)
+ *stack_align = 1 << j;
+ ops[i].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ i++;
+ }
+
+ i = scan_prologue(i, opcnt, &ecx_push, &esp_sub);
+
+ found = 0;
+ do {
+ for (; i < opcnt; i++)
+ if (ops[i].flags & OPF_TAIL)
+ break;
+ j = i - 1;
+ if (i == opcnt && (ops[j].flags & OPF_JMP)) {
+ if (ops[j].bt_i != -1 || ops[j].btj != NULL)
+ break;
+ i--;
+ j--;
+ }
+
+ sandard_epilogue = 0;
+ if (ops[j].op == OP_POP && IS(opr_name(&ops[j], 0), "ebp"))
+ {
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ // the standard epilogue is sometimes even used without a sf
+ if (ops[j - 1].op == OP_MOV
+ && IS(opr_name(&ops[j - 1], 0), "esp")
+ && IS(opr_name(&ops[j - 1], 1), "ebp"))
+ sandard_epilogue = 1;
+ }
+ else if (ops[j].op == OP_LEAVE)
+ {
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ sandard_epilogue = 1;
+ }
+ else if (ops[i].op == OP_CALL && ops[i].pp != NULL
+ && ops[i].pp->is_noreturn)
+ {
+ // on noreturn, msvc sometimes cleans stack, sometimes not
+ i++;
+ found = 1;
+ continue;
+ }
+ else if (!(g_ida_func_attr & IDAFA_NORETURN))
+ ferr(&ops[j], "'pop ebp' expected\n");
+
+ if (g_stack_fsz != 0 || sandard_epilogue) {
+ if (ops[j].op == OP_LEAVE)
+ j--;
+ else if (sandard_epilogue) // mov esp, ebp
+ {
+ ops[j - 1].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ j -= 2;
+ }
+ else if (!(g_ida_func_attr & IDAFA_NORETURN))
+ {
+ ferr(&ops[j], "esp restore expected\n");
+ }
+
+ if (ecx_push && j >= 0 && ops[j].op == OP_POP
+ && IS(opr_name(&ops[j], 0), "ecx"))
+ {
+ ferr(&ops[j], "unexpected ecx pop\n");
+ }
+ }
+
+ if (pusha) {
+ if (ops[j].op == OP_POPA)
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ else
+ ferr(&ops[j], "popa expected\n");
+ }
+
+ found = 1;
+ i++;
+ } while (i < opcnt);
+
+ if (!found)
+ ferr(ops, "missing ebp epilogue\n");
+ return;
+ }
+
+ // non-bp frame
+ i = scan_prologue(0, opcnt, &ecx_push, &esp_sub);
+
+ if (ecx_push && !esp_sub) {
+ // could actually be args for a call..
+ for (; i < opcnt; i++)
+ if (ops[i].op != OP_PUSH)
+ break;
+
+ if (ops[i].op == OP_CALL && ops[i].operand[0].type == OPT_LABEL) {
+ const struct parsed_proto *pp;
+ pp = proto_parse(g_fhdr, opr_name(&ops[i], 0), 1);
+ j = pp ? pp->argc_stack : 0;
+ while (i > 0 && j > 0) {
+ i--;
+ if (ops[i].op == OP_PUSH) {
+ ops[i].flags &= ~(OPF_RMD | OPF_DONE | OPF_NOREGS);
+ j--;
+ }
+ }
+ if (j != 0)
+ ferr(&ops[i], "unhandled prologue\n");
+
+ // recheck
+ i = ecx_push = 0;
+ g_stack_fsz = g_seh_size;
+ while (ops[i].op == OP_PUSH && IS(opr_name(&ops[i], 0), "ecx")) {
+ if (!(ops[i].flags & OPF_RMD))
+ break;
+ g_stack_fsz += 4;
+ ecx_push++;
+ i++;
+ }
+ }
+ }
+
+ found = 0;
+ if (ecx_push || esp_sub)
+ {
+ g_sp_frame = 1;
+
+ do {
+ for (; i < opcnt; i++)
+ if (ops[i].flags & OPF_TAIL)
+ break;
+
+ j = i - 1;
+ if (i == opcnt && (ops[j].flags & OPF_JMP)) {
+ if (ops[j].bt_i != -1 || ops[j].btj != NULL)
+ break;
+ i--;
+ j--;
+ }
+ else if (i < opcnt && (ops[i].flags & OPF_ATAIL)) {
+ // skip arg updates for arg-reuse tailcall
+ for (; j >= 0; j--) {
+ if (ops[j].op != OP_MOV)
+ break;
+ if (ops[j].operand[0].type != OPT_REGMEM)
+ break;
+ if (strstr(ops[j].operand[0].name, "arg_") == NULL)
+ break;
+ }
+ }
+
+ if (ecx_push > 0 && !esp_sub) {
+ for (l = 0; l < ecx_push && j >= 0; l++) {
+ if (ops[j].op == OP_POP && IS(opr_name(&ops[j], 0), "ecx"))
+ /* pop ecx */;
+ else if (ops[j].op == OP_ADD
+ && IS(opr_name(&ops[j], 0), "esp")
+ && ops[j].operand[1].type == OPT_CONST)
+ {
+ /* add esp, N */
+ l += ops[j].operand[1].val / 4 - 1;
+ }
+ else
+ break;
+
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ j--;
+ }
+ if (l != ecx_push) {
+ if (i < opcnt && ops[i].op == OP_CALL
+ && ops[i].pp != NULL && ops[i].pp->is_noreturn)
+ {
+ // noreturn tailcall with no epilogue
+ i++;
+ found = 1;
+ continue;
+ }
+ ferr(&ops[j], "epilogue scan failed\n");
+ }
+
+ found = 1;
+ }
+
+ if (esp_sub) {
+ if (ops[j].op != OP_ADD
+ || !IS(opr_name(&ops[j], 0), "esp")
+ || ops[j].operand[1].type != OPT_CONST)
+ {
+ if (i < opcnt && ops[i].op == OP_CALL
+ && ops[i].pp != NULL && ops[i].pp->is_noreturn)
+ {
+ // noreturn tailcall with no epilogue
+ i++;
+ found = 1;
+ continue;
+ }
+ ferr(&ops[j], "'add esp' expected\n");
+ }
+
+ if (ops[j].operand[1].val < g_stack_fsz)
+ ferr(&ops[j], "esp adj is too low (need %d)\n", g_stack_fsz);
+
+ ops[j].operand[1].val -= g_stack_fsz; // for stack arg scanner
+ if (ops[j].operand[1].val == 0)
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_NOREGS;
+ found = 1;
+ }
+
+ i++;
+ } while (i < opcnt);
+
+ if (!found)
+ ferr(ops, "missing esp epilogue\n");
+ }
+}
+
// find an instruction that changed opr before i op
// *op_i must be set to -1 by the caller
// *is_caller is set to 1 if one source is determined to be g_func arg
// returns 1 if found, *op_i is then set to origin
// returns -1 if multiple origins are found
static int resolve_origin(int i, const struct parsed_opr *opr,
  int magic, int *op_i, int *is_caller)
{
  struct label_ref *lr;
  int ret = 0;

  while (1) {
    if (g_labels[i] != NULL) {
      // jump target: recurse into every insn that jumps here
      lr = &g_label_refs[i];
      for (; lr != NULL; lr = lr->next) {
        check_i(&ops[i], lr->i);
        ret |= resolve_origin(lr->i, opr, magic, op_i, is_caller);
      }
      if (i > 0 && LAST_OP(i - 1))
        // no fallthrough into this label - only jump sources matter
        return ret;
    }

    i--;
    if (i < 0) {
      // walked past function start: value comes from the caller
      if (is_caller != NULL)
        *is_caller = 1;
      return -1;
    }

    // cc_scratch == magic marks insns already visited by this search
    // (magic is unique per scan)
    if (ops[i].cc_scratch == magic)
      return ret;
    ops[i].cc_scratch = magic;

    if (!(ops[i].flags & OPF_DATA))
      continue;
    if (!is_opr_modified(opr, &ops[i]))
      continue;

    if (*op_i >= 0) {
      // origin already found on another path; ok only if identical
      if (*op_i == i || are_ops_same(&ops[*op_i], &ops[i]))
        return ret | 1;

      return -1;
    }

    *op_i = i;
    return ret | 1;
  }
}
+
// find an instruction that previously referenced opr
// if multiple results are found - fail
// *op_i must be set to -1 by the caller
// returns 1 if found, *op_i is then set to referencer insn
static int resolve_last_ref(int i, const struct parsed_opr *opr,
  int magic, int *op_i)
{
  struct label_ref *lr;
  int ret = 0;

  while (1) {
    if (g_labels[i] != NULL) {
      // jump target: also scan backwards from every jump source
      lr = &g_label_refs[i];
      for (; lr != NULL; lr = lr->next) {
        check_i(&ops[i], lr->i);
        ret |= resolve_last_ref(lr->i, opr, magic, op_i);
      }
      if (i > 0 && LAST_OP(i - 1))
        // no fallthrough into this label
        return ret;
    }

    i--;
    if (i < 0)
      return -1;

    // cc_scratch == magic marks insns already visited by this search
    if (ops[i].cc_scratch == magic)
      return 0;
    ops[i].cc_scratch = magic;

    if (!is_opr_referenced(opr, &ops[i]))
      continue;

    // fail if a second referencer shows up on another path
    if (*op_i >= 0)
      return -1;

    *op_i = i;
    return 1;
  }
}
+
// adjust datap of all reachable 'op' insns when moving back
// returns 1 if at least 1 op was found
// returns -1 if path without an op was found
static int adjust_prev_op(int i, enum op_op op, int magic, void *datap)
{
  struct label_ref *lr;
  int ret = 0;

  // visited marker (checked again inside the loop after i--)
  if (ops[i].cc_scratch == magic)
    return 0;
  ops[i].cc_scratch = magic;

  while (1) {
    if (g_labels[i] != NULL) {
      // jump target: follow all jump sources backwards too
      lr = &g_label_refs[i];
      for (; lr != NULL; lr = lr->next) {
        check_i(&ops[i], lr->i);
        ret |= adjust_prev_op(lr->i, op, magic, datap);
      }
      if (i > 0 && LAST_OP(i - 1))
        // no fallthrough into this label
        return ret;
    }

    i--;
    if (i < 0)
      return -1;

    if (ops[i].cc_scratch == magic)
      return 0;
    ops[i].cc_scratch = magic;

    if (ops[i].op != op)
      continue;

    ops[i].datap = datap;
    return 1;
  }
}
+
// find next instruction that reads opr
// *op_i must be set to -1 by the caller
// on return, *op_i is set to first referencer insn
// returns 1 if exactly 1 referencer is found
static int find_next_read(int i, int opcnt,
  const struct parsed_opr *opr, int magic, int *op_i)
{
  struct parsed_op *po;
  int j, ret = 0;

  for (; i < opcnt; i++)
  {
    // cc_scratch == magic marks insns already visited by this scan
    if (ops[i].cc_scratch == magic)
      return ret;
    ops[i].cc_scratch = magic;

    po = &ops[i];
    if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
      if (po->btj != NULL) {
        // jumptable
        for (j = 0; j < po->btj->count; j++) {
          check_i(po, po->btj->d[j].bt_i);
          ret |= find_next_read(po->btj->d[j].bt_i, opcnt, opr,
                   magic, op_i);
        }
        return ret;
      }

      if (po->flags & OPF_RMD)
        continue;
      check_i(po, po->bt_i);
      if (po->flags & OPF_CJMP) {
        // conditional jump: scan the taken path, then fall through
        ret |= find_next_read(po->bt_i, opcnt, opr, magic, op_i);
        if (ret < 0)
          return ret;
      }
      else
        // unconditional jump: just follow it
        i = po->bt_i - 1;
      continue;
    }

    if (!is_opr_read(opr, po)) {
      int full_opr = 1;
      if (opr->type == OPT_REG && po->operand[0].type == OPT_REG
          && opr->reg == po->operand[0].reg && (po->flags & OPF_DATA))
      {
        // a narrower write (e.g. al while tracking eax)
        // does not kill the whole register
        full_opr = po->operand[0].lmod >= opr->lmod;
      }
      if (is_opr_modified(opr, po) && full_opr) {
        // it's overwritten
        return ret;
      }
      if (po->flags & OPF_TAIL)
        return ret;
      continue;
    }

    // fail if more than one reader is found
    if (*op_i >= 0)
      return -1;

    *op_i = i;
    return 1;
  }

  return 0;
}
+
// find next instruction that uses the cpu flags
// (header comment fixed: was a copy-paste of find_next_read's)
// *op_i must be set to -1 by the caller
// on return, *op_i is set to first flag user insn
// returns 1 if exactly 1 flag user is found
static int find_next_flag_use(int i, int opcnt, int magic, int *op_i)
{
  struct parsed_op *po;
  int j, ret = 0;

  for (; i < opcnt; i++)
  {
    // cc_scratch == magic marks insns already visited by this scan
    if (ops[i].cc_scratch == magic)
      return ret;
    ops[i].cc_scratch = magic;

    po = &ops[i];
    // a call clobbers flags in an unknown way - give up
    if (po->op == OP_CALL)
      return -1;
    if (po->flags & OPF_JMP) {
      if (po->btj != NULL) {
        // jumptable
        for (j = 0; j < po->btj->count; j++) {
          check_i(po, po->btj->d[j].bt_i);
          ret |= find_next_flag_use(po->btj->d[j].bt_i, opcnt,
                   magic, op_i);
        }
        return ret;
      }

      if (po->flags & OPF_RMD)
        continue;
      check_i(po, po->bt_i);
      if (po->flags & OPF_CJMP)
        // a conditional jump is itself a flag user
        goto found;
      else
        i = po->bt_i - 1;
      continue;
    }

    if (!(po->flags & OPF_CC)) {
      if (po->flags & OPF_FLAGS)
        // flags changed
        return ret;
      if (po->flags & OPF_TAIL)
        return ret;
      continue;
    }

found:
    // fail if more than one flag user is found
    if (*op_i >= 0)
      return -1;

    *op_i = i;
    return 1;
  }

  return 0;
}
+
+static int try_resolve_const(int i, const struct parsed_opr *opr,
+ int magic, unsigned int *val)
+{
+ int s_i = -1;
+ int ret;
+
+ ret = resolve_origin(i, opr, magic, &s_i, NULL);
+ if (ret == 1) {
+ i = s_i;
+ if (ops[i].op != OP_MOV && ops[i].operand[1].type != OPT_CONST)
+ return -1;
+
+ *val = ops[i].operand[1].val;
+ return 1;
+ }
+
+ return -1;
+}
+
// determine which bits of reg the following code tests, and whether
// the test is followed by a zero-flag check; used for translating
// partial-register tests. returns 0 on success, -1 if no single reader
static int resolve_used_bits(int i, int opcnt, int reg,
  int *mask, int *is_z_check)
{
  struct parsed_opr opr = OPR_INIT(OPT_REG, OPLM_WORD, reg);
  int j = -1, k = -1;
  int ret;

  ret = find_next_read(i, opcnt, &opr, i + opcnt * 20, &j);
  if (ret != 1)
    return -1;

  // make sure there is only a single reader
  find_next_read(j + 1, opcnt, &opr, i + opcnt * 20 + 1, &k);
  if (k != -1) {
    fnote(&ops[j], "(first read)\n");
    ferr(&ops[k], "TODO: bit resolve: multiple readers\n");
  }

  // only "test reg, imm" is handled
  if (ops[j].op != OP_TEST || ops[j].operand[1].type != OPT_CONST)
    ferr(&ops[j], "TODO: bit resolve: not a const test\n");

  ferr_assert(&ops[j], ops[j].operand[0].type == OPT_REG);
  ferr_assert(&ops[j], ops[j].operand[0].reg == reg);

  *mask = ops[j].operand[1].val;
  if (ops[j].operand[0].lmod == OPLM_BYTE
    && ops[j].operand[0].name[1] == 'h')
  {
    // "test ah, imm" addresses bits 8-15
    *mask <<= 8;
  }
  ferr_assert(&ops[j], (*mask & ~0xffff) == 0);

  *is_z_check = 0;
  ret = find_next_flag_use(j + 1, opcnt, i + opcnt * 20 + 2, &k);
  if (ret == 1)
    *is_z_check = ops[k].pfo == PFO_Z;

  return 0;
}
+
// resolve a memory operand of the form "reg" or "reg+offset" to the
// prototype of the struct member it points at, by tracing where the
// base register's value came from; returns NULL on failure
static const struct parsed_proto *resolve_deref(int i, int magic,
  struct parsed_opr *opr, int level)
{
  struct parsed_opr opr_s = OPR_INIT(OPT_REG, OPLM_DWORD, 0);
  const struct parsed_proto *pp = NULL;
  int from_caller = 0;
  char s_reg[4];
  int offset = 0;
  int len = 0;
  int j = -1;
  int k = -1;
  int reg;
  int ret;

  // accept only "reg+hexoff" or a plain "reg" name
  ret = sscanf(opr->name, "%3s+%x%n", s_reg, &offset, &len);
  if (ret != 2 || len != strlen(opr->name)) {
    ret = sscanf(opr->name, "%3s%n", s_reg, &len);
    if (ret != 1 || len != strlen(opr->name))
      return NULL;
  }

  reg = char_array_i(regs_r32, ARRAY_SIZE(regs_r32), s_reg);
  if (reg < 0)
    return NULL;

  // find the insn that set the base register
  opr_s.reg = reg;
  ret = resolve_origin(i, &opr_s, i + magic, &j, NULL);
  if (ret != 1)
    return NULL;

  if (ops[j].op == OP_MOV && ops[j].operand[1].type == OPT_REGMEM
    && strlen(ops[j].operand[1].name) == 3
    && ops[j].operand[0].lmod == OPLM_DWORD
    && ops[j].pp == NULL // no hint
    && level == 0)
  {
    // allow one simple dereference (com/directx)
    reg = char_array_i(regs_r32, ARRAY_SIZE(regs_r32),
            ops[j].operand[1].name);
    if (reg < 0)
      return NULL;
    opr_s.reg = reg;
    ret = resolve_origin(j, &opr_s, j + magic, &k, NULL);
    if (ret != 1)
      return NULL;
    j = k;
  }
  if (ops[j].op != OP_MOV || ops[j].operand[0].lmod != OPLM_DWORD)
    return NULL;

  if (ops[j].pp != NULL) {
    // type hint in asm
    pp = ops[j].pp;
  }
  else if (ops[j].operand[1].type == OPT_REGMEM) {
    pp = try_recover_pp(&ops[j], &ops[j].operand[1], 0, NULL);
    if (pp == NULL) {
      // maybe structure ptr in structure
      pp = resolve_deref(j, magic, &ops[j].operand[1], level + 1);
    }
  }
  else if (ops[j].operand[1].type == OPT_LABEL)
    pp = proto_parse(g_fhdr, ops[j].operand[1].name, g_quiet_pp);
  else if (ops[j].operand[1].type == OPT_REG) {
    // maybe arg reg?
    k = -1;
    ret = resolve_origin(j, &ops[j].operand[1], i + magic,
            &k, &from_caller);
    if (ret != 1 && from_caller && k == -1 && g_func_pp != NULL) {
      // value came from the caller: match it to a register arg
      for (k = 0; k < g_func_pp->argc; k++) {
        if (g_func_pp->arg[k].reg == NULL)
          continue;
        if (IS(g_func_pp->arg[k].reg, ops[j].operand[1].name)) {
          pp = g_func_pp->arg[k].pp;
          break;
        }
      }
    }
  }

  if (pp == NULL)
    return NULL;
  if (pp->is_func || pp->is_fptr || !pp->type.is_struct) {
    // non-struct base with a nonzero offset makes no sense here
    if (offset != 0)
      ferr(&ops[j], "expected struct, got '%s %s'\n",
        pp->type.name, pp->name);
    return NULL;
  }

  return proto_lookup_struct(g_fhdr, pp->type.name, offset);
}
+
// resolve the prototype of an indirect call at ops[i];
// *pp_i receives the insn index the proto came from (or -1),
// *multi_src is set by the scan if multiple sources were traced
static const struct parsed_proto *resolve_icall(int i, int opcnt,
  int *pp_i, int *multi_src)
{
  const struct parsed_proto *pp = NULL;
  int search_advice = 0;

  *multi_src = 0;
  *pp_i = -1;

  switch (ops[i].operand[0].type) {
  case OPT_REGMEM:
    // try to resolve struct member calls
    pp = resolve_deref(i, i + opcnt * 19, &ops[i].operand[0], 0);
    if (pp != NULL)
      break;
    // fallthrough
  case OPT_LABEL:
  case OPT_OFFSET:
    pp = try_recover_pp(&ops[i], &ops[i].operand[0],
           1, &search_advice);
    if (!search_advice)
      break;
    // fallthrough
  default:
    // last resort: trace the operand's value to the called target
    scan_for_call_type(i, &ops[i].operand[0], i + opcnt * 9, &pp,
      pp_i, multi_src);
    break;
  }

  return pp;
}
+
// early (pre-analysis) handling of the call at ops[i]: only succeeds
// for known non-vararg protos without register args; *adj_i receives
// the index of the esp-adjust insn (or -1). returns the proto, or
// NULL to leave the call for the full process_call() pass
static struct parsed_proto *process_call_early(int i, int opcnt,
  int *adj_i)
{
  struct parsed_op *po = &ops[i];
  struct parsed_proto *pp;
  int multipath = 0;
  int adj = 0;
  int j, ret;

  pp = po->pp;
  if (pp == NULL || pp->is_vararg || pp->argc_reg != 0)
    // leave for later
    return NULL;

  // look for and make use of esp adjust
  *adj_i = ret = -1;
  if (!pp->is_stdcall && pp->argc_stack > 0)
    ret = scan_for_esp_adjust(i + 1, opcnt,
            pp->argc_stack * 4, &adj, &multipath, 0);
  if (ret >= 0) {
    if (pp->argc_stack > adj / 4)
      return NULL;
    if (multipath)
      return NULL;
    if (ops[ret].op == OP_POP) {
      // a run of "pop ecx" can stand in for "add esp, N";
      // bail if anything else is mixed in
      for (j = 1; j < adj / 4; j++) {
        if (ops[ret + j].op != OP_POP
          || ops[ret + j].operand[0].reg != xCX)
        {
          return NULL;
        }
      }
    }
  }

  *adj_i = ret;
  return pp;
}
+
// full call processing for ops[i]: resolve the callee's prototype
// (direct hint, traced indirect call, or one synthesized from the
// esp adjust) and account for stack cleanup after the call
static struct parsed_proto *process_call(int i, int opcnt)
{
  struct parsed_op *po = &ops[i];
  const struct parsed_proto *pp_c;
  struct parsed_proto *pp;
  const char *tmpname;
  int call_i = -1, ref_i = -1;
  int adj = 0, multipath = 0;
  int ret, arg;

  tmpname = opr_name(po, 0);
  pp = po->pp;
  if (pp == NULL)
  {
    // indirect call
    pp_c = resolve_icall(i, opcnt, &call_i, &multipath);
    if (pp_c != NULL) {
      if (!pp_c->is_func && !pp_c->is_fptr)
        ferr(po, "call to non-func: %s\n", pp_c->name);
      pp = proto_clone(pp_c);
      my_assert_not(pp, NULL);
      if (multipath)
        // not resolved just to single func
        pp->is_fptr = 1;

      switch (po->operand[0].type) {
      case OPT_REG:
        // we resolved this call and no longer need the register
        po->regmask_src &= ~(1 << po->operand[0].reg);

        if (!multipath && i != call_i && ops[call_i].op == OP_MOV
          && ops[call_i].operand[1].type == OPT_LABEL)
        {
          // no other source users?
          ret = resolve_last_ref(i, &po->operand[0], i + opcnt * 10,
                  &ref_i);
          if (ret == 1 && call_i == ref_i) {
            // and nothing uses it after us?
            ref_i = -1;
            find_next_read(i + 1, opcnt, &po->operand[0],
              i + opcnt * 11, &ref_i);
            if (ref_i == -1)
              // then also don't need the source mov
              ops[call_i].flags |= OPF_RMD | OPF_NOREGS;
          }
        }
        break;
      case OPT_REGMEM:
        pp->is_fptr = 1;
        break;
      default:
        break;
      }
    }
    if (pp == NULL) {
      // unresolvable: fabricate a cdecl proto, arg count taken
      // from the post-call esp adjust
      pp = calloc(1, sizeof(*pp));
      my_assert_not(pp, NULL);

      pp->is_fptr = 1;
      ret = scan_for_esp_adjust(i + 1, opcnt,
              -1, &adj, &multipath, 0);
      if (ret < 0 || adj < 0) {
        if (!g_allow_regfunc)
          ferr(po, "non-__cdecl indirect call unhandled yet\n");
        pp->is_unresolved = 1;
        adj = 0;
      }
      adj /= 4;
      if (adj > ARRAY_SIZE(pp->arg))
        ferr(po, "esp adjust too large: %d\n", adj);
      pp->ret_type.name = strdup("int");
      pp->argc = pp->argc_stack = adj;
      for (arg = 0; arg < pp->argc; arg++)
        pp->arg[arg].type.name = strdup("int");
    }
    po->pp = pp;
  }

  // look for and make use of esp adjust
  multipath = 0;
  ret = -1;
  if (!pp->is_stdcall && pp->argc_stack > 0) {
    // for varargs, any adjust size is acceptable (-1)
    int adj_expect = pp->is_vararg ? -1 : pp->argc_stack * 4;
    ret = scan_for_esp_adjust(i + 1, opcnt,
            adj_expect, &adj, &multipath, 0);
  }
  if (ret >= 0) {
    if (pp->is_vararg) {
      if (adj / 4 < pp->argc_stack) {
        fnote(po, "(this call)\n");
        ferr(&ops[ret], "esp adjust is too small: %x < %x\n",
          adj, pp->argc_stack * 4);
      }
      // modify pp to make it have varargs as normal args
      arg = pp->argc;
      pp->argc += adj / 4 - pp->argc_stack;
      for (; arg < pp->argc; arg++) {
        pp->arg[arg].type.name = strdup("int");
        pp->argc_stack++;
      }
      if (pp->argc > ARRAY_SIZE(pp->arg))
        ferr(po, "too many args for '%s'\n", tmpname);
    }
    if (pp->argc_stack > adj / 4) {
      if (pp->is_noreturn)
        // assume no stack adjust was emited
        goto out;
      fnote(po, "(this call)\n");
      ferr(&ops[ret], "stack tracking failed for '%s': %x %x\n",
        tmpname, pp->argc_stack * 4, adj);
    }

    // rescan with the last flag set - presumably commits/marks the
    // adjust for the final arg count (see scan_for_esp_adjust)
    scan_for_esp_adjust(i + 1, opcnt,
      pp->argc_stack * 4, &adj, &multipath, 1);
  }
  else if (pp->is_vararg)
    ferr(po, "missing esp_adjust for vararg func '%s'\n",
      pp->name);

out:
  return pp;
}
+
// collect call args that were stored directly to the stack with fst
// ("sub esp, N" + "fst [esp+X]" style, no pushes);
// *regmask_ffca gets one bit set per float call arg slot found
static int collect_call_args_no_push(int i, struct parsed_proto *pp,
  int *regmask_ffca)
{
  struct parsed_op *po;
  int offset = 0;
  int base_arg;
  int j, arg;
  int ret;

  // index of the first stack (non-register) arg
  for (base_arg = 0; base_arg < pp->argc; base_arg++)
    if (pp->arg[base_arg].reg == NULL)
      break;

  // walk back from the call to the allocating "sub esp, N"
  for (j = i; j > 0; )
  {
    ferr_assert(&ops[j], g_labels[j] == NULL);
    j--;

    po = &ops[j];
    ferr_assert(po, po->op != OP_PUSH);
    if (po->op == OP_FST)
    {
      if (po->operand[0].type != OPT_REGMEM)
        continue;
      ret = parse_stack_esp_offset(po, po->operand[0].name, &offset);
      if (ret != 0)
        continue;
      // ignore stores outside the arg area or not 4-byte aligned
      if (offset < 0 || offset >= pp->argc_stack * 4 || (offset & 3)) {
        //ferr(po, "offset %d, %d args\n", offset, pp->argc_stack);
        continue;
      }

      // esp offset maps directly to the arg slot
      arg = base_arg + offset / 4;
      po->p_argnext = -1;
      po->p_argnum = arg + 1;
      ferr_assert(po, pp->arg[arg].datap == NULL);
      pp->arg[arg].datap = po;
      po->flags |= OPF_DONE | OPF_FARGNR | OPF_FARG;
      if (regmask_ffca != NULL)
        *regmask_ffca |= 1 << arg;
    }
    else if (po->op == OP_SUB && po->operand[0].reg == xSP
      && po->operand[1].type == OPT_CONST)
    {
      // the allocation insn marks the start of the arg area
      po->flags |= OPF_RMD | OPF_DONE | OPF_FARGNR | OPF_FARG;
      break;
    }
  }

  // verify every stack arg slot got filled (qwords take 2 slots)
  for (arg = base_arg; arg < pp->argc; arg++) {
    ferr_assert(&ops[i], pp->arg[arg].reg == NULL);
    po = pp->arg[arg].datap;
    if (po == NULL)
      ferr(&ops[i], "arg %d/%d not found\n", arg, pp->argc);
    if (po->operand[0].lmod == OPLM_QWORD)
      arg++;
  }

  return 0;
}
+
+static int collect_call_args_early(int i, struct parsed_proto *pp,
+ int *regmask, int *regmask_ffca)
+{
+ struct parsed_op *po;
+ int arg, ret;
+ int j;
+
+ for (arg = 0; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+
+ // first see if it can be easily done
+ for (j = i; j > 0 && arg < pp->argc; )
+ {
+ if (g_labels[j] != NULL)
+ return -1;
+ j--;
+
+ po = &ops[j];
+ if (po->op == OP_CALL)
+ return -1;
+ else if (po->op == OP_ADD && po->operand[0].reg == xSP)
+ return -1;
+ else if (po->op == OP_POP)
+ return -1;
+ else if (po->flags & OPF_CJMP)
+ return -1;
+ else if (po->op == OP_PUSH) {
+ if (po->flags & (OPF_FARG|OPF_FARGNR))
+ return -1;
+ if (!g_header_mode) {
+ ret = scan_for_mod(po, j + 1, i, 1);
+ if (ret >= 0)
+ return -1;
+ }
+
+ if (pp->arg[arg].type.is_va_list)
+ return -1;
+
+ // next arg
+ for (arg++; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+ }
+ else if (po->op == OP_SUB && po->operand[0].reg == xSP
+ && po->operand[1].type == OPT_CONST)
+ {
+ if (po->flags & (OPF_RMD|OPF_DONE))
+ return -1;
+ if (po->operand[1].val != pp->argc_stack * 4)
+ ferr(po, "unexpected esp adjust: %d\n",
+ po->operand[1].val * 4);
+ ferr_assert(po, pp->argc - arg == pp->argc_stack);
+ return collect_call_args_no_push(i, pp, regmask_ffca);
+ }
+ }
+
+ if (arg < pp->argc)
+ return -1;
+
+ // now do it
+ for (arg = 0; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+
+ for (j = i; j > 0 && arg < pp->argc; )
+ {
+ j--;
+
+ if (ops[j].op == OP_PUSH)
+ {
+ ops[j].p_argnext = -1;
+ ferr_assert(&ops[j], pp->arg[arg].datap == NULL);
+ pp->arg[arg].datap = &ops[j];
+
+ if (regmask != NULL && ops[j].operand[0].type == OPT_REG)
+ *regmask |= 1 << ops[j].operand[0].reg;
+
+ ops[j].flags |= OPF_RMD | OPF_DONE | OPF_FARGNR | OPF_FARG;
+ ops[j].flags &= ~OPF_RSAVE;
+
+ // next arg
+ for (arg++; arg < pp->argc; arg++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int sync_argnum(struct parsed_op *po, int argnum)
+{
+ struct parsed_op *po_tmp;
+
+ // see if other branches don't have higher argnum
+ for (po_tmp = po; po_tmp != NULL; ) {
+ if (argnum < po_tmp->p_argnum)
+ argnum = po_tmp->p_argnum;
+ // note: p_argnext is active on current collect_call_args only
+ po_tmp = po_tmp->p_argnext >= 0 ? &ops[po_tmp->p_argnext] : NULL;
+ }
+
+ // make all argnums consistent
+ for (po_tmp = po; po_tmp != NULL; ) {
+ if (po_tmp->p_argnum != 0)
+ po_tmp->p_argnum = argnum;
+ po_tmp = po_tmp->p_argnext >= 0 ? &ops[po_tmp->p_argnext] : NULL;
+ }
+
+ return argnum;
+}
+
+static int collect_call_args_r(struct parsed_op *po, int i,
+ struct parsed_proto *pp, int *regmask, int *arg_grp,
+ int arg, int argnum, int magic, int need_op_saving, int may_reuse)
+{
+ struct parsed_proto *pp_tmp;
+ struct parsed_op *po_tmp;
+ struct label_ref *lr;
+ int need_to_save_current;
+ int arg_grp_current = 0;
+ int save_args_seen = 0;
+ int ret = 0;
+ int reg;
+ char buf[32];
+ int j, k;
+
+ if (i < 0) {
+ ferr(po, "dead label encountered\n");
+ return -1;
+ }
+
+ for (; arg < pp->argc; arg++, argnum++)
+ if (pp->arg[arg].reg == NULL)
+ break;
+ magic = (magic & 0xffffff) | (arg << 24);
+
+ for (j = i; j >= 0 && (arg < pp->argc || pp->is_unresolved); )
+ {
+ if (((ops[j].cc_scratch ^ magic) & 0xffffff) == 0) {
+ if (ops[j].cc_scratch != magic) {
+ ferr(&ops[j], "arg collect hit same path with diff args for %s\n",
+ pp->name);
+ return -1;
+ }
+ // ok: have already been here
+ return 0;
+ }
+ ops[j].cc_scratch = magic;
+
+ if (g_labels[j] != NULL && g_label_refs[j].i != -1) {
+ lr = &g_label_refs[j];
+ if (lr->next != NULL)
+ need_op_saving = 1;
+ for (; lr->next; lr = lr->next) {
+ check_i(&ops[j], lr->i);
+ if ((ops[lr->i].flags & (OPF_JMP|OPF_CJMP)) != OPF_JMP)
+ may_reuse = 1;
+ ret = collect_call_args_r(po, lr->i, pp, regmask, arg_grp,
+ arg, argnum, magic, need_op_saving, may_reuse);
+ if (ret < 0)
+ return ret;
+ }
+
+ check_i(&ops[j], lr->i);
+ if ((ops[lr->i].flags & (OPF_JMP|OPF_CJMP)) != OPF_JMP)
+ may_reuse = 1;
+ if (j > 0 && LAST_OP(j - 1)) {
+ // follow last branch in reverse
+ j = lr->i;
+ continue;
+ }
+ need_op_saving = 1;
+ ret = collect_call_args_r(po, lr->i, pp, regmask, arg_grp,
+ arg, argnum, magic, need_op_saving, may_reuse);
+ if (ret < 0)
+ return ret;
+ }
+ j--;
+
+ if (ops[j].op == OP_CALL)
+ {
+ if (pp->is_unresolved)
+ break;
+
+ pp_tmp = ops[j].pp;
+ if (pp_tmp == NULL)
+ ferr(po, "arg collect %d/%d hit unparsed call '%s'\n",
+ arg, pp->argc, ops[j].operand[0].name);
+ if (may_reuse && pp_tmp->argc_stack > 0)
+ ferr(po, "arg collect %d/%d hit '%s' with %d stack args\n",
+ arg, pp->argc, opr_name(&ops[j], 0), pp_tmp->argc_stack);
+ }
+ // esp adjust of 0 means we collected it before
+ else if (ops[j].op == OP_ADD && ops[j].operand[0].reg == xSP
+ && (ops[j].operand[1].type != OPT_CONST
+ || ops[j].operand[1].val != 0))
+ {
+ if (pp->is_unresolved)
+ break;
+
+ fnote(po, "(this call)\n");
+ ferr(&ops[j], "arg collect %d/%d hit esp adjust of %d\n",
+ arg, pp->argc, ops[j].operand[1].val);
+ }
+ else if (ops[j].op == OP_POP && !(ops[j].flags & OPF_DONE))
+ {
+ if (pp->is_unresolved)
+ break;
+
+ fnote(po, "(this call)\n");
+ ferr(&ops[j], "arg collect %d/%d hit pop\n", arg, pp->argc);
+ }
+ else if (ops[j].flags & OPF_CJMP)
+ {
+ if (pp->is_unresolved)
+ break;
+
+ may_reuse = 1;
+ }
+ else if (ops[j].op == OP_PUSH
+ && !(ops[j].flags & (OPF_FARGNR|OPF_DONE)))
+ {
+ if (pp->is_unresolved && (ops[j].flags & OPF_RMD))
+ break;
+
+ ops[j].p_argnext = -1;
+ po_tmp = pp->arg[arg].datap;
+ if (po_tmp != NULL)
+ ops[j].p_argnext = po_tmp - ops;
+ pp->arg[arg].datap = &ops[j];
+
+ argnum = sync_argnum(&ops[j], argnum);