OPF_LOCK = (1 << 17), /* op has lock prefix */
OPF_VAPUSH = (1 << 18), /* vararg ptr push (as call arg) */
OPF_DONE = (1 << 19), /* already fully handled by analysis */
+ OPF_PPUSH = (1 << 20), /* part of complex push-pop graph */
};
enum op_op {
// x87
// mmx
OP_EMMS,
- // mmx
+ // undefined
OP_UD2,
};
};
// datap:
-// OP_CALL - parser proto hint (str)
+// OP_CALL - parser proto hint (str)
// (OPF_CC) - points to one of (OPF_FLAGS) that affects cc op
-// OP_POP - points to OP_PUSH in push/pop pair
+// OP_PUSH - points to OP_POP in complex push/pop graph
+// OP_POP - points to OP_PUSH in simple push/pop pair
struct parsed_equ {
char name[64];
static int g_stack_frame_used;
static int g_stack_fsz;
static int g_ida_func_attr;
+static int g_skip_func;
static int g_allow_regfunc;
static int g_quiet_pp;
static int g_header_mode;
const struct parsed_type *c_type)
{
static const char *dword_types[] = {
- "int", "_DWORD", "UINT_PTR", "DWORD",
+ "uint32_t", "int", "_DWORD", "UINT_PTR", "DWORD",
"WPARAM", "LPARAM", "UINT", "__int32",
"LONG", "HIMC", "BOOL", "size_t",
"float",
}
if (i == ARRAY_SIZE(op_table)) {
- anote("unhandled op: '%s'\n", words[0]);
+ if (!g_skip_func)
+ aerr("unhandled op: '%s'\n", words[0]);
i--; // OP_UD2
}
w++;
|| ((ops[_i].flags & (OPF_JMP|OPF_CJMP|OPF_RMD)) == OPF_JMP \
&& ops[_i].op != OP_CALL))
+// fail via ferr() when an instruction index is invalid (negative),
+// e.g. an unresolved branch target; wrapped in do/while(0) so the
+// macro behaves as a single statement (no dangling-else hazard)
+#define check_i(po, i) \
+  do { \
+    if ((i) < 0) \
+      ferr(po, "bad " #i ": %d\n", i); \
+  } while (0)
+
static int scan_for_pop(int i, int opcnt, const char *reg,
int magic, int depth, int *maxdepth, int do_flags)
{
if (po->btj != NULL) {
// jumptable
for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
ret |= scan_for_pop(po->btj->d[j].bt_i, opcnt, reg, magic,
depth, maxdepth, do_flags);
if (ret < 0)
return ret;
}
- if (po->bt_i < 0) {
- ferr(po, "dead branch\n");
- return -1;
- }
-
+ check_i(po, po->bt_i);
if (po->flags & OPF_CJMP) {
ret |= scan_for_pop(po->bt_i, opcnt, reg, magic,
depth, maxdepth, do_flags);
return found ? 0 : -1;
}
-static void scan_for_pop_const(int i, int opcnt)
+// scan forward for the pop that pairs with the 'push <const>' at 'i':
+// a simple pair is removed outright (OPF_RMD); a multipath case
+// (label in between, or pop already part of a graph) becomes an
+// OPF_PPUSH push-pop graph and the pop's reg is noted in *regmask_pp
+// so a pp_<reg> temporary gets declared
+static void scan_for_pop_const(int i, int opcnt, int *regmask_pp)
{
+  struct parsed_op *po;
+  int is_multipath = 0;
  int j;
  for (j = i + 1; j < opcnt; j++) {
-    if ((ops[j].flags & (OPF_JMP|OPF_TAIL|OPF_RSAVE))
-      || ops[j].op == OP_PUSH || g_labels[i] != NULL)
+    po = &ops[j];
+
+    if (po->op == OP_JMP && po->btj == NULL) {
+      // follow unconditional direct jumps
+      ferr_assert(po, po->bt_i >= 0);
+      j = po->bt_i - 1;
+      continue;
+    }
+
+    if ((po->flags & (OPF_JMP|OPF_TAIL|OPF_RSAVE))
+      || po->op == OP_PUSH)
    {
      break;
    }
-    if (ops[j].op == OP_POP && !(ops[j].flags & (OPF_RMD|OPF_DONE)))
+    if (g_labels[j] != NULL)
+      is_multipath = 1;
+
+    if (po->op == OP_POP && !(po->flags & OPF_RMD))
    {
-      ops[i].flags |= OPF_RMD | OPF_DONE;
-      ops[j].flags |= OPF_DONE;
-      ops[j].datap = &ops[i];
+      // pop already in a graph forces the multipath treatment
+      is_multipath |= !!(po->flags & OPF_PPUSH);
+      if (is_multipath) {
+        ops[i].flags |= OPF_PPUSH | OPF_DONE;
+        ops[i].datap = po;
+        po->flags |= OPF_PPUSH | OPF_DONE;
+        *regmask_pp |= 1 << po->operand[0].reg;
+      }
+      else {
+        // simple pair: drop both, pop reads the const via datap
+        ops[i].flags |= OPF_RMD | OPF_DONE;
+        po->flags |= OPF_DONE;
+        po->datap = &ops[i];
+      }
      break;
    }
  }
}
if (po->flags & OPF_JMP) {
if (po->btj != NULL) {
// jumptable
- for (j = 0; j < po->btj->count; j++)
+ for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
scan_propagate_df(po->btj->d[j].bt_i, opcnt);
+ }
return;
}
- if (po->bt_i < 0) {
- ferr(po, "dead branch\n");
- return;
- }
-
+ check_i(po, po->bt_i);
if (po->flags & OPF_CJMP)
scan_propagate_df(po->bt_i, opcnt);
else
ferr(po, "missing DF clear?\n");
}
+// is operand 'opr' referenced (read or written) by parsed_op 'po'?
+// returns 1 if referenced, 0 otherwise
+static int is_opr_referenced(const struct parsed_opr *opr,
+  const struct parsed_op *po)
+{
+  int i, mask;
+
+  if (opr->type == OPT_REG) {
+    mask = po->regmask_dst | po->regmask_src;
+    if (po->op == OP_CALL)
+      // assume a call may touch the scratch regs
+      mask |= (1 << xAX) | (1 << xCX) | (1 << xDX);
+    if ((1 << opr->reg) & mask)
+      return 1;
+    else
+      return 0;
+  }
+
+  // non-reg operand: match each operand by name
+  // (was operand[0] for every i - copy-paste index bug)
+  for (i = 0; i < po->operand_cnt; i++)
+    if (IS(po->operand[i].name, opr->name))
+      return 1;
+
+  return 0;
+}
+
+// is operand 'opr' read by parsed_op 'po'?
+// only register operands are tracked; anything else is
+// conservatively reported as not read
+static int is_opr_read(const struct parsed_opr *opr,
+  const struct parsed_op *po)
+{
+  int mask;
+
+  if (opr->type == OPT_REG) {
+    mask = po->regmask_src;
+    if (po->op == OP_CALL)
+      // assume worst case
+      mask |= (1 << xAX) | (1 << xCX) | (1 << xDX);
+    if ((1 << opr->reg) & mask)
+      return 1;
+    else
+      return 0;
+  }
+
+  // yes I'm lazy
+  return 0;
+}
+
// is operand 'opr' modified by parsed_op 'po'?
static int is_opr_modified(const struct parsed_opr *opr,
const struct parsed_op *po)
{
int mask;
- if ((po->flags & OPF_RMD) || !(po->flags & OPF_DATA))
+ if (!(po->flags & OPF_DATA))
return 0;
if (opr->type == OPT_REG) {
return -1;
}
-#define check_i(po, i) \
- if ((i) < 0) \
- ferr(po, "bad " #i ": %d\n", i)
-
static int scan_for_flag_set(int i, int magic, int *branched,
int *setters, int *setter_cnt)
{
return -1;
}
+// account for one call's stack args in an 'add esp, <const>':
+// subtract 'adj' from the constant; the op is always marked
+// removed (OPF_RMD), and fully handled (OPF_DONE) once the
+// remaining adjust reaches zero
+static void patch_esp_adjust(struct parsed_op *po, int adj)
+{
+  ferr_assert(po, po->op == OP_ADD);
+  ferr_assert(po, IS(opr_name(po, 0), "esp"));
+  ferr_assert(po, po->operand[1].type == OPT_CONST);
+
+  // this is a bit of a hack, but deals with use of
+  // single adj for multiple calls
+  po->operand[1].val -= adj;
+  po->flags |= OPF_RMD;
+  if (po->operand[1].val == 0)
+    po->flags |= OPF_DONE;
+  ferr_assert(po, (int)po->operand[1].val >= 0);
+}
+
// scan for positive, constant esp adjust
+// multipath case is preliminary
static int scan_for_esp_adjust(int i, int opcnt,
- int adj_expect, int *adj, int *multipath)
+ int adj_expect, int *adj, int *is_multipath, int do_update)
{
struct parsed_op *po;
int first_pop = -1;
- *adj = *multipath = 0;
+ *adj = *is_multipath = 0;
for (; i < opcnt && *adj < adj_expect; i++) {
if (g_labels[i] != NULL)
- *multipath = 1;
+ *is_multipath = 1;
po = &ops[i];
if (po->flags & OPF_DONE)
*adj += po->operand[1].val;
if (*adj & 3)
ferr(&ops[i], "unaligned esp adjust: %x\n", *adj);
+ if (do_update) {
+ if (!*is_multipath)
+ patch_esp_adjust(po, adj_expect);
+ else
+ po->flags |= OPF_RMD;
+ }
return i;
}
else if (po->op == OP_PUSH) {
if (first_pop == -1 && *adj >= 0)
first_pop = i;
}
+ if (do_update && *adj >= 0) {
+ po->flags |= OPF_RMD;
+ if (!*is_multipath)
+ po->flags |= OPF_DONE;
+ }
+
*adj += lmod_bytes(po, po->operand[0].lmod);
}
else if (po->flags & (OPF_JMP|OPF_TAIL)) {
}
static void scan_for_call_type(int i, const struct parsed_opr *opr,
- int magic, const struct parsed_proto **pp_found, int *multi)
+ int magic, const struct parsed_proto **pp_found, int *pp_i,
+ int *multi)
{
const struct parsed_proto *pp = NULL;
struct parsed_op *po;
lr = &g_label_refs[i];
for (; lr != NULL; lr = lr->next) {
check_i(&ops[i], lr->i);
- scan_for_call_type(lr->i, opr, magic, pp_found, multi);
+ scan_for_call_type(lr->i, opr, magic, pp_found, pp_i, multi);
}
if (i > 0 && LAST_OP(i - 1))
return;
}
*multi = 1;
}
- if (pp != NULL)
+ if (pp != NULL) {
*pp_found = pp;
+ *pp_i = po - ops;
+ }
}
// early check for tail call or branch back
}
static const struct parsed_proto *resolve_icall(int i, int opcnt,
- int *multi_src)
+ int *pp_i, int *multi_src)
{
const struct parsed_proto *pp = NULL;
int search_advice = 0;
*multi_src = 0;
+ *pp_i = -1;
switch (ops[i].operand[0].type) {
case OPT_REGMEM:
// fallthrough
default:
scan_for_call_type(i, &ops[i].operand[0], i + opcnt * 9, &pp,
- multi_src);
+ pp_i, multi_src);
break;
}
}
// find an instruction that changed opr before i op
-// *op_i must be set to -1 by caller
+// *op_i must be set to -1 by the caller
// *entry is set to 1 if one source is determined to be the caller
// returns 1 if found, *op_i is then set to origin
static int resolve_origin(int i, const struct parsed_opr *opr,
}
}
+// find an instruction that previously referenced opr
+// if multiple results are found - fail
+// *op_i must be set to -1 by the caller
+// returns 1 if found, *op_i is then set to referencer insn;
+// returns -1 on conflict or when the function start is reached
+static int resolve_last_ref(int i, const struct parsed_opr *opr,
+  int magic, int *op_i)
+{
+  struct label_ref *lr;
+  int ret = 0;
+
+  if (ops[i].cc_scratch == magic)
+    return 0;
+  ops[i].cc_scratch = magic;
+
+  while (1) {
+    if (g_labels[i] != NULL) {
+      // also walk every path that jumps to this label
+      lr = &g_label_refs[i];
+      for (; lr != NULL; lr = lr->next) {
+        check_i(&ops[i], lr->i);
+        ret |= resolve_last_ref(lr->i, opr, magic, op_i);
+      }
+      // previous op ends a basic block - no linear predecessor
+      if (i > 0 && LAST_OP(i - 1))
+        return ret;
+    }
+
+    i--;
+    if (i < 0)
+      return -1;
+
+    if (ops[i].cc_scratch == magic)
+      return 0;
+    ops[i].cc_scratch = magic;
+
+    if (!is_opr_referenced(opr, &ops[i]))
+      continue;
+
+    // a referencer was already recorded - ambiguous
+    if (*op_i >= 0)
+      return -1;
+
+    *op_i = i;
+    return 1;
+  }
+}
+
+// find next instruction that reads opr
+// if multiple results are found - fail
+// *op_i must be set to -1 by the caller
+// returns 1 if found, *op_i is then set to the reading insn
+static int find_next_read(int i, int opcnt,
+  const struct parsed_opr *opr, int magic, int *op_i)
+{
+  struct parsed_op *po;
+  int j, ret = 0;
+
+  for (; i < opcnt; i++)
+  {
+    if (ops[i].cc_scratch == magic)
+      return 0;
+    ops[i].cc_scratch = magic;
+
+    po = &ops[i];
+    if ((po->flags & OPF_JMP) && po->op != OP_CALL) {
+      if (po->btj != NULL) {
+        // jumptable: scan every target
+        for (j = 0; j < po->btj->count; j++) {
+          check_i(po, po->btj->d[j].bt_i);
+          ret |= find_next_read(po->btj->d[j].bt_i, opcnt, opr,
+                   magic, op_i);
+        }
+        return ret;
+      }
+
+      if (po->flags & OPF_RMD)
+        // jump was removed by earlier analysis - fall through
+        continue;
+      check_i(po, po->bt_i);
+      if (po->flags & OPF_CJMP) {
+        // scan the taken branch recursively, then keep going
+        // on the fall-through path (previously both paths were
+        // redirected to bt_i, so fall-through was never scanned)
+        ret = find_next_read(po->bt_i, opcnt, opr, magic, op_i);
+        if (ret < 0)
+          return ret;
+      }
+      else
+        // unconditional jump - follow it
+        i = po->bt_i - 1;
+      continue;
+    }
+
+    if (!is_opr_read(opr, po)) {
+      if (is_opr_modified(opr, po))
+        // it's overwritten
+        return 0;
+      if (po->flags & OPF_TAIL)
+        return 0;
+      continue;
+    }
+
+    // a reader was already recorded - ambiguous
+    if (*op_i >= 0)
+      return -1;
+
+    *op_i = i;
+    return 1;
+  }
+
+  return 0;
+}
+
static int try_resolve_const(int i, const struct parsed_opr *opr,
int magic, unsigned int *val)
{
*adj_i = ret = -1;
if (!pp->is_stdcall && pp->argc_stack > 0)
ret = scan_for_esp_adjust(i + 1, opcnt,
- pp->argc_stack * 4, &adj, &multipath);
+ pp->argc_stack * 4, &adj, &multipath, 0);
if (ret >= 0) {
if (pp->argc_stack > adj / 4)
return NULL;
return pp;
}
-static void patch_esp_adjust(struct parsed_op *po, int adj)
-{
- ferr_assert(po, po->op == OP_ADD);
- ferr_assert(po, IS(opr_name(po, 0), "esp"));
- ferr_assert(po, po->operand[1].type == OPT_CONST);
-
- // this is a bit of a hack, but deals with use of
- // single adj for multiple calls
- po->operand[1].val -= adj;
- po->flags |= OPF_RMD;
- if (po->operand[1].val == 0)
- po->flags |= OPF_DONE;
- ferr_assert(po, (int)po->operand[1].val >= 0);
-}
-
static struct parsed_proto *process_call(int i, int opcnt)
{
struct parsed_op *po = &ops[i];
const struct parsed_proto *pp_c;
struct parsed_proto *pp;
const char *tmpname;
+ int call_i = -1, ref_i = -1;
int adj = 0, multipath = 0;
int ret, arg;
if (pp == NULL)
{
// indirect call
- pp_c = resolve_icall(i, opcnt, &multipath);
+ pp_c = resolve_icall(i, opcnt, &call_i, &multipath);
if (pp_c != NULL) {
if (!pp_c->is_func && !pp_c->is_fptr)
ferr(po, "call to non-func: %s\n", pp_c->name);
case OPT_REG:
// we resolved this call and no longer need the register
po->regmask_src &= ~(1 << po->operand[0].reg);
+
+ if (!multipath && i != call_i && ops[call_i].op == OP_MOV
+ && ops[call_i].operand[1].type == OPT_LABEL)
+ {
+ // no other source users?
+ ret = resolve_last_ref(i, &po->operand[0], opcnt * 10,
+ &ref_i);
+ if (ret == 1 && call_i == ref_i) {
+ // and nothing uses it after us?
+ ref_i = -1;
+ ret = find_next_read(i + 1, opcnt, &po->operand[0],
+ opcnt * 11, &ref_i);
+ if (ret != 1)
+ // then also don't need the source mov
+ ops[call_i].flags |= OPF_RMD;
+ }
+ }
break;
case OPT_REGMEM:
pp->is_fptr = 1;
my_assert_not(pp, NULL);
pp->is_fptr = 1;
- ret = scan_for_esp_adjust(i + 1, opcnt, 32*4, &adj, &multipath);
+ ret = scan_for_esp_adjust(i + 1, opcnt,
+ 32*4, &adj, &multipath, 0);
if (ret < 0 || adj < 0) {
if (!g_allow_regfunc)
ferr(po, "non-__cdecl indirect call unhandled yet\n");
ret = -1;
if (!pp->is_stdcall && pp->argc_stack > 0)
ret = scan_for_esp_adjust(i + 1, opcnt,
- pp->argc_stack * 4, &adj, &multipath);
+ pp->argc_stack * 4, &adj, &multipath, 0);
if (ret >= 0) {
if (pp->is_vararg) {
if (adj / 4 < pp->argc_stack) {
tmpname, pp->argc_stack * 4, adj);
}
- ops[ret].flags |= OPF_RMD;
- if (ops[ret].op == OP_POP) {
- if (adj > 4) {
- // deal with multi-pop stack adjust
- adj = pp->argc_stack;
- while (ops[ret].op == OP_POP && adj > 0 && ret < opcnt) {
- ops[ret].flags |= OPF_RMD | OPF_DONE;
- adj--;
- ret++;
- }
- }
- }
- else if (!multipath)
- patch_esp_adjust(&ops[ret], pp->argc_stack * 4);
+ scan_for_esp_adjust(i + 1, opcnt,
+ pp->argc_stack * 4, &adj, &multipath, 1);
}
else if (pp->is_vararg)
ferr(po, "missing esp_adjust for vararg func '%s'\n",
}
}
+// flags for output_pp() / output_pp_attrs()
+enum {
+  OPP_FORCE_NORETURN = (1 << 0), // print noreturn even if pp lacks it
+  OPP_SIMPLE_ARGS = (1 << 1), // don't expand func ptr args
+  OPP_ALIGN = (1 << 2), // column-align ret type / calling conv
+};
+
static void output_pp_attrs(FILE *fout, const struct parsed_proto *pp,
- int is_noreturn)
+ int flags)
{
+ const char *cconv = "";
+
if (pp->is_fastcall)
- fprintf(fout, "__fastcall ");
+ cconv = "__fastcall ";
else if (pp->is_stdcall && pp->argc_reg == 0)
- fprintf(fout, "__stdcall ");
- if (pp->is_noreturn || is_noreturn)
+ cconv = "__stdcall ";
+
+ fprintf(fout, (flags & OPP_ALIGN) ? "%-16s" : "%s", cconv);
+
+ if (pp->is_noreturn || (flags & OPP_FORCE_NORETURN))
fprintf(fout, "noreturn ");
}
+// print the full C prototype for 'pp' to 'fout':
+// return type, calling-convention attrs, (possibly fptr-wrapped)
+// name and the argument list; 'flags' is an OPP_* bitfield
+static void output_pp(FILE *fout, const struct parsed_proto *pp,
+  int flags)
+{
+  int i;
+
+  fprintf(fout, (flags & OPP_ALIGN) ? "%-5s" : "%s ",
+    pp->ret_type.name);
+  if (pp->is_fptr)
+    fprintf(fout, "(");
+  output_pp_attrs(fout, pp, flags);
+  if (pp->is_fptr)
+    fprintf(fout, "*");
+  fprintf(fout, "%s", pp->name);
+  if (pp->is_fptr)
+    fprintf(fout, ")");
+
+  fprintf(fout, "(");
+  for (i = 0; i < pp->argc; i++) {
+    if (i > 0)
+      fprintf(fout, ", ");
+    if (pp->arg[i].fptr != NULL && !(flags & OPP_SIMPLE_ARGS)) {
+      // func pointer: recurse to print the nested prototype
+      output_pp(fout, pp->arg[i].fptr, 0);
+    }
+    else if (pp->arg[i].type.is_retreg) {
+      // register returned through a pointer arg
+      fprintf(fout, "u32 *r_%s", pp->arg[i].reg);
+    }
+    else {
+      fprintf(fout, "%s", pp->arg[i].type.name);
+      // fptr prototypes are printed without arg names
+      if (!pp->is_fptr)
+        fprintf(fout, " a%d", i + 1);
+    }
+  }
+  if (pp->is_vararg) {
+    if (i > 0)
+      fprintf(fout, ", ");
+    fprintf(fout, "...");
+  }
+  fprintf(fout, ")");
+}
+
static int get_pp_arg_regmask(const struct parsed_proto *pp)
{
int regmask = 0;
int need_tmp64 = 0;
int had_decl = 0;
int label_pending = 0;
- int regmask_save = 0;
- int regmask_arg = 0;
- int regmask_now = 0;
- int regmask_init = 0;
- int regmask = 0;
+ int regmask_save = 0; // regs saved/restored in this func
+ int regmask_arg = 0; // regs carrying function args (fastcall, etc)
+ int regmask_now; // temp
+ int regmask_init = 0; // regs that need zero initialization
+ int regmask_pp = 0; // regs used in complex push-pop graph
+ int regmask = 0; // used regs
int pfomask = 0;
int found = 0;
int depth = 0;
// pass4:
// - process calls
+ // - handle push <const>/pop pairs
for (i = 0; i < opcnt; i++)
{
po = &ops[i];
if (strstr(pp->ret_type.name, "int64"))
need_tmp64 = 1;
}
+ else if (po->op == OP_PUSH && !(po->flags & OPF_FARG)
+ && !(po->flags & OPF_RSAVE) && po->operand[0].type == OPT_CONST)
+ scan_for_pop_const(i, opcnt, ®mask_pp);
}
// pass5:
continue;
}
}
- else if (po->operand[0].type == OPT_CONST) {
- scan_for_pop_const(i, opcnt);
- }
}
if (po->op == OP_STD) {
}
// the function itself
- fprintf(fout, "%s ", g_func_pp->ret_type.name);
- output_pp_attrs(fout, g_func_pp, g_ida_func_attr & IDAFA_NORETURN);
- fprintf(fout, "%s(", g_func_pp->name);
-
- for (i = 0; i < g_func_pp->argc; i++) {
- if (i > 0)
- fprintf(fout, ", ");
- if (g_func_pp->arg[i].fptr != NULL) {
- // func pointer..
- pp = g_func_pp->arg[i].fptr;
- fprintf(fout, "%s (", pp->ret_type.name);
- output_pp_attrs(fout, pp, 0);
- fprintf(fout, "*a%d)(", i + 1);
- for (j = 0; j < pp->argc; j++) {
- if (j > 0)
- fprintf(fout, ", ");
- if (pp->arg[j].fptr)
- ferr(ops, "nested fptr\n");
- fprintf(fout, "%s", pp->arg[j].type.name);
- }
- if (pp->is_vararg) {
- if (j > 0)
- fprintf(fout, ", ");
- fprintf(fout, "...");
- }
- fprintf(fout, ")");
- }
- else if (g_func_pp->arg[i].type.is_retreg) {
- fprintf(fout, "u32 *r_%s", g_func_pp->arg[i].reg);
- }
- else {
- fprintf(fout, "%s a%d", g_func_pp->arg[i].type.name, i + 1);
- }
- }
- if (g_func_pp->is_vararg) {
- if (i > 0)
- fprintf(fout, ", ");
- fprintf(fout, "...");
- }
-
- fprintf(fout, ")\n{\n");
+ ferr_assert(ops, !g_func_pp->is_fptr);
+ output_pp(fout, g_func_pp,
+ (g_ida_func_attr & IDAFA_NORETURN) ? OPP_FORCE_NORETURN : 0);
+ fprintf(fout, "\n{\n");
// declare indirect functions
for (i = 0; i < opcnt; i++) {
else
snprintf(pp->name, sizeof(pp->name), "icall%d", i);
- fprintf(fout, " %s (", pp->ret_type.name);
- output_pp_attrs(fout, pp, 0);
- fprintf(fout, "*%s)(", pp->name);
- for (j = 0; j < pp->argc; j++) {
- if (j > 0)
- fprintf(fout, ", ");
- fprintf(fout, "%s a%d", pp->arg[j].type.name, j + 1);
- }
- fprintf(fout, ");\n");
+ fprintf(fout, " ");
+ output_pp(fout, pp, OPP_SIMPLE_ARGS);
+ fprintf(fout, ";\n");
}
}
}
}
}
+ // declare normal registers
regmask_now = regmask & ~regmask_arg;
regmask_now &= ~(1 << xSP);
if (regmask_now & 0x00ff) {
}
}
+ // declare push-pop temporaries
+ if (regmask_pp) {
+ for (reg = 0; reg < 8; reg++) {
+ if (regmask_pp & (1 << reg)) {
+ fprintf(fout, " u32 pp_%s;\n", regs_r32[reg]);
+ had_decl = 1;
+ }
+ }
+ }
+
if (cond_vars) {
for (i = 0; i < 8; i++) {
if (cond_vars & (1 << i)) {
fprintf(fout, " s_%s = %s;", buf1, buf1);
break;
}
+ else if (po->flags & OPF_PPUSH) {
+ tmp_op = po->datap;
+ ferr_assert(po, tmp_op != NULL);
+ out_dst_opr(buf2, sizeof(buf2), po, &tmp_op->operand[0]);
+ fprintf(fout, " pp_%s = %s;", buf2, buf1);
+ break;
+ }
else if (g_func_pp->is_userstack) {
fprintf(fout, " *(--esp) = %s;", buf1);
break;
break;
case OP_POP:
+ out_dst_opr(buf1, sizeof(buf1), po, &po->operand[0]);
if (po->flags & OPF_RSAVE) {
- out_dst_opr(buf1, sizeof(buf1), po, &po->operand[0]);
fprintf(fout, " %s = s_%s;", buf1, buf1);
break;
}
+ else if (po->flags & OPF_PPUSH) {
+ // push/pop graph
+ ferr_assert(po, po->datap == NULL);
+ fprintf(fout, " %s = pp_%s;", buf1, buf1);
+ break;
+ }
else if (po->datap != NULL) {
// push/pop pair
tmp_op = po->datap;
- out_dst_opr(buf1, sizeof(buf1), po, &po->operand[0]);
fprintf(fout, " %s = %s;", buf1,
out_src_opr(buf2, sizeof(buf2),
tmp_op, &tmp_op->operand[0],
break;
}
else if (g_func_pp->is_userstack) {
- fprintf(fout, " %s = *esp++;",
- out_dst_opr(buf1, sizeof(buf1), po, &po->operand[0]));
+ fprintf(fout, " %s = *esp++;", buf1);
break;
}
else
enum opr_lenmod lmod;
unsigned int is_seeded:1;
unsigned int is_c_str:1;
+ const struct parsed_proto *pp; // seed pp, if any
} *hg_vars;
static int hg_var_cnt;
if (po->btj != NULL) {
// jumptable
for (j = 0; j < po->btj->count; j++) {
+ check_i(po, po->btj->d[j].bt_i);
gen_hdr_dep_pass(po->btj->d[j].bt_i, opcnt, cbits, fp,
regmask_save, regmask_dst, regmask_dep, has_ret);
}
return;
}
- if (po->bt_i < 0) {
- ferr(po, "dead branch\n");
- return;
- }
-
+ check_i(po, po->bt_i);
if (po->flags & OPF_CJMP) {
gen_hdr_dep_pass(po->bt_i, opcnt, cbits, fp,
regmask_save, regmask_dst, regmask_dep, has_ret);
continue;
if (po->op == OP_PUSH && po->operand[0].type == OPT_CONST)
- scan_for_pop_const(i, opcnt);
+ scan_for_pop_const(i, opcnt, ®mask_dummy);
}
// pass4:
if (pp != NULL && pp->is_include)
continue;
+ if (fp->pp != NULL) {
+ // part of seed, output later
+ continue;
+ }
+
regmask_dep = fp->regmask_dep;
argc_stack = fp->argc_stack;
[OPLM_QWORD] = "uint64_t",
};
const struct scanned_var *var;
+ char line[256] = { 0, };
int i;
// resolve deps
for (i = 0; i < hg_var_cnt; i++) {
var = &hg_vars[i];
- if (var->is_c_str)
+ if (var->pp != NULL)
+ // part of seed
+ continue;
+ else if (var->is_c_str)
fprintf(fout, "extern %-8s %s[];", "char", var->name);
else
fprintf(fout, "extern %-8s %s;",
// output function prototypes
output_hdr_fp(fout, hg_fp, hg_fp_cnt);
+
+ // seed passthrough
+ fprintf(fout, "\n// - seed -\n");
+
+ rewind(g_fhdr);
+ while (fgets(line, sizeof(line), g_fhdr))
+ fwrite(line, 1, strlen(line), fout);
}
// read a line, truncating it if it doesn't fit
static void scan_variables(FILE *fasm)
{
- const struct parsed_proto *pp_c;
struct scanned_var *var;
char line[256] = { 0, };
char words[3][256];
snprintf(var->name, sizeof(var->name), "%s", words[0]);
// maybe already in seed header?
- pp_c = proto_parse(g_fhdr, var->name, 1);
- if (pp_c != NULL) {
- if (pp_c->is_func)
- aerr("func?\n");
- else if (pp_c->is_fptr) {
+ var->pp = proto_parse(g_fhdr, var->name, 1);
+ if (var->pp != NULL) {
+ if (var->pp->is_fptr) {
var->lmod = OPLM_DWORD;
//var->is_ptr = 1;
}
- else if (!guess_lmod_from_c_type(&var->lmod, &pp_c->type))
+ else if (var->pp->is_func)
+ aerr("func?\n");
+ else if (!guess_lmod_from_c_type(&var->lmod, &var->pp->type))
aerr("unhandled C type '%s' for '%s'\n",
- pp_c->type.name, var->name);
+ var->pp->type.name, var->name);
var->is_seeded = 1;
continue;
do_pending_endp:
// do delayed endp processing to collect switch jumptables
if (pending_endp) {
- if (in_func && !skip_func && !end && wordc >= 2
+ if (in_func && !g_skip_func && !end && wordc >= 2
&& ((words[0][0] == 'd' && words[0][2] == 0)
|| (words[1][0] == 'd' && words[1][2] == 0)))
{
continue;
}
- if (in_func && !skip_func) {
+ if (in_func && !g_skip_func) {
if (g_header_mode)
gen_hdr(g_func, pi);
else
in_func = 0;
g_ida_func_attr = 0;
skip_warned = 0;
- skip_func = 0;
+ g_skip_func = 0;
g_func[0] = 0;
func_chunks_used = 0;
func_chunk_i = -1;
words[0], g_func);
p = words[0];
if (bsearch(&p, rlist, rlist_len, sizeof(rlist[0]), cmpstringp))
- skip_func = 1;
+ g_skip_func = 1;
strcpy(g_func, words[0]);
set_label(0, words[0]);
in_func = 1;
&& ops[0].op == OP_JMP && ops[0].operand[0].had_ds)
{
// import jump
- skip_func = 1;
+ g_skip_func = 1;
}
- if (!skip_func && func_chunks_used) {
+ if (!g_skip_func && func_chunks_used) {
// start processing chunks
struct chunk_item *ci, key = { g_func, 0 };
continue;
}
- if (!in_func || skip_func) {
- if (!skip_warned && !skip_func && g_labels[pi] != NULL) {
+ if (!in_func || g_skip_func) {
+ if (!skip_warned && !g_skip_func && g_labels[pi] != NULL) {
if (verbose)
anote("skipping from '%s'\n", g_labels[pi]);
skip_warned = 1;