{
//printf("get_pointer(%x)\n",(int)stub);
int *i_ptr=find_extjump_insn(stub);
- assert((*i_ptr&0x0f000000)==0x0a000000);
+ assert((*i_ptr&0x0f000000)==0x0a000000); // b
return (u_char *)i_ptr+((*i_ptr<<8)>>6)+8;
}
}
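
The pointer arithmetic above decodes the target of the asserted B instruction: shifting the signed 24-bit immediate left by 8 and arithmetically right by 6 sign-extends it and multiplies it by 4 in one step, and the trailing +8 accounts for ARM's convention that PC reads as the instruction address plus 8. A standalone sketch of the same decode (decode_arm_b_target is illustrative, not a function from this source):

#include <stdint.h>

/* Recover the byte target of an ARM B/BL instruction located at insn_addr. */
static uint8_t *decode_arm_b_target(uint32_t insn, uint8_t *insn_addr)
{
    int32_t imm = (int32_t)(insn << 8) >> 8; /* sign-extend the 24-bit offset field */
    return insn_addr + (imm << 2) + 8;       /* words to bytes, +8 for the PC bias */
}
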
// this output is parsed by verify_dirty, get_bounds, isclean, get_clean_addr
-static void do_dirty_stub_emit_args(u_int arg0)
+static void do_dirty_stub_emit_args(u_int arg0, u_int source_len)
{
#ifndef HAVE_ARMV7
emit_loadlp((int)source, 1);
emit_loadlp((int)copy, 2);
- emit_loadlp(slen*4, 3);
+ emit_loadlp(source_len, 3);
#else
emit_movw(((u_int)source)&0x0000FFFF, 1);
emit_movw(((u_int)copy)&0x0000FFFF, 2);
emit_movt(((u_int)source)&0xFFFF0000, 1);
emit_movt(((u_int)copy)&0xFFFF0000, 2);
- emit_movw(slen*4, 3);
+ emit_movw(source_len, 3);
#endif
emit_movimm(arg0, 0);
}
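
At run time the stub therefore hands verify_code the block's guest PC in r0, the guest source address in r1, the shadow copy in r2 and the byte length in r3. Conceptually the check amounts to the following (a sketch only; the real verify_code is not C and presumably falls back to the recompiler on a mismatch instead of returning a flag):

#include <string.h>

/* Conceptual model of the comparison the dirty stub arranges. */
static int block_is_clean(const void *source, const void *copy, unsigned len)
{
    /* the block may run as-is only while the guest code still matches
       the shadow copy taken at compile time */
    return memcmp(source, copy, len) == 0;
}
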
-static void *do_dirty_stub(int i)
+static void *do_dirty_stub(int i, u_int source_len)
{
assem_debug("do_dirty_stub %x\n",start+i*4);
- do_dirty_stub_emit_args(start + i*4);
+ do_dirty_stub_emit_args(start + i*4, source_len);
emit_far_call(verify_code);
void *entry = out;
load_regs_entry(i);
return entry;
}
-static void do_dirty_stub_ds()
+static void do_dirty_stub_ds(u_int source_len)
{
- do_dirty_stub_emit_args(start + 1);
+ do_dirty_stub_emit_args(start + 1, source_len);
emit_far_call(verify_code_ds);
}
|| (*ptr&0x7e000000) == 0x34000000) { // cbz/cbnz
// Conditional branches are limited to +/- 1MB
// block max size is 256k so branching beyond the +/- 1MB limit
- // should only happen when jumping to an already compiled block (see add_link)
+ // should only happen when jumping to an already compiled block (see add_jump_out)
// a workaround would be to do a trampoline jump via a stub at the end of the block
assert(-1048576 <= offset && offset < 1048576);
*ptr=(*ptr&0xff00001f)|(((offset>>2)&0x7ffff)<<5);
}
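
Spelled out, the rewrite keeps the opcode byte (bits 24-31) and the cond/Rt field (bits 0-4) and replaces only the 19-bit word offset occupying bits 5-23; a minimal standalone sketch:

#include <assert.h>
#include <stdint.h>

/* Illustrative imm19 patch for an arm64 B.cond/CBZ/CBNZ; offset is the
   byte distance from the instruction to its target. */
static void patch_branch_imm19(uint32_t *insn, int64_t offset)
{
    assert(-1048576 <= offset && offset < 1048576); /* +/- 1MB reach */
    *insn = (*insn & 0xff00001f) | (uint32_t)(((offset >> 2) & 0x7ffff) << 5);
}
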
// this output is parsed by verify_dirty, get_bounds, isclean, get_clean_addr
-static void do_dirty_stub_base(u_int vaddr)
+static void do_dirty_stub_base(u_int vaddr, u_int source_len)
{
- assert(slen <= MAXBLOCK);
+ assert(source_len <= MAXBLOCK*4);
emit_loadlp_ofs(0, 0); // ldr x0, source
emit_loadlp_ofs(0, 1); // ldr x1, copy
- emit_movz(slen*4, 2);
+ emit_movz(source_len, 2);
emit_far_call(verify_code_arm64);
void *jmp = out;
emit_cbz(0, 0);
{
assert((ptr[0] & 0xff00001f) == 0x58000000); // ldr x0, source
assert((ptr[1] & 0xff00001f) == 0x58000001); // ldr x1, copy
- assert((ptr[2] & 0xffe0001f) == 0x52800002); // movz w2, #slen*4
+ assert((ptr[2] & 0xffe0001f) == 0x52800002); // movz w2, #source_len
assert( ptr[8] == 0xd61f0000); // br x0
}
output_w64((uintptr_t)copy);
}
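
Taken together, the asserts fix the stub layout that verify_dirty, get_bounds, isclean and get_clean_addr parse later (instructions between ptr[2] and ptr[8] are elided here, as the asserts do not constrain them):

/* arm64 dirty-stub layout, as pinned down by assert_dirty_stub:
 *   ptr[0]  ldr  x0, source      ; PC-relative literal load
 *   ptr[1]  ldr  x1, copy
 *   ptr[2]  movz w2, #source_len
 *   ...
 *   ptr[8]  br   x0              ; 0xd61f0000
 * followed by the 64-bit source and copy literals written by output_w64. */
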
-static void *do_dirty_stub(int i)
+static void *do_dirty_stub(int i, u_int source_len)
{
assem_debug("do_dirty_stub %x\n",start+i*4);
u_int *loadlps = (void *)out;
- do_dirty_stub_base(start + i*4);
+ do_dirty_stub_base(start + i*4, source_len);
void *entry = out;
load_regs_entry(i);
if (entry == out)
return entry;
}
-static void do_dirty_stub_ds(void)
+static void do_dirty_stub_ds(u_int source_len)
{
u_int *loadlps = (void *)out;
- do_dirty_stub_base(start + 1);
+ do_dirty_stub_base(start + 1, source_len);
void *lit_jumpover = out;
emit_jmp(out + 8*2);
do_dirty_stub_emit_literals(loadlps);
assert_dirty_stub(ptr);
source = (void *)get_from_ldr_literal(&ptr[0]); // ldr x0, source
copy = (void *)get_from_ldr_literal(&ptr[1]); // ldr x1, copy
- len = get_from_movz(&ptr[2]); // movz w3, #slen*4
+ len = get_from_movz(&ptr[2]); // movz w2, #source_len
return !memcmp(source, copy, len);
}
const u_int *ptr = addr;
assert_dirty_stub(ptr);
*start = (u_char *)get_from_ldr_literal(&ptr[0]); // ldr x0, source
- *end = *start + get_from_movz(&ptr[2]); // movz w3, #slen*4
+ *end = *start + get_from_movz(&ptr[2]); // movz w2, #source_len
}
/* Special assem */
#ifdef __MACH__
#define dynarec_local ESYM(dynarec_local)
-#define add_link ESYM(add_link)
+#define add_jump_out ESYM(add_jump_out)
#define new_recompile_block ESYM(new_recompile_block)
#define get_addr ESYM(get_addr)
#define get_addr_ht ESYM(get_addr_ht)
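
These redefinitions are needed because Mach-O prepends an underscore to C symbol names; ESYM is presumably defined along the lines of the usual pattern (a sketch, not copied from this source):

#ifdef __MACH__
#define ESYM(name) _##name
#else
#define ESYM(name) name
#endif
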
orrcs r2, r6, #2048
ldr r5, [r3, r2, lsl #2]
lsl r12, r12, #8
- add r6, r1, r12, asr #6
+ add r6, r1, r12, asr #6 /* old target */
mov r8, #0
/* jump_in lookup */
1:
mov r5, r1
mov r1, r6
- bl add_link
+ bl add_jump_out
sub r2, r8, r5
and r1, r7, #0xff000000
lsl r2, r2, #6
inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
void *host_addr=find_extjump_insn(head->addr);
mark_clear_cache(host_addr);
- set_jump_target(host_addr, head->addr);
+ set_jump_target(host_addr, head->addr); // point back to dyna_linker
next=head->next;
free(head);
head=next;
// Add an entry to jump_out after making a link
// src should point to code emitted by emit_extjump2()
-void add_link(u_int vaddr,void *src)
+void add_jump_out(u_int vaddr,void *src)
{
u_int page=get_page(vaddr);
- inv_debug("add_link: %p -> %x (%d)\n",src,vaddr,page);
+ inv_debug("add_jump_out: %p -> %x (%d)\n",src,vaddr,page);
check_extjump2(src);
ll_add(jump_out+page,vaddr,src);
- //void *ptr=get_pointer(src);
- //inv_debug("add_link: Pointer is to %p\n",ptr);
+ //inv_debug("add_jump_out: to %p\n",get_pointer(src));
}
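
jump_out keeps one list per page, and the free(head) in the invalidation loop above shows the nodes are heap-allocated; ll_add presumably prepends a node roughly like this (struct and helper are assumptions reconstructed from the uses above, not the source's definitions):

#include <stdlib.h>
#include <sys/types.h> /* u_int */

struct ll_entry {
    u_int vaddr;            /* guest address the entry refers to */
    void *addr;             /* host address (here: the extjump stub) */
    struct ll_entry *next;
};

/* minimal sketch of ll_add, assuming head-insertion */
static void ll_add_sketch(struct ll_entry **head, u_int vaddr, void *addr)
{
    struct ll_entry *e = malloc(sizeof(*e));
    e->vaddr = vaddr;
    e->addr = addr;
    e->next = *head;
    *head = e;
}
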
// If a code block was found to be unmodified (bit was set in
emit_extjump_ds(branch_addr, target_addr);
if(compiled_target_addr) {
set_jump_target(branch_addr, compiled_target_addr);
- add_link(target_addr,stub);
+ add_jump_out(target_addr,stub);
}
else set_jump_target(branch_addr, stub);
if(likely[i]) {
emit_extjump_ds(branch_addr, target_addr);
if(compiled_target_addr) {
set_jump_target(branch_addr, compiled_target_addr);
- add_link(target_addr,stub);
+ add_jump_out(target_addr,stub);
}
else set_jump_target(branch_addr, stub);
}
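
Both call sites follow the same link-or-defer pattern: if the branch target is already compiled, the branch is patched to jump there directly and the stub is recorded in jump_out so invalidation can unlink it later; otherwise the branch is left pointing at the stub, which resolves the target through dyna_linker at run time. Condensed (a restatement of the code above, not a helper that exists in the source):

/* link directly when the target block exists, else defer to the stub */
static void link_or_defer(void *branch_addr, u_int target_vaddr,
                          void *compiled_target, void *stub)
{
    if (compiled_target) {
        set_jump_target(branch_addr, compiled_target);
        add_jump_out(target_vaddr, stub); /* so invalidation can undo the link */
    }
    else
        set_jump_target(branch_addr, stub); /* resolve lazily via dyna_linker */
}
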
u_int page=get_page(vaddr);
u_int vpage=get_vpage(vaddr);
ll_add(jump_dirty+vpage,vaddr,(void *)out);
- do_dirty_stub_ds();
+ do_dirty_stub_ds(slen*4);
ll_add(jump_in+page,vaddr,(void *)out);
assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
if(regs[0].regmap[HOST_CCREG]!=CCREG)
literal_pool_jumpover(256);
}
}
- //assert(is_ujump(i-2));
+
+ assert(slen > 0);
+ if (itype[slen-1] == INTCALL) {
+ // no ending needed for this block since INTCALL never returns
+ }
// If the block did not end with an unconditional branch,
// add a jump to the next instruction.
- if(i>1) {
+ else if (i > 1) {
if(!is_ujump(i-2)&&itype[i-1]!=SPAN) {
assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
assert(i==slen);
emit_extjump(link_addr[i].addr, link_addr[i].target);
if (addr) {
set_jump_target(link_addr[i].addr, addr);
- add_link(link_addr[i].target,stub);
+ add_jump_out(link_addr[i].target,stub);
}
else
set_jump_target(link_addr[i].addr, stub);
//#endif
}
}
+
+ u_int source_len = slen*4;
+ if (itype[slen-1] == INTCALL && source_len > 4)
+ // no need to treat the last instruction as compiled
+ // as interpreter fully handles it
+ source_len -= 4;
+
+ if ((u_char *)copy + source_len > (u_char *)shadow + sizeof(shadow))
+ copy = shadow;
+
// External Branch Targets (jump_in)
- if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
for(i=0;i<slen;i++)
{
if(bt[i]||i==0)
assem_debug("%p (%d) <- %8x\n",instr_addr[i],i,start+i*4);
assem_debug("jump_in: %x\n",start+i*4);
ll_add(jump_dirty+vpage,vaddr,out);
- void *entry_point = do_dirty_stub(i);
+ void *entry_point = do_dirty_stub(i, source_len);
ll_add_flags(jump_in+page,vaddr,state_rflags,entry_point);
// If there was an existing entry in the hash table,
// replace it with the new address.
#endif
assert(out - (u_char *)beginning < MAX_OUTPUT_BLOCK_SIZE);
//printf("shadow buffer: %p-%p\n",copy,(u_char *)copy+slen*4);
- memcpy(copy,source,slen*4);
- copy+=slen*4;
+ memcpy(copy, source, source_len);
+ copy += source_len;
end_block(beginning);
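
The shadow buffer thus works as a simple bump allocator for pristine copies of guest code: each block appends source_len bytes, and when the next copy would overrun the buffer the pointer wraps to the start, overwriting the oldest copies (whose blocks will then presumably fail verification and get recompiled). A sketch of the idiom, with an illustrative buffer size:

#include <string.h>
#include <sys/types.h> /* u_char, u_int */

static u_char shadow[1 << 20]; /* illustrative; the real size differs */
static u_char *copy = shadow;

/* take a pristine copy of a block's guest code for later verification */
static u_char *shadow_copy(const void *source, u_int source_len)
{
    if (copy + source_len > shadow + sizeof(shadow))
        copy = shadow;          /* wrap: oldest copies get overwritten */
    u_char *p = copy;
    memcpy(p, source, source_len);
    copy += source_len;
    return p;
}
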