#endif
}
-static void mark_clear_cache(void *target)
-{
- u_long offset = (u_char *)target - translation_cache;
- u_int mask = 1u << ((offset >> 12) & 31);
- if (!(needs_clear_cache[offset >> 17] & mask)) {
- char *start = (char *)((u_long)target & ~4095ul);
- start_tcache_write(start, start + 4096);
- needs_clear_cache[offset >> 17] |= mask;
- }
-}
-
-// Clearing the cache is rather slow on ARM Linux, so mark the areas
-// that need to be cleared, and then only clear these areas once.
-static void do_clear_cache()
-{
- int i,j;
- for (i=0;i<(1<<(TARGET_SIZE_2-17));i++)
- {
- u_int bitmap=needs_clear_cache[i];
- if(bitmap) {
- u_char *start, *end;
- for(j=0;j<32;j++)
- {
- if(bitmap&(1<<j)) {
- start=translation_cache+i*131072+j*4096;
- end=start+4095;
- j++;
- while(j<32) {
- if(bitmap&(1<<j)) {
- end+=4096;
- j++;
- }else{
- end_tcache_write(start, end);
- break;
- }
- }
- }
- }
- needs_clear_cache[i]=0;
- }
- }
-}
-
// CPU-architecture-specific initialization
static void arch_init(void)
{
emit_writeword(rt,&mini_ht[(return_address&0xFF)>>3][0]);
}
-static void mark_clear_cache(void *target)
+static void clear_cache_arm64(char *start, char *end)
{
- u_long offset = (u_char *)target - translation_cache;
- u_int mask = 1u << ((offset >> 12) & 31);
- if (!(needs_clear_cache[offset >> 17] & mask)) {
- char *start = (char *)((u_long)target & ~4095ul);
- start_tcache_write(start, start + 4096);
- needs_clear_cache[offset >> 17] |= mask;
+ // Don't rely on GCC's __clear_cache implementation, as it caches
+ // the icache/dcache line sizes, which can vary between cores on
+ // big.LITTLE architectures.
+ uint64_t addr, ctr_el0;
+ static size_t icache_line_size = 0xffff, dcache_line_size = 0xffff;
+ size_t isize, dsize;
+
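+ // CTR_EL0.IminLine (bits [3:0]) and CTR_EL0.DminLine (bits [19:16])
+ // encode the line sizes as log2 of the number of 4-byte words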
+ __asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
+ isize = 4 << ((ctr_el0 >> 0) & 0xf);
+ dsize = 4 << ((ctr_el0 >> 16) & 0xf);
+
+ // use the global minimum cache line size
+ icache_line_size = isize = icache_line_size < isize ? icache_line_size : isize;
+ dcache_line_size = dsize = dcache_line_size < dsize ? dcache_line_size : dsize;
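+ // (a stride smaller than the real line size is safe - it only costs
+ // extra iterations, it can never skip a line)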
+
+ /* If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification is
+ not required for instruction to data coherence. */
+ if ((ctr_el0 & (1 << 28)) == 0x0) {
+ addr = (uint64_t)start & ~(uint64_t)(dsize - 1);
+ for (; addr < (uint64_t)end; addr += dsize)
+ // use "civac" instead of "cvau", as this is the suggested workaround for
+ // Cortex-A53 errata 819472, 826319, 827319 and 824069.
+ __asm__ volatile("dc civac, %0" : : "r"(addr) : "memory");
}
-}
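+ // wait for the data cache maintenance ops to complete before
+ // operating on the instruction cache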
+ __asm__ volatile("dsb ish" : : : "memory");
-// Clearing the cache is rather slow on ARM Linux, so mark the areas
-// that need to be cleared, and then only clear these areas once.
-static void do_clear_cache()
-{
- int i,j;
- for (i=0;i<(1<<(TARGET_SIZE_2-17));i++)
- {
- u_int bitmap=needs_clear_cache[i];
- if(bitmap) {
- u_char *start, *end;
- for(j=0;j<32;j++)
- {
- if(bitmap&(1<<j)) {
- start=translation_cache+i*131072+j*4096;
- end=start+4095;
- j++;
- while(j<32) {
- if(bitmap&(1<<j)) {
- end+=4096;
- j++;
- }else{
- end_tcache_write(start, end);
- break;
- }
- }
- }
- }
- needs_clear_cache[i]=0;
- }
+ /* If CTR_EL0.DIC is enabled, instruction cache invalidation to the Point
+ of Unification is not required for instruction to data coherence. */
+ if ((ctr_el0 & (1 << 29)) == 0x0) {
+ addr = (uint64_t)start & ~(uint64_t)(isize - 1);
+ for (; addr < (uint64_t)end; addr += isize)
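+ // invalidate each icache line up to the Point of Unification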
+ __asm__ volatile("ic ivau, %0" : : "r"(addr) : "memory");
+
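+ // make sure the icache invalidation has completed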
+ __asm__ volatile("dsb ish" : : : "memory");
}
+
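+ // flush the pipeline so subsequent instructions are refetched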
+ __asm__ volatile("isb" : : : "memory");
}
// CPU-architecture-specific initialization
static void end_tcache_write(void *start, void *end)
{
-#ifdef __arm__
+#if defined(__arm__) || defined(__aarch64__)
size_t len = (char *)end - (char *)start;
#if defined(__BLACKBERRY_QNX__)
msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
sceKernelSyncVMDomain(sceBlock, start, len);
#elif defined(_3DS)
ctr_flush_invalidate_cache();
+ #elif defined(__aarch64__)
+ // as of 2021, __clear_cache() is still broken on arm64 big.LITTLE
+ // (it caches one core's line sizes), so here is a custom one :(
+ clear_cache_arm64(start, end);
#else
__clear_cache(start, end);
#endif
(void)len;
-#else
- __clear_cache(start, end);
#endif
mprotect_w_x(start, end, 1);
end_tcache_write(start, out);
}
+// also takes care of w^x mappings when patching code
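+// one bit per 4K page of the translation cache; each u_int entry
+// covers a 32*4K = 128K chunk, hence the >>12 / >>17 shifts below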
+static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
+
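+// flag the 4K page containing target as needing an icache flush and
+// make it writable now; the flush itself is deferred to do_clear_cache()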
+static void mark_clear_cache(void *target)
+{
+ uintptr_t offset = (u_char *)target - ndrc->translation_cache;
+ u_int mask = 1u << ((offset >> 12) & 31);
+ if (!(needs_clear_cache[offset >> 17] & mask)) {
+ char *start = (char *)((uintptr_t)target & ~4095l);
+ start_tcache_write(start, start + 4095);
+ needs_clear_cache[offset >> 17] |= mask;
+ }
+}
+
+// Clearing the cache is rather slow on ARM Linux, so mark the areas
+// that need to be cleared, and then only clear these areas once.
+static void do_clear_cache(void)
+{
+ int i, j;
+ for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
+ {
+ u_int bitmap = needs_clear_cache[i];
+ if (!bitmap)
+ continue;
+ for (j = 0; j < 32; j++)
+ {
+ u_char *start, *end;
+ if (!(bitmap & (1<<j)))
+ continue;
+
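+ // page j of 128K chunk i is dirty; merge any contiguous run of
+ // dirty pages that follows into a single flush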
+ start = ndrc->translation_cache + i*131072 + j*4096;
+ end = start + 4095;
+ for (j++; j < 32; j++) {
+ if (!(bitmap & (1<<j)))
+ break;
+ end += 4096;
+ }
+ end_tcache_write(start, end);
+ }
+ needs_clear_cache[i] = 0;
+ }
+}
+
//#define DEBUG_CYCLE_COUNT 1
#define NO_CYCLE_PENALTY_THR 12
{
inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
void *host_addr=find_extjump_insn(head->addr);
- #if defined(__arm__) || defined(__aarch64__)
- mark_clear_cache(host_addr);
- #endif
+ mark_clear_cache(host_addr);
set_jump_target(host_addr, head->addr);
}
head=head->next;
while(head!=NULL) {
inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
void *host_addr=find_extjump_insn(head->addr);
- #if defined(__arm__) || defined(__aarch64__)
- mark_clear_cache(host_addr);
- #endif
+ mark_clear_cache(host_addr);
set_jump_target(host_addr, head->addr);
next=head->next;
free(head);
for(first=page+1;first<last;first++) {
invalidate_page(first);
}
- #if defined(__arm__) || defined(__aarch64__)
- do_clear_cache();
- #endif
+ do_clear_cache();
// Don't trap writes
invalid_code[block]=1;
// This is called when loading a save state.
// Anything could have changed, so invalidate everything.
-void invalidate_all_pages()
+void invalidate_all_pages(void)
{
u_int page;
for(page=0;page<4096;page++)
#ifdef USE_MINI_HT
memset(mini_ht,-1,sizeof(mini_ht));
#endif
+ do_clear_cache();
}
static void do_invstub(int n)
// clear the state completely, instead of just marking
// things invalid like invalidate_all_pages() does
-void new_dynarec_clear_full()
+void new_dynarec_clear_full(void)
{
int n;
out = ndrc->translation_cache;
for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
}
-void new_dynarec_init()
+void new_dynarec_init(void)
{
SysPrintf("Init new dynarec\n");
SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
}
-void new_dynarec_cleanup()
+void new_dynarec_cleanup(void)
{
int n;
#ifdef BASE_ADDR_DYNAMIC
break;
case 3:
// Clear jump_out
- #if defined(__arm__) || defined(__aarch64__)
if((expirep&2047)==0)
do_clear_cache();
- #endif
ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
break;