drc: something works on arm64
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
57871462 1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
20d507ba 3 * Copyright (C) 2009-2011 Ari64 *
57871462 4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21#include <stdlib.h>
22#include <stdint.h> //include for uint64_t
23#include <assert.h>
d848b60a 24#include <errno.h>
4600ba03 25#include <sys/mman.h>
d148d265 26#ifdef __MACH__
27#include <libkern/OSCacheControl.h>
28#endif
1e212a25 29#ifdef _3DS
30#include <3ds_utils.h>
31#endif
32#ifdef VITA
33#include <psp2/kernel/sysmem.h>
34static int sceBlock;
35#endif
57871462 36
d148d265 37#include "new_dynarec_config.h"
3968e69e 38#include "../psxhle.h"
39#include "../psxinterpreter.h"
3d624f89 40#include "emu_if.h" //emulator interface
57871462 41
d1e4ebd9 42#define noinline __attribute__((noinline,noclone))
b14b6a8f 43#ifndef ARRAY_SIZE
44#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
45#endif
46
4600ba03 47//#define DISASM
48//#define assem_debug printf
49//#define inv_debug printf
50#define assem_debug(...)
51#define inv_debug(...)
57871462 52
53#ifdef __i386__
54#include "assem_x86.h"
55#endif
56#ifdef __x86_64__
57#include "assem_x64.h"
58#endif
59#ifdef __arm__
60#include "assem_arm.h"
61#endif
be516ebe 62#ifdef __aarch64__
63#include "assem_arm64.h"
64#endif
57871462 65
66#define MAXBLOCK 4096
67#define MAX_OUTPUT_BLOCK_SIZE 262144
2573466a 68
b14b6a8f 69// stubs
70enum stub_type {
71 CC_STUB = 1,
72 FP_STUB = 2,
73 LOADB_STUB = 3,
74 LOADH_STUB = 4,
75 LOADW_STUB = 5,
76 LOADD_STUB = 6,
77 LOADBU_STUB = 7,
78 LOADHU_STUB = 8,
79 STOREB_STUB = 9,
80 STOREH_STUB = 10,
81 STOREW_STUB = 11,
82 STORED_STUB = 12,
83 STORELR_STUB = 13,
84 INVCODE_STUB = 14,
85};
86
57871462 87struct regstat
88{
89 signed char regmap_entry[HOST_REGS];
90 signed char regmap[HOST_REGS];
57871462 91 uint64_t wasdirty;
92 uint64_t dirty;
93 uint64_t u;
57871462 94 u_int wasconst;
95 u_int isconst;
8575a877 96 u_int loadedconst; // host regs that have constants loaded
97 u_int waswritten; // MIPS regs that were used as store base before
57871462 98};
99
de5a60c3 100// note: asm depends on this layout
57871462 101struct ll_entry
102{
103 u_int vaddr;
de5a60c3 104 u_int reg_sv_flags;
57871462 105 void *addr;
106 struct ll_entry *next;
107};
108
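// Two-entry hash table bucket: vaddr[i] is a block's virtual address and
// tcaddr[i] its translation cache address; slot 0 holds the most recently
// added entry (see hash_table_add).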
df4dc2b1 109struct ht_entry
110{
111 u_int vaddr[2];
112 void *tcaddr[2];
113};
114
b14b6a8f 115struct code_stub
116{
117 enum stub_type type;
118 void *addr;
119 void *retaddr;
120 u_int a;
121 uintptr_t b;
122 uintptr_t c;
123 u_int d;
124 u_int e;
125};
126
643aeae3 127struct link_entry
128{
129 void *addr;
130 u_int target;
131 u_int ext;
132};
133
e2b5e7aa 134 // used by asm:
135 u_char *out;
df4dc2b1 136 struct ht_entry hash_table[65536] __attribute__((aligned(16)));
e2b5e7aa 137 struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
138 struct ll_entry *jump_dirty[4096];
139
140 static struct ll_entry *jump_out[4096];
141 static u_int start;
142 static u_int *source;
143 static char insn[MAXBLOCK][10];
144 static u_char itype[MAXBLOCK];
145 static u_char opcode[MAXBLOCK];
146 static u_char opcode2[MAXBLOCK];
147 static u_char bt[MAXBLOCK];
148 static u_char rs1[MAXBLOCK];
149 static u_char rs2[MAXBLOCK];
150 static u_char rt1[MAXBLOCK];
151 static u_char rt2[MAXBLOCK];
e2b5e7aa 152 static u_char dep1[MAXBLOCK];
153 static u_char dep2[MAXBLOCK];
154 static u_char lt1[MAXBLOCK];
bedfea38 155 static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
156 static uint64_t gte_rt[MAXBLOCK];
157 static uint64_t gte_unneeded[MAXBLOCK];
ffb0b9e0 158 static u_int smrv[32]; // speculated MIPS register values
 159 static u_int smrv_strong; // mask of regs that are likely to have correct values
160 static u_int smrv_weak; // same, but somewhat less likely
161 static u_int smrv_strong_next; // same, but after current insn executes
162 static u_int smrv_weak_next;
e2b5e7aa 163 static int imm[MAXBLOCK];
164 static u_int ba[MAXBLOCK];
165 static char likely[MAXBLOCK];
166 static char is_ds[MAXBLOCK];
167 static char ooo[MAXBLOCK];
168 static uint64_t unneeded_reg[MAXBLOCK];
e2b5e7aa 169 static uint64_t branch_unneeded_reg[MAXBLOCK];
afec9d44 170 static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i?
956f3129 171 static uint64_t current_constmap[HOST_REGS];
172 static uint64_t constmap[MAXBLOCK][HOST_REGS];
173 static struct regstat regs[MAXBLOCK];
174 static struct regstat branch_regs[MAXBLOCK];
e2b5e7aa 175 static signed char minimum_free_regs[MAXBLOCK];
176 static u_int needed_reg[MAXBLOCK];
177 static u_int wont_dirty[MAXBLOCK];
178 static u_int will_dirty[MAXBLOCK];
179 static int ccadj[MAXBLOCK];
180 static int slen;
df4dc2b1 181 static void *instr_addr[MAXBLOCK];
643aeae3 182 static struct link_entry link_addr[MAXBLOCK];
e2b5e7aa 183 static int linkcount;
b14b6a8f 184 static struct code_stub stubs[MAXBLOCK*3];
e2b5e7aa 185 static int stubcount;
186 static u_int literals[1024][2];
187 static int literalcount;
188 static int is_delayslot;
e2b5e7aa 189 static char shadow[1048576] __attribute__((aligned(16)));
190 static void *copy;
191 static int expirep;
192 static u_int stop_after_jal;
a327ad27 193#ifndef RAM_FIXED
01d26796 194 static uintptr_t ram_offset;
a327ad27 195#else
01d26796 196 static const uintptr_t ram_offset=0;
a327ad27 197#endif
e2b5e7aa 198
199 int new_dynarec_hacks;
200 int new_dynarec_did_compile;
687b4580 201
202 extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
203 extern int last_count; // last absolute target, often = next_interupt
204 extern int pcaddr;
205 extern int pending_exception;
206 extern int branch_target;
d1e4ebd9 207 extern uintptr_t mini_ht[32][2];
57871462 208 extern u_char restore_candidate[512];
57871462 209
210 /* registers that may be allocated */
211 /* 1-31 gpr */
7c3a5182 212#define LOREG 32 // lo
213#define HIREG 33 // hi
00fa9369 214//#define FSREG 34 // FPU status (FCSR)
57871462 215#define CSREG 35 // Coprocessor status
216#define CCREG 36 // Cycle count
217#define INVCP 37 // Pointer to invalid_code
1edfcc68 218//#define MMREG 38 // Pointer to memory_map
9c45ca93 219//#define ROREG 39 // ram offset (if rdram!=0x80000000)
619e5ded 220#define TEMPREG 40
221#define FTEMP 40 // FPU temporary register
222#define PTEMP 41 // Prefetch temporary register
1edfcc68 223//#define TLREG 42 // TLB mapping offset
619e5ded 224#define RHASH 43 // Return address hash
225#define RHTBL 44 // Return address hash table address
226#define RTEMP 45 // JR/JALR address register
227#define MAXREG 45
228#define AGEN1 46 // Address generation temporary register
1edfcc68 229//#define AGEN2 47 // Address generation temporary register
230//#define MGEN1 48 // Maptable address generation temporary register
231//#define MGEN2 49 // Maptable address generation temporary register
619e5ded 232#define BTREG 50 // Branch target temporary register
57871462 233
234 /* instruction types */
235#define NOP 0 // No operation
236#define LOAD 1 // Load
237#define STORE 2 // Store
238#define LOADLR 3 // Unaligned load
239#define STORELR 4 // Unaligned store
9f51b4b9 240#define MOV 5 // Move
57871462 241#define ALU 6 // Arithmetic/logic
242#define MULTDIV 7 // Multiply/divide
243#define SHIFT 8 // Shift by register
244#define SHIFTIMM 9// Shift by immediate
245#define IMM16 10 // 16-bit immediate
246#define RJUMP 11 // Unconditional jump to register
247#define UJUMP 12 // Unconditional jump
248#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
249#define SJUMP 14 // Conditional branch (regimm format)
250#define COP0 15 // Coprocessor 0
251#define COP1 16 // Coprocessor 1
252#define C1LS 17 // Coprocessor 1 load/store
ad49de89 253//#define FJUMP 18 // Conditional branch (floating point)
00fa9369 254//#define FLOAT 19 // Floating point unit
255//#define FCONV 20 // Convert integer to float
256//#define FCOMP 21 // Floating point compare (sets FSREG)
57871462 257#define SYSCALL 22// SYSCALL
258#define OTHER 23 // Other
259#define SPAN 24 // Branch/delay slot spans 2 pages
260#define NI 25 // Not implemented
7139f3c8 261#define HLECALL 26// PCSX fake opcodes for HLE
b9b61529 262#define COP2 27 // Coprocessor 2 move
263#define C2LS 28 // Coprocessor 2 load/store
264#define C2OP 29 // Coprocessor 2 operation
1e973cb0 265#define INTCALL 30// Call interpreter to handle rare corner cases
57871462 266
57871462 267 /* branch codes */
268#define TAKEN 1
269#define NOTTAKEN 2
270#define NULLDS 3
271
7c3a5182 272#define DJT_1 (void *)1l // no function, just a label in assem_debug log
273#define DJT_2 (void *)2l
274
57871462 275// asm linkage
3968e69e 276int new_recompile_block(u_int addr);
57871462 277void *get_addr_ht(u_int vaddr);
278void invalidate_block(u_int block);
279void invalidate_addr(u_int addr);
280void remove_hash(int vaddr);
57871462 281void dyna_linker();
282void dyna_linker_ds();
283void verify_code();
57871462 284void verify_code_ds();
285void cc_interrupt();
286void fp_exception();
287void fp_exception_ds();
3968e69e 288void jump_to_new_pc();
7139f3c8 289void new_dyna_leave();
57871462 290
57871462 291// Needed by assembler
ad49de89 292static void wb_register(signed char r,signed char regmap[],uint64_t dirty);
293static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty);
294static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr);
e2b5e7aa 295static void load_all_regs(signed char i_regmap[]);
296static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
297static void load_regs_entry(int t);
ad49de89 298static void load_all_consts(signed char regmap[],u_int dirty,int i);
e2b5e7aa 299
3968e69e 300static int verify_dirty(const u_int *ptr);
e2b5e7aa 301static int get_final_value(int hr, int i, int *value);
b14b6a8f 302static void add_stub(enum stub_type type, void *addr, void *retaddr,
303 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
304static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
305 int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist);
643aeae3 306static void add_to_linker(void *addr, u_int target, int ext);
8062d65a 307static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
687b4580 308static void *get_direct_memhandler(void *table, u_int addr,
309 enum stub_type type, uintptr_t *addr_host);
310static void pass_args(int a0, int a1);
57871462 311
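// Toggle the translation cache between writable and executable on platforms
// that enforce W^X (NO_WRITE_EXEC builds); a no-op elsewhere.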
d148d265 312static void mprotect_w_x(void *start, void *end, int is_x)
313{
314#ifdef NO_WRITE_EXEC
1e212a25 315 #if defined(VITA)
316 // *Open* enables write on all memory that was
317 // allocated by sceKernelAllocMemBlockForVM()?
318 if (is_x)
319 sceKernelCloseVMDomain();
320 else
321 sceKernelOpenVMDomain();
322 #else
d148d265 323 u_long mstart = (u_long)start & ~4095ul;
324 u_long mend = (u_long)end;
325 if (mprotect((void *)mstart, mend - mstart,
326 PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
327 SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
1e212a25 328 #endif
d148d265 329#endif
330}
331
332static void start_tcache_write(void *start, void *end)
333{
334 mprotect_w_x(start, end, 0);
335}
336
337static void end_tcache_write(void *start, void *end)
338{
339#ifdef __arm__
340 size_t len = (char *)end - (char *)start;
341 #if defined(__BLACKBERRY_QNX__)
342 msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
343 #elif defined(__MACH__)
344 sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
345 #elif defined(VITA)
1e212a25 346 sceKernelSyncVMDomain(sceBlock, start, len);
347 #elif defined(_3DS)
348 ctr_flush_invalidate_cache();
d148d265 349 #else
350 __clear_cache(start, end);
351 #endif
352 (void)len;
be516ebe 353#else
354 __clear_cache(start, end);
d148d265 355#endif
356
357 mprotect_w_x(start, end, 1);
358}
359
360static void *start_block(void)
361{
362 u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
643aeae3 363 if (end > translation_cache + (1<<TARGET_SIZE_2))
364 end = translation_cache + (1<<TARGET_SIZE_2);
d148d265 365 start_tcache_write(out, end);
366 return out;
367}
368
369static void end_block(void *start)
370{
371 end_tcache_write(start, out);
372}
373
57871462 374//#define DEBUG_CYCLE_COUNT 1
375
b6e87b2b 376#define NO_CYCLE_PENALTY_THR 12
377
4e9dcd7f 378int cycle_multiplier; // 100 for 1.0
379
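// Scale a cycle count by cycle_multiplier/100, rounding half away from zero
// (s is the sign of x, +1 or -1).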
380static int CLOCK_ADJUST(int x)
381{
382 int s=(x>>31)|1;
383 return (x * cycle_multiplier + s * 50) / 100;
384}
385
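// Map a PSX virtual address to a page index for the jump_in/jump_out/jump_dirty
// tables: strip the segment bits, fold the RAM mirrors, then split into 4K pages;
// anything above the first 2048 pages aliases into the 2048..4095 range.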
94d23bb9 386static u_int get_page(u_int vaddr)
57871462 387{
0ce47d46 388 u_int page=vaddr&~0xe0000000;
389 if (page < 0x1000000)
390 page &= ~0x0e00000; // RAM mirrors
391 page>>=12;
57871462 392 if(page>2048) page=2048+(page&2047);
94d23bb9 393 return page;
394}
395
d25604ca 396// no virtual mem in PCSX
397static u_int get_vpage(u_int vaddr)
398{
399 return get_page(vaddr);
400}
94d23bb9 401
df4dc2b1 402static struct ht_entry *hash_table_get(u_int vaddr)
403{
404 return &hash_table[((vaddr>>16)^vaddr)&0xFFFF];
405}
406
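// Insert into slot 0 of the bucket, demoting the previous slot-0 entry to
// slot 1 (whatever was in slot 1 is evicted).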
407static void hash_table_add(struct ht_entry *ht_bin, u_int vaddr, void *tcaddr)
408{
409 ht_bin->vaddr[1] = ht_bin->vaddr[0];
410 ht_bin->tcaddr[1] = ht_bin->tcaddr[0];
411 ht_bin->vaddr[0] = vaddr;
412 ht_bin->tcaddr[0] = tcaddr;
413}
414
 415// some of Ari64's messy code; seems to rely on unsigned 32-bit overflow
416static int doesnt_expire_soon(void *tcaddr)
417{
418 u_int diff = (u_int)((u_char *)tcaddr - out) << (32-TARGET_SIZE_2);
419 return diff > (u_int)(0x60000000 + (MAX_OUTPUT_BLOCK_SIZE << (32-TARGET_SIZE_2)));
420}
421
94d23bb9 422// Get address from virtual address
423// This is called from the recompiled JR/JALR instructions
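// Lookup order: jump_in (clean, ready-to-run blocks) first, then jump_dirty
// (blocks whose source may have been written; restored only if verify_dirty()
// passes), and finally recompile if nothing matches.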
d1e4ebd9 424void noinline *get_addr(u_int vaddr)
94d23bb9 425{
426 u_int page=get_page(vaddr);
427 u_int vpage=get_vpage(vaddr);
57871462 428 struct ll_entry *head;
429 //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
430 head=jump_in[page];
431 while(head!=NULL) {
de5a60c3 432 if(head->vaddr==vaddr) {
643aeae3 433 //printf("TRACE: count=%d next=%d (get_addr match %x: %p)\n",Count,next_interupt,vaddr,head->addr);
df4dc2b1 434 hash_table_add(hash_table_get(vaddr), vaddr, head->addr);
57871462 435 return head->addr;
436 }
437 head=head->next;
438 }
439 head=jump_dirty[vpage];
440 while(head!=NULL) {
de5a60c3 441 if(head->vaddr==vaddr) {
643aeae3 442 //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %p)\n",Count,next_interupt,vaddr,head->addr);
57871462 443 // Don't restore blocks which are about to expire from the cache
df4dc2b1 444 if (doesnt_expire_soon(head->addr))
445 if (verify_dirty(head->addr)) {
57871462 446 //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
447 invalid_code[vaddr>>12]=0;
9be4ba64 448 inv_code_start=inv_code_end=~0;
57871462 449 if(vpage<2048) {
57871462 450 restore_candidate[vpage>>3]|=1<<(vpage&7);
451 }
452 else restore_candidate[page>>3]|=1<<(page&7);
df4dc2b1 453 struct ht_entry *ht_bin = hash_table_get(vaddr);
454 if (ht_bin->vaddr[0] == vaddr)
455 ht_bin->tcaddr[0] = head->addr; // Replace existing entry
57871462 456 else
df4dc2b1 457 hash_table_add(ht_bin, vaddr, head->addr);
458
57871462 459 return head->addr;
460 }
461 }
462 head=head->next;
463 }
464 //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
465 int r=new_recompile_block(vaddr);
466 if(r==0) return get_addr(vaddr);
 467 // Execute in unmapped page, generate pagefault exception
468 Status|=2;
469 Cause=(vaddr<<31)|0x8;
470 EPC=(vaddr&1)?vaddr-5:vaddr;
471 BadVAddr=(vaddr&~1);
472 Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
473 EntryHi=BadVAddr&0xFFFFE000;
474 return get_addr_ht(0x80000000);
475}
476// Look up address in hash table first
477void *get_addr_ht(u_int vaddr)
478{
479 //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
df4dc2b1 480 const struct ht_entry *ht_bin = hash_table_get(vaddr);
481 if (ht_bin->vaddr[0] == vaddr) return ht_bin->tcaddr[0];
482 if (ht_bin->vaddr[1] == vaddr) return ht_bin->tcaddr[1];
57871462 483 return get_addr(vaddr);
484}
485
57871462 486void clear_all_regs(signed char regmap[])
487{
488 int hr;
489 for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
490}
491
d1e4ebd9 492static signed char get_reg(const signed char regmap[],int r)
57871462 493{
494 int hr;
495 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
496 return -1;
497}
498
499// Find a register that is available for two consecutive cycles
d1e4ebd9 500static signed char get_reg2(signed char regmap1[], const signed char regmap2[], int r)
57871462 501{
502 int hr;
503 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
504 return -1;
505}
506
507int count_free_regs(signed char regmap[])
508{
509 int count=0;
510 int hr;
511 for(hr=0;hr<HOST_REGS;hr++)
512 {
513 if(hr!=EXCLUDE_REG) {
514 if(regmap[hr]<0) count++;
515 }
516 }
517 return count;
518}
519
520void dirty_reg(struct regstat *cur,signed char reg)
521{
522 int hr;
523 if(!reg) return;
524 for (hr=0;hr<HOST_REGS;hr++) {
525 if((cur->regmap[hr]&63)==reg) {
526 cur->dirty|=1<<hr;
527 }
528 }
529}
530
57871462 531void set_const(struct regstat *cur,signed char reg,uint64_t value)
532{
533 int hr;
534 if(!reg) return;
535 for (hr=0;hr<HOST_REGS;hr++) {
536 if(cur->regmap[hr]==reg) {
537 cur->isconst|=1<<hr;
956f3129 538 current_constmap[hr]=value;
57871462 539 }
57871462 540 }
541}
542
543void clear_const(struct regstat *cur,signed char reg)
544{
545 int hr;
546 if(!reg) return;
547 for (hr=0;hr<HOST_REGS;hr++) {
548 if((cur->regmap[hr]&63)==reg) {
549 cur->isconst&=~(1<<hr);
550 }
551 }
552}
553
554int is_const(struct regstat *cur,signed char reg)
555{
556 int hr;
79c75f1b 557 if(reg<0) return 0;
57871462 558 if(!reg) return 1;
559 for (hr=0;hr<HOST_REGS;hr++) {
560 if((cur->regmap[hr]&63)==reg) {
561 return (cur->isconst>>hr)&1;
562 }
563 }
564 return 0;
565}
566uint64_t get_const(struct regstat *cur,signed char reg)
567{
568 int hr;
569 if(!reg) return 0;
570 for (hr=0;hr<HOST_REGS;hr++) {
571 if(cur->regmap[hr]==reg) {
956f3129 572 return current_constmap[hr];
57871462 573 }
574 }
c43b5311 575 SysPrintf("Unknown constant in r%d\n",reg);
7c3a5182 576 abort();
57871462 577}
578
579// Least soon needed registers
580// Look at the next ten instructions and see which registers
581// will be used. Try not to reallocate these.
582void lsn(u_char hsn[], int i, int *preferred_reg)
583{
584 int j;
585 int b=-1;
586 for(j=0;j<9;j++)
587 {
588 if(i+j>=slen) {
589 j=slen-i-1;
590 break;
591 }
592 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
593 {
 594 // Don't go past an unconditional jump
595 j++;
596 break;
597 }
598 }
599 for(;j>=0;j--)
600 {
601 if(rs1[i+j]) hsn[rs1[i+j]]=j;
602 if(rs2[i+j]) hsn[rs2[i+j]]=j;
603 if(rt1[i+j]) hsn[rt1[i+j]]=j;
604 if(rt2[i+j]) hsn[rt2[i+j]]=j;
605 if(itype[i+j]==STORE || itype[i+j]==STORELR) {
606 // Stores can allocate zero
607 hsn[rs1[i+j]]=j;
608 hsn[rs2[i+j]]=j;
609 }
610 // On some architectures stores need invc_ptr
611 #if defined(HOST_IMM8)
b9b61529 612 if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
57871462 613 hsn[INVCP]=j;
614 }
615 #endif
ad49de89 616 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
57871462 617 {
618 hsn[CCREG]=j;
619 b=j;
620 }
621 }
622 if(b>=0)
623 {
624 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
625 {
626 // Follow first branch
627 int t=(ba[i+b]-start)>>2;
628 j=7-b;if(t+j>=slen) j=slen-t-1;
629 for(;j>=0;j--)
630 {
631 if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
632 if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
633 //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
634 //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
635 }
636 }
637 // TODO: preferred register based on backward branch
638 }
639 // Delay slot should preferably not overwrite branch conditions or cycle count
ad49de89 640 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) {
57871462 641 if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
642 if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
643 hsn[CCREG]=1;
644 // ...or hash tables
645 hsn[RHASH]=1;
646 hsn[RHTBL]=1;
647 }
648 // Coprocessor load/store needs FTEMP, even if not declared
b9b61529 649 if(itype[i]==C1LS||itype[i]==C2LS) {
57871462 650 hsn[FTEMP]=0;
651 }
652 // Load L/R also uses FTEMP as a temporary register
653 if(itype[i]==LOADLR) {
654 hsn[FTEMP]=0;
655 }
b7918751 656 // Also SWL/SWR/SDL/SDR
657 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
57871462 658 hsn[FTEMP]=0;
659 }
57871462 660 // Don't remove the miniht registers
661 if(itype[i]==UJUMP||itype[i]==RJUMP)
662 {
663 hsn[RHASH]=0;
664 hsn[RHTBL]=0;
665 }
666}
667
668// We only want to allocate registers if we're going to use them again soon
669int needed_again(int r, int i)
670{
671 int j;
672 int b=-1;
673 int rn=10;
9f51b4b9 674
57871462 675 if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
676 {
677 if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
678 return 0; // Don't need any registers if exiting the block
679 }
680 for(j=0;j<9;j++)
681 {
682 if(i+j>=slen) {
683 j=slen-i-1;
684 break;
685 }
686 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
687 {
 688 // Don't go past an unconditional jump
689 j++;
690 break;
691 }
1e973cb0 692 if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
57871462 693 {
694 break;
695 }
696 }
697 for(;j>=1;j--)
698 {
699 if(rs1[i+j]==r) rn=j;
700 if(rs2[i+j]==r) rn=j;
701 if((unneeded_reg[i+j]>>r)&1) rn=10;
ad49de89 702 if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
57871462 703 {
704 b=j;
705 }
706 }
707 /*
708 if(b>=0)
709 {
710 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
711 {
712 // Follow first branch
713 int o=rn;
714 int t=(ba[i+b]-start)>>2;
715 j=7-b;if(t+j>=slen) j=slen-t-1;
716 for(;j>=0;j--)
717 {
718 if(!((unneeded_reg[t+j]>>r)&1)) {
719 if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
720 if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
721 }
722 else rn=o;
723 }
724 }
725 }*/
b7217e13 726 if(rn<10) return 1;
581335b0 727 (void)b;
57871462 728 return 0;
729}
730
731// Try to match register allocations at the end of a loop with those
732// at the beginning
733int loop_reg(int i, int r, int hr)
734{
735 int j,k;
736 for(j=0;j<9;j++)
737 {
738 if(i+j>=slen) {
739 j=slen-i-1;
740 break;
741 }
742 if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
743 {
 744 // Don't go past an unconditional jump
745 j++;
746 break;
747 }
748 }
749 k=0;
750 if(i>0){
ad49de89 751 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)
57871462 752 k--;
753 }
754 for(;k<j;k++)
755 {
00fa9369 756 assert(r < 64);
757 if((unneeded_reg[i+k]>>r)&1) return hr;
ad49de89 758 if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP))
57871462 759 {
760 if(ba[i+k]>=start && ba[i+k]<(start+i*4))
761 {
762 int t=(ba[i+k]-start)>>2;
763 int reg=get_reg(regs[t].regmap_entry,r);
764 if(reg>=0) return reg;
765 //reg=get_reg(regs[t+1].regmap_entry,r);
766 //if(reg>=0) return reg;
767 }
768 }
769 }
770 return hr;
771}
772
773
774// Allocate every register, preserving source/target regs
775void alloc_all(struct regstat *cur,int i)
776{
777 int hr;
9f51b4b9 778
57871462 779 for(hr=0;hr<HOST_REGS;hr++) {
780 if(hr!=EXCLUDE_REG) {
781 if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
782 ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
783 {
784 cur->regmap[hr]=-1;
785 cur->dirty&=~(1<<hr);
786 }
787 // Don't need zeros
788 if((cur->regmap[hr]&63)==0)
789 {
790 cur->regmap[hr]=-1;
791 cur->dirty&=~(1<<hr);
792 }
793 }
794 }
795}
796
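// Debug-build guard for the single host temporary register: acquire/release
// assert it is not used re-entrantly; NDEBUG builds turn both into no-ops.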
d1e4ebd9 797#ifndef NDEBUG
798static int host_tempreg_in_use;
799
800static void host_tempreg_acquire(void)
801{
802 assert(!host_tempreg_in_use);
803 host_tempreg_in_use = 1;
804}
805
806static void host_tempreg_release(void)
807{
808 host_tempreg_in_use = 0;
809}
810#else
811static void host_tempreg_acquire(void) {}
812static void host_tempreg_release(void) {}
813#endif
814
8062d65a 815#ifdef DRC_DBG
816extern void gen_interupt();
817extern void do_insn_cmp();
d1e4ebd9 818#define FUNCNAME(f) { f, " " #f }
8062d65a 819static const struct {
d1e4ebd9 820 void *addr;
8062d65a 821 const char *name;
822} function_names[] = {
823 FUNCNAME(cc_interrupt),
824 FUNCNAME(gen_interupt),
825 FUNCNAME(get_addr_ht),
826 FUNCNAME(get_addr),
827 FUNCNAME(jump_handler_read8),
828 FUNCNAME(jump_handler_read16),
829 FUNCNAME(jump_handler_read32),
830 FUNCNAME(jump_handler_write8),
831 FUNCNAME(jump_handler_write16),
832 FUNCNAME(jump_handler_write32),
833 FUNCNAME(invalidate_addr),
3968e69e 834 FUNCNAME(jump_to_new_pc),
8062d65a 835 FUNCNAME(new_dyna_leave),
836 FUNCNAME(pcsx_mtc0),
837 FUNCNAME(pcsx_mtc0_ds),
838 FUNCNAME(do_insn_cmp),
3968e69e 839#ifdef __arm__
840 FUNCNAME(verify_code),
841#endif
8062d65a 842};
843
d1e4ebd9 844static const char *func_name(const void *a)
8062d65a 845{
846 int i;
847 for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++)
848 if (function_names[i].addr == a)
849 return function_names[i].name;
850 return "";
851}
852#else
853#define func_name(x) ""
854#endif
855
57871462 856#ifdef __i386__
857#include "assem_x86.c"
858#endif
859#ifdef __x86_64__
860#include "assem_x64.c"
861#endif
862#ifdef __arm__
863#include "assem_arm.c"
864#endif
be516ebe 865#ifdef __aarch64__
866#include "assem_arm64.c"
867#endif
57871462 868
869// Add virtual address mapping to linked list
870void ll_add(struct ll_entry **head,int vaddr,void *addr)
871{
872 struct ll_entry *new_entry;
873 new_entry=malloc(sizeof(struct ll_entry));
874 assert(new_entry!=NULL);
875 new_entry->vaddr=vaddr;
de5a60c3 876 new_entry->reg_sv_flags=0;
57871462 877 new_entry->addr=addr;
878 new_entry->next=*head;
879 *head=new_entry;
880}
881
de5a60c3 882void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
57871462 883{
7139f3c8 884 ll_add(head,vaddr,addr);
de5a60c3 885 (*head)->reg_sv_flags=reg_sv_flags;
57871462 886}
887
888// Check if an address is already compiled
889// but don't return addresses which are about to expire from the cache
890void *check_addr(u_int vaddr)
891{
df4dc2b1 892 struct ht_entry *ht_bin = hash_table_get(vaddr);
893 size_t i;
b14b6a8f 894 for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
df4dc2b1 895 if (ht_bin->vaddr[i] == vaddr)
896 if (doesnt_expire_soon((u_char *)ht_bin->tcaddr[i] - MAX_OUTPUT_BLOCK_SIZE))
897 if (isclean(ht_bin->tcaddr[i]))
898 return ht_bin->tcaddr[i];
57871462 899 }
94d23bb9 900 u_int page=get_page(vaddr);
57871462 901 struct ll_entry *head;
902 head=jump_in[page];
df4dc2b1 903 while (head != NULL) {
904 if (head->vaddr == vaddr) {
905 if (doesnt_expire_soon(head->addr)) {
57871462 906 // Update existing entry with current address
df4dc2b1 907 if (ht_bin->vaddr[0] == vaddr) {
908 ht_bin->tcaddr[0] = head->addr;
57871462 909 return head->addr;
910 }
df4dc2b1 911 if (ht_bin->vaddr[1] == vaddr) {
912 ht_bin->tcaddr[1] = head->addr;
57871462 913 return head->addr;
914 }
915 // Insert into hash table with low priority.
916 // Don't evict existing entries, as they are probably
917 // addresses that are being accessed frequently.
df4dc2b1 918 if (ht_bin->vaddr[0] == -1) {
919 ht_bin->vaddr[0] = vaddr;
920 ht_bin->tcaddr[0] = head->addr;
921 }
922 else if (ht_bin->vaddr[1] == -1) {
923 ht_bin->vaddr[1] = vaddr;
924 ht_bin->tcaddr[1] = head->addr;
57871462 925 }
926 return head->addr;
927 }
928 }
929 head=head->next;
930 }
931 return 0;
932}
933
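// Drop any hash table entries for vaddr, compacting the two-entry bucket so
// that slot 0 stays filled first.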
934void remove_hash(int vaddr)
935{
936 //printf("remove hash: %x\n",vaddr);
df4dc2b1 937 struct ht_entry *ht_bin = hash_table_get(vaddr);
938 if (ht_bin->vaddr[1] == vaddr) {
939 ht_bin->vaddr[1] = -1;
940 ht_bin->tcaddr[1] = NULL;
57871462 941 }
df4dc2b1 942 if (ht_bin->vaddr[0] == vaddr) {
943 ht_bin->vaddr[0] = ht_bin->vaddr[1];
944 ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
945 ht_bin->vaddr[1] = -1;
946 ht_bin->tcaddr[1] = NULL;
57871462 947 }
948}
949
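// Unlink and free list entries whose generated code falls within the address
// range selected by addr/shift (used when expiring old translation cache blocks).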
643aeae3 950void ll_remove_matching_addrs(struct ll_entry **head,uintptr_t addr,int shift)
57871462 951{
952 struct ll_entry *next;
953 while(*head) {
643aeae3 954 if(((uintptr_t)((*head)->addr)>>shift)==(addr>>shift) ||
955 ((uintptr_t)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
57871462 956 {
643aeae3 957 inv_debug("EXP: Remove pointer to %p (%x)\n",(*head)->addr,(*head)->vaddr);
57871462 958 remove_hash((*head)->vaddr);
959 next=(*head)->next;
960 free(*head);
961 *head=next;
962 }
963 else
964 {
965 head=&((*head)->next);
966 }
967 }
968}
969
970// Remove all entries from linked list
971void ll_clear(struct ll_entry **head)
972{
973 struct ll_entry *cur;
974 struct ll_entry *next;
581335b0 975 if((cur=*head)) {
57871462 976 *head=0;
977 while(cur) {
978 next=cur->next;
979 free(cur);
980 cur=next;
981 }
982 }
983}
984
 985// Dereference the stored jump pointers; if one targets the given address
 986// range, redirect it back to its stub (i.e. unlink the direct jump)
643aeae3 986static void ll_kill_pointers(struct ll_entry *head,uintptr_t addr,int shift)
57871462 987{
988 while(head) {
643aeae3 989 uintptr_t ptr = (uintptr_t)get_pointer(head->addr);
990 inv_debug("EXP: Lookup pointer to %lx at %p (%x)\n",(long)ptr,head->addr,head->vaddr);
57871462 991 if(((ptr>>shift)==(addr>>shift)) ||
992 (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
993 {
643aeae3 994 inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
d148d265 995 void *host_addr=find_extjump_insn(head->addr);
687b4580 996 #if defined(__arm__) || defined(__aarch64__)
d148d265 997 mark_clear_cache(host_addr);
dd3a91a1 998 #endif
df4dc2b1 999 set_jump_target(host_addr, head->addr);
57871462 1000 }
1001 head=head->next;
1002 }
1003}
1004
1005// This is called when we write to a compiled block (see do_invstub)
d1e4ebd9 1006static void invalidate_page(u_int page)
57871462 1007{
57871462 1008 struct ll_entry *head;
1009 struct ll_entry *next;
1010 head=jump_in[page];
1011 jump_in[page]=0;
1012 while(head!=NULL) {
1013 inv_debug("INVALIDATE: %x\n",head->vaddr);
1014 remove_hash(head->vaddr);
1015 next=head->next;
1016 free(head);
1017 head=next;
1018 }
1019 head=jump_out[page];
1020 jump_out[page]=0;
1021 while(head!=NULL) {
643aeae3 1022 inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
d148d265 1023 void *host_addr=find_extjump_insn(head->addr);
687b4580 1024 #if defined(__arm__) || defined(__aarch64__)
d148d265 1025 mark_clear_cache(host_addr);
dd3a91a1 1026 #endif
df4dc2b1 1027 set_jump_target(host_addr, head->addr);
57871462 1028 next=head->next;
1029 free(head);
1030 head=next;
1031 }
57871462 1032}
9be4ba64 1033
1034static void invalidate_block_range(u_int block, u_int first, u_int last)
57871462 1035{
94d23bb9 1036 u_int page=get_page(block<<12);
57871462 1037 //printf("first=%d last=%d\n",first,last);
f76eeef9 1038 invalidate_page(page);
57871462 1039 assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1040 assert(last<page+5);
1041 // Invalidate the adjacent pages if a block crosses a 4K boundary
1042 while(first<page) {
1043 invalidate_page(first);
1044 first++;
1045 }
1046 for(first=page+1;first<last;first++) {
1047 invalidate_page(first);
1048 }
be516ebe 1049 #if defined(__arm__) || defined(__aarch64__)
dd3a91a1 1050 do_clear_cache();
1051 #endif
9f51b4b9 1052
57871462 1053 // Don't trap writes
1054 invalid_code[block]=1;
f76eeef9 1055
57871462 1056 #ifdef USE_MINI_HT
1057 memset(mini_ht,-1,sizeof(mini_ht));
1058 #endif
1059}
9be4ba64 1060
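// Invalidate compiled code overlapping the given 4K RAM block; the bounds of
// dirty blocks hashed to the same page are used to widen the page range passed
// to invalidate_block_range().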
1061void invalidate_block(u_int block)
1062{
1063 u_int page=get_page(block<<12);
1064 u_int vpage=get_vpage(block<<12);
1065 inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1066 //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1067 u_int first,last;
1068 first=last=page;
1069 struct ll_entry *head;
1070 head=jump_dirty[vpage];
1071 //printf("page=%d vpage=%d\n",page,vpage);
1072 while(head!=NULL) {
9be4ba64 1073 if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
01d26796 1074 u_char *start, *end;
1075 get_bounds(head->addr, &start, &end);
1076 //printf("start: %p end: %p\n", start, end);
1077 if (page < 2048 && start >= rdram && end < rdram+RAM_SIZE) {
1078 if (((start-rdram)>>12) <= page && ((end-1-rdram)>>12) >= page) {
1079 if ((((start-rdram)>>12)&2047) < first) first = ((start-rdram)>>12)&2047;
1080 if ((((end-1-rdram)>>12)&2047) > last) last = ((end-1-rdram)>>12)&2047;
9be4ba64 1081 }
1082 }
9be4ba64 1083 }
1084 head=head->next;
1085 }
1086 invalidate_block_range(block,first,last);
1087}
1088
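// Called when a write may hit compiled code. For RAM, scan the dirty blocks on
// the affected page(s): if one covers the written address, invalidate that
// range; otherwise widen the inv_code_start..inv_code_end "no code here" window
// so later writes nearby can be skipped without rescanning.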
57871462 1089void invalidate_addr(u_int addr)
1090{
9be4ba64 1091 //static int rhits;
1092 // this check is done by the caller
1093 //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
d25604ca 1094 u_int page=get_vpage(addr);
9be4ba64 1095 if(page<2048) { // RAM
1096 struct ll_entry *head;
1097 u_int addr_min=~0, addr_max=0;
4a35de07 1098 u_int mask=RAM_SIZE-1;
1099 u_int addr_main=0x80000000|(addr&mask);
9be4ba64 1100 int pg1;
4a35de07 1101 inv_code_start=addr_main&~0xfff;
1102 inv_code_end=addr_main|0xfff;
9be4ba64 1103 pg1=page;
1104 if (pg1>0) {
1105 // must check previous page too because of spans..
1106 pg1--;
1107 inv_code_start-=0x1000;
1108 }
1109 for(;pg1<=page;pg1++) {
1110 for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
01d26796 1111 u_char *start_h, *end_h;
1112 u_int start, end;
1113 get_bounds(head->addr, &start_h, &end_h);
1114 start = (uintptr_t)start_h - ram_offset;
1115 end = (uintptr_t)end_h - ram_offset;
4a35de07 1116 if(start<=addr_main&&addr_main<end) {
9be4ba64 1117 if(start<addr_min) addr_min=start;
1118 if(end>addr_max) addr_max=end;
1119 }
4a35de07 1120 else if(addr_main<start) {
9be4ba64 1121 if(start<inv_code_end)
1122 inv_code_end=start-1;
1123 }
1124 else {
1125 if(end>inv_code_start)
1126 inv_code_start=end;
1127 }
1128 }
1129 }
1130 if (addr_min!=~0) {
1131 inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1132 inv_code_start=inv_code_end=~0;
1133 invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1134 return;
1135 }
1136 else {
4a35de07 1137 inv_code_start=(addr&~mask)|(inv_code_start&mask);
1138 inv_code_end=(addr&~mask)|(inv_code_end&mask);
d25604ca 1139 inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
9be4ba64 1140 return;
d25604ca 1141 }
9be4ba64 1142 }
57871462 1143 invalidate_block(addr>>12);
1144}
9be4ba64 1145
dd3a91a1 1146// This is called when loading a save state.
1147// Anything could have changed, so invalidate everything.
57871462 1148void invalidate_all_pages()
1149{
581335b0 1150 u_int page;
57871462 1151 for(page=0;page<4096;page++)
1152 invalidate_page(page);
1153 for(page=0;page<1048576;page++)
1154 if(!invalid_code[page]) {
1155 restore_candidate[(page&2047)>>3]|=1<<(page&7);
1156 restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1157 }
57871462 1158 #ifdef USE_MINI_HT
1159 memset(mini_ht,-1,sizeof(mini_ht));
1160 #endif
57871462 1161}
1162
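// Emit the out-of-line stub used by the self-modifying-code check on stores:
// save the registers in the stub's reglist, pass the written address (held in
// the register recorded in stubs[n].b) to invalidate_addr(), restore, and jump
// back to the store's fast path.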
d1e4ebd9 1163static void do_invstub(int n)
1164{
1165 literal_pool(20);
1166 u_int reglist=stubs[n].a;
1167 set_jump_target(stubs[n].addr, out);
1168 save_regs(reglist);
1169 if(stubs[n].b!=0) emit_mov(stubs[n].b,0);
1170 emit_call(invalidate_addr);
1171 restore_regs(reglist);
1172 emit_jmp(stubs[n].retaddr); // return address
1173}
1174
57871462 1175// Add an entry to jump_out after making a link
d1e4ebd9 1176// src should point to code emitted by emit_extjump2()
57871462 1177void add_link(u_int vaddr,void *src)
1178{
94d23bb9 1179 u_int page=get_page(vaddr);
643aeae3 1180 inv_debug("add_link: %p -> %x (%d)\n",src,vaddr,page);
d1e4ebd9 1181 check_extjump2(src);
57871462 1182 ll_add(jump_out+page,vaddr,src);
643aeae3 1183 //void *ptr=get_pointer(src);
1184 //inv_debug("add_link: Pointer is to %p\n",ptr);
57871462 1185}
1186
1187// If a code block was found to be unmodified (bit was set in
1188// restore_candidate) and it remains unmodified (bit is clear
1189// in invalid_code) then move the entries for that 4K page from
1190// the dirty list to the clean list.
1191void clean_blocks(u_int page)
1192{
1193 struct ll_entry *head;
1194 inv_debug("INV: clean_blocks page=%d\n",page);
1195 head=jump_dirty[page];
1196 while(head!=NULL) {
1197 if(!invalid_code[head->vaddr>>12]) {
1198 // Don't restore blocks which are about to expire from the cache
df4dc2b1 1199 if (doesnt_expire_soon(head->addr)) {
581335b0 1200 if(verify_dirty(head->addr)) {
01d26796 1201 u_char *start, *end;
643aeae3 1202 //printf("Possibly Restore %x (%p)\n",head->vaddr, head->addr);
57871462 1203 u_int i;
1204 u_int inv=0;
01d26796 1205 get_bounds(head->addr, &start, &end);
1206 if (start - rdram < RAM_SIZE) {
1207 for (i = (start-rdram+0x80000000)>>12; i <= (end-1-rdram+0x80000000)>>12; i++) {
57871462 1208 inv|=invalid_code[i];
1209 }
1210 }
4cb76aa4 1211 else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
57871462 1212 inv=1;
1213 }
1214 if(!inv) {
df4dc2b1 1215 void *clean_addr = get_clean_addr(head->addr);
1216 if (doesnt_expire_soon(clean_addr)) {
57871462 1217 u_int ppage=page;
643aeae3 1218 inv_debug("INV: Restored %x (%p/%p)\n",head->vaddr, head->addr, clean_addr);
57871462 1219 //printf("page=%x, addr=%x\n",page,head->vaddr);
1220 //assert(head->vaddr>>12==(page|0x80000));
de5a60c3 1221 ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
df4dc2b1 1222 struct ht_entry *ht_bin = hash_table_get(head->vaddr);
1223 if (ht_bin->vaddr[0] == head->vaddr)
1224 ht_bin->tcaddr[0] = clean_addr; // Replace existing entry
1225 if (ht_bin->vaddr[1] == head->vaddr)
1226 ht_bin->tcaddr[1] = clean_addr; // Replace existing entry
57871462 1227 }
1228 }
1229 }
1230 }
1231 }
1232 head=head->next;
1233 }
1234}
1235
8062d65a 1236/* Register allocation */
1237
1238// Note: registers are allocated clean (unmodified state)
1239// if you intend to modify the register, you must call dirty_reg().
1240static void alloc_reg(struct regstat *cur,int i,signed char reg)
1241{
1242 int r,hr;
1243 int preferred_reg = (reg&7);
1244 if(reg==CCREG) preferred_reg=HOST_CCREG;
1245 if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
1246
1247 // Don't allocate unused registers
1248 if((cur->u>>reg)&1) return;
1249
1250 // see if it's already allocated
1251 for(hr=0;hr<HOST_REGS;hr++)
1252 {
1253 if(cur->regmap[hr]==reg) return;
1254 }
1255
1256 // Keep the same mapping if the register was already allocated in a loop
1257 preferred_reg = loop_reg(i,reg,preferred_reg);
1258
1259 // Try to allocate the preferred register
1260 if(cur->regmap[preferred_reg]==-1) {
1261 cur->regmap[preferred_reg]=reg;
1262 cur->dirty&=~(1<<preferred_reg);
1263 cur->isconst&=~(1<<preferred_reg);
1264 return;
1265 }
1266 r=cur->regmap[preferred_reg];
1267 assert(r < 64);
1268 if((cur->u>>r)&1) {
1269 cur->regmap[preferred_reg]=reg;
1270 cur->dirty&=~(1<<preferred_reg);
1271 cur->isconst&=~(1<<preferred_reg);
1272 return;
1273 }
1274
1275 // Clear any unneeded registers
1276 // We try to keep the mapping consistent, if possible, because it
1277 // makes branches easier (especially loops). So we try to allocate
1278 // first (see above) before removing old mappings. If this is not
1279 // possible then go ahead and clear out the registers that are no
1280 // longer needed.
1281 for(hr=0;hr<HOST_REGS;hr++)
1282 {
1283 r=cur->regmap[hr];
1284 if(r>=0) {
1285 assert(r < 64);
1286 if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
1287 }
1288 }
1289 // Try to allocate any available register, but prefer
1290 // registers that have not been used recently.
1291 if(i>0) {
1292 for(hr=0;hr<HOST_REGS;hr++) {
1293 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1294 if(regs[i-1].regmap[hr]!=rs1[i-1]&&regs[i-1].regmap[hr]!=rs2[i-1]&&regs[i-1].regmap[hr]!=rt1[i-1]&&regs[i-1].regmap[hr]!=rt2[i-1]) {
1295 cur->regmap[hr]=reg;
1296 cur->dirty&=~(1<<hr);
1297 cur->isconst&=~(1<<hr);
1298 return;
1299 }
1300 }
1301 }
1302 }
1303 // Try to allocate any available register
1304 for(hr=0;hr<HOST_REGS;hr++) {
1305 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1306 cur->regmap[hr]=reg;
1307 cur->dirty&=~(1<<hr);
1308 cur->isconst&=~(1<<hr);
1309 return;
1310 }
1311 }
1312
1313 // Ok, now we have to evict someone
1314 // Pick a register we hopefully won't need soon
1315 u_char hsn[MAXREG+1];
1316 memset(hsn,10,sizeof(hsn));
1317 int j;
1318 lsn(hsn,i,&preferred_reg);
1319 //printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
1320 //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1321 if(i>0) {
1322 // Don't evict the cycle count at entry points, otherwise the entry
1323 // stub will have to write it.
1324 if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
1325 if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
1326 for(j=10;j>=3;j--)
1327 {
1328 // Alloc preferred register if available
1329 if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
1330 for(hr=0;hr<HOST_REGS;hr++) {
1331 // Evict both parts of a 64-bit register
1332 if((cur->regmap[hr]&63)==r) {
1333 cur->regmap[hr]=-1;
1334 cur->dirty&=~(1<<hr);
1335 cur->isconst&=~(1<<hr);
1336 }
1337 }
1338 cur->regmap[preferred_reg]=reg;
1339 return;
1340 }
1341 for(r=1;r<=MAXREG;r++)
1342 {
1343 if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
8062d65a 1344 for(hr=0;hr<HOST_REGS;hr++) {
1345 if(hr!=HOST_CCREG||j<hsn[CCREG]) {
1346 if(cur->regmap[hr]==r) {
1347 cur->regmap[hr]=reg;
1348 cur->dirty&=~(1<<hr);
1349 cur->isconst&=~(1<<hr);
1350 return;
1351 }
1352 }
1353 }
1354 }
1355 }
1356 }
1357 }
1358 for(j=10;j>=0;j--)
1359 {
1360 for(r=1;r<=MAXREG;r++)
1361 {
1362 if(hsn[r]==j) {
8062d65a 1363 for(hr=0;hr<HOST_REGS;hr++) {
1364 if(cur->regmap[hr]==r) {
1365 cur->regmap[hr]=reg;
1366 cur->dirty&=~(1<<hr);
1367 cur->isconst&=~(1<<hr);
1368 return;
1369 }
1370 }
1371 }
1372 }
1373 }
7c3a5182 1374 SysPrintf("This shouldn't happen (alloc_reg)");abort();
8062d65a 1375}
1376
1377// Allocate a temporary register. This is done without regard to
1378// dirty status or whether the register we request is on the unneeded list
1379// Note: This will only allocate one register, even if called multiple times
1380static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
1381{
1382 int r,hr;
1383 int preferred_reg = -1;
1384
1385 // see if it's already allocated
1386 for(hr=0;hr<HOST_REGS;hr++)
1387 {
1388 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;
1389 }
1390
1391 // Try to allocate any available register
1392 for(hr=HOST_REGS-1;hr>=0;hr--) {
1393 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1394 cur->regmap[hr]=reg;
1395 cur->dirty&=~(1<<hr);
1396 cur->isconst&=~(1<<hr);
1397 return;
1398 }
1399 }
1400
1401 // Find an unneeded register
1402 for(hr=HOST_REGS-1;hr>=0;hr--)
1403 {
1404 r=cur->regmap[hr];
1405 if(r>=0) {
1406 assert(r < 64);
1407 if((cur->u>>r)&1) {
1408 if(i==0||((unneeded_reg[i-1]>>r)&1)) {
1409 cur->regmap[hr]=reg;
1410 cur->dirty&=~(1<<hr);
1411 cur->isconst&=~(1<<hr);
1412 return;
1413 }
1414 }
1415 }
1416 }
1417
1418 // Ok, now we have to evict someone
1419 // Pick a register we hopefully won't need soon
1420 // TODO: we might want to follow unconditional jumps here
1421 // TODO: get rid of dupe code and make this into a function
1422 u_char hsn[MAXREG+1];
1423 memset(hsn,10,sizeof(hsn));
1424 int j;
1425 lsn(hsn,i,&preferred_reg);
1426 //printf("hsn: %d %d %d %d %d %d %d\n",hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1427 if(i>0) {
1428 // Don't evict the cycle count at entry points, otherwise the entry
1429 // stub will have to write it.
1430 if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
1431 if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
1432 for(j=10;j>=3;j--)
1433 {
1434 for(r=1;r<=MAXREG;r++)
1435 {
1436 if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
8062d65a 1437 for(hr=0;hr<HOST_REGS;hr++) {
1438 if(hr!=HOST_CCREG||hsn[CCREG]>2) {
1439 if(cur->regmap[hr]==r) {
1440 cur->regmap[hr]=reg;
1441 cur->dirty&=~(1<<hr);
1442 cur->isconst&=~(1<<hr);
1443 return;
1444 }
1445 }
1446 }
1447 }
1448 }
1449 }
1450 }
1451 for(j=10;j>=0;j--)
1452 {
1453 for(r=1;r<=MAXREG;r++)
1454 {
1455 if(hsn[r]==j) {
8062d65a 1456 for(hr=0;hr<HOST_REGS;hr++) {
1457 if(cur->regmap[hr]==r) {
1458 cur->regmap[hr]=reg;
1459 cur->dirty&=~(1<<hr);
1460 cur->isconst&=~(1<<hr);
1461 return;
1462 }
1463 }
1464 }
1465 }
1466 }
7c3a5182 1467 SysPrintf("This shouldn't happen");abort();
8062d65a 1468}
1469
ad49de89 1470static void mov_alloc(struct regstat *current,int i)
57871462 1471{
1472 // Note: Don't need to actually alloc the source registers
ad49de89 1473 //alloc_reg(current,i,rs1[i]);
1474 alloc_reg(current,i,rt1[i]);
1475
57871462 1476 clear_const(current,rs1[i]);
1477 clear_const(current,rt1[i]);
1478 dirty_reg(current,rt1[i]);
1479}
1480
ad49de89 1481static void shiftimm_alloc(struct regstat *current,int i)
57871462 1482{
57871462 1483 if(opcode2[i]<=0x3) // SLL/SRL/SRA
1484 {
1485 if(rt1[i]) {
1486 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1487 else lt1[i]=rs1[i];
1488 alloc_reg(current,i,rt1[i]);
57871462 1489 dirty_reg(current,rt1[i]);
dc49e339 1490 if(is_const(current,rs1[i])) {
1491 int v=get_const(current,rs1[i]);
1492 if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1493 if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1494 if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1495 }
1496 else clear_const(current,rt1[i]);
57871462 1497 }
1498 }
dc49e339 1499 else
1500 {
1501 clear_const(current,rs1[i]);
1502 clear_const(current,rt1[i]);
1503 }
1504
57871462 1505 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1506 {
9c45ca93 1507 assert(0);
57871462 1508 }
1509 if(opcode2[i]==0x3c) // DSLL32
1510 {
9c45ca93 1511 assert(0);
57871462 1512 }
1513 if(opcode2[i]==0x3e) // DSRL32
1514 {
9c45ca93 1515 assert(0);
57871462 1516 }
1517 if(opcode2[i]==0x3f) // DSRA32
1518 {
9c45ca93 1519 assert(0);
57871462 1520 }
1521}
1522
ad49de89 1523static void shift_alloc(struct regstat *current,int i)
57871462 1524{
1525 if(rt1[i]) {
1526 if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1527 {
1528 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1529 if(rs2[i]) alloc_reg(current,i,rs2[i]);
1530 alloc_reg(current,i,rt1[i]);
e1190b87 1531 if(rt1[i]==rs2[i]) {
1532 alloc_reg_temp(current,i,-1);
1533 minimum_free_regs[i]=1;
1534 }
57871462 1535 } else { // DSLLV/DSRLV/DSRAV
00fa9369 1536 assert(0);
57871462 1537 }
1538 clear_const(current,rs1[i]);
1539 clear_const(current,rs2[i]);
1540 clear_const(current,rt1[i]);
1541 dirty_reg(current,rt1[i]);
1542 }
1543}
1544
ad49de89 1545static void alu_alloc(struct regstat *current,int i)
57871462 1546{
1547 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1548 if(rt1[i]) {
1549 if(rs1[i]&&rs2[i]) {
1550 alloc_reg(current,i,rs1[i]);
1551 alloc_reg(current,i,rs2[i]);
1552 }
1553 else {
1554 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1555 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1556 }
1557 alloc_reg(current,i,rt1[i]);
1558 }
57871462 1559 }
1560 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1561 if(rt1[i]) {
ad49de89 1562 alloc_reg(current,i,rs1[i]);
1563 alloc_reg(current,i,rs2[i]);
1564 alloc_reg(current,i,rt1[i]);
57871462 1565 }
57871462 1566 }
1567 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1568 if(rt1[i]) {
1569 if(rs1[i]&&rs2[i]) {
1570 alloc_reg(current,i,rs1[i]);
1571 alloc_reg(current,i,rs2[i]);
1572 }
1573 else
1574 {
1575 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1576 if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1577 }
1578 alloc_reg(current,i,rt1[i]);
57871462 1579 }
1580 }
1581 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
00fa9369 1582 assert(0);
57871462 1583 }
1584 clear_const(current,rs1[i]);
1585 clear_const(current,rs2[i]);
1586 clear_const(current,rt1[i]);
1587 dirty_reg(current,rt1[i]);
1588}
1589
ad49de89 1590static void imm16_alloc(struct regstat *current,int i)
57871462 1591{
1592 if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1593 else lt1[i]=rs1[i];
1594 if(rt1[i]) alloc_reg(current,i,rt1[i]);
1595 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
00fa9369 1596 assert(0);
57871462 1597 }
1598 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
57871462 1599 clear_const(current,rs1[i]);
1600 clear_const(current,rt1[i]);
1601 }
1602 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
57871462 1603 if(is_const(current,rs1[i])) {
1604 int v=get_const(current,rs1[i]);
1605 if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1606 if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1607 if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1608 }
1609 else clear_const(current,rt1[i]);
1610 }
1611 else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1612 if(is_const(current,rs1[i])) {
1613 int v=get_const(current,rs1[i]);
1614 set_const(current,rt1[i],v+imm[i]);
1615 }
1616 else clear_const(current,rt1[i]);
57871462 1617 }
1618 else {
1619 set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
57871462 1620 }
1621 dirty_reg(current,rt1[i]);
1622}
1623
ad49de89 1624static void load_alloc(struct regstat *current,int i)
57871462 1625{
1626 clear_const(current,rt1[i]);
1627 //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1628 if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1629 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
373d1d07 1630 if(rt1[i]&&!((current->u>>rt1[i])&1)) {
57871462 1631 alloc_reg(current,i,rt1[i]);
373d1d07 1632 assert(get_reg(current->regmap,rt1[i])>=0);
57871462 1633 if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1634 {
ad49de89 1635 assert(0);
57871462 1636 }
1637 else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1638 {
ad49de89 1639 assert(0);
57871462 1640 }
57871462 1641 dirty_reg(current,rt1[i]);
57871462 1642 // LWL/LWR need a temporary register for the old value
1643 if(opcode[i]==0x22||opcode[i]==0x26)
1644 {
1645 alloc_reg(current,i,FTEMP);
1646 alloc_reg_temp(current,i,-1);
e1190b87 1647 minimum_free_regs[i]=1;
57871462 1648 }
1649 }
1650 else
1651 {
373d1d07 1652 // Load to r0 or unneeded register (dummy load)
57871462 1653 // but we still need a register to calculate the address
535d208a 1654 if(opcode[i]==0x22||opcode[i]==0x26)
1655 {
1656 alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1657 }
57871462 1658 alloc_reg_temp(current,i,-1);
e1190b87 1659 minimum_free_regs[i]=1;
535d208a 1660 if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1661 {
ad49de89 1662 assert(0);
535d208a 1663 }
57871462 1664 }
1665}
1666
1667void store_alloc(struct regstat *current,int i)
1668{
1669 clear_const(current,rs2[i]);
1670 if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1671 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1672 alloc_reg(current,i,rs2[i]);
1673 if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
ad49de89 1674 assert(0);
57871462 1675 }
57871462 1676 #if defined(HOST_IMM8)
1677 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1678 else alloc_reg(current,i,INVCP);
1679 #endif
b7918751 1680 if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
57871462 1681 alloc_reg(current,i,FTEMP);
1682 }
1683 // We need a temporary register for address generation
1684 alloc_reg_temp(current,i,-1);
e1190b87 1685 minimum_free_regs[i]=1;
57871462 1686}
1687
1688void c1ls_alloc(struct regstat *current,int i)
1689{
1690 //clear_const(current,rs1[i]); // FIXME
1691 clear_const(current,rt1[i]);
1692 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1693 alloc_reg(current,i,CSREG); // Status
1694 alloc_reg(current,i,FTEMP);
1695 if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
ad49de89 1696 assert(0);
57871462 1697 }
57871462 1698 #if defined(HOST_IMM8)
1699 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1700 else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1701 alloc_reg(current,i,INVCP);
1702 #endif
1703 // We need a temporary register for address generation
1704 alloc_reg_temp(current,i,-1);
1705}
1706
b9b61529 1707void c2ls_alloc(struct regstat *current,int i)
1708{
1709 clear_const(current,rt1[i]);
1710 if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1711 alloc_reg(current,i,FTEMP);
b9b61529 1712 #if defined(HOST_IMM8)
1713 // On CPUs without 32-bit immediates we need a pointer to invalid_code
1edfcc68 1714 if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
b9b61529 1715 alloc_reg(current,i,INVCP);
1716 #endif
1717 // We need a temporary register for address generation
1718 alloc_reg_temp(current,i,-1);
e1190b87 1719 minimum_free_regs[i]=1;
b9b61529 1720}
1721
57871462 1722#ifndef multdiv_alloc
1723void multdiv_alloc(struct regstat *current,int i)
1724{
1725 // case 0x18: MULT
1726 // case 0x19: MULTU
1727 // case 0x1A: DIV
1728 // case 0x1B: DIVU
1729 // case 0x1C: DMULT
1730 // case 0x1D: DMULTU
1731 // case 0x1E: DDIV
1732 // case 0x1F: DDIVU
1733 clear_const(current,rs1[i]);
1734 clear_const(current,rs2[i]);
1735 if(rs1[i]&&rs2[i])
1736 {
1737 if((opcode2[i]&4)==0) // 32-bit
1738 {
1739 current->u&=~(1LL<<HIREG);
1740 current->u&=~(1LL<<LOREG);
1741 alloc_reg(current,i,HIREG);
1742 alloc_reg(current,i,LOREG);
1743 alloc_reg(current,i,rs1[i]);
1744 alloc_reg(current,i,rs2[i]);
57871462 1745 dirty_reg(current,HIREG);
1746 dirty_reg(current,LOREG);
1747 }
1748 else // 64-bit
1749 {
00fa9369 1750 assert(0);
57871462 1751 }
1752 }
1753 else
1754 {
1755 // Multiply by zero is zero.
1756 // MIPS does not have a divide by zero exception.
1757 // The result is undefined, we return zero.
1758 alloc_reg(current,i,HIREG);
1759 alloc_reg(current,i,LOREG);
57871462 1760 dirty_reg(current,HIREG);
1761 dirty_reg(current,LOREG);
1762 }
1763}
1764#endif
1765
1766void cop0_alloc(struct regstat *current,int i)
1767{
1768 if(opcode2[i]==0) // MFC0
1769 {
1770 if(rt1[i]) {
1771 clear_const(current,rt1[i]);
1772 alloc_all(current,i);
1773 alloc_reg(current,i,rt1[i]);
57871462 1774 dirty_reg(current,rt1[i]);
1775 }
1776 }
1777 else if(opcode2[i]==4) // MTC0
1778 {
1779 if(rs1[i]){
1780 clear_const(current,rs1[i]);
1781 alloc_reg(current,i,rs1[i]);
1782 alloc_all(current,i);
1783 }
1784 else {
1785 alloc_all(current,i); // FIXME: Keep r0
1786 current->u&=~1LL;
1787 alloc_reg(current,i,0);
1788 }
1789 }
1790 else
1791 {
1792 // TLBR/TLBWI/TLBWR/TLBP/ERET
1793 assert(opcode2[i]==0x10);
1794 alloc_all(current,i);
1795 }
e1190b87 1796 minimum_free_regs[i]=HOST_REGS;
57871462 1797}
1798
00fa9369 1799static void cop12_alloc(struct regstat *current,int i)
57871462 1800{
1801 alloc_reg(current,i,CSREG); // Load status
00fa9369 1802 if(opcode2[i]<3) // MFC1/CFC1
57871462 1803 {
7de557a6 1804 if(rt1[i]){
1805 clear_const(current,rt1[i]);
00fa9369 1806 alloc_reg(current,i,rt1[i]);
7de557a6 1807 dirty_reg(current,rt1[i]);
57871462 1808 }
57871462 1809 alloc_reg_temp(current,i,-1);
1810 }
00fa9369 1811 else if(opcode2[i]>3) // MTC1/CTC1
57871462 1812 {
1813 if(rs1[i]){
1814 clear_const(current,rs1[i]);
00fa9369 1815 alloc_reg(current,i,rs1[i]);
57871462 1816 }
1817 else {
1818 current->u&=~1LL;
1819 alloc_reg(current,i,0);
57871462 1820 }
00fa9369 1821 alloc_reg_temp(current,i,-1);
57871462 1822 }
e1190b87 1823 minimum_free_regs[i]=1;
57871462 1824}
00fa9369 1825
b9b61529 1826void c2op_alloc(struct regstat *current,int i)
1827{
1828 alloc_reg_temp(current,i,-1);
1829}
57871462 1830
1831void syscall_alloc(struct regstat *current,int i)
1832{
1833 alloc_cc(current,i);
1834 dirty_reg(current,CCREG);
1835 alloc_all(current,i);
e1190b87 1836 minimum_free_regs[i]=HOST_REGS;
57871462 1837 current->isconst=0;
1838}
1839
1840void delayslot_alloc(struct regstat *current,int i)
1841{
1842 switch(itype[i]) {
1843 case UJUMP:
1844 case CJUMP:
1845 case SJUMP:
1846 case RJUMP:
57871462 1847 case SYSCALL:
7139f3c8 1848 case HLECALL:
57871462 1849 case SPAN:
7c3a5182 1850 assem_debug("jump in the delay slot. this shouldn't happen.\n");//abort();
c43b5311 1851 SysPrintf("Disabled speculative precompilation\n");
57871462 1852 stop_after_jal=1;
1853 break;
1854 case IMM16:
1855 imm16_alloc(current,i);
1856 break;
1857 case LOAD:
1858 case LOADLR:
1859 load_alloc(current,i);
1860 break;
1861 case STORE:
1862 case STORELR:
1863 store_alloc(current,i);
1864 break;
1865 case ALU:
1866 alu_alloc(current,i);
1867 break;
1868 case SHIFT:
1869 shift_alloc(current,i);
1870 break;
1871 case MULTDIV:
1872 multdiv_alloc(current,i);
1873 break;
1874 case SHIFTIMM:
1875 shiftimm_alloc(current,i);
1876 break;
1877 case MOV:
1878 mov_alloc(current,i);
1879 break;
1880 case COP0:
1881 cop0_alloc(current,i);
1882 break;
1883 case COP1:
b9b61529 1884 case COP2:
00fa9369 1885 cop12_alloc(current,i);
57871462 1886 break;
1887 case C1LS:
1888 c1ls_alloc(current,i);
1889 break;
b9b61529 1890 case C2LS:
1891 c2ls_alloc(current,i);
1892 break;
b9b61529 1893 case C2OP:
1894 c2op_alloc(current,i);
1895 break;
57871462 1896 }
1897}
1898
1899// Special case where a branch and delay slot span two pages in virtual memory
1900static void pagespan_alloc(struct regstat *current,int i)
1901{
1902 current->isconst=0;
1903 current->wasconst=0;
1904 regs[i].wasconst=0;
e1190b87 1905 minimum_free_regs[i]=HOST_REGS;
57871462 1906 alloc_all(current,i);
1907 alloc_cc(current,i);
1908 dirty_reg(current,CCREG);
1909 if(opcode[i]==3) // JAL
1910 {
1911 alloc_reg(current,i,31);
1912 dirty_reg(current,31);
1913 }
1914 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1915 {
1916 alloc_reg(current,i,rs1[i]);
5067f341 1917 if (rt1[i]!=0) {
1918 alloc_reg(current,i,rt1[i]);
1919 dirty_reg(current,rt1[i]);
57871462 1920 }
1921 }
1922 if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1923 {
1924 if(rs1[i]) alloc_reg(current,i,rs1[i]);
1925 if(rs2[i]) alloc_reg(current,i,rs2[i]);
57871462 1926 }
1927 else
1928 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1929 {
1930 if(rs1[i]) alloc_reg(current,i,rs1[i]);
57871462 1931 }
57871462 1932 //else ...
1933}
1934
b14b6a8f 1935static void add_stub(enum stub_type type, void *addr, void *retaddr,
1936 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
1937{
d1e4ebd9 1938 assert(stubcount < ARRAY_SIZE(stubs));
b14b6a8f 1939 stubs[stubcount].type = type;
1940 stubs[stubcount].addr = addr;
1941 stubs[stubcount].retaddr = retaddr;
1942 stubs[stubcount].a = a;
1943 stubs[stubcount].b = b;
1944 stubs[stubcount].c = c;
1945 stubs[stubcount].d = d;
1946 stubs[stubcount].e = e;
57871462 1947 stubcount++;
1948}
1949
b14b6a8f 1950static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
1951 int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist)
1952{
1953 add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
1954}
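// Added note (not part of the original source): add_stub()/add_stub_r()
// only record a pending out-of-line slow path; its code is emitted after
// the main block and the recorded branch is patched to reach it.
// Typical use, as seen in the load/store assemblers further down:
//   jaddr = out; emit_jno(0);          // guard branch, target patched later
//   ... inline fast path ...
//   add_stub_r(LOADW_STUB, jaddr, out, i, addr, i_regs, ccadj[i], reglist);
// where 'out' at call time becomes the return address of the stub.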
1955
57871462 1956// Write out a single register
ad49de89 1957static void wb_register(signed char r,signed char regmap[],uint64_t dirty)
57871462 1958{
1959 int hr;
1960 for(hr=0;hr<HOST_REGS;hr++) {
1961 if(hr!=EXCLUDE_REG) {
1962 if((regmap[hr]&63)==r) {
1963 if((dirty>>hr)&1) {
ad49de89 1964 assert(regmap[hr]<64);
1965 emit_storereg(r,hr);
57871462 1966 }
1967 }
1968 }
1969 }
1970}
1971
8062d65a 1972static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
1973{
1974 //if(dirty_pre==dirty) return;
1975 int hr,reg;
1976 for(hr=0;hr<HOST_REGS;hr++) {
1977 if(hr!=EXCLUDE_REG) {
1978 reg=pre[hr];
1979 if(((~u)>>(reg&63))&1) {
1980 if(reg>0) {
1981 if(((dirty_pre&~dirty)>>hr)&1) {
1982 if(reg>0&&reg<34) {
1983 emit_storereg(reg,hr);
1984 }
1985 else if(reg>=64) {
1986 assert(0);
1987 }
1988 }
1989 }
1990 }
1991 }
1992 }
1993}
1994
687b4580 1995// trashes r2
1996static void pass_args(int a0, int a1)
1997{
1998 if(a0==1&&a1==0) {
1999 // must swap
2000 emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
2001 }
2002 else if(a0!=0&&a1==0) {
2003 emit_mov(a1,1);
2004 if (a0>=0) emit_mov(a0,0);
2005 }
2006 else {
2007 if(a0>=0&&a0!=0) emit_mov(a0,0);
2008 if(a1>=0&&a1!=1) emit_mov(a1,1);
2009 }
2010}
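// Added note: pass_args() moves two values into the first two argument
// registers (r0/r1). The pass_args(1,0) case needs the scratch register
// because a0 already sits in r1 and a1 in r0, so a straight move in either
// order would clobber one of them - hence the "trashes r2" warning above.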
2011
2012static void alu_assemble(int i,struct regstat *i_regs)
57871462 2013{
2014 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2015 if(rt1[i]) {
2016 signed char s1,s2,t;
2017 t=get_reg(i_regs->regmap,rt1[i]);
2018 if(t>=0) {
2019 s1=get_reg(i_regs->regmap,rs1[i]);
2020 s2=get_reg(i_regs->regmap,rs2[i]);
2021 if(rs1[i]&&rs2[i]) {
2022 assert(s1>=0);
2023 assert(s2>=0);
2024 if(opcode2[i]&2) emit_sub(s1,s2,t);
2025 else emit_add(s1,s2,t);
2026 }
2027 else if(rs1[i]) {
2028 if(s1>=0) emit_mov(s1,t);
2029 else emit_loadreg(rs1[i],t);
2030 }
2031 else if(rs2[i]) {
2032 if(s2>=0) {
2033 if(opcode2[i]&2) emit_neg(s2,t);
2034 else emit_mov(s2,t);
2035 }
2036 else {
2037 emit_loadreg(rs2[i],t);
2038 if(opcode2[i]&2) emit_neg(t,t);
2039 }
2040 }
2041 else emit_zeroreg(t);
2042 }
2043 }
2044 }
2045 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
00fa9369 2046 assert(0);
57871462 2047 }
2048 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2049 if(rt1[i]) {
ad49de89 2050 signed char s1l,s2l,t;
57871462 2051 {
57871462 2052 t=get_reg(i_regs->regmap,rt1[i]);
2053 //assert(t>=0);
2054 if(t>=0) {
2055 s1l=get_reg(i_regs->regmap,rs1[i]);
2056 s2l=get_reg(i_regs->regmap,rs2[i]);
2057 if(rs2[i]==0) // rx<r0
2058 {
2059 assert(s1l>=0);
2060 if(opcode2[i]==0x2a) // SLT
2061 emit_shrimm(s1l,31,t);
 2062 else // SLTU (unsigned cannot be less than zero)
2063 emit_zeroreg(t);
2064 }
2065 else if(rs1[i]==0) // r0<rx
2066 {
2067 assert(s2l>=0);
2068 if(opcode2[i]==0x2a) // SLT
2069 emit_set_gz32(s2l,t);
2070 else // SLTU (set if not zero)
2071 emit_set_nz32(s2l,t);
2072 }
2073 else{
2074 assert(s1l>=0);assert(s2l>=0);
2075 if(opcode2[i]==0x2a) // SLT
2076 emit_set_if_less32(s1l,s2l,t);
2077 else // SLTU
2078 emit_set_if_carry32(s1l,s2l,t);
2079 }
2080 }
2081 }
2082 }
2083 }
2084 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2085 if(rt1[i]) {
ad49de89 2086 signed char s1l,s2l,tl;
57871462 2087 tl=get_reg(i_regs->regmap,rt1[i]);
57871462 2088 {
57871462 2089 if(tl>=0) {
2090 s1l=get_reg(i_regs->regmap,rs1[i]);
2091 s2l=get_reg(i_regs->regmap,rs2[i]);
2092 if(rs1[i]&&rs2[i]) {
2093 assert(s1l>=0);
2094 assert(s2l>=0);
2095 if(opcode2[i]==0x24) { // AND
2096 emit_and(s1l,s2l,tl);
2097 } else
2098 if(opcode2[i]==0x25) { // OR
2099 emit_or(s1l,s2l,tl);
2100 } else
2101 if(opcode2[i]==0x26) { // XOR
2102 emit_xor(s1l,s2l,tl);
2103 } else
2104 if(opcode2[i]==0x27) { // NOR
2105 emit_or(s1l,s2l,tl);
2106 emit_not(tl,tl);
2107 }
2108 }
2109 else
2110 {
2111 if(opcode2[i]==0x24) { // AND
2112 emit_zeroreg(tl);
2113 } else
2114 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2115 if(rs1[i]){
2116 if(s1l>=0) emit_mov(s1l,tl);
2117 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2118 }
2119 else
2120 if(rs2[i]){
2121 if(s2l>=0) emit_mov(s2l,tl);
2122 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2123 }
2124 else emit_zeroreg(tl);
2125 } else
2126 if(opcode2[i]==0x27) { // NOR
2127 if(rs1[i]){
2128 if(s1l>=0) emit_not(s1l,tl);
2129 else {
2130 emit_loadreg(rs1[i],tl);
2131 emit_not(tl,tl);
2132 }
2133 }
2134 else
2135 if(rs2[i]){
2136 if(s2l>=0) emit_not(s2l,tl);
2137 else {
2138 emit_loadreg(rs2[i],tl);
2139 emit_not(tl,tl);
2140 }
2141 }
2142 else emit_movimm(-1,tl);
2143 }
2144 }
2145 }
2146 }
2147 }
2148 }
2149}
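// Added note on the SLT/SLTU shortcuts above: with a $zero operand the
// comparison collapses to a simpler operation:
//   SLT  rx < r0  ->  sign bit of rx (logical shift right by 31)
//   SLTU rx < r0  ->  always 0 (nothing is unsigned-less-than zero)
//   SLT  r0 < rx  ->  set if rx > 0   (emit_set_gz32)
//   SLTU r0 < rx  ->  set if rx != 0  (emit_set_nz32)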
2150
2151void imm16_assemble(int i,struct regstat *i_regs)
2152{
2153 if (opcode[i]==0x0f) { // LUI
2154 if(rt1[i]) {
2155 signed char t;
2156 t=get_reg(i_regs->regmap,rt1[i]);
2157 //assert(t>=0);
2158 if(t>=0) {
2159 if(!((i_regs->isconst>>t)&1))
2160 emit_movimm(imm[i]<<16,t);
2161 }
2162 }
2163 }
2164 if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2165 if(rt1[i]) {
2166 signed char s,t;
2167 t=get_reg(i_regs->regmap,rt1[i]);
2168 s=get_reg(i_regs->regmap,rs1[i]);
2169 if(rs1[i]) {
2170 //assert(t>=0);
2171 //assert(s>=0);
2172 if(t>=0) {
2173 if(!((i_regs->isconst>>t)&1)) {
2174 if(s<0) {
2175 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2176 emit_addimm(t,imm[i],t);
2177 }else{
2178 if(!((i_regs->wasconst>>s)&1))
2179 emit_addimm(s,imm[i],t);
2180 else
2181 emit_movimm(constmap[i][s]+imm[i],t);
2182 }
2183 }
2184 }
2185 } else {
2186 if(t>=0) {
2187 if(!((i_regs->isconst>>t)&1))
2188 emit_movimm(imm[i],t);
2189 }
2190 }
2191 }
2192 }
2193 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2194 if(rt1[i]) {
7c3a5182 2195 signed char sl,tl;
57871462 2196 tl=get_reg(i_regs->regmap,rt1[i]);
57871462 2197 sl=get_reg(i_regs->regmap,rs1[i]);
2198 if(tl>=0) {
2199 if(rs1[i]) {
57871462 2200 assert(sl>=0);
7c3a5182 2201 emit_addimm(sl,imm[i],tl);
57871462 2202 } else {
2203 emit_movimm(imm[i],tl);
57871462 2204 }
2205 }
2206 }
2207 }
2208 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2209 if(rt1[i]) {
2210 //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
ad49de89 2211 signed char sl,t;
57871462 2212 t=get_reg(i_regs->regmap,rt1[i]);
57871462 2213 sl=get_reg(i_regs->regmap,rs1[i]);
2214 //assert(t>=0);
2215 if(t>=0) {
2216 if(rs1[i]>0) {
57871462 2217 if(opcode[i]==0x0a) { // SLTI
2218 if(sl<0) {
2219 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2220 emit_slti32(t,imm[i],t);
2221 }else{
2222 emit_slti32(sl,imm[i],t);
2223 }
2224 }
2225 else { // SLTIU
2226 if(sl<0) {
2227 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2228 emit_sltiu32(t,imm[i],t);
2229 }else{
2230 emit_sltiu32(sl,imm[i],t);
2231 }
2232 }
57871462 2233 }else{
2234 // SLTI(U) with r0 is just stupid,
2235 // nonetheless examples can be found
2236 if(opcode[i]==0x0a) // SLTI
2237 if(0<imm[i]) emit_movimm(1,t);
2238 else emit_zeroreg(t);
2239 else // SLTIU
2240 {
2241 if(imm[i]) emit_movimm(1,t);
2242 else emit_zeroreg(t);
2243 }
2244 }
2245 }
2246 }
2247 }
2248 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2249 if(rt1[i]) {
7c3a5182 2250 signed char sl,tl;
57871462 2251 tl=get_reg(i_regs->regmap,rt1[i]);
57871462 2252 sl=get_reg(i_regs->regmap,rs1[i]);
2253 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2254 if(opcode[i]==0x0c) //ANDI
2255 {
2256 if(rs1[i]) {
2257 if(sl<0) {
2258 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2259 emit_andimm(tl,imm[i],tl);
2260 }else{
2261 if(!((i_regs->wasconst>>sl)&1))
2262 emit_andimm(sl,imm[i],tl);
2263 else
2264 emit_movimm(constmap[i][sl]&imm[i],tl);
2265 }
2266 }
2267 else
2268 emit_zeroreg(tl);
57871462 2269 }
2270 else
2271 {
2272 if(rs1[i]) {
2273 if(sl<0) {
2274 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2275 }
581335b0 2276 if(opcode[i]==0x0d) { // ORI
2277 if(sl<0) {
2278 emit_orimm(tl,imm[i],tl);
2279 }else{
2280 if(!((i_regs->wasconst>>sl)&1))
2281 emit_orimm(sl,imm[i],tl);
2282 else
2283 emit_movimm(constmap[i][sl]|imm[i],tl);
2284 }
57871462 2285 }
581335b0 2286 if(opcode[i]==0x0e) { // XORI
2287 if(sl<0) {
2288 emit_xorimm(tl,imm[i],tl);
2289 }else{
2290 if(!((i_regs->wasconst>>sl)&1))
2291 emit_xorimm(sl,imm[i],tl);
2292 else
2293 emit_movimm(constmap[i][sl]^imm[i],tl);
2294 }
57871462 2295 }
2296 }
2297 else {
2298 emit_movimm(imm[i],tl);
57871462 2299 }
2300 }
2301 }
2302 }
2303 }
2304}
2305
2306void shiftimm_assemble(int i,struct regstat *i_regs)
2307{
2308 if(opcode2[i]<=0x3) // SLL/SRL/SRA
2309 {
2310 if(rt1[i]) {
2311 signed char s,t;
2312 t=get_reg(i_regs->regmap,rt1[i]);
2313 s=get_reg(i_regs->regmap,rs1[i]);
2314 //assert(t>=0);
dc49e339 2315 if(t>=0&&!((i_regs->isconst>>t)&1)){
57871462 2316 if(rs1[i]==0)
2317 {
2318 emit_zeroreg(t);
2319 }
2320 else
2321 {
2322 if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2323 if(imm[i]) {
2324 if(opcode2[i]==0) // SLL
2325 {
2326 emit_shlimm(s<0?t:s,imm[i],t);
2327 }
2328 if(opcode2[i]==2) // SRL
2329 {
2330 emit_shrimm(s<0?t:s,imm[i],t);
2331 }
2332 if(opcode2[i]==3) // SRA
2333 {
2334 emit_sarimm(s<0?t:s,imm[i],t);
2335 }
2336 }else{
2337 // Shift by zero
2338 if(s>=0 && s!=t) emit_mov(s,t);
2339 }
2340 }
2341 }
2342 //emit_storereg(rt1[i],t); //DEBUG
2343 }
2344 }
2345 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2346 {
9c45ca93 2347 assert(0);
57871462 2348 }
2349 if(opcode2[i]==0x3c) // DSLL32
2350 {
9c45ca93 2351 assert(0);
57871462 2352 }
2353 if(opcode2[i]==0x3e) // DSRL32
2354 {
9c45ca93 2355 assert(0);
57871462 2356 }
2357 if(opcode2[i]==0x3f) // DSRA32
2358 {
9c45ca93 2359 assert(0);
57871462 2360 }
2361}
2362
2363#ifndef shift_assemble
3968e69e 2364static void shift_assemble(int i,struct regstat *i_regs)
57871462 2365{
3968e69e 2366 signed char s,t,shift;
2367 if (rt1[i] == 0)
2368 return;
2369 assert(opcode2[i]<=0x07); // SLLV/SRLV/SRAV
2370 t = get_reg(i_regs->regmap, rt1[i]);
2371 s = get_reg(i_regs->regmap, rs1[i]);
2372 shift = get_reg(i_regs->regmap, rs2[i]);
2373 if (t < 0)
2374 return;
2375
2376 if(rs1[i]==0)
2377 emit_zeroreg(t);
2378 else if(rs2[i]==0) {
2379 assert(s>=0);
2380 if(s!=t) emit_mov(s,t);
2381 }
2382 else {
2383 host_tempreg_acquire();
2384 emit_andimm(shift,31,HOST_TEMPREG);
2385 switch(opcode2[i]) {
2386 case 4: // SLLV
2387 emit_shl(s,HOST_TEMPREG,t);
2388 break;
2389 case 6: // SRLV
2390 emit_shr(s,HOST_TEMPREG,t);
2391 break;
2392 case 7: // SRAV
2393 emit_sar(s,HOST_TEMPREG,t);
2394 break;
2395 default:
2396 assert(0);
2397 }
2398 host_tempreg_release();
2399 }
57871462 2400}
3968e69e 2401
57871462 2402#endif
2403
8062d65a 2404enum {
2405 MTYPE_8000 = 0,
2406 MTYPE_8020,
2407 MTYPE_0000,
2408 MTYPE_A000,
2409 MTYPE_1F80,
2410};
2411
2412static int get_ptr_mem_type(u_int a)
2413{
2414 if(a < 0x00200000) {
2415 if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
 2416 // deliberately return the "wrong" type: the memhandler must be used for the BIOS self-test to pass
 2417 // 007 does similar accesses from the a00 mirror, weird stuff
2418 return MTYPE_8000;
2419 return MTYPE_0000;
2420 }
2421 if(0x1f800000 <= a && a < 0x1f801000)
2422 return MTYPE_1F80;
2423 if(0x80200000 <= a && a < 0x80800000)
2424 return MTYPE_8020;
2425 if(0xa0000000 <= a && a < 0xa0200000)
2426 return MTYPE_A000;
2427 return MTYPE_8000;
2428}
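// Added worked examples, derived from the checks above:
//   0x00001234 -> MTYPE_0000 (RAM mirror at 0); addresses below 0x1000 get
//                 MTYPE_8000 instead when running from the BIOS/a0 mirror
//   0x1f800080 -> MTYPE_1F80 (scratchpad)
//   0x80201000 -> MTYPE_8020 (RAM mirror at 80200000+)
//   0xa00ff000 -> MTYPE_A000 (KSEG1 RAM mirror)
//   0x80012345 -> MTYPE_8000 (default: canonical KSEG0 RAM / everything else)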
2429
2430static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override)
2431{
2432 void *jaddr = NULL;
2433 int type=0;
2434 int mr=rs1[i];
2435 if(((smrv_strong|smrv_weak)>>mr)&1) {
2436 type=get_ptr_mem_type(smrv[mr]);
2437 //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
2438 }
2439 else {
2440 // use the mirror we are running on
2441 type=get_ptr_mem_type(start);
2442 //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type);
2443 }
2444
2445 if(type==MTYPE_8020) { // RAM 80200000+ mirror
d1e4ebd9 2446 host_tempreg_acquire();
8062d65a 2447 emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2448 addr=*addr_reg_override=HOST_TEMPREG;
2449 type=0;
2450 }
2451 else if(type==MTYPE_0000) { // RAM 0 mirror
d1e4ebd9 2452 host_tempreg_acquire();
8062d65a 2453 emit_orimm(addr,0x80000000,HOST_TEMPREG);
2454 addr=*addr_reg_override=HOST_TEMPREG;
2455 type=0;
2456 }
2457 else if(type==MTYPE_A000) { // RAM A mirror
d1e4ebd9 2458 host_tempreg_acquire();
8062d65a 2459 emit_andimm(addr,~0x20000000,HOST_TEMPREG);
2460 addr=*addr_reg_override=HOST_TEMPREG;
2461 type=0;
2462 }
2463 else if(type==MTYPE_1F80) { // scratchpad
2464 if (psxH == (void *)0x1f800000) {
d1e4ebd9 2465 host_tempreg_acquire();
3968e69e 2466 emit_xorimm(addr,0x1f800000,HOST_TEMPREG);
8062d65a 2467 emit_cmpimm(HOST_TEMPREG,0x1000);
d1e4ebd9 2468 host_tempreg_release();
8062d65a 2469 jaddr=out;
2470 emit_jc(0);
2471 }
2472 else {
2473 // do the usual RAM check, jump will go to the right handler
2474 type=0;
2475 }
2476 }
2477
2478 if(type==0)
2479 {
2480 emit_cmpimm(addr,RAM_SIZE);
2481 jaddr=out;
2482 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2483 // Hint to branch predictor that the branch is unlikely to be taken
2484 if(rs1[i]>=28)
2485 emit_jno_unlikely(0);
2486 else
2487 #endif
2488 emit_jno(0);
2489 if(ram_offset!=0) {
d1e4ebd9 2490 host_tempreg_acquire();
8062d65a 2491 emit_addimm(addr,ram_offset,HOST_TEMPREG);
2492 addr=*addr_reg_override=HOST_TEMPREG;
2493 }
2494 }
2495
2496 return jaddr;
2497}
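// Added summary: emit_fastpath_cmp_jump() sets up the inline fast path of a
// memory access. Using the speculated memory type of the base register
// (smrv) - or the mirror the current block runs in - it first normalizes
// known RAM mirrors into the canonical range in HOST_TEMPREG (e.g. an
// 0xA0xxxxxx address has bit 0x20000000 cleared) and then emits the usual
// "cmp addr, RAM_SIZE" guard; the scratchpad case gets a 4KB range check of
// its own instead. The return value is the conditional jump that the caller
// must later attach to a slow-path stub.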
2498
687b4580 2499// return memhandler, or get directly accessable address and return 0
2500static void *get_direct_memhandler(void *table, u_int addr,
2501 enum stub_type type, uintptr_t *addr_host)
2502{
2503 uintptr_t l1, l2 = 0;
2504 l1 = ((uintptr_t *)table)[addr>>12];
2505 if ((l1 & (1ul << (sizeof(l1)*8-1))) == 0) {
2506 uintptr_t v = l1 << 1;
2507 *addr_host = v + addr;
2508 return NULL;
2509 }
2510 else {
2511 l1 <<= 1;
2512 if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
2513 l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
2514 else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
2515 l2=((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
2516 else
2517 l2=((uintptr_t *)l1)[(addr&0xfff)/4];
2518 if ((l2 & (1<<31)) == 0) {
2519 uintptr_t v = l2 << 1;
2520 *addr_host = v + (addr&0xfff);
2521 return NULL;
2522 }
2523 return (void *)(l2 << 1);
2524 }
2525}
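// Added note on the table layout this function consumes (as read from the
// code above): table[addr>>12] stores a value pre-shifted right by one.
// Top bit clear means (entry<<1)+addr is a directly usable host pointer and
// NULL is returned; top bit set means (entry<<1) points to a second-level
// table holding one slot per word, then per halfword, then per byte of the
// 4KB page. A second-level entry again either yields a direct host address
// or, with bit 31 set, the memhandler to call. Illustrative caller sketch
// (variable names are hypothetical):
//   uintptr_t host;
//   void *h = get_direct_memhandler(table, addr, LOADW_STUB, &host);
//   if (h == NULL) val = *(u32 *)host;   /* direct access */
//   else           /* emit a call to h */ ;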
2526
8062d65a 2527static void load_assemble(int i,struct regstat *i_regs)
57871462 2528{
7c3a5182 2529 int s,tl,addr;
57871462 2530 int offset;
b14b6a8f 2531 void *jaddr=0;
5bf843dc 2532 int memtarget=0,c=0;
d1e4ebd9 2533 int fastio_reg_override=-1;
57871462 2534 u_int hr,reglist=0;
57871462 2535 tl=get_reg(i_regs->regmap,rt1[i]);
2536 s=get_reg(i_regs->regmap,rs1[i]);
2537 offset=imm[i];
2538 for(hr=0;hr<HOST_REGS;hr++) {
2539 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2540 }
2541 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2542 if(s>=0) {
2543 c=(i_regs->wasconst>>s)&1;
af4ee1fe 2544 if (c) {
2545 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 2546 }
57871462 2547 }
57871462 2548 //printf("load_assemble: c=%d\n",c);
643aeae3 2549 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
57871462 2550 // FIXME: Even if the load is a NOP, we should check for pagefaults...
581335b0 2551 if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
f18c0f46 2552 ||rt1[i]==0) {
5bf843dc 2553 // could be FIFO, must perform the read
f18c0f46 2554 // ||dummy read
5bf843dc 2555 assem_debug("(forced read)\n");
2556 tl=get_reg(i_regs->regmap,-1);
2557 assert(tl>=0);
5bf843dc 2558 }
2559 if(offset||s<0||c) addr=tl;
2560 else addr=s;
535d208a 2561 //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2562 if(tl>=0) {
2563 //printf("load_assemble: c=%d\n",c);
643aeae3 2564 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
535d208a 2565 assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2566 reglist&=~(1<<tl);
1edfcc68 2567 if(!c) {
1edfcc68 2568 #ifdef R29_HACK
2569 // Strmnnrmn's speed hack
2570 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2571 #endif
2572 {
d1e4ebd9 2573 jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
535d208a 2574 }
1edfcc68 2575 }
2576 else if(ram_offset&&memtarget) {
d1e4ebd9 2577 host_tempreg_acquire();
1edfcc68 2578 emit_addimm(addr,ram_offset,HOST_TEMPREG);
d1e4ebd9 2579 fastio_reg_override=HOST_TEMPREG;
535d208a 2580 }
2581 int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2582 if (opcode[i]==0x20) { // LB
2583 if(!c||memtarget) {
2584 if(!dummy) {
57871462 2585 {
535d208a 2586 int x=0,a=tl;
535d208a 2587 if(!c) a=addr;
d1e4ebd9 2588 if(fastio_reg_override>=0) a=fastio_reg_override;
b1570849 2589
9c45ca93 2590 emit_movsbl_indexed(x,a,tl);
57871462 2591 }
57871462 2592 }
535d208a 2593 if(jaddr)
b14b6a8f 2594 add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
57871462 2595 }
535d208a 2596 else
2597 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2598 }
2599 if (opcode[i]==0x21) { // LH
2600 if(!c||memtarget) {
2601 if(!dummy) {
9c45ca93 2602 int x=0,a=tl;
2603 if(!c) a=addr;
d1e4ebd9 2604 if(fastio_reg_override>=0) a=fastio_reg_override;
9c45ca93 2605 emit_movswl_indexed(x,a,tl);
57871462 2606 }
535d208a 2607 if(jaddr)
b14b6a8f 2608 add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
57871462 2609 }
535d208a 2610 else
2611 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2612 }
2613 if (opcode[i]==0x23) { // LW
2614 if(!c||memtarget) {
2615 if(!dummy) {
dadf55f2 2616 int a=addr;
d1e4ebd9 2617 if(fastio_reg_override>=0) a=fastio_reg_override;
9c45ca93 2618 emit_readword_indexed(0,a,tl);
57871462 2619 }
535d208a 2620 if(jaddr)
b14b6a8f 2621 add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
57871462 2622 }
535d208a 2623 else
2624 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2625 }
2626 if (opcode[i]==0x24) { // LBU
2627 if(!c||memtarget) {
2628 if(!dummy) {
9c45ca93 2629 int x=0,a=tl;
2630 if(!c) a=addr;
d1e4ebd9 2631 if(fastio_reg_override>=0) a=fastio_reg_override;
b1570849 2632
9c45ca93 2633 emit_movzbl_indexed(x,a,tl);
57871462 2634 }
535d208a 2635 if(jaddr)
b14b6a8f 2636 add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
57871462 2637 }
535d208a 2638 else
2639 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2640 }
2641 if (opcode[i]==0x25) { // LHU
2642 if(!c||memtarget) {
2643 if(!dummy) {
9c45ca93 2644 int x=0,a=tl;
2645 if(!c) a=addr;
d1e4ebd9 2646 if(fastio_reg_override>=0) a=fastio_reg_override;
9c45ca93 2647 emit_movzwl_indexed(x,a,tl);
57871462 2648 }
535d208a 2649 if(jaddr)
b14b6a8f 2650 add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
57871462 2651 }
535d208a 2652 else
2653 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2654 }
2655 if (opcode[i]==0x27) { // LWU
7c3a5182 2656 assert(0);
535d208a 2657 }
2658 if (opcode[i]==0x37) { // LD
9c45ca93 2659 assert(0);
57871462 2660 }
535d208a 2661 }
d1e4ebd9 2662 if (fastio_reg_override == HOST_TEMPREG)
2663 host_tempreg_release();
57871462 2664}
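// Added note: the pattern above is shared by all load widths. A constant
// address known to hit RAM (c && memtarget) is accessed inline with no
// guard; a constant address outside RAM is turned into an inline_readstub
// call; otherwise the guard from emit_fastpath_cmp_jump() is emitted and
// add_stub_r() attaches the matching LOADx_STUB slow path to it.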
2665
2666#ifndef loadlr_assemble
3968e69e 2667static void loadlr_assemble(int i,struct regstat *i_regs)
57871462 2668{
3968e69e 2669 int s,tl,temp,temp2,addr;
2670 int offset;
2671 void *jaddr=0;
2672 int memtarget=0,c=0;
2673 int fastio_reg_override=-1;
2674 u_int hr,reglist=0;
2675 tl=get_reg(i_regs->regmap,rt1[i]);
2676 s=get_reg(i_regs->regmap,rs1[i]);
2677 temp=get_reg(i_regs->regmap,-1);
2678 temp2=get_reg(i_regs->regmap,FTEMP);
2679 addr=get_reg(i_regs->regmap,AGEN1+(i&1));
2680 assert(addr<0);
2681 offset=imm[i];
2682 for(hr=0;hr<HOST_REGS;hr++) {
2683 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2684 }
2685 reglist|=1<<temp;
2686 if(offset||s<0||c) addr=temp2;
2687 else addr=s;
2688 if(s>=0) {
2689 c=(i_regs->wasconst>>s)&1;
2690 if(c) {
2691 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2692 }
2693 }
2694 if(!c) {
2695 emit_shlimm(addr,3,temp);
2696 if (opcode[i]==0x22||opcode[i]==0x26) {
2697 emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR
2698 }else{
2699 emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
2700 }
2701 jaddr=emit_fastpath_cmp_jump(i,temp2,&fastio_reg_override);
2702 }
2703 else {
2704 if(ram_offset&&memtarget) {
2705 host_tempreg_acquire();
2706 emit_addimm(temp2,ram_offset,HOST_TEMPREG);
2707 fastio_reg_override=HOST_TEMPREG;
2708 }
2709 if (opcode[i]==0x22||opcode[i]==0x26) {
2710 emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
2711 }else{
2712 emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR
2713 }
2714 }
2715 if (opcode[i]==0x22||opcode[i]==0x26) { // LWL/LWR
2716 if(!c||memtarget) {
2717 int a=temp2;
2718 if(fastio_reg_override>=0) a=fastio_reg_override;
2719 emit_readword_indexed(0,a,temp2);
2720 if(fastio_reg_override==HOST_TEMPREG) host_tempreg_release();
2721 if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj[i],reglist);
2722 }
2723 else
2724 inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj[i],reglist);
2725 if(rt1[i]) {
2726 assert(tl>=0);
2727 emit_andimm(temp,24,temp);
2728 if (opcode[i]==0x22) // LWL
2729 emit_xorimm(temp,24,temp);
2730 host_tempreg_acquire();
2731 emit_movimm(-1,HOST_TEMPREG);
2732 if (opcode[i]==0x26) {
2733 emit_shr(temp2,temp,temp2);
2734 emit_bic_lsr(tl,HOST_TEMPREG,temp,tl);
2735 }else{
2736 emit_shl(temp2,temp,temp2);
2737 emit_bic_lsl(tl,HOST_TEMPREG,temp,tl);
2738 }
2739 host_tempreg_release();
2740 emit_or(temp2,tl,tl);
2741 }
2742 //emit_storereg(rt1[i],tl); // DEBUG
2743 }
2744 if (opcode[i]==0x1A||opcode[i]==0x1B) { // LDL/LDR
2745 assert(0);
2746 }
57871462 2747}
2748#endif
2749
2750void store_assemble(int i,struct regstat *i_regs)
2751{
9c45ca93 2752 int s,tl;
57871462 2753 int addr,temp;
2754 int offset;
b14b6a8f 2755 void *jaddr=0;
2756 enum stub_type type;
666a299d 2757 int memtarget=0,c=0;
57871462 2758 int agr=AGEN1+(i&1);
d1e4ebd9 2759 int fastio_reg_override=-1;
57871462 2760 u_int hr,reglist=0;
57871462 2761 tl=get_reg(i_regs->regmap,rs2[i]);
2762 s=get_reg(i_regs->regmap,rs1[i]);
2763 temp=get_reg(i_regs->regmap,agr);
2764 if(temp<0) temp=get_reg(i_regs->regmap,-1);
2765 offset=imm[i];
2766 if(s>=0) {
2767 c=(i_regs->wasconst>>s)&1;
af4ee1fe 2768 if(c) {
2769 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 2770 }
57871462 2771 }
2772 assert(tl>=0);
2773 assert(temp>=0);
2774 for(hr=0;hr<HOST_REGS;hr++) {
2775 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2776 }
2777 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2778 if(offset||s<0||c) addr=temp;
2779 else addr=s;
1edfcc68 2780 if(!c) {
d1e4ebd9 2781 jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
1edfcc68 2782 }
2783 else if(ram_offset&&memtarget) {
d1e4ebd9 2784 host_tempreg_acquire();
1edfcc68 2785 emit_addimm(addr,ram_offset,HOST_TEMPREG);
d1e4ebd9 2786 fastio_reg_override=HOST_TEMPREG;
57871462 2787 }
2788
2789 if (opcode[i]==0x28) { // SB
2790 if(!c||memtarget) {
97a238a6 2791 int x=0,a=temp;
97a238a6 2792 if(!c) a=addr;
d1e4ebd9 2793 if(fastio_reg_override>=0) a=fastio_reg_override;
9c45ca93 2794 emit_writebyte_indexed(tl,x,a);
57871462 2795 }
2796 type=STOREB_STUB;
2797 }
2798 if (opcode[i]==0x29) { // SH
2799 if(!c||memtarget) {
97a238a6 2800 int x=0,a=temp;
97a238a6 2801 if(!c) a=addr;
d1e4ebd9 2802 if(fastio_reg_override>=0) a=fastio_reg_override;
9c45ca93 2803 emit_writehword_indexed(tl,x,a);
57871462 2804 }
2805 type=STOREH_STUB;
2806 }
2807 if (opcode[i]==0x2B) { // SW
dadf55f2 2808 if(!c||memtarget) {
2809 int a=addr;
d1e4ebd9 2810 if(fastio_reg_override>=0) a=fastio_reg_override;
9c45ca93 2811 emit_writeword_indexed(tl,0,a);
dadf55f2 2812 }
57871462 2813 type=STOREW_STUB;
2814 }
2815 if (opcode[i]==0x3F) { // SD
9c45ca93 2816 assert(0);
57871462 2817 type=STORED_STUB;
2818 }
d1e4ebd9 2819 if(fastio_reg_override==HOST_TEMPREG)
2820 host_tempreg_release();
b96d3df7 2821 if(jaddr) {
2822 // PCSX store handlers don't check invcode again
2823 reglist|=1<<addr;
b14b6a8f 2824 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
b96d3df7 2825 jaddr=0;
2826 }
1edfcc68 2827 if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
57871462 2828 if(!c||memtarget) {
2829 #ifdef DESTRUCTIVE_SHIFT
2830 // The x86 shift operation is 'destructive'; it overwrites the
2831 // source register, so we need to make a copy first and use that.
2832 addr=temp;
2833 #endif
2834 #if defined(HOST_IMM8)
2835 int ir=get_reg(i_regs->regmap,INVCP);
2836 assert(ir>=0);
2837 emit_cmpmem_indexedsr12_reg(ir,addr,1);
2838 #else
643aeae3 2839 emit_cmpmem_indexedsr12_imm(invalid_code,addr,1);
57871462 2840 #endif
0bbd1454 2841 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2842 emit_callne(invalidate_addr_reg[addr]);
2843 #else
b14b6a8f 2844 void *jaddr2 = out;
57871462 2845 emit_jne(0);
b14b6a8f 2846 add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),addr,0,0,0);
0bbd1454 2847 #endif
57871462 2848 }
2849 }
7a518516 2850 u_int addr_val=constmap[i][s]+offset;
3eaa7048 2851 if(jaddr) {
b14b6a8f 2852 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
3eaa7048 2853 } else if(c&&!memtarget) {
7a518516 2854 inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
2855 }
2856 // basic current block modification detection..
2857 // not looking back as that should be in mips cache already
3968e69e 2858 // (see Spyro2 title->attract mode)
7a518516 2859 if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
c43b5311 2860 SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
7a518516 2861 assert(i_regs->regmap==regs[i].regmap); // not delay slot
2862 if(i_regs->regmap==regs[i].regmap) {
ad49de89 2863 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
2864 wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty);
7a518516 2865 emit_movimm(start+i*4+4,0);
643aeae3 2866 emit_writeword(0,&pcaddr);
d1e4ebd9 2867 emit_addimm(HOST_CCREG,2,HOST_CCREG);
2868 emit_call(get_addr_ht);
2869 emit_jmpreg(0);
7a518516 2870 }
3eaa7048 2871 }
57871462 2872}
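// Added note: the two checks at the end of store_assemble() guard against
// self-modifying code. The INVCP/invalid_code compare branches to an
// INVCODE_STUB (or calls invalidate_addr_reg directly where conditional
// calls are available) when the written page may contain translated code,
// and the constant-address case additionally detects a store into the block
// currently being compiled and forces a return through get_addr_ht.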
2873
3968e69e 2874static void storelr_assemble(int i,struct regstat *i_regs)
57871462 2875{
9c45ca93 2876 int s,tl;
57871462 2877 int temp;
57871462 2878 int offset;
b14b6a8f 2879 void *jaddr=0;
df4dc2b1 2880 void *case1, *case2, *case3;
2881 void *done0, *done1, *done2;
af4ee1fe 2882 int memtarget=0,c=0;
fab5d06d 2883 int agr=AGEN1+(i&1);
57871462 2884 u_int hr,reglist=0;
57871462 2885 tl=get_reg(i_regs->regmap,rs2[i]);
2886 s=get_reg(i_regs->regmap,rs1[i]);
fab5d06d 2887 temp=get_reg(i_regs->regmap,agr);
2888 if(temp<0) temp=get_reg(i_regs->regmap,-1);
57871462 2889 offset=imm[i];
2890 if(s>=0) {
2891 c=(i_regs->isconst>>s)&1;
af4ee1fe 2892 if(c) {
2893 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 2894 }
57871462 2895 }
2896 assert(tl>=0);
2897 for(hr=0;hr<HOST_REGS;hr++) {
2898 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2899 }
535d208a 2900 assert(temp>=0);
1edfcc68 2901 if(!c) {
2902 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
2903 if(!offset&&s!=temp) emit_mov(s,temp);
b14b6a8f 2904 jaddr=out;
1edfcc68 2905 emit_jno(0);
2906 }
2907 else
2908 {
2909 if(!memtarget||!rs1[i]) {
b14b6a8f 2910 jaddr=out;
535d208a 2911 emit_jmp(0);
57871462 2912 }
535d208a 2913 }
3968e69e 2914 if(ram_offset)
2915 emit_addimm_no_flags(ram_offset,temp);
535d208a 2916
2917 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
9c45ca93 2918 assert(0);
535d208a 2919 }
57871462 2920
9c45ca93 2921 emit_xorimm(temp,3,temp);
535d208a 2922 emit_testimm(temp,2);
df4dc2b1 2923 case2=out;
535d208a 2924 emit_jne(0);
2925 emit_testimm(temp,1);
df4dc2b1 2926 case1=out;
535d208a 2927 emit_jne(0);
2928 // 0
2929 if (opcode[i]==0x2A) { // SWL
2930 emit_writeword_indexed(tl,0,temp);
2931 }
3968e69e 2932 else if (opcode[i]==0x2E) { // SWR
535d208a 2933 emit_writebyte_indexed(tl,3,temp);
2934 }
3968e69e 2935 else
9c45ca93 2936 assert(0);
df4dc2b1 2937 done0=out;
535d208a 2938 emit_jmp(0);
2939 // 1
df4dc2b1 2940 set_jump_target(case1, out);
535d208a 2941 if (opcode[i]==0x2A) { // SWL
2942 // Write 3 msb into three least significant bytes
2943 if(rs2[i]) emit_rorimm(tl,8,tl);
2944 emit_writehword_indexed(tl,-1,temp);
2945 if(rs2[i]) emit_rorimm(tl,16,tl);
2946 emit_writebyte_indexed(tl,1,temp);
2947 if(rs2[i]) emit_rorimm(tl,8,tl);
2948 }
3968e69e 2949 else if (opcode[i]==0x2E) { // SWR
535d208a 2950 // Write two lsb into two most significant bytes
2951 emit_writehword_indexed(tl,1,temp);
2952 }
df4dc2b1 2953 done1=out;
535d208a 2954 emit_jmp(0);
2955 // 2
df4dc2b1 2956 set_jump_target(case2, out);
535d208a 2957 emit_testimm(temp,1);
df4dc2b1 2958 case3=out;
535d208a 2959 emit_jne(0);
2960 if (opcode[i]==0x2A) { // SWL
2961 // Write two msb into two least significant bytes
2962 if(rs2[i]) emit_rorimm(tl,16,tl);
2963 emit_writehword_indexed(tl,-2,temp);
2964 if(rs2[i]) emit_rorimm(tl,16,tl);
2965 }
3968e69e 2966 else if (opcode[i]==0x2E) { // SWR
535d208a 2967 // Write 3 lsb into three most significant bytes
2968 emit_writebyte_indexed(tl,-1,temp);
2969 if(rs2[i]) emit_rorimm(tl,8,tl);
2970 emit_writehword_indexed(tl,0,temp);
2971 if(rs2[i]) emit_rorimm(tl,24,tl);
2972 }
df4dc2b1 2973 done2=out;
535d208a 2974 emit_jmp(0);
2975 // 3
df4dc2b1 2976 set_jump_target(case3, out);
535d208a 2977 if (opcode[i]==0x2A) { // SWL
2978 // Write msb into least significant byte
2979 if(rs2[i]) emit_rorimm(tl,24,tl);
2980 emit_writebyte_indexed(tl,-3,temp);
2981 if(rs2[i]) emit_rorimm(tl,8,tl);
2982 }
3968e69e 2983 else if (opcode[i]==0x2E) { // SWR
535d208a 2984 // Write entire word
2985 emit_writeword_indexed(tl,-3,temp);
2986 }
df4dc2b1 2987 set_jump_target(done0, out);
2988 set_jump_target(done1, out);
2989 set_jump_target(done2, out);
535d208a 2990 if(!c||!memtarget)
b14b6a8f 2991 add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist);
1edfcc68 2992 if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
9c45ca93 2993 emit_addimm_no_flags(-ram_offset,temp);
57871462 2994 #if defined(HOST_IMM8)
2995 int ir=get_reg(i_regs->regmap,INVCP);
2996 assert(ir>=0);
2997 emit_cmpmem_indexedsr12_reg(ir,temp,1);
2998 #else
643aeae3 2999 emit_cmpmem_indexedsr12_imm(invalid_code,temp,1);
57871462 3000 #endif
535d208a 3001 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3002 emit_callne(invalidate_addr_reg[temp]);
3003 #else
b14b6a8f 3004 void *jaddr2 = out;
57871462 3005 emit_jne(0);
b14b6a8f 3006 add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),temp,0,0,0);
535d208a 3007 #endif
57871462 3008 }
57871462 3009}
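// Added note on the dispatch above: temp is XORed with 3, so the four byte
// offsets map to the labelled cases as
//   addr&3 == 3 -> fall-through block "0"  (SWL stores the whole word)
//   addr&3 == 2 -> case1                   (SWL stores the top 3 bytes)
//   addr&3 == 1 -> case2                   (SWL stores the top 2 bytes)
//   addr&3 == 0 -> case3                   (SWL stores only the MSB)
// SWR is the complement, storing 1, 2, 3 or 4 low bytes respectively.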
3010
8062d65a 3011static void cop0_assemble(int i,struct regstat *i_regs)
3012{
3013 if(opcode2[i]==0) // MFC0
3014 {
3015 signed char t=get_reg(i_regs->regmap,rt1[i]);
3016 u_int copr=(source[i]>>11)&0x1f;
3017 //assert(t>=0); // Why does this happen? OOT is weird
3018 if(t>=0&&rt1[i]!=0) {
3019 emit_readword(&reg_cop0[copr],t);
3020 }
3021 }
3022 else if(opcode2[i]==4) // MTC0
3023 {
3024 signed char s=get_reg(i_regs->regmap,rs1[i]);
3025 char copr=(source[i]>>11)&0x1f;
3026 assert(s>=0);
3027 wb_register(rs1[i],i_regs->regmap,i_regs->dirty);
3028 if(copr==9||copr==11||copr==12||copr==13) {
3029 emit_readword(&last_count,HOST_TEMPREG);
3030 emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
3031 emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3032 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3033 emit_writeword(HOST_CCREG,&Count);
3034 }
3035 // What a mess. The status register (12) can enable interrupts,
3036 // so needs a special case to handle a pending interrupt.
3037 // The interrupt must be taken immediately, because a subsequent
3038 // instruction might disable interrupts again.
3039 if(copr==12||copr==13) {
3040 if (is_delayslot) {
3041 // burn cycles to cause cc_interrupt, which will
3042 // reschedule next_interupt. Relies on CCREG from above.
3043 assem_debug("MTC0 DS %d\n", copr);
3044 emit_writeword(HOST_CCREG,&last_count);
3045 emit_movimm(0,HOST_CCREG);
3046 emit_storereg(CCREG,HOST_CCREG);
3047 emit_loadreg(rs1[i],1);
3048 emit_movimm(copr,0);
3049 emit_call(pcsx_mtc0_ds);
3050 emit_loadreg(rs1[i],s);
3051 return;
3052 }
3053 emit_movimm(start+i*4+4,HOST_TEMPREG);
3054 emit_writeword(HOST_TEMPREG,&pcaddr);
3055 emit_movimm(0,HOST_TEMPREG);
3056 emit_writeword(HOST_TEMPREG,&pending_exception);
3057 }
3058 //else if(copr==12&&is_delayslot) emit_call((int)MTC0_R12);
3059 //else
3060 if(s==HOST_CCREG)
3061 emit_loadreg(rs1[i],1);
3062 else if(s!=1)
3063 emit_mov(s,1);
3064 emit_movimm(copr,0);
3065 emit_call(pcsx_mtc0);