/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *   Mupen64plus - new_dynarec.c                                           *
 *   Copyright (C) 2009-2011 Ari64                                         *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdint.h> // for uint64_t
#include <libkern/OSCacheControl.h>
#include <3ds_utils.h>
#include <psp2/kernel/sysmem.h>
#include "new_dynarec_config.h"
#include "../psxhle.h"
#include "../psxinterpreter.h"
#include "emu_if.h" // emulator interface
#define noinline __attribute__((noinline,noclone))
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#define min(a, b) ((b) < (a) ? (b) : (a))
//#define assem_debug printf
//#define inv_debug printf
#define assem_debug(...)
#define inv_debug(...)
#include "assem_x86.h"
#include "assem_x64.h"
#include "assem_arm.h"
#include "assem_arm64.h"
#define RAM_SIZE 0x200000
#define MAX_OUTPUT_BLOCK_SIZE 262144
u_char translation_cache[1 << TARGET_SIZE_2];
struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
const void *f[2048 / sizeof(void *)];
#ifdef BASE_ADDR_DYNAMIC
static struct ndrc_mem *ndrc;
static struct ndrc_mem ndrc_ __attribute__((aligned(4096)));
static struct ndrc_mem *ndrc = &ndrc_;
signed char regmap_entry[HOST_REGS];
signed char regmap[HOST_REGS];
u_int loadedconst; // host regs that have constants loaded
u_int waswritten; // MIPS regs that were used as store base before
// note: asm depends on this layout
struct ll_entry *next;
struct ht_entry hash_table[65536] __attribute__((aligned(16)));
struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
struct ll_entry *jump_dirty[4096];
static struct ll_entry *jump_out[4096];
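/* The three page-indexed lists above work together (roughly):
 * jump_in[]    holds entry points of clean, verified blocks;
 * jump_dirty[] holds blocks whose MIPS source may have been overwritten,
 *              so they must pass verify_dirty() before being reused;
 * jump_out[]   records branches that leave a page, so they can be patched
 *              back to the dynamic linker when that page is invalidated. */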
static u_int *source;
static char insn[MAXBLOCK][10];
static u_char itype[MAXBLOCK];
static u_char opcode[MAXBLOCK];
static u_char opcode2[MAXBLOCK];
static u_char bt[MAXBLOCK];
static u_char rs1[MAXBLOCK];
static u_char rs2[MAXBLOCK];
static u_char rt1[MAXBLOCK];
static u_char rt2[MAXBLOCK];
static u_char dep1[MAXBLOCK];
static u_char dep2[MAXBLOCK];
static u_char lt1[MAXBLOCK];
static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
static uint64_t gte_rt[MAXBLOCK];
static uint64_t gte_unneeded[MAXBLOCK];
static u_int smrv[32]; // speculated MIPS register values
static u_int smrv_strong; // mask of regs that are likely to have correct values
static u_int smrv_weak; // same, but somewhat less likely
static u_int smrv_strong_next; // same, but after current insn executes
static u_int smrv_weak_next;
static int imm[MAXBLOCK];
static u_int ba[MAXBLOCK];
static char likely[MAXBLOCK];
static char is_ds[MAXBLOCK];
static char ooo[MAXBLOCK];
static uint64_t unneeded_reg[MAXBLOCK];
static uint64_t branch_unneeded_reg[MAXBLOCK];
static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i?
// contains 'real' consts at [i] insn, but may differ from what's actually
// loaded in host reg as 'final' value is always loaded, see get_final_value()
static uint32_t current_constmap[HOST_REGS];
static uint32_t constmap[MAXBLOCK][HOST_REGS];
static struct regstat regs[MAXBLOCK];
static struct regstat branch_regs[MAXBLOCK];
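// minimum number of host registers the allocator must leave unmapped at
// instruction i, for temporaries the assembler may still need to grab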
static signed char minimum_free_regs[MAXBLOCK];
static u_int needed_reg[MAXBLOCK];
static u_int wont_dirty[MAXBLOCK];
static u_int will_dirty[MAXBLOCK];
static int ccadj[MAXBLOCK];
static void *instr_addr[MAXBLOCK];
static struct link_entry link_addr[MAXBLOCK];
static int linkcount;
static struct code_stub stubs[MAXBLOCK*3];
static int stubcount;
static u_int literals[1024][2];
static int literalcount;
static int is_delayslot;
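// holds a copy of the MIPS source of compiled blocks; verify_dirty()
// appears to compare against this to detect self-modifying code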
static char shadow[1048576] __attribute__((aligned(16)));
static u_int stop_after_jal;
static uintptr_t ram_offset;
static const uintptr_t ram_offset=0;
int new_dynarec_hacks;
int new_dynarec_hacks_pergame;
int new_dynarec_did_compile;
#define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x))
extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
extern int last_count; // last absolute target, often = next_interupt
extern int pending_exception;
extern int branch_target;
extern uintptr_t mini_ht[32][2];
extern u_char restore_candidate[512];
/* registers that may be allocated */
#define LOREG 32 // lo
#define HIREG 33 // hi
//#define FSREG 34 // FPU status (FCSR)
#define CSREG 35 // Coprocessor status
#define CCREG 36 // Cycle count
#define INVCP 37 // Pointer to invalid_code
//#define MMREG 38 // Pointer to memory_map
//#define ROREG 39 // ram offset (if rdram!=0x80000000)
#define FTEMP 40 // FPU temporary register
#define PTEMP 41 // Prefetch temporary register
//#define TLREG 42 // TLB mapping offset
#define RHASH 43 // Return address hash
#define RHTBL 44 // Return address hash table address
#define RTEMP 45 // JR/JALR address register
#define AGEN1 46 // Address generation temporary register
//#define AGEN2 47 // Address generation temporary register
//#define MGEN1 48 // Maptable address generation temporary register
//#define MGEN2 49 // Maptable address generation temporary register
#define BTREG 50 // Branch target temporary register
/* instruction types */
#define NOP 0 // No operation
#define LOAD 1 // Load
#define STORE 2 // Store
#define LOADLR 3 // Unaligned load
#define STORELR 4 // Unaligned store
#define MOV 5 // Move
#define ALU 6 // Arithmetic/logic
#define MULTDIV 7 // Multiply/divide
#define SHIFT 8 // Shift by register
#define SHIFTIMM 9 // Shift by immediate
#define IMM16 10 // 16-bit immediate
#define RJUMP 11 // Unconditional jump to register
#define UJUMP 12 // Unconditional jump
#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
#define SJUMP 14 // Conditional branch (regimm format)
#define COP0 15 // Coprocessor 0
#define COP1 16 // Coprocessor 1
#define C1LS 17 // Coprocessor 1 load/store
//#define FJUMP 18 // Conditional branch (floating point)
//#define FLOAT 19 // Floating point unit
//#define FCONV 20 // Convert integer to float
//#define FCOMP 21 // Floating point compare (sets FSREG)
#define SYSCALL 22 // SYSCALL
#define OTHER 23 // Other
#define SPAN 24 // Branch/delay slot spans 2 pages
#define NI 25 // Not implemented
#define HLECALL 26 // PCSX fake opcodes for HLE
#define COP2 27 // Coprocessor 2 move
#define C2LS 28 // Coprocessor 2 load/store
#define C2OP 29 // Coprocessor 2 operation
#define INTCALL 30 // Call interpreter to handle rare corner cases
#define DJT_1 (void *)1l // no function, just a label in assem_debug log
#define DJT_2 (void *)2l
int new_recompile_block(u_int addr);
void *get_addr_ht(u_int vaddr);
void invalidate_block(u_int block);
void invalidate_addr(u_int addr);
void remove_hash(int vaddr);
void dyna_linker_ds();
void verify_code_ds();
void fp_exception_ds();
void jump_to_new_pc();
void call_gteStall();
void new_dyna_leave();
// Needed by assembler
static void wb_register(signed char r,signed char regmap[],uint64_t dirty);
static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty);
static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr);
static void load_all_regs(signed char i_regmap[]);
static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
static void load_regs_entry(int t);
static void load_all_consts(signed char regmap[],u_int dirty,int i);
static u_int get_host_reglist(const signed char *regmap);
static int verify_dirty(const u_int *ptr);
static int get_final_value(int hr, int i, int *value);
static void add_stub(enum stub_type type, void *addr, void *retaddr,
  u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
  int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
static void add_to_linker(void *addr, u_int target, int ext);
static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
static void *get_direct_memhandler(void *table, u_int addr,
  enum stub_type type, uintptr_t *addr_host);
static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
static void pass_args(int a0, int a1);
static void emit_far_jump(const void *f);
static void emit_far_call(const void *f);
static void mprotect_w_x(void *start, void *end, int is_x)
// *Open* enables write on all memory that was
// allocated by sceKernelAllocMemBlockForVM()?
sceKernelCloseVMDomain();
sceKernelOpenVMDomain();
u_long mstart = (u_long)start & ~4095ul;
u_long mend = (u_long)end;
if (mprotect((void *)mstart, mend - mstart,
    PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
  SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
static void start_tcache_write(void *start, void *end)
mprotect_w_x(start, end, 0);
static void end_tcache_write(void *start, void *end)
#if defined(__arm__) || defined(__aarch64__)
size_t len = (char *)end - (char *)start;
#if defined(__BLACKBERRY_QNX__)
msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
#elif defined(__MACH__)
sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
sceKernelSyncVMDomain(sceBlock, start, len);
ctr_flush_invalidate_cache();
#elif defined(__aarch64__)
// as of 2021, __clear_cache() is still broken on arm64
// so here is a custom one :(
clear_cache_arm64(start, end);
__clear_cache(start, end);
mprotect_w_x(start, end, 1);
static void *start_block(void)
u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
if (end > ndrc->translation_cache + sizeof(ndrc->translation_cache))
  end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
start_tcache_write(out, end);
static void end_block(void *start)
end_tcache_write(start, out);
// also takes care of w^x mappings when patching code
static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
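// One bit per 4K page of translation cache, 32 pages (128K) per array
// entry: bit (offset>>12)&31 of needs_clear_cache[offset>>17] marks a
// page whose instruction cache must be flushed before reuse.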
static void mark_clear_cache(void *target)
uintptr_t offset = (u_char *)target - ndrc->translation_cache;
u_int mask = 1u << ((offset >> 12) & 31);
if (!(needs_clear_cache[offset >> 17] & mask)) {
  char *start = (char *)((uintptr_t)target & ~4095l);
  start_tcache_write(start, start + 4095);
  needs_clear_cache[offset >> 17] |= mask;
// Clearing the cache is rather slow on ARM Linux, so mark the areas
// that need to be cleared, and then only clear these areas once.
static void do_clear_cache(void)
for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
u_int bitmap = needs_clear_cache[i];
for (j = 0; j < 32; j++)
if (!(bitmap & (1<<j)))
start = ndrc->translation_cache + i*131072 + j*4096;
for (j++; j < 32; j++) {
  if (!(bitmap & (1<<j)))
end_tcache_write(start, end);
needs_clear_cache[i] = 0;
//#define DEBUG_CYCLE_COUNT 1
#define NO_CYCLE_PENALTY_THR 12
int cycle_multiplier; // 100 for 1.0
int cycle_multiplier_override;
static int CLOCK_ADJUST(int x)
int m = cycle_multiplier_override
  ? cycle_multiplier_override : cycle_multiplier;
int s = (x >> 31) | 1; // sign of x, for round-to-nearest
return (x * m + s * 50) / 100;
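// Worked example: with cycle_multiplier==175 (1.75x) and no override,
// CLOCK_ADJUST(2) = (2*175 + 50)/100 = 4; the s*50 term rounds the
// scaled count to the nearest cycle, in the direction of x's sign.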
// is the op an unconditional jump?
static int is_ujump(int i)
return itype[i] == UJUMP || itype[i] == RJUMP
  || (source[i] >> 16) == 0x1000; // beq r0, r0, offset // b offset
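// Note: (source[i]>>16)==0x1000 is the encoding of "beq r0,r0,offset"
// (opcode 4, rs=rt=0), a compare that is always true - effectively an
// unconditional "b offset".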
static int is_jump(int i)
return itype[i] == RJUMP || itype[i] == UJUMP || itype[i] == CJUMP || itype[i] == SJUMP;
static u_int get_page(u_int vaddr)
u_int page=vaddr&~0xe0000000;
if (page < 0x1000000)
  page &= ~0x0e00000; // RAM mirrors
page>>=12;
if(page>2048) page=2048+(page&2047);
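// Roughly: strip the KSEG0/KSEG1 segment bits, fold the RAM mirrors
// down to the 2MB of real RAM, and index by 4K page, so RAM lands in
// pages 0-511 and all other addresses are folded into pages 2048-4095.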
// no virtual mem in PCSX
static u_int get_vpage(u_int vaddr)
return get_page(vaddr);
static struct ht_entry *hash_table_get(u_int vaddr)
return &hash_table[((vaddr>>16)^vaddr)&0xFFFF];
static void hash_table_add(struct ht_entry *ht_bin, u_int vaddr, void *tcaddr)
ht_bin->vaddr[1] = ht_bin->vaddr[0];
ht_bin->tcaddr[1] = ht_bin->tcaddr[0];
ht_bin->vaddr[0] = vaddr;
ht_bin->tcaddr[0] = tcaddr;
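// Each hash bin is a tiny 2-entry MRU cache: adding shifts the old
// entry into slot 1 and puts the new one in slot 0, so lookups probe
// the most recently added translation first.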
// some of ari64's messier code; it seems to rely on unsigned 32-bit overflow
static int doesnt_expire_soon(void *tcaddr)
u_int diff = (u_int)((u_char *)tcaddr - out) << (32-TARGET_SIZE_2);
return diff > (u_int)(0x60000000 + (MAX_OUTPUT_BLOCK_SIZE << (32-TARGET_SIZE_2)));
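// The shift scales a translation-cache offset to the full 32-bit range
// so the unsigned subtraction wraps around; the result stays "large"
// only while tcaddr is comfortably ahead of the output pointer, i.e.
// the block won't be overwritten soon as `out` advances.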
// Get address from virtual address
// This is called from the recompiled JR/JALR instructions
void noinline *get_addr(u_int vaddr)
u_int page=get_page(vaddr);
u_int vpage=get_vpage(vaddr);
struct ll_entry *head;
//printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
if(head->vaddr==vaddr) {
//printf("TRACE: count=%d next=%d (get_addr match %x: %p)\n",Count,next_interupt,vaddr,head->addr);
hash_table_add(hash_table_get(vaddr), vaddr, head->addr);
head=jump_dirty[vpage];
if(head->vaddr==vaddr) {
//printf("TRACE: count=%d next=%d (get_addr match dirty %x: %p)\n",Count,next_interupt,vaddr,head->addr);
// Don't restore blocks which are about to expire from the cache
if (doesnt_expire_soon(head->addr))
if (verify_dirty(head->addr)) {
//printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
invalid_code[vaddr>>12]=0;
inv_code_start=inv_code_end=~0;
restore_candidate[vpage>>3]|=1<<(vpage&7);
else restore_candidate[page>>3]|=1<<(page&7);
struct ht_entry *ht_bin = hash_table_get(vaddr);
if (ht_bin->vaddr[0] == vaddr)
  ht_bin->tcaddr[0] = head->addr; // Replace existing entry
hash_table_add(ht_bin, vaddr, head->addr);
//printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
int r=new_recompile_block(vaddr);
if(r==0) return get_addr(vaddr);
// Execute in unmapped page, generate page fault exception
Cause=(vaddr<<31)|0x8;
EPC=(vaddr&1)?vaddr-5:vaddr;
Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
EntryHi=BadVAddr&0xFFFFE000;
return get_addr_ht(0x80000000);
// Look up address in hash table first
void *get_addr_ht(u_int vaddr)
//printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
const struct ht_entry *ht_bin = hash_table_get(vaddr);
if (ht_bin->vaddr[0] == vaddr) return ht_bin->tcaddr[0];
if (ht_bin->vaddr[1] == vaddr) return ht_bin->tcaddr[1];
return get_addr(vaddr);
void clear_all_regs(signed char regmap[])
for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
static signed char get_reg(const signed char regmap[],int r)
for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
// Find a register that is available for two consecutive cycles
static signed char get_reg2(signed char regmap1[], const signed char regmap2[], int r)
for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
int count_free_regs(signed char regmap[])
for(hr=0;hr<HOST_REGS;hr++)
if(hr!=EXCLUDE_REG) {
if(regmap[hr]<0) count++;
void dirty_reg(struct regstat *cur,signed char reg)
for (hr=0;hr<HOST_REGS;hr++) {
if((cur->regmap[hr]&63)==reg) {
static void set_const(struct regstat *cur, signed char reg, uint32_t value)
for (hr=0;hr<HOST_REGS;hr++) {
if(cur->regmap[hr]==reg) {
current_constmap[hr]=value;
static void clear_const(struct regstat *cur, signed char reg)
for (hr=0;hr<HOST_REGS;hr++) {
if((cur->regmap[hr]&63)==reg) {
cur->isconst&=~(1<<hr);
static int is_const(struct regstat *cur, signed char reg)
for (hr=0;hr<HOST_REGS;hr++) {
if((cur->regmap[hr]&63)==reg) {
return (cur->isconst>>hr)&1;
static uint32_t get_const(struct regstat *cur, signed char reg)
for (hr=0;hr<HOST_REGS;hr++) {
if(cur->regmap[hr]==reg) {
return current_constmap[hr];
SysPrintf("Unknown constant in r%d\n",reg);
// Least soon needed registers
// Look at the next ten instructions and see which registers
// will be used. Try not to reallocate these.
void lsn(u_char hsn[], int i, int *preferred_reg)
// Don't go past an unconditional jump
if(rs1[i+j]) hsn[rs1[i+j]]=j;
if(rs2[i+j]) hsn[rs2[i+j]]=j;
if(rt1[i+j]) hsn[rt1[i+j]]=j;
if(rt2[i+j]) hsn[rt2[i+j]]=j;
if(itype[i+j]==STORE || itype[i+j]==STORELR) {
// Stores can allocate zero
// On some architectures stores need invc_ptr
#if defined(HOST_IMM8)
if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
// Follow first branch
int t=(ba[i+b]-start)>>2;
j=7-b;if(t+j>=slen) j=slen-t-1;
if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
//if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
//if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
// TODO: preferred register based on backward branch
// Delay slot should preferably not overwrite branch conditions or cycle count
if (i > 0 && is_jump(i-1)) {
if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
// Coprocessor load/store needs FTEMP, even if not declared
if(itype[i]==C1LS||itype[i]==C2LS) {
// Load L/R also uses FTEMP as a temporary register
if(itype[i]==LOADLR) {
// Also SWL/SWR/SDL/SDR
if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
// Don't remove the miniht registers
if(itype[i]==UJUMP||itype[i]==RJUMP)
// We only want to allocate registers if we're going to use them again soon
int needed_again(int r, int i)
if (i > 0 && is_ujump(i-1))
if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
return 0; // Don't need any registers if exiting the block
// Don't go past an unconditional jump
if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
if(rs1[i+j]==r) rn=j;
if(rs2[i+j]==r) rn=j;
if((unneeded_reg[i+j]>>r)&1) rn=10;
if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
// Follow first branch
int t=(ba[i+b]-start)>>2;
j=7-b;if(t+j>=slen) j=slen-t-1;
if(!((unneeded_reg[t+j]>>r)&1)) {
if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
// Try to match register allocations at the end of a loop with those
int loop_reg(int i, int r, int hr)
// Don't go past an unconditional jump
if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)
if((unneeded_reg[i+k]>>r)&1) return hr;
if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP))
if(ba[i+k]>=start && ba[i+k]<(start+i*4))
int t=(ba[i+k]-start)>>2;
int reg=get_reg(regs[t].regmap_entry,r);
if(reg>=0) return reg;
//reg=get_reg(regs[t+1].regmap_entry,r);
//if(reg>=0) return reg;
// Allocate every register, preserving source/target regs
void alloc_all(struct regstat *cur,int i)
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG) {
if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
  ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
cur->dirty&=~(1<<hr);
if((cur->regmap[hr]&63)==0)
cur->dirty&=~(1<<hr);
static int host_tempreg_in_use;
static void host_tempreg_acquire(void)
assert(!host_tempreg_in_use);
host_tempreg_in_use = 1;
static void host_tempreg_release(void)
host_tempreg_in_use = 0;
static void host_tempreg_acquire(void) {}
static void host_tempreg_release(void) {}
extern void gen_interupt();
extern void do_insn_cmp();
#define FUNCNAME(f) { f, " " #f }
static const struct {
} function_names[] = {
FUNCNAME(cc_interrupt),
FUNCNAME(gen_interupt),
FUNCNAME(get_addr_ht),
FUNCNAME(jump_handler_read8),
FUNCNAME(jump_handler_read16),
FUNCNAME(jump_handler_read32),
FUNCNAME(jump_handler_write8),
FUNCNAME(jump_handler_write16),
FUNCNAME(jump_handler_write32),
FUNCNAME(invalidate_addr),
FUNCNAME(jump_to_new_pc),
FUNCNAME(call_gteStall),
FUNCNAME(new_dyna_leave),
FUNCNAME(pcsx_mtc0_ds),
FUNCNAME(do_insn_cmp),
FUNCNAME(verify_code),
static const char *func_name(const void *a)
for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++)
if (function_names[i].addr == a)
return function_names[i].name;
#define func_name(x) ""
#include "assem_x86.c"
#include "assem_x64.c"
#include "assem_arm.c"
#include "assem_arm64.c"
static void *get_trampoline(const void *f)
for (i = 0; i < ARRAY_SIZE(ndrc->tramp.f); i++) {
if (ndrc->tramp.f[i] == f || ndrc->tramp.f[i] == NULL)
if (i == ARRAY_SIZE(ndrc->tramp.f)) {
SysPrintf("trampoline table is full, last func %p\n", f);
if (ndrc->tramp.f[i] == NULL) {
start_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
ndrc->tramp.f[i] = f;
end_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
return &ndrc->tramp.ops[i];
static void emit_far_jump(const void *f)
if (can_jump_or_call(f)) {
f = get_trampoline(f);
static void emit_far_call(const void *f)
if (can_jump_or_call(f)) {
f = get_trampoline(f);
// Add virtual address mapping to linked list
void ll_add(struct ll_entry **head,int vaddr,void *addr)
struct ll_entry *new_entry;
new_entry=malloc(sizeof(struct ll_entry));
assert(new_entry!=NULL);
new_entry->vaddr=vaddr;
new_entry->reg_sv_flags=0;
new_entry->addr=addr;
new_entry->next=*head;
void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
ll_add(head,vaddr,addr);
(*head)->reg_sv_flags=reg_sv_flags;
// Check if an address is already compiled
// but don't return addresses which are about to expire from the cache
void *check_addr(u_int vaddr)
struct ht_entry *ht_bin = hash_table_get(vaddr);
for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
if (ht_bin->vaddr[i] == vaddr)
if (doesnt_expire_soon((u_char *)ht_bin->tcaddr[i] - MAX_OUTPUT_BLOCK_SIZE))
if (isclean(ht_bin->tcaddr[i]))
return ht_bin->tcaddr[i];
u_int page=get_page(vaddr);
struct ll_entry *head;
while (head != NULL) {
if (head->vaddr == vaddr) {
if (doesnt_expire_soon(head->addr)) {
// Update existing entry with current address
if (ht_bin->vaddr[0] == vaddr) {
ht_bin->tcaddr[0] = head->addr;
if (ht_bin->vaddr[1] == vaddr) {
ht_bin->tcaddr[1] = head->addr;
// Insert into hash table with low priority.
// Don't evict existing entries, as they are probably
// addresses that are being accessed frequently.
if (ht_bin->vaddr[0] == -1) {
ht_bin->vaddr[0] = vaddr;
ht_bin->tcaddr[0] = head->addr;
else if (ht_bin->vaddr[1] == -1) {
ht_bin->vaddr[1] = vaddr;
ht_bin->tcaddr[1] = head->addr;
void remove_hash(int vaddr)
//printf("remove hash: %x\n",vaddr);
struct ht_entry *ht_bin = hash_table_get(vaddr);
if (ht_bin->vaddr[1] == vaddr) {
ht_bin->vaddr[1] = -1;
ht_bin->tcaddr[1] = NULL;
if (ht_bin->vaddr[0] == vaddr) {
ht_bin->vaddr[0] = ht_bin->vaddr[1];
ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
ht_bin->vaddr[1] = -1;
ht_bin->tcaddr[1] = NULL;
void ll_remove_matching_addrs(struct ll_entry **head,uintptr_t addr,int shift)
struct ll_entry *next;
if(((uintptr_t)((*head)->addr)>>shift)==(addr>>shift) ||
  ((uintptr_t)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
inv_debug("EXP: Remove pointer to %p (%x)\n",(*head)->addr,(*head)->vaddr);
remove_hash((*head)->vaddr);
head=&((*head)->next);
// Remove all entries from linked list
void ll_clear(struct ll_entry **head)
struct ll_entry *cur;
struct ll_entry *next;
// Dereference the pointers and remove if it matches
static void ll_kill_pointers(struct ll_entry *head,uintptr_t addr,int shift)
uintptr_t ptr = (uintptr_t)get_pointer(head->addr);
inv_debug("EXP: Lookup pointer to %lx at %p (%x)\n",(long)ptr,head->addr,head->vaddr);
if(((ptr>>shift)==(addr>>shift)) ||
  (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
void *host_addr=find_extjump_insn(head->addr);
mark_clear_cache(host_addr);
set_jump_target(host_addr, head->addr);
// This is called when we write to a compiled block (see do_invstub)
static void invalidate_page(u_int page)
struct ll_entry *head;
struct ll_entry *next;
inv_debug("INVALIDATE: %x\n",head->vaddr);
remove_hash(head->vaddr);
head=jump_out[page];
inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
void *host_addr=find_extjump_insn(head->addr);
mark_clear_cache(host_addr);
set_jump_target(host_addr, head->addr);
static void invalidate_block_range(u_int block, u_int first, u_int last)
u_int page=get_page(block<<12);
//printf("first=%d last=%d\n",first,last);
invalidate_page(page);
assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
assert(last<page+5);
// Invalidate the adjacent pages if a block crosses a 4K boundary
invalidate_page(first);
for(first=page+1;first<last;first++) {
invalidate_page(first);
// Don't trap writes
invalid_code[block]=1;
memset(mini_ht,-1,sizeof(mini_ht));
void invalidate_block(u_int block)
u_int page=get_page(block<<12);
u_int vpage=get_vpage(block<<12);
inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
//inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
struct ll_entry *head;
head=jump_dirty[vpage];
//printf("page=%d vpage=%d\n",page,vpage);
if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
u_char *start, *end;
get_bounds(head->addr, &start, &end);
//printf("start: %p end: %p\n", start, end);
if (page < 2048 && start >= rdram && end < rdram+RAM_SIZE) {
if (((start-rdram)>>12) <= page && ((end-1-rdram)>>12) >= page) {
if ((((start-rdram)>>12)&2047) < first) first = ((start-rdram)>>12)&2047;
if ((((end-1-rdram)>>12)&2047) > last) last = ((end-1-rdram)>>12)&2047;
invalidate_block_range(block,first,last);
void invalidate_addr(u_int addr)
// this check is done by the caller
//if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
u_int page=get_vpage(addr);
if(page<2048) { // RAM
struct ll_entry *head;
u_int addr_min=~0, addr_max=0;
u_int mask=RAM_SIZE-1;
u_int addr_main=0x80000000|(addr&mask);
inv_code_start=addr_main&~0xfff;
inv_code_end=addr_main|0xfff;
// must check previous page too because of spans..
inv_code_start-=0x1000;
for(;pg1<=page;pg1++) {
for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
u_char *start_h, *end_h;
get_bounds(head->addr, &start_h, &end_h);
start = (uintptr_t)start_h - ram_offset;
end = (uintptr_t)end_h - ram_offset;
if(start<=addr_main&&addr_main<end) {
if(start<addr_min) addr_min=start;
if(end>addr_max) addr_max=end;
else if(addr_main<start) {
if(start<inv_code_end)
inv_code_end=start-1;
if(end>inv_code_start)
inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
inv_code_start=inv_code_end=~0;
invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
inv_code_start=(addr&~mask)|(inv_code_start&mask);
inv_code_end=(addr&~mask)|(inv_code_end&mask);
inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
invalidate_block(addr>>12);
// This is called when loading a save state.
// Anything could have changed, so invalidate everything.
void invalidate_all_pages(void)
for(page=0;page<4096;page++)
invalidate_page(page);
for(page=0;page<1048576;page++)
if(!invalid_code[page]) {
restore_candidate[(page&2047)>>3]|=1<<(page&7);
restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
memset(mini_ht,-1,sizeof(mini_ht));
static void do_invstub(int n)
u_int reglist=stubs[n].a;
set_jump_target(stubs[n].addr, out);
if(stubs[n].b!=0) emit_mov(stubs[n].b,0);
emit_far_call(invalidate_addr);
restore_regs(reglist);
emit_jmp(stubs[n].retaddr); // return address
// Add an entry to jump_out after making a link
// src should point to code by emit_extjump2()
void add_link(u_int vaddr,void *src)
u_int page=get_page(vaddr);
inv_debug("add_link: %p -> %x (%d)\n",src,vaddr,page);
check_extjump2(src);
ll_add(jump_out+page,vaddr,src);
//void *ptr=get_pointer(src);
//inv_debug("add_link: Pointer is to %p\n",ptr);
// If a code block was found to be unmodified (bit was set in
// restore_candidate) and it remains unmodified (bit is clear
// in invalid_code) then move the entries for that 4K page from
// the dirty list to the clean list.
void clean_blocks(u_int page)
struct ll_entry *head;
inv_debug("INV: clean_blocks page=%d\n",page);
head=jump_dirty[page];
if(!invalid_code[head->vaddr>>12]) {
// Don't restore blocks which are about to expire from the cache
if (doesnt_expire_soon(head->addr)) {
if(verify_dirty(head->addr)) {
u_char *start, *end;
//printf("Possibly Restore %x (%p)\n",head->vaddr, head->addr);
get_bounds(head->addr, &start, &end);
if (start - rdram < RAM_SIZE) {
for (i = (start-rdram+0x80000000)>>12; i <= (end-1-rdram+0x80000000)>>12; i++) {
inv|=invalid_code[i];
else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
void *clean_addr = get_clean_addr(head->addr);
if (doesnt_expire_soon(clean_addr)) {
inv_debug("INV: Restored %x (%p/%p)\n",head->vaddr, head->addr, clean_addr);
//printf("page=%x, addr=%x\n",page,head->vaddr);
//assert(head->vaddr>>12==(page|0x80000));
ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
struct ht_entry *ht_bin = hash_table_get(head->vaddr);
if (ht_bin->vaddr[0] == head->vaddr)
ht_bin->tcaddr[0] = clean_addr; // Replace existing entry
if (ht_bin->vaddr[1] == head->vaddr)
ht_bin->tcaddr[1] = clean_addr; // Replace existing entry
/* Register allocation */
// Note: registers are allocated clean (unmodified state)
// if you intend to modify the register, you must call dirty_reg().
static void alloc_reg(struct regstat *cur,int i,signed char reg)
int preferred_reg = (reg&7);
if(reg==CCREG) preferred_reg=HOST_CCREG;
if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
// Don't allocate unused registers
if((cur->u>>reg)&1) return;
// see if it's already allocated
for(hr=0;hr<HOST_REGS;hr++)
if(cur->regmap[hr]==reg) return;
// Keep the same mapping if the register was already allocated in a loop
preferred_reg = loop_reg(i,reg,preferred_reg);
// Try to allocate the preferred register
if(cur->regmap[preferred_reg]==-1) {
cur->regmap[preferred_reg]=reg;
cur->dirty&=~(1<<preferred_reg);
cur->isconst&=~(1<<preferred_reg);
r=cur->regmap[preferred_reg];
cur->regmap[preferred_reg]=reg;
cur->dirty&=~(1<<preferred_reg);
cur->isconst&=~(1<<preferred_reg);
// Clear any unneeded registers
// We try to keep the mapping consistent, if possible, because it
// makes branches easier (especially loops). So we try to allocate
// first (see above) before removing old mappings. If this is not
// possible then go ahead and clear out the registers that are no
for(hr=0;hr<HOST_REGS;hr++)
if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
// Try to allocate any available register, but prefer
// registers that have not been used recently.
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
if(regs[i-1].regmap[hr]!=rs1[i-1]&&regs[i-1].regmap[hr]!=rs2[i-1]&&regs[i-1].regmap[hr]!=rt1[i-1]&&regs[i-1].regmap[hr]!=rt2[i-1]) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
// Try to allocate any available register
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
// Ok, now we have to evict someone
// Pick a register we hopefully won't need soon
u_char hsn[MAXREG+1];
memset(hsn,10,sizeof(hsn));
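// hsn = "how soon needed": distance in instructions (0-9) until each
// guest register is next used; 10 means not needed soon, which makes
// that register the preferred eviction victim below.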
lsn(hsn,i,&preferred_reg);
//printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
//printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
// Don't evict the cycle count at entry points, otherwise the entry
// stub will have to write it.
if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
// Alloc preferred register if available
if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
for(hr=0;hr<HOST_REGS;hr++) {
// Evict both parts of a 64-bit register
if((cur->regmap[hr]&63)==r) {
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
cur->regmap[preferred_reg]=reg;
for(r=1;r<=MAXREG;r++)
if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=HOST_CCREG||j<hsn[CCREG]) {
if(cur->regmap[hr]==r) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
for(r=1;r<=MAXREG;r++)
for(hr=0;hr<HOST_REGS;hr++) {
if(cur->regmap[hr]==r) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
SysPrintf("This shouldn't happen (alloc_reg)");abort();
// Allocate a temporary register. This is done without regard to
// dirty status or whether the register we request is on the unneeded list
// Note: This will only allocate one register, even if called multiple times
static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
int preferred_reg = -1;
// see if it's already allocated
for(hr=0;hr<HOST_REGS;hr++)
if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;
// Try to allocate any available register
for(hr=HOST_REGS-1;hr>=0;hr--) {
if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
// Find an unneeded register
for(hr=HOST_REGS-1;hr>=0;hr--)
if(i==0||((unneeded_reg[i-1]>>r)&1)) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
// Ok, now we have to evict someone
// Pick a register we hopefully won't need soon
// TODO: we might want to follow unconditional jumps here
// TODO: get rid of dupe code and make this into a function
u_char hsn[MAXREG+1];
memset(hsn,10,sizeof(hsn));
lsn(hsn,i,&preferred_reg);
//printf("hsn: %d %d %d %d %d %d %d\n",hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
// Don't evict the cycle count at entry points, otherwise the entry
// stub will have to write it.
if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
for(r=1;r<=MAXREG;r++)
if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=HOST_CCREG||hsn[CCREG]>2) {
if(cur->regmap[hr]==r) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
for(r=1;r<=MAXREG;r++)
for(hr=0;hr<HOST_REGS;hr++) {
if(cur->regmap[hr]==r) {
cur->regmap[hr]=reg;
cur->dirty&=~(1<<hr);
cur->isconst&=~(1<<hr);
SysPrintf("This shouldn't happen");abort();
static void mov_alloc(struct regstat *current,int i)
// Note: Don't need to actually alloc the source registers
//alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,rt1[i]);
clear_const(current,rs1[i]);
clear_const(current,rt1[i]);
dirty_reg(current,rt1[i]);
static void shiftimm_alloc(struct regstat *current,int i)
if(opcode2[i]<=0x3) // SLL/SRL/SRA
if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,rt1[i]);
dirty_reg(current,rt1[i]);
if(is_const(current,rs1[i])) {
int v=get_const(current,rs1[i]);
if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
else clear_const(current,rt1[i]);
clear_const(current,rs1[i]);
clear_const(current,rt1[i]);
if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
if(opcode2[i]==0x3c) // DSLL32
if(opcode2[i]==0x3e) // DSRL32
if(opcode2[i]==0x3f) // DSRA32
static void shift_alloc(struct regstat *current,int i)
if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
if(rs1[i]) alloc_reg(current,i,rs1[i]);
if(rs2[i]) alloc_reg(current,i,rs2[i]);
alloc_reg(current,i,rt1[i]);
if(rt1[i]==rs2[i]) {
alloc_reg_temp(current,i,-1);
minimum_free_regs[i]=1;
} else { // DSLLV/DSRLV/DSRAV
clear_const(current,rs1[i]);
clear_const(current,rs2[i]);
clear_const(current,rt1[i]);
dirty_reg(current,rt1[i]);
static void alu_alloc(struct regstat *current,int i)
if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
if(rs1[i]&&rs2[i]) {
alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,rs2[i]);
if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
alloc_reg(current,i,rt1[i]);
if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,rs2[i]);
alloc_reg(current,i,rt1[i]);
if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
if(rs1[i]&&rs2[i]) {
alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,rs2[i]);
if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
alloc_reg(current,i,rt1[i]);
if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
clear_const(current,rs1[i]);
clear_const(current,rs2[i]);
clear_const(current,rt1[i]);
dirty_reg(current,rt1[i]);
static void imm16_alloc(struct regstat *current,int i)
if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
if(rt1[i]) alloc_reg(current,i,rt1[i]);
if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
clear_const(current,rs1[i]);
clear_const(current,rt1[i]);
else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
if(is_const(current,rs1[i])) {
int v=get_const(current,rs1[i]);
if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
else clear_const(current,rt1[i]);
else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
if(is_const(current,rs1[i])) {
int v=get_const(current,rs1[i]);
set_const(current,rt1[i],v+imm[i]);
else clear_const(current,rt1[i]);
set_const(current,rt1[i],imm[i]<<16); // LUI
dirty_reg(current,rt1[i]);
static void load_alloc(struct regstat *current,int i)
clear_const(current,rt1[i]);
//if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
if(rt1[i]&&!((current->u>>rt1[i])&1)) {
alloc_reg(current,i,rt1[i]);
assert(get_reg(current->regmap,rt1[i])>=0);
if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
dirty_reg(current,rt1[i]);
// LWL/LWR need a temporary register for the old value
if(opcode[i]==0x22||opcode[i]==0x26)
alloc_reg(current,i,FTEMP);
alloc_reg_temp(current,i,-1);
minimum_free_regs[i]=1;
// Load to r0 or unneeded register (dummy load)
// but we still need a register to calculate the address
if(opcode[i]==0x22||opcode[i]==0x26)
alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
alloc_reg_temp(current,i,-1);
minimum_free_regs[i]=1;
if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
void store_alloc(struct regstat *current,int i)
clear_const(current,rs2[i]);
if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,rs2[i]);
if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
#if defined(HOST_IMM8)
// On CPUs without 32-bit immediates we need a pointer to invalid_code
else alloc_reg(current,i,INVCP);
if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
alloc_reg(current,i,FTEMP);
// We need a temporary register for address generation
alloc_reg_temp(current,i,-1);
minimum_free_regs[i]=1;
void c1ls_alloc(struct regstat *current,int i)
//clear_const(current,rs1[i]); // FIXME
clear_const(current,rt1[i]);
if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,CSREG); // Status
alloc_reg(current,i,FTEMP);
if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
#if defined(HOST_IMM8)
// On CPUs without 32-bit immediates we need a pointer to invalid_code
else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
alloc_reg(current,i,INVCP);
// We need a temporary register for address generation
alloc_reg_temp(current,i,-1);
void c2ls_alloc(struct regstat *current,int i)
clear_const(current,rt1[i]);
if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,FTEMP);
#if defined(HOST_IMM8)
// On CPUs without 32-bit immediates we need a pointer to invalid_code
if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
alloc_reg(current,i,INVCP);
// We need a temporary register for address generation
alloc_reg_temp(current,i,-1);
minimum_free_regs[i]=1;
#ifndef multdiv_alloc
void multdiv_alloc(struct regstat *current,int i)
// case 0x1D: DMULTU
clear_const(current,rs1[i]);
clear_const(current,rs2[i]);
if((opcode2[i]&4)==0) // 32-bit
current->u&=~(1LL<<HIREG);
current->u&=~(1LL<<LOREG);
alloc_reg(current,i,HIREG);
alloc_reg(current,i,LOREG);
alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,rs2[i]);
dirty_reg(current,HIREG);
dirty_reg(current,LOREG);
// Multiply by zero is zero.
// MIPS does not have a divide by zero exception.
// The result is undefined, so we return zero.
alloc_reg(current,i,HIREG);
alloc_reg(current,i,LOREG);
dirty_reg(current,HIREG);
dirty_reg(current,LOREG);
void cop0_alloc(struct regstat *current,int i)
if(opcode2[i]==0) // MFC0
clear_const(current,rt1[i]);
alloc_all(current,i);
alloc_reg(current,i,rt1[i]);
dirty_reg(current,rt1[i]);
else if(opcode2[i]==4) // MTC0
clear_const(current,rs1[i]);
alloc_reg(current,i,rs1[i]);
alloc_all(current,i);
alloc_all(current,i); // FIXME: Keep r0
alloc_reg(current,i,0);
// TLBR/TLBWI/TLBWR/TLBP/ERET
assert(opcode2[i]==0x10);
alloc_all(current,i);
minimum_free_regs[i]=HOST_REGS;
static void cop2_alloc(struct regstat *current,int i)
if (opcode2[i] < 3) // MFC2/CFC2
alloc_cc(current,i); // for stalls
dirty_reg(current,CCREG);
clear_const(current,rt1[i]);
alloc_reg(current,i,rt1[i]);
dirty_reg(current,rt1[i]);
else if (opcode2[i] > 3) // MTC2/CTC2
clear_const(current,rs1[i]);
alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,0);
alloc_reg_temp(current,i,-1);
minimum_free_regs[i]=1;
void c2op_alloc(struct regstat *current,int i)
alloc_cc(current,i); // for stalls
dirty_reg(current,CCREG);
alloc_reg_temp(current,i,-1);
void syscall_alloc(struct regstat *current,int i)
alloc_cc(current,i);
dirty_reg(current,CCREG);
alloc_all(current,i);
minimum_free_regs[i]=HOST_REGS;
void delayslot_alloc(struct regstat *current,int i)
assem_debug("jump in the delay slot. this shouldn't happen.\n");//abort();
SysPrintf("Disabled speculative precompilation\n");
imm16_alloc(current,i);
load_alloc(current,i);
store_alloc(current,i);
alu_alloc(current,i);
shift_alloc(current,i);
multdiv_alloc(current,i);
shiftimm_alloc(current,i);
mov_alloc(current,i);
cop0_alloc(current,i);
cop2_alloc(current,i);
c1ls_alloc(current,i);
c2ls_alloc(current,i);
c2op_alloc(current,i);
// Special case where a branch and delay slot span two pages in virtual memory
static void pagespan_alloc(struct regstat *current,int i)
current->wasconst=0;
minimum_free_regs[i]=HOST_REGS;
alloc_all(current,i);
alloc_cc(current,i);
dirty_reg(current,CCREG);
if(opcode[i]==3) // JAL
alloc_reg(current,i,31);
dirty_reg(current,31);
if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
alloc_reg(current,i,rs1[i]);
alloc_reg(current,i,rt1[i]);
dirty_reg(current,rt1[i]);
if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
if(rs1[i]) alloc_reg(current,i,rs1[i]);
if(rs2[i]) alloc_reg(current,i,rs2[i]);
if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
if(rs1[i]) alloc_reg(current,i,rs1[i]);
static void add_stub(enum stub_type type, void *addr, void *retaddr,
  u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
assert(stubcount < ARRAY_SIZE(stubs));
stubs[stubcount].type = type;
stubs[stubcount].addr = addr;
stubs[stubcount].retaddr = retaddr;
stubs[stubcount].a = a;
stubs[stubcount].b = b;
stubs[stubcount].c = c;
stubs[stubcount].d = d;
stubs[stubcount].e = e;
static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
  int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist)
add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
// Write out a single register
static void wb_register(signed char r,signed char regmap[],uint64_t dirty)
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG) {
if((regmap[hr]&63)==r) {
assert(regmap[hr]<64);
emit_storereg(r,hr);
static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
//if(dirty_pre==dirty) return;
for(hr=0;hr<HOST_REGS;hr++) {
if(hr!=EXCLUDE_REG) {
if(((~u)>>(reg&63))&1) {
if(((dirty_pre&~dirty)>>hr)&1) {
emit_storereg(reg,hr);
static void pass_args(int a0, int a1)
emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
else if(a0!=0&&a1==0) {
if (a0>=0) emit_mov(a0,0);
if(a0>=0&&a0!=0) emit_mov(a0,0);
if(a1>=0&&a1!=1) emit_mov(a1,1);
static void alu_assemble(int i,struct regstat *i_regs)
if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
signed char s1,s2,t;
t=get_reg(i_regs->regmap,rt1[i]);
s1=get_reg(i_regs->regmap,rs1[i]);
s2=get_reg(i_regs->regmap,rs2[i]);
if(rs1[i]&&rs2[i]) {
if(opcode2[i]&2) emit_sub(s1,s2,t);
else emit_add(s1,s2,t);
if(s1>=0) emit_mov(s1,t);
else emit_loadreg(rs1[i],t);
if(opcode2[i]&2) emit_neg(s2,t);
else emit_mov(s2,t);
emit_loadreg(rs2[i],t);
if(opcode2[i]&2) emit_neg(t,t);
else emit_zeroreg(t);
if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
signed char s1l,s2l,t;
t=get_reg(i_regs->regmap,rt1[i]);
s1l=get_reg(i_regs->regmap,rs1[i]);
s2l=get_reg(i_regs->regmap,rs2[i]);
if(rs2[i]==0) // rx<r0
if(opcode2[i]==0x2a&&rs1[i]!=0) { // SLT
emit_shrimm(s1l,31,t);
else // SLTU (unsigned can not be less than zero, 0<0)
else if(rs1[i]==0) // r0<rx
if(opcode2[i]==0x2a) // SLT
emit_set_gz32(s2l,t);
else // SLTU (set if not zero)
emit_set_nz32(s2l,t);
assert(s1l>=0);assert(s2l>=0);
if(opcode2[i]==0x2a) // SLT
emit_set_if_less32(s1l,s2l,t);
emit_set_if_carry32(s1l,s2l,t);
if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
signed char s1l,s2l,tl;
tl=get_reg(i_regs->regmap,rt1[i]);
s1l=get_reg(i_regs->regmap,rs1[i]);
s2l=get_reg(i_regs->regmap,rs2[i]);
if(rs1[i]&&rs2[i]) {
if(opcode2[i]==0x24) { // AND
emit_and(s1l,s2l,tl);
if(opcode2[i]==0x25) { // OR
emit_or(s1l,s2l,tl);
if(opcode2[i]==0x26) { // XOR
emit_xor(s1l,s2l,tl);
if(opcode2[i]==0x27) { // NOR
emit_or(s1l,s2l,tl);
if(opcode2[i]==0x24) { // AND
if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
if(s1l>=0) emit_mov(s1l,tl);
else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
if(s2l>=0) emit_mov(s2l,tl);
else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
else emit_zeroreg(tl);
if(opcode2[i]==0x27) { // NOR
if(s1l>=0) emit_not(s1l,tl);
emit_loadreg(rs1[i],tl);
if(s2l>=0) emit_not(s2l,tl);
emit_loadreg(rs2[i],tl);
else emit_movimm(-1,tl);
void imm16_assemble(int i,struct regstat *i_regs)
if (opcode[i]==0x0f) { // LUI
t=get_reg(i_regs->regmap,rt1[i]);
if(!((i_regs->isconst>>t)&1))
emit_movimm(imm[i]<<16,t);
if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
t=get_reg(i_regs->regmap,rt1[i]);
s=get_reg(i_regs->regmap,rs1[i]);
if(!((i_regs->isconst>>t)&1)) {
if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
emit_addimm(t,imm[i],t);
if(!((i_regs->wasconst>>s)&1))
emit_addimm(s,imm[i],t);
emit_movimm(constmap[i][s]+imm[i],t);
if(!((i_regs->isconst>>t)&1))
emit_movimm(imm[i],t);
if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
tl=get_reg(i_regs->regmap,rt1[i]);
sl=get_reg(i_regs->regmap,rs1[i]);
emit_addimm(sl,imm[i],tl);
emit_movimm(imm[i],tl);
else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
//assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
t=get_reg(i_regs->regmap,rt1[i]);
sl=get_reg(i_regs->regmap,rs1[i]);
if(opcode[i]==0x0a) { // SLTI
if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
emit_slti32(t,imm[i],t);
emit_slti32(sl,imm[i],t);
if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
emit_sltiu32(t,imm[i],t);
emit_sltiu32(sl,imm[i],t);
// SLTI(U) with r0 is just stupid,
// nonetheless examples can be found
if(opcode[i]==0x0a) // SLTI
if(0<imm[i]) emit_movimm(1,t);
else emit_zeroreg(t);
if(imm[i]) emit_movimm(1,t);
else emit_zeroreg(t);
else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
tl=get_reg(i_regs->regmap,rt1[i]);
sl=get_reg(i_regs->regmap,rs1[i]);
if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
if(opcode[i]==0x0c) //ANDI
if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
emit_andimm(tl,imm[i],tl);
if(!((i_regs->wasconst>>sl)&1))
emit_andimm(sl,imm[i],tl);
emit_movimm(constmap[i][sl]&imm[i],tl);
if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
if(opcode[i]==0x0d) { // ORI
emit_orimm(tl,imm[i],tl);
if(!((i_regs->wasconst>>sl)&1))
emit_orimm(sl,imm[i],tl);
emit_movimm(constmap[i][sl]|imm[i],tl);
if(opcode[i]==0x0e) { // XORI
emit_xorimm(tl,imm[i],tl);
if(!((i_regs->wasconst>>sl)&1))
emit_xorimm(sl,imm[i],tl);
emit_movimm(constmap[i][sl]^imm[i],tl);
emit_movimm(imm[i],tl);
void shiftimm_assemble(int i,struct regstat *i_regs)
if(opcode2[i]<=0x3) // SLL/SRL/SRA
t=get_reg(i_regs->regmap,rt1[i]);
s=get_reg(i_regs->regmap,rs1[i]);
if(t>=0&&!((i_regs->isconst>>t)&1)){
if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
if(opcode2[i]==0) // SLL
emit_shlimm(s<0?t:s,imm[i],t);
if(opcode2[i]==2) // SRL
emit_shrimm(s<0?t:s,imm[i],t);
if(opcode2[i]==3) // SRA
emit_sarimm(s<0?t:s,imm[i],t);
if(s>=0 && s!=t) emit_mov(s,t);
//emit_storereg(rt1[i],t); //DEBUG
if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
if(opcode2[i]==0x3c) // DSLL32
if(opcode2[i]==0x3e) // DSRL32
if(opcode2[i]==0x3f) // DSRA32
#ifndef shift_assemble
static void shift_assemble(int i,struct regstat *i_regs)
signed char s,t,shift;
assert(opcode2[i]<=0x07); // SLLV/SRLV/SRAV
t = get_reg(i_regs->regmap, rt1[i]);
s = get_reg(i_regs->regmap, rs1[i]);
shift = get_reg(i_regs->regmap, rs2[i]);
else if(rs2[i]==0) {
if(s!=t) emit_mov(s,t);
host_tempreg_acquire();
emit_andimm(shift,31,HOST_TEMPREG);
switch(opcode2[i]) {
emit_shl(s,HOST_TEMPREG,t);
emit_shr(s,HOST_TEMPREG,t);
emit_sar(s,HOST_TEMPREG,t);
host_tempreg_release();
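/* Classify a constant guest address by which mirror of PSX memory it
   hits (KUSEG 0x00..., scratchpad 0x1f80..., KSEG0 0x80..., KSEG1
   0xa0...), so emit_fastpath_cmp_jump() below can emit a cheap masked
   RAM access instead of going through the generic memory handlers. */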
2547 static int get_ptr_mem_type(u_int a)
2549 if(a < 0x00200000) {
2550 if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
2551 // return wrong, must use memhandler for BIOS self-test to pass
2552 // 007 does similar stuff from a00 mirror, weird stuff
2556 if(0x1f800000 <= a && a < 0x1f801000)
2558 if(0x80200000 <= a && a < 0x80800000)
2560 if(0xa0000000 <= a && a < 0xa0200000)
2565 static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override)
2570 if(((smrv_strong|smrv_weak)>>mr)&1) {
2571 type=get_ptr_mem_type(smrv[mr]);
2572 //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
2575 // use the mirror we are running on
2576 type=get_ptr_mem_type(start);
2577 //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type);
2580 if(type==MTYPE_8020) { // RAM 80200000+ mirror
2581 host_tempreg_acquire();
2582 emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2583 addr=*addr_reg_override=HOST_TEMPREG;
2586 else if(type==MTYPE_0000) { // RAM 0 mirror
2587 host_tempreg_acquire();
2588 emit_orimm(addr,0x80000000,HOST_TEMPREG);
2589 addr=*addr_reg_override=HOST_TEMPREG;
2592 else if(type==MTYPE_A000) { // RAM A mirror
2593 host_tempreg_acquire();
2594 emit_andimm(addr,~0x20000000,HOST_TEMPREG);
2595 addr=*addr_reg_override=HOST_TEMPREG;
2598 else if(type==MTYPE_1F80) { // scratchpad
2599 if (psxH == (void *)0x1f800000) {
2600 host_tempreg_acquire();
2601 emit_xorimm(addr,0x1f800000,HOST_TEMPREG);
2602 emit_cmpimm(HOST_TEMPREG,0x1000);
2603 host_tempreg_release();
2608 // do the usual RAM check, jump will go to the right handler
2615 emit_cmpimm(addr,RAM_SIZE);
2617 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2618 // Hint to branch predictor that the branch is unlikely to be taken
2620 emit_jno_unlikely(0);
2625 host_tempreg_acquire();
2626 emit_addimm(addr,ram_offset,HOST_TEMPREG);
2627 addr=*addr_reg_override=HOST_TEMPREG;
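/* Host-side summary of the mirror folding emitted above (hypothetical
 * helper, not emitted code): the cached RAM mirrors are all normalized onto
 * the 0x80000000-based window, so the single cmpimm(addr,RAM_SIZE) range
 * check that follows can serve every mirror.
 */
static u_int fold_ram_mirror(u_int a)
{
  if (0x80200000u <= a && a < 0x80800000u) return a & ~0x00e00000u; // MTYPE_8020
  if (a < 0x00200000u)                     return a | 0x80000000u;  // MTYPE_0000
  if (0xa0000000u <= a && a < 0xa0200000u) return a & ~0x20000000u; // MTYPE_A000
  return a; // MTYPE_8000 and everything else is used as-is
}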
2634 // return memhandler, or get directly accessible address and return 0
2635 static void *get_direct_memhandler(void *table, u_int addr,
2636 enum stub_type type, uintptr_t *addr_host)
2638 uintptr_t l1, l2 = 0;
2639 l1 = ((uintptr_t *)table)[addr>>12];
2640 if ((l1 & (1ul << (sizeof(l1)*8-1))) == 0) {
2641 uintptr_t v = l1 << 1;
2642 *addr_host = v + addr;
2647 if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
2648 l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
2649 else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
2650 l2=((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
2652 l2=((uintptr_t *)l1)[(addr&0xfff)/4];
2653 if ((l2 & (1ul << (sizeof(l2)*8-1))) == 0) {
2654 uintptr_t v = l2 << 1;
2655 *addr_host = v + (addr&0xfff);
2658 return (void *)(l2 << 1);
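/* The table encoding this function decodes: each entry holds a host pointer
 * shifted right by one. MSB clear means direct access -- entry<<1 plus the
 * guest address (level 1) or its page offset (level 2) yields the host
 * address. MSB set means the entry (again <<1) is a handler to call. A
 * hypothetical encoder for a direct second-level page entry, assuming a
 * 2-byte-aligned host pointer (the real tables are built by the emulator
 * interface, not here):
 */
static uintptr_t encode_direct_page(const void *host_page)
{
  return (uintptr_t)host_page >> 1; // MSB stays clear -> direct access
}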
2662 static u_int get_host_reglist(const signed char *regmap)
2664 u_int reglist = 0, hr;
2665 for (hr = 0; hr < HOST_REGS; hr++) {
2666 if (hr != EXCLUDE_REG && regmap[hr] >= 0)
2672 static u_int reglist_exclude(u_int reglist, int r1, int r2)
2675 reglist &= ~(1u << r1);
2677 reglist &= ~(1u << r2);
2681 // find a temp caller-saved register not in reglist (so assumed to be free)
2682 static int reglist_find_free(u_int reglist)
2684 u_int free_regs = ~reglist & CALLER_SAVE_REGS;
2687 return __builtin_ctz(free_regs);
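/* Usage sketch (hypothetical; mirrors cop2_call_stall_check() below, which
 * treats a negative return as "none free"). Note __builtin_ctz() has
 * undefined behavior for 0, so the all-regs-live case must be screened out
 * before this return is reached:
 *
 *   int rtmp = reglist_find_free(reglist);
 *   if (rtmp < 0)
 *     return; // every caller-saved reg is live; skip the inline check
 */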
2690 static void load_assemble(int i, const struct regstat *i_regs)
2695 int memtarget=0,c=0;
2696 int fastio_reg_override=-1;
2697 u_int reglist=get_host_reglist(i_regs->regmap);
2698 tl=get_reg(i_regs->regmap,rt1[i]);
2699 s=get_reg(i_regs->regmap,rs1[i]);
2701 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2703 c=(i_regs->wasconst>>s)&1;
2705 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2708 //printf("load_assemble: c=%d\n",c);
2709 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
2710 // FIXME: Even if the load is a NOP, we should check for pagefaults...
2711 if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
2713 // could be FIFO, must perform the read
2715 assem_debug("(forced read)\n");
2716 tl=get_reg(i_regs->regmap,-1);
2719 if(offset||s<0||c) addr=tl;
2721 //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2723 //printf("load_assemble: c=%d\n",c);
2724 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
2725 assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2729 // Strmnnrmn's speed hack
2730 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2733 jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
2736 else if(ram_offset&&memtarget) {
2737 host_tempreg_acquire();
2738 emit_addimm(addr,ram_offset,HOST_TEMPREG);
2739 fastio_reg_override=HOST_TEMPREG;
2741 int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2742 if (opcode[i]==0x20) { // LB
2748 if(fastio_reg_override>=0) a=fastio_reg_override;
2750 emit_movsbl_indexed(x,a,tl);
2754 add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2757 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2759 if (opcode[i]==0x21) { // LH
2764 if(fastio_reg_override>=0) a=fastio_reg_override;
2765 emit_movswl_indexed(x,a,tl);
2768 add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2771 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2773 if (opcode[i]==0x23) { // LW
2777 if(fastio_reg_override>=0) a=fastio_reg_override;
2778 emit_readword_indexed(0,a,tl);
2781 add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2784 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2786 if (opcode[i]==0x24) { // LBU
2791 if(fastio_reg_override>=0) a=fastio_reg_override;
2793 emit_movzbl_indexed(x,a,tl);
2796 add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2799 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2801 if (opcode[i]==0x25) { // LHU
2806 if(fastio_reg_override>=0) a=fastio_reg_override;
2807 emit_movzwl_indexed(x,a,tl);
2810 add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2813 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2815 if (opcode[i]==0x27) { // LWU
2818 if (opcode[i]==0x37) { // LD
2822 if (fastio_reg_override == HOST_TEMPREG)
2823 host_tempreg_release();
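/* The per-opcode emitters above map straight onto host load flavors:
 * LB -> movsbl (sign-extend byte), LBU -> movzbl (zero-extend byte),
 * LH -> movswl, LHU -> movzwl, LW -> plain 32-bit read. LWU (0x27) and
 * LD (0x37) are 64-bit MIPS ops that R3000A code should never contain. */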
2826 #ifndef loadlr_assemble
2827 static void loadlr_assemble(int i, const struct regstat *i_regs)
2829 int s,tl,temp,temp2,addr;
2832 int memtarget=0,c=0;
2833 int fastio_reg_override=-1;
2834 u_int reglist=get_host_reglist(i_regs->regmap);
2835 tl=get_reg(i_regs->regmap,rt1[i]);
2836 s=get_reg(i_regs->regmap,rs1[i]);
2837 temp=get_reg(i_regs->regmap,-1);
2838 temp2=get_reg(i_regs->regmap,FTEMP);
2839 addr=get_reg(i_regs->regmap,AGEN1+(i&1));
2843 if(offset||s<0||c) addr=temp2;
2846 c=(i_regs->wasconst>>s)&1;
2848 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2852 emit_shlimm(addr,3,temp);
2853 if (opcode[i]==0x22||opcode[i]==0x26) {
2854 emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR
2856 emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
2858 jaddr=emit_fastpath_cmp_jump(i,temp2,&fastio_reg_override);
2861 if(ram_offset&&memtarget) {
2862 host_tempreg_acquire();
2863 emit_addimm(temp2,ram_offset,HOST_TEMPREG);
2864 fastio_reg_override=HOST_TEMPREG;
2866 if (opcode[i]==0x22||opcode[i]==0x26) {
2867 emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
2869 emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR
2872 if (opcode[i]==0x22||opcode[i]==0x26) { // LWL/LWR
2875 if(fastio_reg_override>=0) a=fastio_reg_override;
2876 emit_readword_indexed(0,a,temp2);
2877 if(fastio_reg_override==HOST_TEMPREG) host_tempreg_release();
2878 if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj[i],reglist);
2881 inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj[i],reglist);
2884 emit_andimm(temp,24,temp);
2885 if (opcode[i]==0x22) // LWL
2886 emit_xorimm(temp,24,temp);
2887 host_tempreg_acquire();
2888 emit_movimm(-1,HOST_TEMPREG);
2889 if (opcode[i]==0x26) {
2890 emit_shr(temp2,temp,temp2);
2891 emit_bic_lsr(tl,HOST_TEMPREG,temp,tl);
2893 emit_shl(temp2,temp,temp2);
2894 emit_bic_lsl(tl,HOST_TEMPREG,temp,tl);
2896 host_tempreg_release();
2897 emit_or(temp2,tl,tl);
2899 //emit_storereg(rt1[i],tl); // DEBUG
2901 if (opcode[i]==0x1A||opcode[i]==0x1B) { // LDL/LDR
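/* Reference semantics of the LWL/LWR merge above (hypothetical little-endian
 * C model matching the PSX; mem is the word at addr&~3, b = addr&3; `temp`
 * holds 8*b, xor'd with 24 for LWL):
 */
static u_int lwr_merge(u_int rt, u_int mem, u_int b)
{
  u_int sh = 8 * b;                                 // b==0 loads the whole word
  return (rt & ~(0xffffffffu >> sh)) | (mem >> sh);
}
static u_int lwl_merge(u_int rt, u_int mem, u_int b)
{
  u_int sh = 24 - 8 * b;                            // b==3 loads the whole word
  return (rt & ~(0xffffffffu << sh)) | (mem << sh);
}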
2907 void store_assemble(int i, const struct regstat *i_regs)
2913 enum stub_type type;
2914 int memtarget=0,c=0;
2915 int agr=AGEN1+(i&1);
2916 int fastio_reg_override=-1;
2917 u_int reglist=get_host_reglist(i_regs->regmap);
2918 tl=get_reg(i_regs->regmap,rs2[i]);
2919 s=get_reg(i_regs->regmap,rs1[i]);
2920 temp=get_reg(i_regs->regmap,agr);
2921 if(temp<0) temp=get_reg(i_regs->regmap,-1);
2924 c=(i_regs->wasconst>>s)&1;
2926 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2931 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2932 if(offset||s<0||c) addr=temp;
2935 jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
2937 else if(ram_offset&&memtarget) {
2938 host_tempreg_acquire();
2939 emit_addimm(addr,ram_offset,HOST_TEMPREG);
2940 fastio_reg_override=HOST_TEMPREG;
2943 if (opcode[i]==0x28) { // SB
2947 if(fastio_reg_override>=0) a=fastio_reg_override;
2948 emit_writebyte_indexed(tl,x,a);
2952 if (opcode[i]==0x29) { // SH
2956 if(fastio_reg_override>=0) a=fastio_reg_override;
2957 emit_writehword_indexed(tl,x,a);
2961 if (opcode[i]==0x2B) { // SW
2964 if(fastio_reg_override>=0) a=fastio_reg_override;
2965 emit_writeword_indexed(tl,0,a);
2969 if (opcode[i]==0x3F) { // SD
2973 if(fastio_reg_override==HOST_TEMPREG)
2974 host_tempreg_release();
2976 // PCSX store handlers don't check invcode again
2978 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2981 if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
2983 #ifdef DESTRUCTIVE_SHIFT
2984 // The x86 shift operation is 'destructive'; it overwrites the
2985 // source register, so we need to make a copy first and use that.
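// (e.g. on x86 the addr>>12 needed for the invalid_code[] lookup takes a
//  shr that clobbers its operand, so addr is copied to a scratch register
//  first; ARM instead folds the >>12 into the addressing mode and leaves
//  the address register intact)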
2988 #if defined(HOST_IMM8)
2989 int ir=get_reg(i_regs->regmap,INVCP);
2991 emit_cmpmem_indexedsr12_reg(ir,addr,1);
2993 emit_cmpmem_indexedsr12_imm(invalid_code,addr,1);
2995 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2996 emit_callne(invalidate_addr_reg[addr]);
3000 add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3004 u_int addr_val=constmap[i][s]+offset;
3006 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
3007 } else if(c&&!memtarget) {
3008 inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
3010 // basic detection of stores that modify the current block..
3011 // not looking backwards, as such code should already be in the MIPS i-cache
3012 // (see Spyro2 title->attract mode)
3013 if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
3014 SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
3015 assert(i_regs->regmap==regs[i].regmap); // not delay slot
3016 if(i_regs->regmap==regs[i].regmap) {
3017 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3018 wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty);
3019 emit_movimm(start+i*4+4,0);
3020 emit_writeword(0,&pcaddr);
3021 emit_addimm(HOST_CCREG,2,HOST_CCREG);
3022 emit_far_call(get_addr_ht);
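/* Net effect of the sequence above: constants are rematerialized, dirty
 * registers are written back, pcaddr is pointed at the instruction after
 * the store, a couple of cycles are charged, and control re-enters through
 * get_addr_ht() -- so if the store really did hit this block, the next
 * lookup sees the invalidation and recompiles before executing stale code. */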
3028 static void storelr_assemble(int i, const struct regstat *i_regs)
3034 void *case1, *case2, *case3;
3035 void *done0, *done1, *done2;
3036 int memtarget=0,c=0;
3037 int agr=AGEN1+(i&1);
3038 u_int reglist=get_host_reglist(i_regs->regmap);
3039 tl=get_reg(i_regs->regmap,rs2[i]);
3040 s=get_reg(i_regs->regmap,rs1[i]);
3041 temp=get_reg(i_regs->regmap,agr);
3042 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3045 c=(i_regs->isconst>>s)&1;
3047 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3053 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3054 if(!offset&&s!=temp) emit_mov(s,temp);
3060 if(!memtarget||!rs1[i]) {
3066 emit_addimm_no_flags(ram_offset,temp);
3068 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3072 emit_xorimm(temp,3,temp);
3073 emit_testimm(temp,2);
3076 emit_testimm(temp,1);
3080 if (opcode[i]==0x2A) { // SWL
3081 emit_writeword_indexed(tl,0,temp);
3083 else if (opcode[i]==0x2E) { // SWR
3084 emit_writebyte_indexed(tl,3,temp);
3091 set_jump_target(case1, out);
3092 if (opcode[i]==0x2A) { // SWL
3093 // Write the three msb into the three least significant bytes
3094 if(rs2[i]) emit_rorimm(tl,8,tl);
3095 emit_writehword_indexed(tl,-1,temp);
3096 if(rs2[i]) emit_rorimm(tl,16,tl);
3097 emit_writebyte_indexed(tl,1,temp);
3098 if(rs2[i]) emit_rorimm(tl,8,tl);
3100 else if (opcode[i]==0x2E) { // SWR
3101 // Write two lsb into two most significant bytes
3102 emit_writehword_indexed(tl,1,temp);
3107 set_jump_target(case2, out);
3108 emit_testimm(temp,1);
3111 if (opcode[i]==0x2A) { // SWL
3112 // Write two msb into two least significant bytes
3113 if(rs2[i]) emit_rorimm(tl,16,tl);
3114 emit_writehword_indexed(tl,-2,temp);
3115 if(rs2[i]) emit_rorimm(tl,16,tl);
3117 else if (opcode[i]==0x2E) { // SWR
3118 // Write the three lsb into the three most significant bytes
3119 emit_writebyte_indexed(tl,-1,temp);
3120 if(rs2[i]) emit_rorimm(tl,8,tl);
3121 emit_writehword_indexed(tl,0,temp);
3122 if(rs2[i]) emit_rorimm(tl,24,tl);
3127 set_jump_target(case3, out);
3128 if (opcode[i]==0x2A) { // SWL
3129 // Write msb into least significant byte
3130 if(rs2[i]) emit_rorimm(tl,24,tl);
3131 emit_writebyte_indexed(tl,-3,temp);
3132 if(rs2[i]) emit_rorimm(tl,8,tl);
3134 else if (opcode[i]==0x2E) { // SWR
3135 // Write entire word
3136 emit_writeword_indexed(tl,-3,temp);
3138 set_jump_target(done0, out);
3139 set_jump_target(done1, out);
3140 set_jump_target(done2, out);
3142 add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist);
3143 if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
3144 emit_addimm_no_flags(-ram_offset,temp);
3145 #if defined(HOST_IMM8)
3146 int ir=get_reg(i_regs->regmap,INVCP);
3148 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3150 emit_cmpmem_indexedsr12_imm(invalid_code,temp,1);
3152 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3153 emit_callne(invalidate_addr_reg[temp]);
3157 add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),temp,0,0,0);
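/* Reference semantics of storelr's four cases (hypothetical little-endian C
 * model; mem is the word at addr&~3, b = addr&3 -- temp is xor'd with 3
 * above, so the fall-through case handles b==3 and case3 handles b==0):
 */
static u_int swl_merge(u_int rt, u_int mem, u_int b)
{
  u_int sh = 24 - 8 * b;                            // b==3 stores the whole word
  return (mem & ~(0xffffffffu >> sh)) | (rt >> sh);
}
static u_int swr_merge(u_int rt, u_int mem, u_int b)
{
  u_int sh = 8 * b;                                 // b==0 stores the whole word
  return (mem & ~(0xffffffffu << sh)) | (rt << sh);
}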
3162 static void cop0_assemble(int i,struct regstat *i_regs)
3164 if(opcode2[i]==0) // MFC0
3166 signed char t=get_reg(i_regs->regmap,rt1[i]);
3167 u_int copr=(source[i]>>11)&0x1f;
3168 //assert(t>=0); // Why does this happen? OOT is weird
3169 if(t>=0&&rt1[i]!=0) {
3170 emit_readword(&reg_cop0[copr],t);
3173 else if(opcode2[i]==4) // MTC0
3175 signed char s=get_reg(i_regs->regmap,rs1[i]);
3176 char copr=(source[i]>>11)&0x1f;
3178 wb_register(rs1[i],i_regs->regmap,i_regs->dirty);
3179 if(copr==9||copr==11||copr==12||copr==13) {
3180 emit_readword(&last_count,HOST_TEMPREG);
3181 emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
3182 emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3183 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3184 emit_writeword(HOST_CCREG,&Count);
3186 // What a mess. The status register (12) can enable interrupts,
3187 // so needs a special case to handle a pending interrupt.
3188 // The interrupt must be taken immediately, because a subsequent
3189 // instruction might disable interrupts again.
3190 if(copr==12||copr==13) {
3192 // burn cycles to cause cc_interrupt, which will
3193 // reschedule next_interupt. Relies on CCREG from above.
3194 assem_debug("MTC0 DS %d\n", copr);
3195 emit_writeword(HOST_CCREG,&last_count);
3196 emit_movimm(0,HOST_CCREG);
3197 emit_storereg(CCREG,HOST_CCREG);
3198 emit_loadreg(rs1[i],1);
3199 emit_movimm(copr,0);
3200 emit_far_call(pcsx_mtc0_ds);
3201 emit_loadreg(rs1[i],s);
3204 emit_movimm(start+i*4+4,HOST_TEMPREG);
3205 emit_writeword(HOST_TEMPREG,&pcaddr);
3206 emit_movimm(0,HOST_TEMPREG);
3207 emit_writeword(HOST_TEMPREG,&pending_exception);
3210 emit_loadreg(rs1[i],1);
3213 emit_movimm(copr,0);
3214 emit_far_call(pcsx_mtc0);
3215 if(copr==9||copr==11||copr==12||copr==13) {
3216 emit_readword(&Count,HOST_CCREG);
3217 emit_readword(&next_interupt,HOST_TEMPREG);
3218 emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3219 emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3220 emit_writeword(HOST_TEMPREG,&last_count);
3221 emit_storereg(CCREG,HOST_CCREG);
3223 if(copr==12||copr==13) {
3224 assert(!is_delayslot);
3225 emit_readword(&pending_exception,14);
3229 emit_readword(&pcaddr, 0);
3230 emit_addimm(HOST_CCREG,2,HOST_CCREG);
3231 emit_far_call(get_addr_ht);
3233 set_jump_target(jaddr, out);
3235 emit_loadreg(rs1[i],s);
3239 assert(opcode2[i]==0x10);
3240 //if((source[i]&0x3f)==0x10) // RFE
3242 emit_readword(&Status,0);
3243 emit_andimm(0,0x3c,1);
3244 emit_andimm(0,~0xf,0);
3245 emit_orrshr_imm(1,2,0);
3246 emit_writeword(0,&Status);
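/* What the four emitted ops compute (hypothetical C model of RFE): the low
 * six Status bits form a 3-deep KU/IE mode stack, and RFE pops it -- the
 * "previous" pair slides into "current" and "old" into "previous", with
 * "old" itself left unchanged:
 */
static u_int rfe_status(u_int status)
{
  return (status & ~0x0fu) | ((status & 0x3cu) >> 2);
}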
3251 static void cop1_unusable(int i,struct regstat *i_regs)
3253 // XXX: should just do the exception instead
3258 add_stub_r(FP_STUB,jaddr,out,i,0,i_regs,is_delayslot,0);
3262 static void cop1_assemble(int i,struct regstat *i_regs)
3264 cop1_unusable(i, i_regs);
3267 static void c1ls_assemble(int i,struct regstat *i_regs)
3269 cop1_unusable(i, i_regs);
3273 static void do_cop1stub(int n)
3276 assem_debug("do_cop1stub %x\n",start+stubs[n].a*4);
3277 set_jump_target(stubs[n].addr, out);
3279 // int rs=stubs[n].b;
3280 struct regstat *i_regs=(struct regstat *)stubs[n].c;
3283 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3284 //if(i_regs!=&regs[i]) printf("oops: regs[i]=%x i_regs=%x",(int)&regs[i],(int)i_regs);
3286 //else {printf("fp exception in delay slot\n");}
3287 wb_dirtys(i_regs->regmap_entry,i_regs->wasdirty);
3288 if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
3289 emit_movimm(start+(i-ds)*4,EAX); // Get PC
3290 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
3291 emit_far_jump(ds?fp_exception_ds:fp_exception);
3294 static int cop2_is_stalling_op(int i, int *cycles)
3296 if (opcode[i] == 0x3a) { // SWC2
3300 if (itype[i] == COP2 && (opcode2[i] == 0 || opcode2[i] == 2)) { // MFC2/CFC2
3304 if (itype[i] == C2OP) {
3305 *cycles = gte_cycletab[source[i] & 0x3f];
3308 // ... what about MTC2/CTC2/LWC2?
3313 static void log_gte_stall(int stall, u_int cycle)
3315 if ((u_int)stall <= 44)
3316 printf("x stall %2d %u\n", stall, cycle + last_count);
3317 if (cycle + last_count > 1215348544) exit(1);
3320 static void emit_log_gte_stall(int i, int stall, u_int reglist)
3324 emit_movimm(stall, 0);
3326 emit_mov(HOST_TEMPREG, 0);
3327 emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
3328 emit_far_call(log_gte_stall);
3329 restore_regs(reglist);
3333 static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist)
3335 int j = i, other_gte_op_cycles = -1, stall = -MAXBLOCK, cycles_passed;
3336 int rtmp = reglist_find_free(reglist);
3338 if (HACK_ENABLED(NDHACK_GTE_NO_STALL))
3340 //assert(get_reg(i_regs->regmap, CCREG) == HOST_CCREG);
3341 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
3342 // happens occasionally... cc evicted? Don't bother then
3343 //printf("no cc %08x\n", start + i*4);
3347 for (j = i - 1; j >= 0; j--) {
3348 //if (is_ds[j]) break;
3349 if (cop2_is_stalling_op(j, &other_gte_op_cycles) || bt[j])
3353 cycles_passed = CLOCK_ADJUST(ccadj[i] - ccadj[j]);
3354 if (other_gte_op_cycles >= 0)
3355 stall = other_gte_op_cycles - cycles_passed;
3356 else if (cycles_passed >= 44)
3357 stall = 0; // can't stall
3358 if (stall == -MAXBLOCK && rtmp >= 0) {
3359 // unknown stall, do the expensive runtime check
3360 assem_debug("; cop2_call_stall_check\n");
3363 emit_movimm(gte_cycletab[op], 0);
3364 emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
3365 emit_far_call(call_gteStall);
3366 restore_regs(reglist);
3368 host_tempreg_acquire();
3369 emit_readword(&psxRegs.gteBusyCycle, rtmp);
3370 emit_addimm(rtmp, -CLOCK_ADJUST(ccadj[i]), rtmp);
3371 emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
3372 emit_cmpimm(HOST_TEMPREG, 44);
3373 emit_cmovb_reg(rtmp, HOST_CCREG);
3374 //emit_log_gte_stall(i, 0, reglist);
3375 host_tempreg_release();
3378 else if (stall > 0) {
3379 //emit_log_gte_stall(i, stall, reglist);
3380 emit_addimm(HOST_CCREG, stall, HOST_CCREG);
3383 // save gteBusyCycle, if needed
3384 if (gte_cycletab[op] == 0)
3386 other_gte_op_cycles = -1;
3387 for (j = i + 1; j < slen; j++) {
3388 if (cop2_is_stalling_op(j, &other_gte_op_cycles))
3392 if (j + 1 < slen && cop2_is_stalling_op(j + 1, &other_gte_op_cycles))
3397 if (other_gte_op_cycles >= 0)
3398 // will handle stall when assembling that op
3400 cycles_passed = CLOCK_ADJUST(ccadj[min(j, slen - 1)] - ccadj[i]);
3401 if (cycles_passed >= 44)
3403 assem_debug("; save gteBusyCycle\n");
3404 host_tempreg_acquire();
3406 emit_readword(&last_count, HOST_TEMPREG);
3407 emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG);
3408 emit_addimm(HOST_TEMPREG, CLOCK_ADJUST(ccadj[i]), HOST_TEMPREG);
3409 emit_addimm(HOST_TEMPREG, gte_cycletab[op], HOST_TEMPREG);
3410 emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
3412 emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]) + gte_cycletab[op], HOST_TEMPREG);
3413 emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
3415 host_tempreg_release();
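/* Rough C model of the runtime check emitted above (hypothetical; `cc` is
 * the cycle count with ccadj applied, `busy` is psxRegs.gteBusyCycle after
 * the same adjustment): since no GTE op takes more than 44 cycles, an
 * unsigned difference below 44 means the GTE is still busy and the cycle
 * counter is bumped to the op's retirement time:
 */
static u_int cc_after_gte_sync(u_int cc, u_int busy)
{
  return (busy - cc < 44u) ? busy : cc; // unsigned wrap-around means "idle"
}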
3418 static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
3428 emit_readword(&reg_cop2d[copr],tl);
3429 emit_signextend16(tl,tl);
3430 emit_writeword(tl,&reg_cop2d[copr]); // hmh
3437 emit_readword(&reg_cop2d[copr],tl);
3438 emit_andimm(tl,0xffff,tl);
3439 emit_writeword(tl,&reg_cop2d[copr]);
3442 emit_readword(&reg_cop2d[14],tl); // SXY2
3443 emit_writeword(tl,&reg_cop2d[copr]);
3447 c2op_mfc2_29_assemble(tl,temp);