some drc debug patches
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
CommitLineData
57871462 1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
20d507ba 3 * Copyright (C) 2009-2011 Ari64 *
57871462 4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21#include <stdlib.h>
22#include <stdint.h> //include for uint64_t
23#include <assert.h>
d848b60a 24#include <errno.h>
4600ba03 25#include <sys/mman.h>
d148d265 26#ifdef __MACH__
27#include <libkern/OSCacheControl.h>
28#endif
1e212a25 29#ifdef _3DS
30#include <3ds_utils.h>
31#endif
32#ifdef VITA
33#include <psp2/kernel/sysmem.h>
34static int sceBlock;
35#endif
57871462 36
d148d265 37#include "new_dynarec_config.h"
dd79da89 38#include "../psxhle.h" //emulator interface
3d624f89 39#include "emu_if.h" //emulator interface
57871462 40
4600ba03 41//#define DISASM
42//#define assem_debug printf
43//#define inv_debug printf
44#define assem_debug(...)
45#define inv_debug(...)
57871462 46
47#ifdef __i386__
48#include "assem_x86.h"
49#endif
50#ifdef __x86_64__
51#include "assem_x64.h"
52#endif
53#ifdef __arm__
54#include "assem_arm.h"
55#endif
56
57#define MAXBLOCK 4096
58#define MAX_OUTPUT_BLOCK_SIZE 262144
2573466a 59
// Per-instruction register-allocation state tracked by the recompiler.
// One regstat describes the host<->guest register mapping and constant/
// dirtiness knowledge at a given point in the block.
struct regstat
{
  signed char regmap_entry[HOST_REGS]; // mapping required on entry to this insn
  signed char regmap[HOST_REGS];       // host reg -> guest reg (-1 = unallocated)
  uint64_t was32;    // guest regs known 32-bit (sign-extended) before this insn
  uint64_t is32;     // same, after this insn
  uint64_t wasdirty; // host regs holding values not yet written back (before)
  uint64_t dirty;    // same, after this insn
  uint64_t u;        // guest regs whose (lower) value is unneeded afterwards
  uint64_t uu;       // guest regs whose upper half is unneeded afterwards
  u_int wasconst;    // host regs that held known constants (before)
  u_int isconst;     // host regs holding known constants (after)
  u_int loadedconst; // host regs that have constants loaded
  u_int waswritten; // MIPS regs that were used as store base before
};
75
// Linked-list node mapping a guest virtual address to compiled host code.
// note: asm depends on this layout -- do not reorder fields
struct ll_entry
{
  u_int vaddr;         // guest virtual address of the block entry point
  u_int reg_sv_flags;  // per-entry register flags checked by asm stubs
                       // (NOTE(review): exact semantics defined in assem_* -- verify there)
  void *addr;          // host address of the compiled code
  struct ll_entry *next;
};
84
e2b5e7aa 85 // used by asm:
86 u_char *out;
87 u_int hash_table[65536][4] __attribute__((aligned(16)));
88 struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
89 struct ll_entry *jump_dirty[4096];
90
91 static struct ll_entry *jump_out[4096];
92 static u_int start;
93 static u_int *source;
94 static char insn[MAXBLOCK][10];
95 static u_char itype[MAXBLOCK];
96 static u_char opcode[MAXBLOCK];
97 static u_char opcode2[MAXBLOCK];
98 static u_char bt[MAXBLOCK];
99 static u_char rs1[MAXBLOCK];
100 static u_char rs2[MAXBLOCK];
101 static u_char rt1[MAXBLOCK];
102 static u_char rt2[MAXBLOCK];
103 static u_char us1[MAXBLOCK];
104 static u_char us2[MAXBLOCK];
105 static u_char dep1[MAXBLOCK];
106 static u_char dep2[MAXBLOCK];
107 static u_char lt1[MAXBLOCK];
bedfea38 108 static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
109 static uint64_t gte_rt[MAXBLOCK];
110 static uint64_t gte_unneeded[MAXBLOCK];
ffb0b9e0 111 static u_int smrv[32]; // speculated MIPS register values
112 static u_int smrv_strong; // mask or regs that are likely to have correct values
113 static u_int smrv_weak; // same, but somewhat less likely
114 static u_int smrv_strong_next; // same, but after current insn executes
115 static u_int smrv_weak_next;
e2b5e7aa 116 static int imm[MAXBLOCK];
117 static u_int ba[MAXBLOCK];
118 static char likely[MAXBLOCK];
119 static char is_ds[MAXBLOCK];
120 static char ooo[MAXBLOCK];
121 static uint64_t unneeded_reg[MAXBLOCK];
122 static uint64_t unneeded_reg_upper[MAXBLOCK];
123 static uint64_t branch_unneeded_reg[MAXBLOCK];
124 static uint64_t branch_unneeded_reg_upper[MAXBLOCK];
125 static signed char regmap_pre[MAXBLOCK][HOST_REGS];
956f3129 126 static uint64_t current_constmap[HOST_REGS];
127 static uint64_t constmap[MAXBLOCK][HOST_REGS];
128 static struct regstat regs[MAXBLOCK];
129 static struct regstat branch_regs[MAXBLOCK];
e2b5e7aa 130 static signed char minimum_free_regs[MAXBLOCK];
131 static u_int needed_reg[MAXBLOCK];
132 static u_int wont_dirty[MAXBLOCK];
133 static u_int will_dirty[MAXBLOCK];
134 static int ccadj[MAXBLOCK];
135 static int slen;
136 static u_int instr_addr[MAXBLOCK];
137 static u_int link_addr[MAXBLOCK][3];
138 static int linkcount;
139 static u_int stubs[MAXBLOCK*3][8];
140 static int stubcount;
141 static u_int literals[1024][2];
142 static int literalcount;
143 static int is_delayslot;
144 static int cop1_usable;
145 static char shadow[1048576] __attribute__((aligned(16)));
146 static void *copy;
147 static int expirep;
148 static u_int stop_after_jal;
a327ad27 149#ifndef RAM_FIXED
150 static u_int ram_offset;
151#else
152 static const u_int ram_offset=0;
153#endif
e2b5e7aa 154
155 int new_dynarec_hacks;
156 int new_dynarec_did_compile;
57871462 157 extern u_char restore_candidate[512];
158 extern int cycle_count;
159
160 /* registers that may be allocated */
161 /* 1-31 gpr */
162#define HIREG 32 // hi
163#define LOREG 33 // lo
164#define FSREG 34 // FPU status (FCSR)
165#define CSREG 35 // Coprocessor status
166#define CCREG 36 // Cycle count
167#define INVCP 37 // Pointer to invalid_code
1edfcc68 168//#define MMREG 38 // Pointer to memory_map
619e5ded 169#define ROREG 39 // ram offset (if rdram!=0x80000000)
170#define TEMPREG 40
171#define FTEMP 40 // FPU temporary register
172#define PTEMP 41 // Prefetch temporary register
1edfcc68 173//#define TLREG 42 // TLB mapping offset
619e5ded 174#define RHASH 43 // Return address hash
175#define RHTBL 44 // Return address hash table address
176#define RTEMP 45 // JR/JALR address register
177#define MAXREG 45
178#define AGEN1 46 // Address generation temporary register
1edfcc68 179//#define AGEN2 47 // Address generation temporary register
180//#define MGEN1 48 // Maptable address generation temporary register
181//#define MGEN2 49 // Maptable address generation temporary register
619e5ded 182#define BTREG 50 // Branch target temporary register
57871462 183
184 /* instruction types */
185#define NOP 0 // No operation
186#define LOAD 1 // Load
187#define STORE 2 // Store
188#define LOADLR 3 // Unaligned load
189#define STORELR 4 // Unaligned store
9f51b4b9 190#define MOV 5 // Move
57871462 191#define ALU 6 // Arithmetic/logic
192#define MULTDIV 7 // Multiply/divide
193#define SHIFT 8 // Shift by register
194#define SHIFTIMM 9// Shift by immediate
195#define IMM16 10 // 16-bit immediate
196#define RJUMP 11 // Unconditional jump to register
197#define UJUMP 12 // Unconditional jump
198#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
199#define SJUMP 14 // Conditional branch (regimm format)
200#define COP0 15 // Coprocessor 0
201#define COP1 16 // Coprocessor 1
202#define C1LS 17 // Coprocessor 1 load/store
203#define FJUMP 18 // Conditional branch (floating point)
204#define FLOAT 19 // Floating point unit
205#define FCONV 20 // Convert integer to float
206#define FCOMP 21 // Floating point compare (sets FSREG)
207#define SYSCALL 22// SYSCALL
208#define OTHER 23 // Other
209#define SPAN 24 // Branch/delay slot spans 2 pages
210#define NI 25 // Not implemented
7139f3c8 211#define HLECALL 26// PCSX fake opcodes for HLE
b9b61529 212#define COP2 27 // Coprocessor 2 move
213#define C2LS 28 // Coprocessor 2 load/store
214#define C2OP 29 // Coprocessor 2 operation
1e973cb0 215#define INTCALL 30// Call interpreter to handle rare corner cases
57871462 216
217 /* stubs */
218#define CC_STUB 1
219#define FP_STUB 2
220#define LOADB_STUB 3
221#define LOADH_STUB 4
222#define LOADW_STUB 5
223#define LOADD_STUB 6
224#define LOADBU_STUB 7
225#define LOADHU_STUB 8
226#define STOREB_STUB 9
227#define STOREH_STUB 10
228#define STOREW_STUB 11
229#define STORED_STUB 12
230#define STORELR_STUB 13
231#define INVCODE_STUB 14
232
233 /* branch codes */
234#define TAKEN 1
235#define NOTTAKEN 2
236#define NULLDS 3
237
238// asm linkage
239int new_recompile_block(int addr);
240void *get_addr_ht(u_int vaddr);
241void invalidate_block(u_int block);
242void invalidate_addr(u_int addr);
243void remove_hash(int vaddr);
57871462 244void dyna_linker();
245void dyna_linker_ds();
246void verify_code();
247void verify_code_vm();
248void verify_code_ds();
249void cc_interrupt();
250void fp_exception();
251void fp_exception_ds();
7139f3c8 252void jump_syscall_hle();
7139f3c8 253void jump_hlecall();
1e973cb0 254void jump_intcall();
7139f3c8 255void new_dyna_leave();
57871462 256
57871462 257// Needed by assembler
e2b5e7aa 258static void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
259static void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
260static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
261static void load_all_regs(signed char i_regmap[]);
262static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
263static void load_regs_entry(int t);
264static void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
265
266static int verify_dirty(u_int *ptr);
267static int get_final_value(int hr, int i, int *value);
268static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e);
269static void add_to_linker(int addr,int target,int ext);
57871462 270
e2b5e7aa 271static int tracedebug=0;
57871462 272
// Toggle the translation-cache pages between writable and executable on
// platforms that forbid simultaneous W+X mappings (NO_WRITE_EXEC).
// is_x != 0 requests executable, otherwise writable.  No-op elsewhere.
static void mprotect_w_x(void *start, void *end, int is_x)
{
#ifdef NO_WRITE_EXEC
  #if defined(VITA)
  // *Open* enables write on all memory that was
  // allocated by sceKernelAllocMemBlockForVM()?
  if (is_x)
    sceKernelCloseVMDomain();
  else
    sceKernelOpenVMDomain();
  #else
  u_long mstart = (u_long)start & ~4095ul;  // round down to a 4K page boundary
  u_long mend = (u_long)end;
  if (mprotect((void *)mstart, mend - mstart,
               PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
    SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
  #endif
#endif
}
292
// Make [start,end) of the translation cache writable before emitting code.
static void start_tcache_write(void *start, void *end)
{
  mprotect_w_x(start, end, 0);
}
297
// Finish writing generated code in [start,end): flush/invalidate the
// instruction cache for that range (ARM targets), then flip the pages
// back to executable.
static void end_tcache_write(void *start, void *end)
{
#ifdef __arm__
  size_t len = (char *)end - (char *)start;
  #if defined(__BLACKBERRY_QNX__)
  msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
  #elif defined(__MACH__)
  sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
  #elif defined(VITA)
  sceKernelSyncVMDomain(sceBlock, start, len);
  #elif defined(_3DS)
  ctr_flush_invalidate_cache();
  #else
  __clear_cache(start, end);
  #endif
  (void)len;  // some branches above don't use it
#endif

  mprotect_w_x(start, end, 1);
}
318
// Begin emitting a new block at the current output pointer.  Opens (at most)
// MAX_OUTPUT_BLOCK_SIZE bytes for writing, clamped to the end of the
// translation cache, and returns the block's start address.
static void *start_block(void)
{
  u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
  if (end > (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2))
    end = (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2);
  start_tcache_write(out, end);
  return out;
}
327
// Close the block opened by start_block(): sync caches and re-protect
// everything from 'start' up to the current output pointer.
static void end_block(void *start)
{
  end_tcache_write(start, out);
}
332
57871462 333//#define DEBUG_CYCLE_COUNT 1
334
b6e87b2b 335#define NO_CYCLE_PENALTY_THR 12
336
int cycle_multiplier; // 100 for 1.0

// Scale a raw cycle count by cycle_multiplier (a percentage),
// rounding half away from zero so positive and negative counts
// are treated symmetrically.
static int CLOCK_ADJUST(int x)
{
  int sign = (x < 0) ? -1 : 1;
  return (x * cycle_multiplier + sign * 50) / 100;
}
344
94d23bb9 345static u_int get_page(u_int vaddr)
57871462 346{
0ce47d46 347 u_int page=vaddr&~0xe0000000;
348 if (page < 0x1000000)
349 page &= ~0x0e00000; // RAM mirrors
350 page>>=12;
57871462 351 if(page>2048) page=2048+(page&2047);
94d23bb9 352 return page;
353}
354
// no virtual mem in PCSX
// (virtual and physical pages are identical here, so this is just get_page)
static u_int get_vpage(u_int vaddr)
{
  return get_page(vaddr);
}
94d23bb9 360
// Get address from virtual address
// This is called from the recompiled JR/JALR instructions
// Looks up compiled code for vaddr: first in the clean jump_in list
// (promoting a hit into the hash table), then in jump_dirty where an
// unmodified block may be revalidated and restored.  If nothing is
// found, the block is recompiled; failure raises a guest exception.
void *get_addr(u_int vaddr)
{
  u_int page=get_page(vaddr);
  u_int vpage=get_vpage(vaddr);
  struct ll_entry *head;
  head=jump_in[page];
  while(head!=NULL) {
    if(head->vaddr==vaddr) {
      // Hit: push this entry into the front slot of its hash bin
      // (old front entry moves to the second slot).
      u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
      ht_bin[3]=ht_bin[1];
      ht_bin[2]=ht_bin[0];
      ht_bin[1]=(u_int)head->addr;
      ht_bin[0]=vaddr;
      return head->addr;
    }
    head=head->next;
  }
  head=jump_dirty[vpage];
  while(head!=NULL) {
    if(head->vaddr==vaddr) {
      // Don't restore blocks which are about to expire from the cache
      if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
      if(verify_dirty(head->addr)) {
        // Source is unchanged: mark the page valid again and flag it
        // so clean_blocks() can move entries back to the clean list.
        invalid_code[vaddr>>12]=0;
        inv_code_start=inv_code_end=~0;
        if(vpage<2048) {
          restore_candidate[vpage>>3]|=1<<(vpage&7);
        }
        else restore_candidate[page>>3]|=1<<(page&7);
        u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
        if(ht_bin[0]==vaddr) {
          ht_bin[1]=(u_int)head->addr; // Replace existing entry
        }
        else
        {
          ht_bin[3]=ht_bin[1];
          ht_bin[2]=ht_bin[0];
          ht_bin[1]=(int)head->addr;
          ht_bin[0]=vaddr;
        }
        return head->addr;
      }
    }
    head=head->next;
  }
  // Not compiled (or dirty): compile now, then retry the lookup.
  int r=new_recompile_block(vaddr);
  if(r==0) return get_addr(vaddr);
  // Execute in unmapped page, generate pagefault exception
  Status|=2;
  Cause=(vaddr<<31)|0x8;
  EPC=(vaddr&1)?vaddr-5:vaddr;
  BadVAddr=(vaddr&~1);
  Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
  EntryHi=BadVAddr&0xFFFFE000;
  return get_addr_ht(0x80000000);
}
// Look up address in hash table first
// Fast path for block dispatch: each hash bin holds two (vaddr, host addr)
// pairs; fall back to the full get_addr() lookup on a miss.
void *get_addr_ht(u_int vaddr)
{
  u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
  if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
  if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
  return get_addr(vaddr);
}
433
57871462 434void clear_all_regs(signed char regmap[])
435{
436 int hr;
437 for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
438}
439
440signed char get_reg(signed char regmap[],int r)
441{
442 int hr;
443 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
444 return -1;
445}
446
447// Find a register that is available for two consecutive cycles
448signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
449{
450 int hr;
451 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
452 return -1;
453}
454
455int count_free_regs(signed char regmap[])
456{
457 int count=0;
458 int hr;
459 for(hr=0;hr<HOST_REGS;hr++)
460 {
461 if(hr!=EXCLUDE_REG) {
462 if(regmap[hr]<0) count++;
463 }
464 }
465 return count;
466}
467
468void dirty_reg(struct regstat *cur,signed char reg)
469{
470 int hr;
471 if(!reg) return;
472 for (hr=0;hr<HOST_REGS;hr++) {
473 if((cur->regmap[hr]&63)==reg) {
474 cur->dirty|=1<<hr;
475 }
476 }
477}
478
// If we dirty the lower half of a 64 bit register which is now being
// sign-extended, we need to dump the upper half.
// Note: Do this only after completion of the instruction, because
// some instructions may need to read the full 64-bit value even if
// overwriting it (eg SLTI, DSRA32).
static void flush_dirty_uppers(struct regstat *cur)
{
  int hr,reg;
  for (hr=0;hr<HOST_REGS;hr++) {
    if((cur->dirty>>hr)&1) {
      reg=cur->regmap[hr];
      // mappings >=64 hold the upper half of guest reg (reg&63);
      // if that guest reg is now 32-bit, the upper half is stale
      if(reg>=64)
      if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
    }
  }
}
495
// Record that guest register 'reg' holds the known constant 'value'.
// Both halves of a 64-bit guest register may be mapped: a host reg
// mapped to reg itself gets the low word, one mapped to reg^64 (the
// upper-half mapping) gets the high word.  r0 is ignored.
void set_const(struct regstat *cur,signed char reg,uint64_t value)
{
  int hr;
  if(!reg) return;
  for (hr=0;hr<HOST_REGS;hr++) {
    if(cur->regmap[hr]==reg) {
      cur->isconst|=1<<hr;
      current_constmap[hr]=value;
    }
    else if((cur->regmap[hr]^64)==reg) {
      cur->isconst|=1<<hr;
      current_constmap[hr]=value>>32;
    }
  }
}
511
// Forget any known-constant status for guest register 'reg'
// (both halves, via the &63 mask).  r0 is ignored.
void clear_const(struct regstat *cur,signed char reg)
{
  int hr;
  if(!reg) return;
  for (hr=0;hr<HOST_REGS;hr++) {
    if((cur->regmap[hr]&63)==reg) {
      cur->isconst&=~(1<<hr);
    }
  }
}
522
// Return nonzero if guest register 'reg' currently holds a known
// constant.  r0 is always constant (zero); unmapped/negative regs are not.
int is_const(struct regstat *cur,signed char reg)
{
  int hr;
  if(reg<0) return 0;
  if(!reg) return 1;
  for (hr=0;hr<HOST_REGS;hr++) {
    if((cur->regmap[hr]&63)==reg) {
      return (cur->isconst>>hr)&1;
    }
  }
  return 0;
}
// Return the known constant held by guest register 'reg' (r0 -> 0).
// Callers must check is_const() first: an unknown register is a fatal
// internal error and aborts.
uint64_t get_const(struct regstat *cur,signed char reg)
{
  int hr;
  if(!reg) return 0;
  for (hr=0;hr<HOST_REGS;hr++) {
    if(cur->regmap[hr]==reg) {
      return current_constmap[hr];
    }
  }
  SysPrintf("Unknown constant in r%d\n",reg);
  exit(1);
}
547
// Least soon needed registers
// Look at the next ten instructions and see which registers
// will be used. Try not to reallocate these.
// hsn[r] is set to the distance (in instructions) to the next use of
// guest register r; smaller means "needed sooner".  Special registers
// (CCREG, FTEMP, RHASH, RHTBL, INVCP) are pinned where the following
// instructions implicitly need them.
void lsn(u_char hsn[], int i, int *preferred_reg)
{
  int j;
  int b=-1;
  // Find the scan horizon: up to 9 insns, clipped at block end or
  // at the first unconditional jump (0x1000 upper bits = branch-to-self/J).
  for(j=0;j<9;j++)
  {
    if(i+j>=slen) {
      j=slen-i-1;
      break;
    }
    if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
    {
      // Don't go past an unconditional jump
      j++;
      break;
    }
  }
  // Walk backwards so nearer uses overwrite farther ones.
  for(;j>=0;j--)
  {
    if(rs1[i+j]) hsn[rs1[i+j]]=j;
    if(rs2[i+j]) hsn[rs2[i+j]]=j;
    if(rt1[i+j]) hsn[rt1[i+j]]=j;
    if(rt2[i+j]) hsn[rt2[i+j]]=j;
    if(itype[i+j]==STORE || itype[i+j]==STORELR) {
      // Stores can allocate zero
      hsn[rs1[i+j]]=j;
      hsn[rs2[i+j]]=j;
    }
    // On some architectures stores need invc_ptr
    #if defined(HOST_IMM8)
    if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
      hsn[INVCP]=j;
    }
    #endif
    if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
    {
      hsn[CCREG]=j;  // branches need the cycle count
      b=j;           // remember last branch in window
    }
  }
  if(b>=0)
  {
    if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
    {
      // Follow first branch
      int t=(ba[i+b]-start)>>2;
      j=7-b;if(t+j>=slen) j=slen-t-1;
      for(;j>=0;j--)
      {
        // Uses at the branch target count, but at a +2 penalty distance.
        if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
        if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
      }
    }
    // TODO: preferred register based on backward branch
  }
  // Delay slot should preferably not overwrite branch conditions or cycle count
  if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
    if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
    if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
    hsn[CCREG]=1;
    // ...or hash tables
    hsn[RHASH]=1;
    hsn[RHTBL]=1;
  }
  // Coprocessor load/store needs FTEMP, even if not declared
  if(itype[i]==C1LS||itype[i]==C2LS) {
    hsn[FTEMP]=0;
  }
  // Load L/R also uses FTEMP as a temporary register
  if(itype[i]==LOADLR) {
    hsn[FTEMP]=0;
  }
  // Also SWL/SWR/SDL/SDR
  if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
    hsn[FTEMP]=0;
  }
  // Don't remove the miniht registers
  if(itype[i]==UJUMP||itype[i]==RJUMP)
  {
    hsn[RHASH]=0;
    hsn[RHTBL]=0;
  }
}
636
// We only want to allocate registers if we're going to use them again soon
// Returns 1 if guest register r is read within the next few instructions
// (and not marked unneeded first), otherwise 0.
int needed_again(int r, int i)
{
  int j;
  int b=-1;
  int rn=10;  // 10 == "not needed within window"

  if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
  {
    if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
      return 0; // Don't need any registers if exiting the block
  }
  // Determine the scan horizon (same clipping rules as lsn()).
  for(j=0;j<9;j++)
  {
    if(i+j>=slen) {
      j=slen-i-1;
      break;
    }
    if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
    {
      // Don't go past an unconditional jump
      j++;
      break;
    }
    // Stop at syscall/HLE/interpreter calls and BREAK (0x0d).
    if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
    {
      break;
    }
  }
  // Scan backwards for the nearest read of r; a later "unneeded" mark
  // overrides any farther use.
  for(;j>=1;j--)
  {
    if(rs1[i+j]==r) rn=j;
    if(rs2[i+j]==r) rn=j;
    if((unneeded_reg[i+j]>>r)&1) rn=10;
    if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
    {
      b=j;
    }
  }
  if(rn<10) return 1;
  (void)b;  // branch-following heuristic removed; b kept for reference
  return 0;
}
699
// Try to match register allocations at the end of a loop with those
// at the beginning
// If a backward branch within the scan window targets an earlier point
// in the block, prefer the host register that holds r at that target;
// otherwise return the proposed host register hr unchanged.
int loop_reg(int i, int r, int hr)
{
  int j,k;
  // Scan horizon: up to 9 insns, clipped at block end or an
  // unconditional jump (same rules as lsn()/needed_again()).
  for(j=0;j<9;j++)
  {
    if(i+j>=slen) {
      j=slen-i-1;
      break;
    }
    if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
    {
      // Don't go past an unconditional jump
      j++;
      break;
    }
  }
  k=0;
  if(i>0){
    // Include the preceding branch (we may be in its delay slot).
    if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
      k--;
  }
  for(;k<j;k++)
  {
    // If r becomes unneeded inside the window, no matching is required.
    if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
    if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
    if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
    {
      // Backward branch into this block: match the target's entry map.
      if(ba[i+k]>=start && ba[i+k]<(start+i*4))
      {
        int t=(ba[i+k]-start)>>2;
        int reg=get_reg(regs[t].regmap_entry,r);
        if(reg>=0) return reg;
      }
    }
  }
  return hr;
}
741
742
// Allocate every register, preserving source/target regs
// Frees every host register except those mapped to the current
// instruction's sources (rs1/rs2) and targets (rt1/rt2); cleared
// registers also lose their dirty bit.
void alloc_all(struct regstat *cur,int i)
{
  int hr;

  for(hr=0;hr<HOST_REGS;hr++) {
    if(hr!=EXCLUDE_REG) {
      if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
        ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
      {
        cur->regmap[hr]=-1;
        cur->dirty&=~(1<<hr);
      }
      // Don't need zeros
      if((cur->regmap[hr]&63)==0)
      {
        cur->regmap[hr]=-1;
        cur->dirty&=~(1<<hr);
      }
    }
  }
}
765
57871462 766#ifdef __i386__
767#include "assem_x86.c"
768#endif
769#ifdef __x86_64__
770#include "assem_x64.c"
771#endif
772#ifdef __arm__
773#include "assem_arm.c"
774#endif
775
776// Add virtual address mapping to linked list
777void ll_add(struct ll_entry **head,int vaddr,void *addr)
778{
779 struct ll_entry *new_entry;
780 new_entry=malloc(sizeof(struct ll_entry));
781 assert(new_entry!=NULL);
782 new_entry->vaddr=vaddr;
de5a60c3 783 new_entry->reg_sv_flags=0;
57871462 784 new_entry->addr=addr;
785 new_entry->next=*head;
786 *head=new_entry;
787}
788
// Same as ll_add(), but also sets reg_sv_flags on the new entry
// (which ll_add leaves at 0).
void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
{
  ll_add(head,vaddr,addr);
  (*head)->reg_sv_flags=reg_sv_flags;
}
794
// Check if an address is already compiled
// but don't return addresses which are about to expire from the cache
// Tries the hash table first (verifying the block is clean), then the
// jump_in list, opportunistically refreshing hash-table slots on a hit.
// The shifted-difference comparisons test distance from the current
// output pointer to reject blocks near the cache expiry point.
void *check_addr(u_int vaddr)
{
  u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
  if(ht_bin[0]==vaddr) {
    if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
      if(isclean(ht_bin[1])) return (void *)ht_bin[1];
  }
  if(ht_bin[2]==vaddr) {
    if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
      if(isclean(ht_bin[3])) return (void *)ht_bin[3];
  }
  u_int page=get_page(vaddr);
  struct ll_entry *head;
  head=jump_in[page];
  while(head!=NULL) {
    if(head->vaddr==vaddr) {
      if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
        // Update existing entry with current address
        if(ht_bin[0]==vaddr) {
          ht_bin[1]=(int)head->addr;
          return head->addr;
        }
        if(ht_bin[2]==vaddr) {
          ht_bin[3]=(int)head->addr;
          return head->addr;
        }
        // Insert into hash table with low priority.
        // Don't evict existing entries, as they are probably
        // addresses that are being accessed frequently.
        if(ht_bin[0]==-1) {
          ht_bin[1]=(int)head->addr;
          ht_bin[0]=vaddr;
        }else if(ht_bin[2]==-1) {
          ht_bin[3]=(int)head->addr;
          ht_bin[2]=vaddr;
        }
        return head->addr;
      }
    }
    head=head->next;
  }
  return 0;
}
840
// Remove vaddr from its hash-table bin.  If the first slot matches,
// the second slot is promoted into it; matching slots are reset to -1.
void remove_hash(int vaddr)
{
  u_int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
  if(ht_bin[2]==vaddr) {
    ht_bin[2]=ht_bin[3]=-1;
  }
  if(ht_bin[0]==vaddr) {
    ht_bin[0]=ht_bin[2];
    ht_bin[1]=ht_bin[3];
    ht_bin[2]=ht_bin[3]=-1;
  }
}
854
// Free all list entries whose compiled-code address falls in the
// (addr>>shift) bucket (either directly or within one output-block span),
// removing their hash-table entries too.  Used when expiring old blocks
// from the translation cache.
void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
{
  struct ll_entry *next;
  while(*head) {
    if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
       ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
    {
      inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
      remove_hash((*head)->vaddr);
      next=(*head)->next;
      free(*head);
      *head=next;
    }
    else
    {
      head=&((*head)->next);
    }
  }
}
874
875// Remove all entries from linked list
876void ll_clear(struct ll_entry **head)
877{
878 struct ll_entry *cur;
879 struct ll_entry *next;
581335b0 880 if((cur=*head)) {
57871462 881 *head=0;
882 while(cur) {
883 next=cur->next;
884 free(cur);
885 cur=next;
886 }
887 }
888}
889
// Dereference the pointers and remove if it matches
// For each entry, follow the branch in its compiled code; if that branch
// targets code in the (addr>>shift) bucket being expired, redirect the
// external jump back to its own entry point (re-entering the linker).
static void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
{
  while(head) {
    int ptr=get_pointer(head->addr);
    inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
    if(((ptr>>shift)==(addr>>shift)) ||
       (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
    {
      inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
      void *host_addr=find_extjump_insn(head->addr);
      #ifdef __arm__
      mark_clear_cache(host_addr);
      #endif
      set_jump_target((int)host_addr,(int)head->addr);
    }
    head=head->next;
  }
}
909
// This is called when we write to a compiled block (see do_invstub)
// Drops every jump_in entry for the page (freeing nodes and removing
// their hash entries) and unlinks every outgoing jump recorded in
// jump_out by retargeting it back to the dynamic linker.
void invalidate_page(u_int page)
{
  struct ll_entry *head;
  struct ll_entry *next;
  head=jump_in[page];
  jump_in[page]=0;
  while(head!=NULL) {
    inv_debug("INVALIDATE: %x\n",head->vaddr);
    remove_hash(head->vaddr);
    next=head->next;
    free(head);
    head=next;
  }
  head=jump_out[page];
  jump_out[page]=0;
  while(head!=NULL) {
    inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
    void *host_addr=find_extjump_insn(head->addr);
    #ifdef __arm__
    mark_clear_cache(host_addr);
    #endif
    set_jump_target((int)host_addr,(int)head->addr);
    next=head->next;
    free(head);
    head=next;
  }
}
9be4ba64 938
// Invalidate the pages spanned by a block: the block's own page plus
// the [first,last] page range a block may cross (blocks can span up to
// MAXBLOCK bytes, i.e. a few 4K pages).  Also marks the guest page as
// invalid_code so writes to it are no longer trapped.
static void invalidate_block_range(u_int block, u_int first, u_int last)
{
  u_int page=get_page(block<<12);
  invalidate_page(page);
  assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
  assert(last<page+5);
  // Invalidate the adjacent pages if a block crosses a 4K boundary
  while(first<page) {
    invalidate_page(first);
    first++;
  }
  for(first=page+1;first<last;first++) {
    invalidate_page(first);
  }
  #ifdef __arm__
  do_clear_cache();
  #endif

  // Don't trap writes
  invalid_code[block]=1;

  #ifdef USE_MINI_HT
  memset(mini_ht,-1,sizeof(mini_ht));
  #endif
}
9be4ba64 965
// Invalidate the compiled code covering guest page 'block' (a 4K page
// index).  Scans jump_dirty for blocks overlapping the page to widen
// the [first,last] page range before delegating to invalidate_block_range.
void invalidate_block(u_int block)
{
  u_int page=get_page(block<<12);
  u_int vpage=get_vpage(block<<12);
  inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
  u_int first,last;
  first=last=page;
  struct ll_entry *head;
  head=jump_dirty[vpage];
  while(head!=NULL) {
    u_int start,end;
    if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
      get_bounds((int)head->addr,&start,&end);
      // Only RAM-resident source ranges can widen the page span.
      if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
        if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
          if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
          if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
        }
      }
    }
    head=head->next;
  }
  invalidate_block_range(block,first,last);
}
993
// Invalidate compiled code affected by a write to 'addr'.  For RAM,
// computes the precise range of blocks overlapping the address and also
// maintains the inv_code_start/inv_code_end "known clean" window so
// repeated writes to code-free areas skip this work.  Non-RAM addresses
// fall through to whole-page invalidation.
void invalidate_addr(u_int addr)
{
  //static int rhits;
  // this check is done by the caller
  //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
  u_int page=get_vpage(addr);
  if(page<2048) { // RAM
    struct ll_entry *head;
    u_int addr_min=~0, addr_max=0;
    u_int mask=RAM_SIZE-1;
    u_int addr_main=0x80000000|(addr&mask);  // canonical kseg0 alias
    int pg1;
    inv_code_start=addr_main&~0xfff;
    inv_code_end=addr_main|0xfff;
    pg1=page;
    if (pg1>0) {
      // must check previous page too because of spans..
      pg1--;
      inv_code_start-=0x1000;
    }
    for(;pg1<=page;pg1++) {
      for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
        u_int start,end;
        get_bounds((int)head->addr,&start,&end);
        if(ram_offset) {
          start-=ram_offset;
          end-=ram_offset;
        }
        if(start<=addr_main&&addr_main<end) {
          // Block overlaps the written address: widen the hit range.
          if(start<addr_min) addr_min=start;
          if(end>addr_max) addr_max=end;
        }
        else if(addr_main<start) {
          // Shrink the clean window from above/below to exclude this block.
          if(start<inv_code_end)
            inv_code_end=start-1;
        }
        else {
          if(end>inv_code_start)
            inv_code_start=end;
        }
      }
    }
    if (addr_min!=~0) {
      inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
      inv_code_start=inv_code_end=~0;
      invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
      return;
    }
    else {
      // No code here; remember the clean window in the caller's mirror.
      inv_code_start=(addr&~mask)|(inv_code_start&mask);
      inv_code_end=(addr&~mask)|(inv_code_end&mask);
      inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
      return;
    }
  }
  invalidate_block(addr>>12);
}
9be4ba64 1051
dd3a91a1 1052// This is called when loading a save state.
1053// Anything could have changed, so invalidate everything.
57871462 1054void invalidate_all_pages()
1055{
581335b0 1056 u_int page;
57871462 1057 for(page=0;page<4096;page++)
1058 invalidate_page(page);
1059 for(page=0;page<1048576;page++)
1060 if(!invalid_code[page]) {
1061 restore_candidate[(page&2047)>>3]|=1<<(page&7);
1062 restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1063 }
57871462 1064 #ifdef USE_MINI_HT
1065 memset(mini_ht,-1,sizeof(mini_ht));
1066 #endif
57871462 1067}
1068
1069// Add an entry to jump_out after making a link
1070void add_link(u_int vaddr,void *src)
1071{
94d23bb9 1072 u_int page=get_page(vaddr);
57871462 1073 inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
76f71c27 1074 int *ptr=(int *)(src+4);
1075 assert((*ptr&0x0fff0000)==0x059f0000);
581335b0 1076 (void)ptr;
57871462 1077 ll_add(jump_out+page,vaddr,src);
1078 //int ptr=get_pointer(src);
1079 //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1080}
1081
1082// If a code block was found to be unmodified (bit was set in
1083// restore_candidate) and it remains unmodified (bit is clear
1084// in invalid_code) then move the entries for that 4K page from
1085// the dirty list to the clean list.
void clean_blocks(u_int page)
{
  struct ll_entry *head;
  inv_debug("INV: clean_blocks page=%d\n",page);
  head=jump_dirty[page];
  while(head!=NULL) {
    if(!invalid_code[head->vaddr>>12]) {
      // Don't restore blocks which are about to expire from the cache
      if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
        u_int start,end;
        if(verify_dirty(head->addr)) {
          //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
          u_int i;
          u_int inv=0;
          get_bounds((int)head->addr,&start,&end);
          if(start-(u_int)rdram<RAM_SIZE) {
            // Block reads from RAM: restorable only if every 4K page it
            // covers is still marked valid
            for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
              inv|=invalid_code[i];
            }
          }
          else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
            // Outside RAM: never restore
            inv=1;
          }
          if(!inv) {
            void * clean_addr=(void *)get_clean_addr((int)head->addr);
            // Same expiry check for the clean entry point
            if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
              u_int ppage=page;
              inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
              //printf("page=%x, addr=%x\n",page,head->vaddr);
              //assert(head->vaddr>>12==(page|0x80000));
              // Add to the clean list and patch any matching hash-table slots
              ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
              u_int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
              if(ht_bin[0]==head->vaddr) {
                ht_bin[1]=(u_int)clean_addr; // Replace existing entry
              }
              if(ht_bin[2]==head->vaddr) {
                ht_bin[3]=(u_int)clean_addr; // Replace existing entry
              }
            }
          }
        }
      }
    }
    head=head->next;
  }
}
1132
1133
1134void mov_alloc(struct regstat *current,int i)
1135{
1136 // Note: Don't need to actually alloc the source registers
1137 if((~current->is32>>rs1[i])&1) {
1138 //alloc_reg64(current,i,rs1[i]);
1139 alloc_reg64(current,i,rt1[i]);
1140 current->is32&=~(1LL<<rt1[i]);
1141 } else {
1142 //alloc_reg(current,i,rs1[i]);
1143 alloc_reg(current,i,rt1[i]);
1144 current->is32|=(1LL<<rt1[i]);
1145 }
1146 clear_const(current,rs1[i]);
1147 clear_const(current,rt1[i]);
1148 dirty_reg(current,rt1[i]);
1149}
1150
// Register allocation for shift-by-immediate instructions
// (SLL/SRL/SRA and the 64-bit DSLL/DSRL/DSRA variants).
void shiftimm_alloc(struct regstat *current,int i)
{
  if(opcode2[i]<=0x3) // SLL/SRL/SRA
  {
    if(rt1[i]) {
      if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
      else lt1[i]=rs1[i];
      alloc_reg(current,i,rt1[i]);
      current->is32|=1LL<<rt1[i]; // 32-bit result
      dirty_reg(current,rt1[i]);
      if(is_const(current,rs1[i])) {
        // Constant propagation through the shift
        int v=get_const(current,rs1[i]);
        if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
        if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]); // logical
        if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]); // arithmetic
      }
      else clear_const(current,rt1[i]);
    }
  }
  else
  {
    // 64-bit shifts: no constant propagation
    clear_const(current,rs1[i]);
    clear_const(current,rt1[i]);
  }

  if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
  {
    if(rt1[i]) {
      if(rs1[i]) alloc_reg64(current,i,rs1[i]);
      alloc_reg64(current,i,rt1[i]);
      current->is32&=~(1LL<<rt1[i]);
      dirty_reg(current,rt1[i]);
    }
  }
  if(opcode2[i]==0x3c) // DSLL32
  {
    if(rt1[i]) {
      // Source only contributes its low word (shifted into the high word)
      if(rs1[i]) alloc_reg(current,i,rs1[i]);
      alloc_reg64(current,i,rt1[i]);
      current->is32&=~(1LL<<rt1[i]);
      dirty_reg(current,rt1[i]);
    }
  }
  if(opcode2[i]==0x3e) // DSRL32
  {
    if(rt1[i]) {
      alloc_reg64(current,i,rs1[i]);
      if(imm[i]==32) {
        // Shift by exactly 32: result keeps 64-bit representation
        alloc_reg64(current,i,rt1[i]);
        current->is32&=~(1LL<<rt1[i]);
      } else {
        // Logical shift >32 zero-fills the high word: 32-bit result
        alloc_reg(current,i,rt1[i]);
        current->is32|=1LL<<rt1[i];
      }
      dirty_reg(current,rt1[i]);
    }
  }
  if(opcode2[i]==0x3f) // DSRA32
  {
    if(rt1[i]) {
      // Arithmetic shift >=32 produces a sign-extended 32-bit result
      alloc_reg64(current,i,rs1[i]);
      alloc_reg(current,i,rt1[i]);
      current->is32|=1LL<<rt1[i];
      dirty_reg(current,rt1[i]);
    }
  }
}
1218
// Register allocation for shift-by-register instructions
// (SLLV/SRLV/SRAV and the 64-bit DSLLV/DSRLV/DSRAV variants).
void shift_alloc(struct regstat *current,int i)
{
  if(rt1[i]) {
    if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
    {
      if(rs1[i]) alloc_reg(current,i,rs1[i]);
      if(rs2[i]) alloc_reg(current,i,rs2[i]);
      alloc_reg(current,i,rt1[i]);
      if(rt1[i]==rs2[i]) {
        // Destination aliases the shift-amount register: need a scratch
        alloc_reg_temp(current,i,-1);
        minimum_free_regs[i]=1;
      }
      current->is32|=1LL<<rt1[i];
    } else { // DSLLV/DSRLV/DSRAV
      if(rs1[i]) alloc_reg64(current,i,rs1[i]);
      if(rs2[i]) alloc_reg(current,i,rs2[i]);
      alloc_reg64(current,i,rt1[i]);
      current->is32&=~(1LL<<rt1[i]);
      if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
      {
        alloc_reg_temp(current,i,-1);
        minimum_free_regs[i]=1;
      }
    }
    clear_const(current,rs1[i]);
    clear_const(current,rs2[i]);
    clear_const(current,rt1[i]);
    dirty_reg(current,rt1[i]);
  }
}
1249
// Register allocation for three-operand ALU instructions
// (ADD/SUB family, SLT/SLTU, logic ops, and 64-bit DADD/DSUB family).
// Also tracks 32/64-bit width of the destination in current->is32.
void alu_alloc(struct regstat *current,int i)
{
  if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
    if(rt1[i]) {
      if(rs1[i]&&rs2[i]) {
        alloc_reg(current,i,rs1[i]);
        alloc_reg(current,i,rs2[i]);
      }
      else {
        // One source is r0: only allocate a real source if it is live later
        if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
        if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
      }
      alloc_reg(current,i,rt1[i]);
    }
    current->is32|=1LL<<rt1[i]; // 32-bit result
  }
  if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
    if(rt1[i]) {
      if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
      {
        // At least one source is 64-bit: compare needs the upper halves
        alloc_reg64(current,i,rs1[i]);
        alloc_reg64(current,i,rs2[i]);
        alloc_reg(current,i,rt1[i]);
      } else {
        alloc_reg(current,i,rs1[i]);
        alloc_reg(current,i,rs2[i]);
        alloc_reg(current,i,rt1[i]);
      }
    }
    current->is32|=1LL<<rt1[i]; // result is 0 or 1
  }
  if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
    if(rt1[i]) {
      if(rs1[i]&&rs2[i]) {
        alloc_reg(current,i,rs1[i]);
        alloc_reg(current,i,rs2[i]);
      }
      else
      {
        if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
        if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
      }
      alloc_reg(current,i,rt1[i]);
      if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
      {
        // 64-bit logic op: allocate the upper half of the destination
        // unless its upper word is unneeded
        if(!((current->uu>>rt1[i])&1)) {
          alloc_reg64(current,i,rt1[i]);
        }
        if(get_reg(current->regmap,rt1[i]|64)>=0) {
          if(rs1[i]&&rs2[i]) {
            alloc_reg64(current,i,rs1[i]);
            alloc_reg64(current,i,rs2[i]);
          }
          else
          {
            // Is it really worth it to keep 64-bit values in registers?
            #ifdef NATIVE_64BIT
            if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
            if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
            #endif
          }
        }
        current->is32&=~(1LL<<rt1[i]);
      } else {
        current->is32|=1LL<<rt1[i];
      }
    }
  }
  if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
    if(rt1[i]) {
      if(rs1[i]&&rs2[i]) {
        // Full 64-bit op only if the upper word of the result is needed
        if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
          alloc_reg64(current,i,rs1[i]);
          alloc_reg64(current,i,rs2[i]);
          alloc_reg64(current,i,rt1[i]);
        } else {
          alloc_reg(current,i,rs1[i]);
          alloc_reg(current,i,rs2[i]);
          alloc_reg(current,i,rt1[i]);
        }
      }
      else {
        alloc_reg(current,i,rt1[i]);
        if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
          // DADD used as move, or zeroing
          // If we have a 64-bit source, then make the target 64 bits too
          if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
            if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
            alloc_reg64(current,i,rt1[i]);
          } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
            if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
            alloc_reg64(current,i,rt1[i]);
          }
          if(opcode2[i]>=0x2e&&rs2[i]) {
            // DSUB used as negation - 64-bit result
            // If we have a 32-bit register, extend it to 64 bits
            if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
            alloc_reg64(current,i,rt1[i]);
          }
        }
      }
      // Track result width: 64-bit unless the single live source was 32-bit
      if(rs1[i]&&rs2[i]) {
        current->is32&=~(1LL<<rt1[i]);
      } else if(rs1[i]) {
        current->is32&=~(1LL<<rt1[i]);
        if((current->is32>>rs1[i])&1)
          current->is32|=1LL<<rt1[i];
      } else if(rs2[i]) {
        current->is32&=~(1LL<<rt1[i]);
        if((current->is32>>rs2[i])&1)
          current->is32|=1LL<<rt1[i];
      } else {
        current->is32|=1LL<<rt1[i];
      }
    }
  }
  clear_const(current,rs1[i]);
  clear_const(current,rs2[i]);
  clear_const(current,rt1[i]);
  dirty_reg(current,rt1[i]);
}
1371
// Register allocation for immediate-operand instructions
// (DADDI/DADDIU, SLTI/SLTIU, ANDI/ORI/XORI, ADDI/ADDIU, LUI).
// Performs constant propagation where the source value is known.
void imm16_alloc(struct regstat *current,int i)
{
  if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  else lt1[i]=rs1[i];
  if(rt1[i]) alloc_reg(current,i,rt1[i]);
  if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
    current->is32&=~(1LL<<rt1[i]);
    if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
      // TODO: Could preserve the 32-bit flag if the immediate is zero
      alloc_reg64(current,i,rt1[i]);
      alloc_reg64(current,i,rs1[i]);
    }
    clear_const(current,rs1[i]);
    clear_const(current,rt1[i]);
  }
  else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
    // 64-bit source needs a full 64-bit compare
    if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
    current->is32|=1LL<<rt1[i];
    clear_const(current,rs1[i]);
    clear_const(current,rt1[i]);
  }
  else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
    if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
      // ORI/XORI with a 64-bit source keeps the upper half
      if(rs1[i]!=rt1[i]) {
        if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
        alloc_reg64(current,i,rt1[i]);
        current->is32&=~(1LL<<rt1[i]);
      }
    }
    else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
    if(is_const(current,rs1[i])) {
      // Constant propagation through the logic op
      int v=get_const(current,rs1[i]);
      if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
      if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
      if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
    }
    else clear_const(current,rt1[i]);
  }
  else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
    if(is_const(current,rs1[i])) {
      int v=get_const(current,rs1[i]);
      set_const(current,rt1[i],v+imm[i]);
    }
    else clear_const(current,rt1[i]);
    current->is32|=1LL<<rt1[i];
  }
  else {
    // LUI always produces a known constant
    set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
    current->is32|=1LL<<rt1[i];
  }
  dirty_reg(current,rt1[i]);
}
1424
// Register allocation for load instructions, including the unaligned
// LWL/LWR and 64-bit LDL/LDR forms. Loads whose destination is r0 or
// dead still allocate an address register (the access may fault/IO).
void load_alloc(struct regstat *current,int i)
{
  clear_const(current,rt1[i]);
  //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
  if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
  if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  if(rt1[i]&&!((current->u>>rt1[i])&1)) {
    alloc_reg(current,i,rt1[i]);
    assert(get_reg(current->regmap,rt1[i])>=0);
    if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
    {
      current->is32&=~(1LL<<rt1[i]);
      alloc_reg64(current,i,rt1[i]);
    }
    else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
    {
      current->is32&=~(1LL<<rt1[i]);
      alloc_reg64(current,i,rt1[i]);
      alloc_all(current,i);
      alloc_reg64(current,i,FTEMP);
      minimum_free_regs[i]=HOST_REGS;
    }
    else current->is32|=1LL<<rt1[i];
    dirty_reg(current,rt1[i]);
    // LWL/LWR need a temporary register for the old value
    if(opcode[i]==0x22||opcode[i]==0x26)
    {
      alloc_reg(current,i,FTEMP);
      alloc_reg_temp(current,i,-1);
      minimum_free_regs[i]=1;
    }
  }
  else
  {
    // Load to r0 or unneeded register (dummy load)
    // but we still need a register to calculate the address
    if(opcode[i]==0x22||opcode[i]==0x26)
    {
      alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
    }
    alloc_reg_temp(current,i,-1);
    minimum_free_regs[i]=1;
    if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
    {
      alloc_all(current,i);
      alloc_reg64(current,i,FTEMP);
      minimum_free_regs[i]=HOST_REGS;
    }
  }
}
1475
// Register allocation for store instructions, including the unaligned
// SWL/SWR and 64-bit SDL/SDR/SD forms.
void store_alloc(struct regstat *current,int i)
{
  clear_const(current,rs2[i]);
  if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
  if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  alloc_reg(current,i,rs2[i]);
  if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
    alloc_reg64(current,i,rs2[i]);
    if(rs2[i]) alloc_reg(current,i,FTEMP);
  }
  #if defined(HOST_IMM8)
  // On CPUs without 32-bit immediates we need a pointer to invalid_code
  else alloc_reg(current,i,INVCP);
  #endif
  if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
    alloc_reg(current,i,FTEMP);
  }
  // We need a temporary register for address generation
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
}
1497
// Register allocation for COP1 loads/stores (LWC1/SWC1/LDC1/SDC1).
// NOTE(review): unlike c2ls_alloc, this does not set
// minimum_free_regs[i] despite allocating a temporary - confirm
// whether that matters on this (PSX) target where COP1 is unused.
void c1ls_alloc(struct regstat *current,int i)
{
  //clear_const(current,rs1[i]); // FIXME
  clear_const(current,rt1[i]);
  if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  alloc_reg(current,i,CSREG); // Status
  alloc_reg(current,i,FTEMP);
  if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
    alloc_reg64(current,i,FTEMP);
  }
  #if defined(HOST_IMM8)
  // On CPUs without 32-bit immediates we need a pointer to invalid_code
  else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
    alloc_reg(current,i,INVCP);
  #endif
  // We need a temporary register for address generation
  alloc_reg_temp(current,i,-1);
}
1516
// Register allocation for GTE (COP2) loads/stores (LWC2/SWC2).
void c2ls_alloc(struct regstat *current,int i)
{
  clear_const(current,rt1[i]);
  if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  alloc_reg(current,i,FTEMP); // holds the transferred value
  #if defined(HOST_IMM8)
  // On CPUs without 32-bit immediates we need a pointer to invalid_code
  if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
    alloc_reg(current,i,INVCP);
  #endif
  // We need a temporary register for address generation
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
}
1531
#ifndef multdiv_alloc
// Register allocation for multiply/divide. Results go to HI/LO.
// If either source is r0 the result is defined as zero here, so only
// HI/LO are allocated.
void multdiv_alloc(struct regstat *current,int i)
{
  // case 0x18: MULT
  // case 0x19: MULTU
  // case 0x1A: DIV
  // case 0x1B: DIVU
  // case 0x1C: DMULT
  // case 0x1D: DMULTU
  // case 0x1E: DDIV
  // case 0x1F: DDIVU
  clear_const(current,rs1[i]);
  clear_const(current,rs2[i]);
  if(rs1[i]&&rs2[i])
  {
    if((opcode2[i]&4)==0) // 32-bit
    {
      // HI/LO become live (clear their unneeded bits), then allocate
      current->u&=~(1LL<<HIREG);
      current->u&=~(1LL<<LOREG);
      alloc_reg(current,i,HIREG);
      alloc_reg(current,i,LOREG);
      alloc_reg(current,i,rs1[i]);
      alloc_reg(current,i,rs2[i]);
      current->is32|=1LL<<HIREG;
      current->is32|=1LL<<LOREG;
      dirty_reg(current,HIREG);
      dirty_reg(current,LOREG);
    }
    else // 64-bit
    {
      current->u&=~(1LL<<HIREG);
      current->u&=~(1LL<<LOREG);
      current->uu&=~(1LL<<HIREG);
      current->uu&=~(1LL<<LOREG);
      alloc_reg64(current,i,HIREG);
      //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
      alloc_reg64(current,i,rs1[i]);
      alloc_reg64(current,i,rs2[i]);
      alloc_all(current,i);
      current->is32&=~(1LL<<HIREG);
      current->is32&=~(1LL<<LOREG);
      dirty_reg(current,HIREG);
      dirty_reg(current,LOREG);
      minimum_free_regs[i]=HOST_REGS;
    }
  }
  else
  {
    // Multiply by zero is zero.
    // MIPS does not have a divide by zero exception.
    // The result is undefined, we return zero.
    alloc_reg(current,i,HIREG);
    alloc_reg(current,i,LOREG);
    current->is32|=1LL<<HIREG;
    current->is32|=1LL<<LOREG;
    dirty_reg(current,HIREG);
    dirty_reg(current,LOREG);
  }
}
#endif
1592
// Register allocation for COP0 instructions (MFC0/MTC0 and the
// privileged 0x10 group). Every path uses alloc_all, so no host
// registers may be assumed free across this instruction.
void cop0_alloc(struct regstat *current,int i)
{
  if(opcode2[i]==0) // MFC0
  {
    if(rt1[i]) {
      clear_const(current,rt1[i]);
      alloc_all(current,i);
      alloc_reg(current,i,rt1[i]);
      current->is32|=1LL<<rt1[i];
      dirty_reg(current,rt1[i]);
    }
  }
  else if(opcode2[i]==4) // MTC0
  {
    if(rs1[i]){
      clear_const(current,rs1[i]);
      alloc_reg(current,i,rs1[i]);
      alloc_all(current,i);
    }
    else {
      // Writing r0: still need a register holding zero
      alloc_all(current,i); // FIXME: Keep r0
      current->u&=~1LL;
      alloc_reg(current,i,0);
    }
  }
  else
  {
    // TLBR/TLBWI/TLBWR/TLBP/ERET
    assert(opcode2[i]==0x10);
    alloc_all(current,i);
  }
  minimum_free_regs[i]=HOST_REGS;
}
1626
// Register allocation for COP1 move instructions (MFC1/DMFC1/CFC1 and
// MTC1/DMTC1/CTC1). Also used for COP2 moves (see delayslot_alloc:
// both COP1 and COP2 itypes dispatch here).
void cop1_alloc(struct regstat *current,int i)
{
  alloc_reg(current,i,CSREG); // Load status
  if(opcode2[i]<3) // MFC1/DMFC1/CFC1
  {
    if(rt1[i]){
      clear_const(current,rt1[i]);
      if(opcode2[i]==1) {
        alloc_reg64(current,i,rt1[i]); // DMFC1
        current->is32&=~(1LL<<rt1[i]);
      }else{
        alloc_reg(current,i,rt1[i]); // MFC1/CFC1
        current->is32|=1LL<<rt1[i];
      }
      dirty_reg(current,rt1[i]);
    }
    alloc_reg_temp(current,i,-1);
  }
  else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
  {
    if(rs1[i]){
      clear_const(current,rs1[i]);
      if(opcode2[i]==5)
        alloc_reg64(current,i,rs1[i]); // DMTC1
      else
        alloc_reg(current,i,rs1[i]); // MTC1/CTC1
      alloc_reg_temp(current,i,-1);
    }
    else {
      // Source is r0: allocate a register holding zero
      current->u&=~1LL;
      alloc_reg(current,i,0);
      alloc_reg_temp(current,i,-1);
    }
  }
  minimum_free_regs[i]=1;
}
// Register allocation for FP convert ops: status register + one scratch.
void fconv_alloc(struct regstat *current,int i)
{
  alloc_reg(current,i,CSREG); // Load status
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
}
// Register allocation for FP arithmetic ops: status register + one scratch.
void float_alloc(struct regstat *current,int i)
{
  alloc_reg(current,i,CSREG); // Load status
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
}
// Register allocation for GTE (COP2) operations: just one scratch register.
void c2op_alloc(struct regstat *current,int i)
{
  alloc_reg_temp(current,i,-1);
}
// Register allocation for FP compares: status + condition-flag
// register (which the compare will modify) + one scratch.
void fcomp_alloc(struct regstat *current,int i)
{
  alloc_reg(current,i,CSREG); // Load status
  alloc_reg(current,i,FSREG); // Load flags
  dirty_reg(current,FSREG); // Flag will be modified
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
}
1687
// Register allocation for SYSCALL: needs the cycle counter and
// conservatively frees everything (alloc_all).
void syscall_alloc(struct regstat *current,int i)
{
  alloc_cc(current,i);
  dirty_reg(current,CCREG);
  alloc_all(current,i);
  minimum_free_regs[i]=HOST_REGS;
  current->isconst=0;
}
1696
// Allocate registers for the instruction in a branch delay slot by
// dispatching on its decoded itype. A branch/syscall in the delay slot
// is not supported: speculative precompilation is disabled instead.
void delayslot_alloc(struct regstat *current,int i)
{
  switch(itype[i]) {
    case UJUMP:
    case CJUMP:
    case SJUMP:
    case RJUMP:
    case FJUMP:
    case SYSCALL:
    case HLECALL:
    case SPAN:
      assem_debug("jump in the delay slot. this shouldn't happen.\n");//exit(1);
      SysPrintf("Disabled speculative precompilation\n");
      stop_after_jal=1;
      break;
    case IMM16:
      imm16_alloc(current,i);
      break;
    case LOAD:
    case LOADLR:
      load_alloc(current,i);
      break;
    case STORE:
    case STORELR:
      store_alloc(current,i);
      break;
    case ALU:
      alu_alloc(current,i);
      break;
    case SHIFT:
      shift_alloc(current,i);
      break;
    case MULTDIV:
      multdiv_alloc(current,i);
      break;
    case SHIFTIMM:
      shiftimm_alloc(current,i);
      break;
    case MOV:
      mov_alloc(current,i);
      break;
    case COP0:
      cop0_alloc(current,i);
      break;
    case COP1:
    case COP2:
      cop1_alloc(current,i);
      break;
    case C1LS:
      c1ls_alloc(current,i);
      break;
    case C2LS:
      c2ls_alloc(current,i);
      break;
    case FCONV:
      fconv_alloc(current,i);
      break;
    case FLOAT:
      float_alloc(current,i);
      break;
    case FCOMP:
      fcomp_alloc(current,i);
      break;
    case C2OP:
      c2op_alloc(current,i);
      break;
  }
}
1765
1766// Special case where a branch and delay slot span two pages in virtual memory
static void pagespan_alloc(struct regstat *current,int i)
{
  // Conservative setup: no constants survive, all registers allocated
  current->isconst=0;
  current->wasconst=0;
  regs[i].wasconst=0;
  minimum_free_regs[i]=HOST_REGS;
  alloc_all(current,i);
  alloc_cc(current,i);
  dirty_reg(current,CCREG);
  if(opcode[i]==3) // JAL
  {
    alloc_reg(current,i,31); // link register
    dirty_reg(current,31);
  }
  if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
  {
    alloc_reg(current,i,rs1[i]);
    if (rt1[i]!=0) {
      // JALR link destination
      alloc_reg(current,i,rt1[i]);
      dirty_reg(current,rt1[i]);
    }
  }
  if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
  {
    if(rs1[i]) alloc_reg(current,i,rs1[i]);
    if(rs2[i]) alloc_reg(current,i,rs2[i]);
    if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
    {
      // 64-bit compare needs the upper halves too
      if(rs1[i]) alloc_reg64(current,i,rs1[i]);
      if(rs2[i]) alloc_reg64(current,i,rs2[i]);
    }
  }
  else
  if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
  {
    if(rs1[i]) alloc_reg(current,i,rs1[i]);
    if(!((current->is32>>rs1[i])&1))
    {
      if(rs1[i]) alloc_reg64(current,i,rs1[i]);
    }
  }
  else
  if(opcode[i]==0x11) // BC1
  {
    alloc_reg(current,i,FSREG);
    alloc_reg(current,i,CSREG);
  }
  //else ...
}
1816
e2b5e7aa 1817static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
57871462 1818{
1819 stubs[stubcount][0]=type;
1820 stubs[stubcount][1]=addr;
1821 stubs[stubcount][2]=retaddr;
1822 stubs[stubcount][3]=a;
1823 stubs[stubcount][4]=b;
1824 stubs[stubcount][5]=c;
1825 stubs[stubcount][6]=d;
1826 stubs[stubcount][7]=e;
1827 stubcount++;
1828}
1829
1830// Write out a single register
1831void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1832{
1833 int hr;
1834 for(hr=0;hr<HOST_REGS;hr++) {
1835 if(hr!=EXCLUDE_REG) {
1836 if((regmap[hr]&63)==r) {
1837 if((dirty>>hr)&1) {
1838 if(regmap[hr]<64) {
1839 emit_storereg(r,hr);
57871462 1840 }else{
1841 emit_storereg(r|64,hr);
1842 }
1843 }
1844 }
1845 }
1846 }
1847}
1848
1849int mchecksum()
1850{
1851 //if(!tracedebug) return 0;
1852 int i;
1853 int sum=0;
1854 for(i=0;i<2097152;i++) {
1855 unsigned int temp=sum;
1856 sum<<=1;
1857 sum|=(~temp)>>31;
1858 sum^=((u_int *)rdram)[i];
1859 }
1860 return sum;
1861}
1862int rchecksum()
1863{
1864 int i;
1865 int sum=0;
1866 for(i=0;i<64;i++)
1867 sum^=((u_int *)reg)[i];
1868 return sum;
1869}
// Debug helper: print all 32 guest registers (upper then lower word).
void rlist()
{
  int i;
  printf("TRACE: ");
  for(i=0;i<32;i++)
    printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
  printf("\n");
}
1878
// Debug helper: set the global trace flag.
void enabletrace()
{
  tracedebug=1;
}
1883
// Debug trace hook: when Count falls inside a hard-coded window, dump
// the memory checksum, the register list, and some raw native-stack
// words — presumably for diffing execution traces against another
// build (debug-only; most variants are left commented out).
void memdebug(int i)
{
  //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
  //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
  //rlist();
  //if(tracedebug) {
  //if(Count>=-2084597794) {
  if((signed int)Count>=-2084597794&&(signed int)Count<0) {
    //if(0) {
    printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
    //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
    //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
    rlist();
    #ifdef __i386__
    printf("TRACE: %x\n",(&i)[-1]);
    #endif
    #ifdef __arm__
    int j;
    printf("TRACE: %x \n",(&j)[10]);
    printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
    #endif
    //fflush(stdout);
  }
  //printf("TRACE: %x\n",(&i)[-1]);
}
1909
57871462 1910void alu_assemble(int i,struct regstat *i_regs)
1911{
1912 if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1913 if(rt1[i]) {
1914 signed char s1,s2,t;
1915 t=get_reg(i_regs->regmap,rt1[i]);
1916 if(t>=0) {
1917 s1=get_reg(i_regs->regmap,rs1[i]);
1918 s2=get_reg(i_regs->regmap,rs2[i]);
1919 if(rs1[i]&&rs2[i]) {
1920 assert(s1>=0);
1921 assert(s2>=0);
1922 if(opcode2[i]&2) emit_sub(s1,s2,t);
1923 else emit_add(s1,s2,t);
1924 }
1925 else if(rs1[i]) {
1926 if(s1>=0) emit_mov(s1,t);
1927 else emit_loadreg(rs1[i],t);
1928 }
1929 else if(rs2[i]) {
1930 if(s2>=0) {
1931 if(opcode2[i]&2) emit_neg(s2,t);
1932 else emit_mov(s2,t);
1933 }
1934 else {
1935 emit_loadreg(rs2[i],t);
1936 if(opcode2[i]&2) emit_neg(t,t);
1937 }
1938 }
1939 else emit_zeroreg(t);
1940 }
1941 }
1942 }
1943 if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1944 if(rt1[i]) {
1945 signed char s1l,s2l,s1h,s2h,tl,th;
1946 tl=get_reg(i_regs->regmap,rt1[i]);
1947 th=get_reg(i_regs->regmap,rt1[i]|64);
1948 if(tl>=0) {
1949 s1l=get_reg(i_regs->regmap,rs1[i]);
1950 s2l=get_reg(i_regs->regmap,rs2[i]);
1951 s1h=get_reg(i_regs->regmap,rs1[i]|64);
1952 s2h=get_reg(i_regs->regmap,rs2[i]|64);
1953 if(rs1[i]&&rs2[i]) {
1954 assert(s1l>=0);
1955 assert(s2l>=0);
1956 if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
1957 else emit_adds(s1l,s2l,tl);
1958 if(th>=0) {
1959 #ifdef INVERTED_CARRY
1960 if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
1961 #else
1962 if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
1963 #endif
1964 else emit_add(s1h,s2h,th);
1965 }
1966 }
1967 else if(rs1[i]) {
1968 if(s1l>=0) emit_mov(s1l,tl);
1969 else emit_loadreg(rs1[i],tl);
1970 if(th>=0) {
1971 if(s1h>=0) emit_mov(s1h,th);
1972 else emit_loadreg(rs1[i]|64,th);
1973 }
1974 }
1975 else if(rs2[i]) {
1976 if(s2l>=0) {
1977 if(opcode2[i]&2) emit_negs(s2l,tl);
1978 else emit_mov(s2l,tl);
1979 }
1980 else {
1981 emit_loadreg(rs2[i],tl);
1982 if(opcode2[i]&2) emit_negs(tl,tl);
1983 }
1984 if(th>=0) {
1985 #ifdef INVERTED_CARRY
1986 if(s2h>=0) emit_mov(s2h,th);
1987 else emit_loadreg(rs2[i]|64,th);
1988 if(opcode2[i]&2) {
1989 emit_adcimm(-1,th); // x86 has inverted carry flag
1990 emit_not(th,th);
1991 }
1992 #else
1993 if(opcode2[i]&2) {
1994 if(s2h>=0) emit_rscimm(s2h,0,th);
1995 else {
1996 emit_loadreg(rs2[i]|64,th);
1997 emit_rscimm(th,0,th);
1998 }
1999 }else{
2000 if(s2h>=0) emit_mov(s2h,th);
2001 else emit_loadreg(rs2[i]|64,th);
2002 }
2003 #endif
2004 }
2005 }
2006 else {
2007 emit_zeroreg(tl);
2008 if(th>=0) emit_zeroreg(th);
2009 }
2010 }
2011 }
2012 }
2013 if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2014 if(rt1[i]) {
2015 signed char s1l,s1h,s2l,s2h,t;
2016 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2017 {
2018 t=get_reg(i_regs->regmap,rt1[i]);
2019 //assert(t>=0);
2020 if(t>=0) {
2021 s1l=get_reg(i_regs->regmap,rs1[i]);
2022 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2023 s2l=get_reg(i_regs->regmap,rs2[i]);
2024 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2025 if(rs2[i]==0) // rx<r0
2026 {
2027 assert(s1h>=0);
2028 if(opcode2[i]==0x2a) // SLT
2029 emit_shrimm(s1h,31,t);
2030 else // SLTU (unsigned can not be less than zero)
2031 emit_zeroreg(t);
2032 }
2033 else if(rs1[i]==0) // r0<rx
2034 {
2035 assert(s2h>=0);
2036 if(opcode2[i]==0x2a) // SLT
2037 emit_set_gz64_32(s2h,s2l,t);
2038 else // SLTU (set if not zero)
2039 emit_set_nz64_32(s2h,s2l,t);
2040 }
2041 else {
2042 assert(s1l>=0);assert(s1h>=0);
2043 assert(s2l>=0);assert(s2h>=0);
2044 if(opcode2[i]==0x2a) // SLT
2045 emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2046 else // SLTU
2047 emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2048 }
2049 }
2050 } else {
2051 t=get_reg(i_regs->regmap,rt1[i]);
2052 //assert(t>=0);
2053 if(t>=0) {
2054 s1l=get_reg(i_regs->regmap,rs1[i]);
2055 s2l=get_reg(i_regs->regmap,rs2[i]);
2056 if(rs2[i]==0) // rx<r0
2057 {
2058 assert(s1l>=0);
2059 if(opcode2[i]==0x2a) // SLT
2060 emit_shrimm(s1l,31,t);
2061 else // SLTU (unsigned can not be less than zero)
2062 emit_zeroreg(t);
2063 }
2064 else if(rs1[i]==0) // r0<rx
2065 {
2066 assert(s2l>=0);
2067 if(opcode2[i]==0x2a) // SLT
2068 emit_set_gz32(s2l,t);
2069 else // SLTU (set if not zero)
2070 emit_set_nz32(s2l,t);
2071 }
2072 else{
2073 assert(s1l>=0);assert(s2l>=0);
2074 if(opcode2[i]==0x2a) // SLT
2075 emit_set_if_less32(s1l,s2l,t);
2076 else // SLTU
2077 emit_set_if_carry32(s1l,s2l,t);
2078 }
2079 }
2080 }
2081 }
2082 }
2083 if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2084 if(rt1[i]) {
2085 signed char s1l,s1h,s2l,s2h,th,tl;
2086 tl=get_reg(i_regs->regmap,rt1[i]);
2087 th=get_reg(i_regs->regmap,rt1[i]|64);
2088 if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2089 {
2090 assert(tl>=0);
2091 if(tl>=0) {
2092 s1l=get_reg(i_regs->regmap,rs1[i]);
2093 s1h=get_reg(i_regs->regmap,rs1[i]|64);
2094 s2l=get_reg(i_regs->regmap,rs2[i]);
2095 s2h=get_reg(i_regs->regmap,rs2[i]|64);
2096 if(rs1[i]&&rs2[i]) {
2097 assert(s1l>=0);assert(s1h>=0);
2098 assert(s2l>=0);assert(s2h>=0);
2099 if(opcode2[i]==0x24) { // AND
2100 emit_and(s1l,s2l,tl);
2101 emit_and(s1h,s2h,th);
2102 } else
2103 if(opcode2[i]==0x25) { // OR
2104 emit_or(s1l,s2l,tl);
2105 emit_or(s1h,s2h,th);
2106 } else
2107 if(opcode2[i]==0x26) { // XOR
2108 emit_xor(s1l,s2l,tl);
2109 emit_xor(s1h,s2h,th);
2110 } else
2111 if(opcode2[i]==0x27) { // NOR
2112 emit_or(s1l,s2l,tl);
2113 emit_or(s1h,s2h,th);
2114 emit_not(tl,tl);
2115 emit_not(th,th);
2116 }
2117 }
2118 else
2119 {
2120 if(opcode2[i]==0x24) { // AND
2121 emit_zeroreg(tl);
2122 emit_zeroreg(th);
2123 } else
2124 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2125 if(rs1[i]){
2126 if(s1l>=0) emit_mov(s1l,tl);
2127 else emit_loadreg(rs1[i],tl);
2128 if(s1h>=0) emit_mov(s1h,th);
2129 else emit_loadreg(rs1[i]|64,th);
2130 }
2131 else
2132 if(rs2[i]){
2133 if(s2l>=0) emit_mov(s2l,tl);
2134 else emit_loadreg(rs2[i],tl);
2135 if(s2h>=0) emit_mov(s2h,th);
2136 else emit_loadreg(rs2[i]|64,th);
2137 }
2138 else{
2139 emit_zeroreg(tl);
2140 emit_zeroreg(th);
2141 }
2142 } else
2143 if(opcode2[i]==0x27) { // NOR
2144 if(rs1[i]){
2145 if(s1l>=0) emit_not(s1l,tl);
2146 else{
2147 emit_loadreg(rs1[i],tl);
2148 emit_not(tl,tl);
2149 }
2150 if(s1h>=0) emit_not(s1h,th);
2151 else{
2152 emit_loadreg(rs1[i]|64,th);
2153 emit_not(th,th);
2154 }
2155 }
2156 else
2157 if(rs2[i]){
2158 if(s2l>=0) emit_not(s2l,tl);
2159 else{
2160 emit_loadreg(rs2[i],tl);
2161 emit_not(tl,tl);
2162 }
2163 if(s2h>=0) emit_not(s2h,th);
2164 else{
2165 emit_loadreg(rs2[i]|64,th);
2166 emit_not(th,th);
2167 }
2168 }
2169 else {
2170 emit_movimm(-1,tl);
2171 emit_movimm(-1,th);
2172 }
2173 }
2174 }
2175 }
2176 }
2177 else
2178 {
2179 // 32 bit
2180 if(tl>=0) {
2181 s1l=get_reg(i_regs->regmap,rs1[i]);
2182 s2l=get_reg(i_regs->regmap,rs2[i]);
2183 if(rs1[i]&&rs2[i]) {
2184 assert(s1l>=0);
2185 assert(s2l>=0);
2186 if(opcode2[i]==0x24) { // AND
2187 emit_and(s1l,s2l,tl);
2188 } else
2189 if(opcode2[i]==0x25) { // OR
2190 emit_or(s1l,s2l,tl);
2191 } else
2192 if(opcode2[i]==0x26) { // XOR
2193 emit_xor(s1l,s2l,tl);
2194 } else
2195 if(opcode2[i]==0x27) { // NOR
2196 emit_or(s1l,s2l,tl);
2197 emit_not(tl,tl);
2198 }
2199 }
2200 else
2201 {
2202 if(opcode2[i]==0x24) { // AND
2203 emit_zeroreg(tl);
2204 } else
2205 if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2206 if(rs1[i]){
2207 if(s1l>=0) emit_mov(s1l,tl);
2208 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2209 }
2210 else
2211 if(rs2[i]){
2212 if(s2l>=0) emit_mov(s2l,tl);
2213 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2214 }
2215 else emit_zeroreg(tl);
2216 } else
2217 if(opcode2[i]==0x27) { // NOR
2218 if(rs1[i]){
2219 if(s1l>=0) emit_not(s1l,tl);
2220 else {
2221 emit_loadreg(rs1[i],tl);
2222 emit_not(tl,tl);
2223 }
2224 }
2225 else
2226 if(rs2[i]){
2227 if(s2l>=0) emit_not(s2l,tl);
2228 else {
2229 emit_loadreg(rs2[i],tl);
2230 emit_not(tl,tl);
2231 }
2232 }
2233 else emit_movimm(-1,tl);
2234 }
2235 }
2236 }
2237 }
2238 }
2239 }
2240}
2241
/*
 * imm16_assemble: emit host code for MIPS I-type (16-bit immediate) ALU
 * instructions: LUI, ADDI/ADDIU, DADDI/DADDIU, SLTI/SLTIU, ANDI/ORI/XORI.
 *
 * i      - index of the instruction in the block's decode arrays
 *          (opcode[], rt1[], rs1[], imm[], constmap[], ...).
 * i_regs - register allocation state for this instruction: regmap maps
 *          host regs to guest regs, isconst/wasconst mark registers whose
 *          value is known at assembly time, regmap_entry describes what
 *          each host reg held on entry to this instruction.
 *
 * Nothing is emitted when the destination was not allocated (t<0) or is
 * flagged isconst (the constant is materialized elsewhere).  The |64
 * lookups fetch the host reg holding the upper 32 bits of a 64-bit
 * guest register.
 */
2242 void imm16_assemble(int i,struct regstat *i_regs)
2243 {
2244 if (opcode[i]==0x0f) { // LUI
2245 if(rt1[i]) {
2246 signed char t;
2247 t=get_reg(i_regs->regmap,rt1[i]);
2248 //assert(t>=0);
2249 if(t>=0) {
2250 if(!((i_regs->isconst>>t)&1))
2251 emit_movimm(imm[i]<<16,t);
2252 }
2253 }
2254 }
2255 if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2256 if(rt1[i]) {
2257 signed char s,t;
2258 t=get_reg(i_regs->regmap,rt1[i]);
2259 s=get_reg(i_regs->regmap,rs1[i]);
2260 if(rs1[i]) {
2261 //assert(t>=0);
2262 //assert(s>=0);
2263 if(t>=0) {
2264 if(!((i_regs->isconst>>t)&1)) {
// Source not in a host reg: reload it into the destination (unless the
// destination already held it on entry) and add in place.
2265 if(s<0) {
2266 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2267 emit_addimm(t,imm[i],t);
2268 }else{
// Constant-propagated source: fold the addition at assembly time.
2269 if(!((i_regs->wasconst>>s)&1))
2270 emit_addimm(s,imm[i],t);
2271 else
2272 emit_movimm(constmap[i][s]+imm[i],t);
2273 }
2274 }
2275 }
2276 } else {
// rs1==r0: result is just the sign-extended immediate.
2277 if(t>=0) {
2278 if(!((i_regs->isconst>>t)&1))
2279 emit_movimm(imm[i],t);
2280 }
2281 }
2282 }
2283 }
2284 if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2285 if(rt1[i]) {
2286 signed char sh,sl,th,tl;
2287 th=get_reg(i_regs->regmap,rt1[i]|64);
2288 tl=get_reg(i_regs->regmap,rt1[i]);
2289 sh=get_reg(i_regs->regmap,rs1[i]|64);
2290 sl=get_reg(i_regs->regmap,rs1[i]);
2291 if(tl>=0) {
2292 if(rs1[i]) {
2293 assert(sh>=0);
2294 assert(sl>=0);
2295 if(th>=0) {
2296 emit_addimm64_32(sh,sl,imm[i],th,tl);
2297 }
2298 else {
2299 emit_addimm(sl,imm[i],tl);
2300 }
2301 } else {
// rs1==r0: low word is the immediate, high word its sign extension.
2302 emit_movimm(imm[i],tl);
2303 if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2304 }
2305 }
2306 }
2307 }
2308 else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2309 if(rt1[i]) {
2310 //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2311 signed char sh,sl,t;
2312 t=get_reg(i_regs->regmap,rt1[i]);
2313 sh=get_reg(i_regs->regmap,rs1[i]|64);
2314 sl=get_reg(i_regs->regmap,rs1[i]);
2315 //assert(t>=0);
2316 if(t>=0) {
2317 if(rs1[i]>0) {
// No upper-half reg allocated implies the source was 32-bit (was32).
2318 if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2319 if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2320 if(opcode[i]==0x0a) { // SLTI
2321 if(sl<0) {
2322 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2323 emit_slti32(t,imm[i],t);
2324 }else{
2325 emit_slti32(sl,imm[i],t);
2326 }
2327 }
2328 else { // SLTIU
2329 if(sl<0) {
2330 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2331 emit_sltiu32(t,imm[i],t);
2332 }else{
2333 emit_sltiu32(sl,imm[i],t);
2334 }
2335 }
2336 }else{ // 64-bit
2337 assert(sl>=0);
2338 if(opcode[i]==0x0a) // SLTI
2339 emit_slti64_32(sh,sl,imm[i],t);
2340 else // SLTIU
2341 emit_sltiu64_32(sh,sl,imm[i],t);
2342 }
2343 }else{
2344 // SLTI(U) with r0 is just stupid,
2345 // nonetheless examples can be found
2346 if(opcode[i]==0x0a) // SLTI
2347 if(0<imm[i]) emit_movimm(1,t);
2348 else emit_zeroreg(t);
2349 else // SLTIU
2350 {
2351 if(imm[i]) emit_movimm(1,t);
2352 else emit_zeroreg(t);
2353 }
2354 }
2355 }
2356 }
2357 }
2358 else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2359 if(rt1[i]) {
2360 signed char sh,sl,th,tl;
2361 th=get_reg(i_regs->regmap,rt1[i]|64);
2362 tl=get_reg(i_regs->regmap,rt1[i]);
2363 sh=get_reg(i_regs->regmap,rs1[i]|64);
2364 sl=get_reg(i_regs->regmap,rs1[i]);
2365 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2366 if(opcode[i]==0x0c) //ANDI
2367 {
2368 if(rs1[i]) {
2369 if(sl<0) {
2370 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2371 emit_andimm(tl,imm[i],tl);
2372 }else{
2373 if(!((i_regs->wasconst>>sl)&1))
2374 emit_andimm(sl,imm[i],tl);
2375 else
2376 emit_movimm(constmap[i][sl]&imm[i],tl);
2377 }
2378 }
2379 else
2380 emit_zeroreg(tl);
// ANDI with a zero-extended 16-bit immediate always clears the high word.
2381 if(th>=0) emit_zeroreg(th);
2382 }
2383 else
2384 {
// ORI/XORI: high word passes through unchanged; copy/reload it first.
2385 if(rs1[i]) {
2386 if(sl<0) {
2387 if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2388 }
2389 if(th>=0) {
2390 if(sh<0) {
2391 emit_loadreg(rs1[i]|64,th);
2392 }else{
2393 emit_mov(sh,th);
2394 }
2395 }
581335b0 2396 if(opcode[i]==0x0d) { // ORI
2397 if(sl<0) {
2398 emit_orimm(tl,imm[i],tl);
2399 }else{
2400 if(!((i_regs->wasconst>>sl)&1))
2401 emit_orimm(sl,imm[i],tl);
2402 else
2403 emit_movimm(constmap[i][sl]|imm[i],tl);
2404 }
57871462 2405 }
581335b0 2406 if(opcode[i]==0x0e) { // XORI
2407 if(sl<0) {
2408 emit_xorimm(tl,imm[i],tl);
2409 }else{
2410 if(!((i_regs->wasconst>>sl)&1))
2411 emit_xorimm(sl,imm[i],tl);
2412 else
2413 emit_movimm(constmap[i][sl]^imm[i],tl);
2414 }
57871462 2415 }
2416 }
2417 else {
2418 emit_movimm(imm[i],tl);
2419 if(th>=0) emit_zeroreg(th);
2420 }
2421 }
2422 }
2423 }
2424 }
2425}
2426
/*
 * shiftimm_assemble: emit host code for MIPS shift-by-immediate
 * instructions: SLL/SRL/SRA (32-bit) and DSLL/DSRL/DSRA plus the
 * DSLL32/DSRL32/DSRA32 forms (64-bit, shift amount >= 32).
 *
 * i      - instruction index into the decode arrays (opcode2[], rt1[],
 *          rs1[], imm[]).
 * i_regs - register allocation state for this instruction.
 *
 * 64-bit values live in a (th,tl)/(sh,sl) host register pair; th may be
 * unallocated (<0) when only the low half of the result is needed.
 */
2427 void shiftimm_assemble(int i,struct regstat *i_regs)
2428 {
2429 if(opcode2[i]<=0x3) // SLL/SRL/SRA
2430 {
2431 if(rt1[i]) {
2432 signed char s,t;
2433 t=get_reg(i_regs->regmap,rt1[i]);
2434 s=get_reg(i_regs->regmap,rs1[i]);
2435 //assert(t>=0);
dc49e339 2436 if(t>=0&&!((i_regs->isconst>>t)&1)){
57871462 2437 if(rs1[i]==0)
2438 {
2439 emit_zeroreg(t);
2440 }
2441 else
2442 {
// Source not in a host reg: reload into t and shift in place (s<0?t:s).
2443 if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2444 if(imm[i]) {
2445 if(opcode2[i]==0) // SLL
2446 {
2447 emit_shlimm(s<0?t:s,imm[i],t);
2448 }
2449 if(opcode2[i]==2) // SRL
2450 {
2451 emit_shrimm(s<0?t:s,imm[i],t);
2452 }
2453 if(opcode2[i]==3) // SRA
2454 {
2455 emit_sarimm(s<0?t:s,imm[i],t);
2456 }
2457 }else{
2458 // Shift by zero
2459 if(s>=0 && s!=t) emit_mov(s,t);
2460 }
2461 }
2462 }
2463 //emit_storereg(rt1[i],t); //DEBUG
2464 }
2465 }
2466 if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2467 {
2468 if(rt1[i]) {
2469 signed char sh,sl,th,tl;
2470 th=get_reg(i_regs->regmap,rt1[i]|64);
2471 tl=get_reg(i_regs->regmap,rt1[i]);
2472 sh=get_reg(i_regs->regmap,rs1[i]|64);
2473 sl=get_reg(i_regs->regmap,rs1[i]);
2474 if(tl>=0) {
2475 if(rs1[i]==0)
2476 {
2477 emit_zeroreg(tl);
2478 if(th>=0) emit_zeroreg(th);
2479 }
2480 else
2481 {
2482 assert(sl>=0);
2483 assert(sh>=0);
2484 if(imm[i]) {
// 64-bit shifts by 1..31: combine the halves with double-shift
// (shld/shrd-style) emitters, then shift the remaining half.
2485 if(opcode2[i]==0x38) // DSLL
2486 {
2487 if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2488 emit_shlimm(sl,imm[i],tl);
2489 }
2490 if(opcode2[i]==0x3a) // DSRL
2491 {
2492 emit_shrdimm(sl,sh,imm[i],tl);
2493 if(th>=0) emit_shrimm(sh,imm[i],th);
2494 }
2495 if(opcode2[i]==0x3b) // DSRA
2496 {
2497 emit_shrdimm(sl,sh,imm[i],tl);
2498 if(th>=0) emit_sarimm(sh,imm[i],th);
2499 }
2500 }else{
2501 // Shift by zero
2502 if(sl!=tl) emit_mov(sl,tl);
2503 if(th>=0&&sh!=th) emit_mov(sh,th);
2504 }
2505 }
2506 }
2507 }
2508 }
2509 if(opcode2[i]==0x3c) // DSLL32
2510 {
// Shift amount is 32+(imm&31): low result word becomes zero, the old
// low source word (shifted by imm&31) becomes the high result word.
2511 if(rt1[i]) {
2512 signed char sl,tl,th;
2513 tl=get_reg(i_regs->regmap,rt1[i]);
2514 th=get_reg(i_regs->regmap,rt1[i]|64);
2515 sl=get_reg(i_regs->regmap,rs1[i]);
2516 if(th>=0||tl>=0){
2517 assert(tl>=0);
2518 assert(th>=0);
2519 assert(sl>=0);
2520 emit_mov(sl,th);
2521 emit_zeroreg(tl);
2522 if(imm[i]>32)
2523 {
2524 emit_shlimm(th,imm[i]&31,th);
2525 }
2526 }
2527 }
2528 }
2529 if(opcode2[i]==0x3e) // DSRL32
2530 {
2531 if(rt1[i]) {
2532 signed char sh,tl,th;
2533 tl=get_reg(i_regs->regmap,rt1[i]);
2534 th=get_reg(i_regs->regmap,rt1[i]|64);
2535 sh=get_reg(i_regs->regmap,rs1[i]|64);
2536 if(tl>=0){
2537 assert(sh>=0);
2538 emit_mov(sh,tl);
2539 if(th>=0) emit_zeroreg(th);
2540 if(imm[i]>32)
2541 {
2542 emit_shrimm(tl,imm[i]&31,tl);
2543 }
2544 }
2545 }
2546 }
2547 if(opcode2[i]==0x3f) // DSRA32
2548 {
// NOTE: only the low result word is produced here; the high word
// (sign extension) is presumably handled by the caller/allocator —
// no th is written in this case.
2549 if(rt1[i]) {
2550 signed char sh,tl;
2551 tl=get_reg(i_regs->regmap,rt1[i]);
2552 sh=get_reg(i_regs->regmap,rs1[i]|64);
2553 if(tl>=0){
2554 assert(sh>=0);
2555 emit_mov(sh,tl);
2556 if(imm[i]>32)
2557 {
2558 emit_sarimm(tl,imm[i]&31,tl);
2559 }
2560 }
2561 }
2562 }
2563}
2564
#ifndef shift_assemble
/*
 * Fallback stub used when the architecture backend (assem_x86/x64/arm)
 * does not provide shift_assemble.  Reaching it is a build/porting
 * error, so report on stderr (not stdout, which may be redirected or
 * buffered) and terminate.
 *
 * i      - instruction index (unused here)
 * i_regs - register allocation state (unused here)
 */
void shift_assemble(int i,struct regstat *i_regs)
{
  fprintf(stderr, "Need shift_assemble for this architecture.\n");
  exit(1);
}
#endif
2572
/*
 * load_assemble: emit host code for MIPS load instructions
 * (LB/LH/LW/LBU/LHU/LWU/LD).
 *
 * i      - instruction index into the decode arrays.
 * i_regs - register allocation state for this instruction.
 *
 * Fast path: compare the address against RAM and read directly
 * (emit_*_indexed_tlb); slow path: a jump recorded with add_stub()
 * that falls back to the LOADx_STUB handler.  When the address is a
 * known constant (c) pointing outside RAM, the read is inlined via
 * inline_readstub().  Loads whose result is unused ("dummy") still
 * perform the address check so I/O side effects and faults occur.
 */
2573 void load_assemble(int i,struct regstat *i_regs)
2574 {
2575 int s,th,tl,addr,map=-1;
2576 int offset;
2577 int jaddr=0;
5bf843dc 2578 int memtarget=0,c=0;
b1570849 2579 int fastload_reg_override=0;
57871462 2580 u_int hr,reglist=0;
2581 th=get_reg(i_regs->regmap,rt1[i]|64);
2582 tl=get_reg(i_regs->regmap,rt1[i]);
2583 s=get_reg(i_regs->regmap,rs1[i]);
2584 offset=imm[i];
// Build the set of live host registers for the stub's save/restore.
2585 for(hr=0;hr<HOST_REGS;hr++) {
2586 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2587 }
2588 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2589 if(s>=0) {
2590 c=(i_regs->wasconst>>s)&1;
af4ee1fe 2591 if (c) {
// memtarget: constant address falls inside (mirrored) RAM.
2592 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 2593 }
57871462 2594 }
57871462 2595 //printf("load_assemble: c=%d\n",c);
2596 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2597 // FIXME: Even if the load is a NOP, we should check for pagefaults...
// Force-read cases: no dest reg allocated but target may be hardware
// (0x1f80 I/O range), or dest is r0 — grab a scratch reg for the result.
581335b0 2598 if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
f18c0f46 2599 ||rt1[i]==0) {
5bf843dc 2600 // could be FIFO, must perform the read
f18c0f46 2601 // ||dummy read
5bf843dc 2602 assem_debug("(forced read)\n");
2603 tl=get_reg(i_regs->regmap,-1);
2604 assert(tl>=0);
5bf843dc 2605 }
// addr: host reg holding the effective address.  If there is an offset,
// no source reg, or a constant source, the address is computed into tl.
2606 if(offset||s<0||c) addr=tl;
2607 else addr=s;
535d208a 2608 //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2609 if(tl>=0) {
2610 //printf("load_assemble: c=%d\n",c);
2611 //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2612 assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2613 reglist&=~(1<<tl);
2614 if(th>=0) reglist&=~(1<<th);
1edfcc68 2615 if(!c) {
2616 #ifdef RAM_OFFSET
2617 map=get_reg(i_regs->regmap,ROREG);
2618 if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2619 #endif
2620 #ifdef R29_HACK
2621 // Strmnnrmn's speed hack
2622 if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2623 #endif
2624 {
// Emits the RAM-range check; returns the branch to patch into the
// slow-path stub, and may pick an override reg holding the adjusted address.
2625 jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
535d208a 2626 }
1edfcc68 2627 }
2628 else if(ram_offset&&memtarget) {
2629 emit_addimm(addr,ram_offset,HOST_TEMPREG);
2630 fastload_reg_override=HOST_TEMPREG;
535d208a 2631 }
2632 int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2633 if (opcode[i]==0x20) { // LB
2634 if(!c||memtarget) {
2635 if(!dummy) {
57871462 2636 #ifdef HOST_IMM_ADDR32
2637 if(c)
2638 emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2639 else
2640 #endif
2641 {
2642 //emit_xorimm(addr,3,tl);
57871462 2643 //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
535d208a 2644 int x=0,a=tl;
2002a1db 2645#ifdef BIG_ENDIAN_MIPS
// Byte lane swap: XOR the address (or fold it into the constant offset x).
57871462 2646 if(!c) emit_xorimm(addr,3,tl);
2647 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 2648#else
535d208a 2649 if(!c) a=addr;
dadf55f2 2650#endif
b1570849 2651 if(fastload_reg_override) a=fastload_reg_override;
2652
535d208a 2653 emit_movsbl_indexed_tlb(x,a,map,tl);
57871462 2654 }
57871462 2655 }
535d208a 2656 if(jaddr)
2657 add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2658 }
535d208a 2659 else
2660 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2661 }
2662 if (opcode[i]==0x21) { // LH
2663 if(!c||memtarget) {
2664 if(!dummy) {
57871462 2665 #ifdef HOST_IMM_ADDR32
2666 if(c)
2667 emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2668 else
2669 #endif
2670 {
535d208a 2671 int x=0,a=tl;
2002a1db 2672#ifdef BIG_ENDIAN_MIPS
57871462 2673 if(!c) emit_xorimm(addr,2,tl);
2674 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 2675#else
535d208a 2676 if(!c) a=addr;
dadf55f2 2677#endif
b1570849 2678 if(fastload_reg_override) a=fastload_reg_override;
57871462 2679 //#ifdef
2680 //emit_movswl_indexed_tlb(x,tl,map,tl);
2681 //else
2682 if(map>=0) {
535d208a 2683 emit_movswl_indexed(x,a,tl);
2684 }else{
a327ad27 2685 #if 1 //def RAM_OFFSET
535d208a 2686 emit_movswl_indexed(x,a,tl);
2687 #else
2688 emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2689 #endif
2690 }
57871462 2691 }
57871462 2692 }
535d208a 2693 if(jaddr)
2694 add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2695 }
535d208a 2696 else
2697 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2698 }
2699 if (opcode[i]==0x23) { // LW
2700 if(!c||memtarget) {
2701 if(!dummy) {
dadf55f2 2702 int a=addr;
b1570849 2703 if(fastload_reg_override) a=fastload_reg_override;
57871462 2704 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2705 #ifdef HOST_IMM_ADDR32
2706 if(c)
2707 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2708 else
2709 #endif
dadf55f2 2710 emit_readword_indexed_tlb(0,a,map,tl);
57871462 2711 }
535d208a 2712 if(jaddr)
2713 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2714 }
535d208a 2715 else
2716 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2717 }
2718 if (opcode[i]==0x24) { // LBU
2719 if(!c||memtarget) {
2720 if(!dummy) {
57871462 2721 #ifdef HOST_IMM_ADDR32
2722 if(c)
2723 emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2724 else
2725 #endif
2726 {
2727 //emit_xorimm(addr,3,tl);
57871462 2728 //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
535d208a 2729 int x=0,a=tl;
2002a1db 2730#ifdef BIG_ENDIAN_MIPS
57871462 2731 if(!c) emit_xorimm(addr,3,tl);
2732 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 2733#else
535d208a 2734 if(!c) a=addr;
dadf55f2 2735#endif
b1570849 2736 if(fastload_reg_override) a=fastload_reg_override;
2737
535d208a 2738 emit_movzbl_indexed_tlb(x,a,map,tl);
57871462 2739 }
57871462 2740 }
535d208a 2741 if(jaddr)
2742 add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2743 }
535d208a 2744 else
2745 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2746 }
2747 if (opcode[i]==0x25) { // LHU
2748 if(!c||memtarget) {
2749 if(!dummy) {
57871462 2750 #ifdef HOST_IMM_ADDR32
2751 if(c)
2752 emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2753 else
2754 #endif
2755 {
535d208a 2756 int x=0,a=tl;
2002a1db 2757#ifdef BIG_ENDIAN_MIPS
57871462 2758 if(!c) emit_xorimm(addr,2,tl);
2759 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 2760#else
535d208a 2761 if(!c) a=addr;
dadf55f2 2762#endif
b1570849 2763 if(fastload_reg_override) a=fastload_reg_override;
57871462 2764 //#ifdef
2765 //emit_movzwl_indexed_tlb(x,tl,map,tl);
2766 //#else
2767 if(map>=0) {
535d208a 2768 emit_movzwl_indexed(x,a,tl);
2769 }else{
a327ad27 2770 #if 1 //def RAM_OFFSET
535d208a 2771 emit_movzwl_indexed(x,a,tl);
2772 #else
2773 emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
2774 #endif
2775 }
57871462 2776 }
2777 }
535d208a 2778 if(jaddr)
2779 add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2780 }
535d208a 2781 else
2782 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2783 }
2784 if (opcode[i]==0x27) { // LWU
// LWU zero-extends: word load into tl, then the high half is cleared.
2785 assert(th>=0);
2786 if(!c||memtarget) {
2787 if(!dummy) {
dadf55f2 2788 int a=addr;
b1570849 2789 if(fastload_reg_override) a=fastload_reg_override;
57871462 2790 //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2791 #ifdef HOST_IMM_ADDR32
2792 if(c)
2793 emit_readword_tlb(constmap[i][s]+offset,map,tl);
2794 else
2795 #endif
dadf55f2 2796 emit_readword_indexed_tlb(0,a,map,tl);
57871462 2797 }
535d208a 2798 if(jaddr)
2799 add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2800 }
2801 else {
2802 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
57871462 2803 }
535d208a 2804 emit_zeroreg(th);
2805 }
2806 if (opcode[i]==0x37) { // LD
2807 if(!c||memtarget) {
2808 if(!dummy) {
dadf55f2 2809 int a=addr;
b1570849 2810 if(fastload_reg_override) a=fastload_reg_override;
57871462 2811 //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2812 //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2813 #ifdef HOST_IMM_ADDR32
2814 if(c)
2815 emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2816 else
2817 #endif
dadf55f2 2818 emit_readdword_indexed_tlb(0,a,map,th,tl);
57871462 2819 }
535d208a 2820 if(jaddr)
2821 add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
57871462 2822 }
535d208a 2823 else
2824 inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
57871462 2825 }
535d208a 2826 }
2827 //emit_storereg(rt1[i],tl); // DEBUG
57871462 2828 //if(opcode[i]==0x23)
2829 //if(opcode[i]==0x24)
2830 //if(opcode[i]==0x23||opcode[i]==0x24)
2831 /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2832 {
2833 //emit_pusha();
2834 save_regs(0x100f);
2835 emit_readword((int)&last_count,ECX);
2836 #ifdef __i386__
2837 if(get_reg(i_regs->regmap,CCREG)<0)
2838 emit_loadreg(CCREG,HOST_CCREG);
2839 emit_add(HOST_CCREG,ECX,HOST_CCREG);
2840 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2841 emit_writeword(HOST_CCREG,(int)&Count);
2842 #endif
2843 #ifdef __arm__
2844 if(get_reg(i_regs->regmap,CCREG)<0)
2845 emit_loadreg(CCREG,0);
2846 else
2847 emit_mov(HOST_CCREG,0);
2848 emit_add(0,ECX,0);
2849 emit_addimm(0,2*ccadj[i],0);
2850 emit_writeword(0,(int)&Count);
2851 #endif
2852 emit_call((int)memdebug);
2853 //emit_popa();
2854 restore_regs(0x100f);
581335b0 2855 }*/
57871462 2856}
2857
#ifndef loadlr_assemble
/*
 * Fallback stub used when the architecture backend does not provide
 * loadlr_assemble (LWL/LWR/LDL/LDR).  Reaching it is a build/porting
 * error, so report on stderr (not stdout, which may be redirected or
 * buffered) and terminate.
 *
 * i      - instruction index (unused here)
 * i_regs - register allocation state (unused here)
 */
void loadlr_assemble(int i,struct regstat *i_regs)
{
  fprintf(stderr, "Need loadlr_assemble for this architecture.\n");
  exit(1);
}
#endif
2865
/*
 * store_assemble: emit host code for MIPS store instructions
 * (SB/SH/SW/SD).
 *
 * i      - instruction index into the decode arrays.
 * i_regs - register allocation state for this instruction.
 *
 * Fast path writes directly to RAM (emit_write*_indexed_tlb); the
 * slow path is a stub recorded via add_stub().  After the store, a
 * self-modifying-code check compares against invalid_code[] and calls
 * the invalidation path when the written page holds translated code.
 * Known-constant stores outside RAM are inlined via inline_writestub().
 */
2866 void store_assemble(int i,struct regstat *i_regs)
2867 {
2868 int s,th,tl,map=-1;
2869 int addr,temp;
2870 int offset;
581335b0 2871 int jaddr=0,type;
666a299d 2872 int memtarget=0,c=0;
57871462 2873 int agr=AGEN1+(i&1);
b1570849 2874 int faststore_reg_override=0;
57871462 2875 u_int hr,reglist=0;
// tl/th: host regs holding the value to store (rs2); s: base address reg.
2876 th=get_reg(i_regs->regmap,rs2[i]|64);
2877 tl=get_reg(i_regs->regmap,rs2[i]);
2878 s=get_reg(i_regs->regmap,rs1[i]);
2879 temp=get_reg(i_regs->regmap,agr);
2880 if(temp<0) temp=get_reg(i_regs->regmap,-1);
2881 offset=imm[i];
2882 if(s>=0) {
2883 c=(i_regs->wasconst>>s)&1;
af4ee1fe 2884 if(c) {
2885 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 2886 }
57871462 2887 }
2888 assert(tl>=0);
2889 assert(temp>=0);
2890 for(hr=0;hr<HOST_REGS;hr++) {
2891 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2892 }
2893 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2894 if(offset||s<0||c) addr=temp;
2895 else addr=s;
1edfcc68 2896 if(!c) {
2897 jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
2898 }
2899 else if(ram_offset&&memtarget) {
2900 emit_addimm(addr,ram_offset,HOST_TEMPREG);
2901 faststore_reg_override=HOST_TEMPREG;
57871462 2902 }
2903
2904 if (opcode[i]==0x28) { // SB
2905 if(!c||memtarget) {
97a238a6 2906 int x=0,a=temp;
2002a1db 2907#ifdef BIG_ENDIAN_MIPS
// Byte lane swap: XOR the address (or fold it into the constant offset x).
57871462 2908 if(!c) emit_xorimm(addr,3,temp);
2909 else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2002a1db 2910#else
97a238a6 2911 if(!c) a=addr;
dadf55f2 2912#endif
b1570849 2913 if(faststore_reg_override) a=faststore_reg_override;
57871462 2914 //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
97a238a6 2915 emit_writebyte_indexed_tlb(tl,x,a,map,a);
57871462 2916 }
2917 type=STOREB_STUB;
2918 }
2919 if (opcode[i]==0x29) { // SH
2920 if(!c||memtarget) {
97a238a6 2921 int x=0,a=temp;
2002a1db 2922#ifdef BIG_ENDIAN_MIPS
57871462 2923 if(!c) emit_xorimm(addr,2,temp);
2924 else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2002a1db 2925#else
97a238a6 2926 if(!c) a=addr;
dadf55f2 2927#endif
b1570849 2928 if(faststore_reg_override) a=faststore_reg_override;
57871462 2929 //#ifdef
2930 //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
2931 //#else
2932 if(map>=0) {
97a238a6 2933 emit_writehword_indexed(tl,x,a);
57871462 2934 }else
a327ad27 2935 //emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
2936 emit_writehword_indexed(tl,x,a);
57871462 2937 }
2938 type=STOREH_STUB;
2939 }
2940 if (opcode[i]==0x2B) { // SW
dadf55f2 2941 if(!c||memtarget) {
2942 int a=addr;
b1570849 2943 if(faststore_reg_override) a=faststore_reg_override;
57871462 2944 //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
dadf55f2 2945 emit_writeword_indexed_tlb(tl,0,a,map,temp);
2946 }
57871462 2947 type=STOREW_STUB;
2948 }
2949 if (opcode[i]==0x3F) { // SD
2950 if(!c||memtarget) {
dadf55f2 2951 int a=addr;
b1570849 2952 if(faststore_reg_override) a=faststore_reg_override;
57871462 2953 if(rs2[i]) {
2954 assert(th>=0);
2955 //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
2956 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
dadf55f2 2957 emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
57871462 2958 }else{
2959 // Store zero
2960 //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
2961 //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
dadf55f2 2962 emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
57871462 2963 }
2964 }
2965 type=STORED_STUB;
2966 }
b96d3df7 2967 if(jaddr) {
2968 // PCSX store handlers don't check invcode again
2969 reglist|=1<<addr;
2970 add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2971 jaddr=0;
2972 }
// Self-modifying-code check: compare invalid_code[addr>>12] and invoke
// the invalidation path when the target page contains translated code.
1edfcc68 2973 if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
57871462 2974 if(!c||memtarget) {
2975 #ifdef DESTRUCTIVE_SHIFT
2976 // The x86 shift operation is 'destructive'; it overwrites the
2977 // source register, so we need to make a copy first and use that.
2978 addr=temp;
2979 #endif
2980 #if defined(HOST_IMM8)
2981 int ir=get_reg(i_regs->regmap,INVCP);
2982 assert(ir>=0);
2983 emit_cmpmem_indexedsr12_reg(ir,addr,1);
2984 #else
2985 emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
2986 #endif
0bbd1454 2987 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2988 emit_callne(invalidate_addr_reg[addr]);
2989 #else
581335b0 2990 int jaddr2=(int)out;
57871462 2991 emit_jne(0);
2992 add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
0bbd1454 2993 #endif
57871462 2994 }
2995 }
// NOTE(review): constmap[i][s] is read unconditionally here even though s
// may be -1 when no base reg is allocated (out-of-bounds index); the value
// is only *used* below under c (which implies s>=0) — confirm/clean up.
7a518516 2996 u_int addr_val=constmap[i][s]+offset;
// NOTE(review): jaddr was set to 0 above after the first add_stub and is
// not reassigned, so this branch appears to be dead; only the
// inline_writestub else-branch can execute.
3eaa7048 2997 if(jaddr) {
2998 add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2999 } else if(c&&!memtarget) {
7a518516 3000 inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
3001 }
3002 // basic current block modification detection..
3003 // not looking back as that should be in mips cache already
3004 if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
c43b5311 3005 SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
7a518516 3006 assert(i_regs->regmap==regs[i].regmap); // not delay slot
3007 if(i_regs->regmap==regs[i].regmap) {
3008 load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
3009 wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty);
// Writing to the current block: flush state and restart at the next pc
// through the interrupt/exception path so the block gets retranslated.
3010 emit_movimm(start+i*4+4,0);
3011 emit_writeword(0,(int)&pcaddr);
3012 emit_jmp((int)do_interrupt);
3013 }
3eaa7048 3014 }
57871462 3015 //if(opcode[i]==0x2B || opcode[i]==0x3F)
3016 //if(opcode[i]==0x2B || opcode[i]==0x28)
3017 //if(opcode[i]==0x2B || opcode[i]==0x29)
3018 //if(opcode[i]==0x2B)
3019 /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3020 {
28d74ee8 3021 #ifdef __i386__
3022 emit_pusha();
3023 #endif
3024 #ifdef __arm__
57871462 3025 save_regs(0x100f);
28d74ee8 3026 #endif
57871462 3027 emit_readword((int)&last_count,ECX);
3028 #ifdef __i386__
3029 if(get_reg(i_regs->regmap,CCREG)<0)
3030 emit_loadreg(CCREG,HOST_CCREG);
3031 emit_add(HOST_CCREG,ECX,HOST_CCREG);
3032 emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3033 emit_writeword(HOST_CCREG,(int)&Count);
3034 #endif
3035 #ifdef __arm__
3036 if(get_reg(i_regs->regmap,CCREG)<0)
3037 emit_loadreg(CCREG,0);
3038 else
3039 emit_mov(HOST_CCREG,0);
3040 emit_add(0,ECX,0);
3041 emit_addimm(0,2*ccadj[i],0);
3042 emit_writeword(0,(int)&Count);
3043 #endif
3044 emit_call((int)memdebug);
28d74ee8 3045 #ifdef __i386__
3046 emit_popa();
3047 #endif
3048 #ifdef __arm__
57871462 3049 restore_regs(0x100f);
28d74ee8 3050 #endif
581335b0 3051 }*/
57871462 3052}
3053
3054void storelr_assemble(int i,struct regstat *i_regs)
3055{
3056 int s,th,tl;
3057 int temp;
581335b0 3058 int temp2=-1;
57871462 3059 int offset;
581335b0 3060 int jaddr=0;
57871462 3061 int case1,case2,case3;
3062 int done0,done1,done2;
af4ee1fe 3063 int memtarget=0,c=0;
fab5d06d 3064 int agr=AGEN1+(i&1);
57871462 3065 u_int hr,reglist=0;
3066 th=get_reg(i_regs->regmap,rs2[i]|64);
3067 tl=get_reg(i_regs->regmap,rs2[i]);
3068 s=get_reg(i_regs->regmap,rs1[i]);
fab5d06d 3069 temp=get_reg(i_regs->regmap,agr);
3070 if(temp<0) temp=get_reg(i_regs->regmap,-1);
57871462 3071 offset=imm[i];
3072 if(s>=0) {
3073 c=(i_regs->isconst>>s)&1;
af4ee1fe 3074 if(c) {
3075 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 3076 }
57871462 3077 }
3078 assert(tl>=0);
3079 for(hr=0;hr<HOST_REGS;hr++) {
3080 if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3081 }
535d208a 3082 assert(temp>=0);
1edfcc68 3083 if(!c) {
3084 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3085 if(!offset&&s!=temp) emit_mov(s,temp);
3086 jaddr=(int)out;
3087 emit_jno(0);
3088 }
3089 else
3090 {
3091 if(!memtarget||!rs1[i]) {
535d208a 3092 jaddr=(int)out;
3093 emit_jmp(0);
57871462 3094 }
535d208a 3095 }
1edfcc68 3096 #ifdef RAM_OFFSET
3097 int map=get_reg(i_regs->regmap,ROREG);
3098 if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3099 #else
9f51b4b9 3100 if((u_int)rdram!=0x80000000)
1edfcc68 3101 emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3102 #endif
535d208a 3103
3104 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3105 temp2=get_reg(i_regs->regmap,FTEMP);
3106 if(!rs2[i]) temp2=th=tl;
3107 }
57871462 3108
2002a1db 3109#ifndef BIG_ENDIAN_MIPS
3110 emit_xorimm(temp,3,temp);
3111#endif
535d208a 3112 emit_testimm(temp,2);
3113 case2=(int)out;
3114 emit_jne(0);
3115 emit_testimm(temp,1);
3116 case1=(int)out;
3117 emit_jne(0);
3118 // 0
3119 if (opcode[i]==0x2A) { // SWL
3120 emit_writeword_indexed(tl,0,temp);
3121 }
3122 if (opcode[i]==0x2E) { // SWR
3123 emit_writebyte_indexed(tl,3,temp);
3124 }
3125 if (opcode[i]==0x2C) { // SDL
3126 emit_writeword_indexed(th,0,temp);
3127 if(rs2[i]) emit_mov(tl,temp2);
3128 }
3129 if (opcode[i]==0x2D) { // SDR
3130 emit_writebyte_indexed(tl,3,temp);
3131 if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3132 }
3133 done0=(int)out;
3134 emit_jmp(0);
3135 // 1
3136 set_jump_target(case1,(int)out);
3137 if (opcode[i]==0x2A) { // SWL
3138 // Write 3 msb into three least significant bytes
3139 if(rs2[i]) emit_rorimm(tl,8,tl);
3140 emit_writehword_indexed(tl,-1,temp);
3141 if(rs2[i]) emit_rorimm(tl,16,tl);
3142 emit_writebyte_indexed(tl,1,temp);
3143 if(rs2[i]) emit_rorimm(tl,8,tl);
3144 }
3145 if (opcode[i]==0x2E) { // SWR
3146 // Write two lsb into two most significant bytes
3147 emit_writehword_indexed(tl,1,temp);
3148 }
3149 if (opcode[i]==0x2C) { // SDL
3150 if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3151 // Write 3 msb into three least significant bytes
3152 if(rs2[i]) emit_rorimm(th,8,th);
3153 emit_writehword_indexed(th,-1,temp);
3154 if(rs2[i]) emit_rorimm(th,16,th);
3155 emit_writebyte_indexed(th,1,temp);
3156 if(rs2[i]) emit_rorimm(th,8,th);
3157 }
3158 if (opcode[i]==0x2D) { // SDR
3159 if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3160 // Write two lsb into two most significant bytes
3161 emit_writehword_indexed(tl,1,temp);
3162 }
3163 done1=(int)out;
3164 emit_jmp(0);
3165 // 2
3166 set_jump_target(case2,(int)out);
3167 emit_testimm(temp,1);
3168 case3=(int)out;
3169 emit_jne(0);
3170 if (opcode[i]==0x2A) { // SWL
3171 // Write two msb into two least significant bytes
3172 if(rs2[i]) emit_rorimm(tl,16,tl);
3173 emit_writehword_indexed(tl,-2,temp);
3174 if(rs2[i]) emit_rorimm(tl,16,tl);
3175 }
3176 if (opcode[i]==0x2E) { // SWR
3177 // Write 3 lsb into three most significant bytes
3178 emit_writebyte_indexed(tl,-1,temp);
3179 if(rs2[i]) emit_rorimm(tl,8,tl);
3180 emit_writehword_indexed(tl,0,temp);
3181 if(rs2[i]) emit_rorimm(tl,24,tl);
3182 }
3183 if (opcode[i]==0x2C) { // SDL
3184 if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3185 // Write two msb into two least significant bytes
3186 if(rs2[i]) emit_rorimm(th,16,th);
3187 emit_writehword_indexed(th,-2,temp);
3188 if(rs2[i]) emit_rorimm(th,16,th);
3189 }
3190 if (opcode[i]==0x2D) { // SDR
3191 if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3192 // Write 3 lsb into three most significant bytes
3193 emit_writebyte_indexed(tl,-1,temp);
3194 if(rs2[i]) emit_rorimm(tl,8,tl);
3195 emit_writehword_indexed(tl,0,temp);
3196 if(rs2[i]) emit_rorimm(tl,24,tl);
3197 }
3198 done2=(int)out;
3199 emit_jmp(0);
3200 // 3
3201 set_jump_target(case3,(int)out);
3202 if (opcode[i]==0x2A) { // SWL
3203 // Write msb into least significant byte
3204 if(rs2[i]) emit_rorimm(tl,24,tl);
3205 emit_writebyte_indexed(tl,-3,temp);
3206 if(rs2[i]) emit_rorimm(tl,8,tl);
3207 }
3208 if (opcode[i]==0x2E) { // SWR
3209 // Write entire word
3210 emit_writeword_indexed(tl,-3,temp);
3211 }
3212 if (opcode[i]==0x2C) { // SDL
3213 if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3214 // Write msb into least significant byte
3215 if(rs2[i]) emit_rorimm(th,24,th);
3216 emit_writebyte_indexed(th,-3,temp);
3217 if(rs2[i]) emit_rorimm(th,8,th);
3218 }
3219 if (opcode[i]==0x2D) { // SDR
3220 if(rs2[i]) emit_mov(th,temp2);
3221 // Write entire word
3222 emit_writeword_indexed(tl,-3,temp);
3223 }
3224 set_jump_target(done0,(int)out);