drc: some libnx support
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
57871462 1/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
20d507ba 3 * Copyright (C) 2009-2011 Ari64 *
57871462 4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21#include <stdlib.h>
 22#include <stdint.h> // for uint64_t
23#include <assert.h>
d848b60a 24#include <errno.h>
4600ba03 25#include <sys/mman.h>
d148d265 26#ifdef __MACH__
27#include <libkern/OSCacheControl.h>
28#endif
1e212a25 29#ifdef _3DS
30#include <3ds_utils.h>
31#endif
3039c914 32#ifdef HAVE_LIBNX
33#include <switch.h>
34static Jit g_jit;
35#endif
57871462 36
d148d265 37#include "new_dynarec_config.h"
3968e69e 38#include "../psxhle.h"
39#include "../psxinterpreter.h"
81dbbf4c 40#include "../gte.h"
41#include "emu_if.h" // emulator interface
cdc2da64 42#include "arm_features.h"
57871462 43
d1e4ebd9 44#define noinline __attribute__((noinline,noclone))
b14b6a8f 45#ifndef ARRAY_SIZE
46#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
47#endif
e3c6bdb5 48#ifndef min
49#define min(a, b) ((b) < (a) ? (b) : (a))
50#endif
32631e6a 51#ifndef max
52#define max(a, b) ((b) > (a) ? (b) : (a))
53#endif
b14b6a8f 54
4600ba03 55//#define DISASM
32631e6a 56//#define ASSEM_PRINT
ece032e6 57//#define STAT_PRINT
32631e6a 58
59#ifdef ASSEM_PRINT
60#define assem_debug printf
61#else
4600ba03 62#define assem_debug(...)
32631e6a 63#endif
64//#define inv_debug printf
4600ba03 65#define inv_debug(...)
57871462 66
67#ifdef __i386__
68#include "assem_x86.h"
69#endif
70#ifdef __x86_64__
71#include "assem_x64.h"
72#endif
73#ifdef __arm__
74#include "assem_arm.h"
75#endif
be516ebe 76#ifdef __aarch64__
77#include "assem_arm64.h"
78#endif
57871462 79
81dbbf4c 80#define RAM_SIZE 0x200000
57871462 81#define MAXBLOCK 4096
82#define MAX_OUTPUT_BLOCK_SIZE 262144
93c0345b 83#define EXPIRITY_OFFSET (MAX_OUTPUT_BLOCK_SIZE * 2)
84#define PAGE_COUNT 1024
2573466a 85
882a08fc 86#if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
87#define INVALIDATE_USE_COND_CALL
88#endif
89
66ea165f 90#ifdef VITA
91// apparently Vita has a 16MB limit, so either we cut tc in half,
92// or use this hack (it's a hack because tc size was designed to be power-of-2)
93#define TC_REDUCE_BYTES 4096
94#else
95#define TC_REDUCE_BYTES 0
96#endif
97
2a014d73 98struct ndrc_mem
99{
66ea165f 100 u_char translation_cache[(1 << TARGET_SIZE_2) - TC_REDUCE_BYTES];
2a014d73 101 struct
102 {
103 struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
104 const void *f[2048 / sizeof(void *)];
105 } tramp;
106};
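// Layout note: the translation cache is followed by a small trampoline
// area used by emit_far_call()/emit_far_jump() below when a target lies
// outside direct branch range.  tramp.f[] holds the real function
// pointers; the matching tramp.ops[] slots presumably contain a short
// load-and-branch sequence provided by the platform assembler backend
// (not visible in this file).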
107
108#ifdef BASE_ADDR_DYNAMIC
109static struct ndrc_mem *ndrc;
110#else
111static struct ndrc_mem ndrc_ __attribute__((aligned(4096)));
112static struct ndrc_mem *ndrc = &ndrc_;
113#endif
3039c914 114#ifdef NDRC_WRITE_OFFSET
115# ifdef __GLIBC__
116# include <sys/types.h>
117# include <sys/stat.h>
118# include <fcntl.h>
119# include <unistd.h>
120# endif
121static long ndrc_write_ofs;
122#else
123#define ndrc_write_ofs 0
124#endif
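// With NDRC_WRITE_OFFSET the translation cache is mapped twice: an
// executable (rx) view and a writable (rw) view ndrc_write_ofs bytes away.
// start_tcache_write() below returns the rw-side pointer, so code is
// generated through the writable alias and executed through the rx one,
// avoiding mprotect() flips on W^X platforms.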
2a014d73 125
b14b6a8f 126// stubs
127enum stub_type {
128 CC_STUB = 1,
129 FP_STUB = 2,
130 LOADB_STUB = 3,
131 LOADH_STUB = 4,
132 LOADW_STUB = 5,
133 LOADD_STUB = 6,
134 LOADBU_STUB = 7,
135 LOADHU_STUB = 8,
136 STOREB_STUB = 9,
137 STOREH_STUB = 10,
138 STOREW_STUB = 11,
139 STORED_STUB = 12,
140 STORELR_STUB = 13,
141 INVCODE_STUB = 14,
142};
143
6cc8d23c 144// regmap_pre[i] - regs before [i] insn starts; dirty things here that
145// don't match .regmap will be written back
146// [i].regmap_entry - regs that must be set up if someone jumps here
147// [i].regmap - regs [i] insn will read/(over)write
2acc46cd 148// branch_regs[i].* - same as above but for branches, takes delay slot into account
57871462 149struct regstat
150{
6cc8d23c 151 signed char regmap_entry[HOST_REGS];
57871462 152 signed char regmap[HOST_REGS];
57871462 153 uint64_t wasdirty;
154 uint64_t dirty;
155 uint64_t u;
24058131 156 u_int wasconst; // before; for example 'lw r2, (r2)' wasconst is true
 157 u_int isconst; // ... but isconst is false since r2 is no longer known
8575a877 158 u_int loadedconst; // host regs that have constants loaded
159 u_int waswritten; // MIPS regs that were used as store base before
57871462 160};
161
df4dc2b1 162struct ht_entry
163{
164 u_int vaddr[2];
165 void *tcaddr[2];
166};
167
b14b6a8f 168struct code_stub
169{
170 enum stub_type type;
171 void *addr;
172 void *retaddr;
173 u_int a;
174 uintptr_t b;
175 uintptr_t c;
176 u_int d;
177 u_int e;
178};
179
643aeae3 180struct link_entry
181{
182 void *addr;
183 u_int target;
104df9d3 184 u_int internal;
185};
186
187struct block_info
188{
189 struct block_info *next;
190 const void *source;
191 const void *copy;
192 u_int start; // vaddr of the block start
193 u_int len; // of the whole block source
194 u_int tc_offs;
195 //u_int tc_len;
196 u_int reg_sv_flags;
3280e616 197 u_char is_dirty;
198 u_char inv_near_misses;
104df9d3 199 u_short jump_in_cnt;
200 struct {
201 u_int vaddr;
202 void *addr;
203 } jump_in[0];
643aeae3 204};
205
b7ad2f2c 206struct jump_info
207{
208 int alloc;
209 int count;
210 struct {
211 u_int target_vaddr;
212 void *stub;
213 } e[0];
214};
215
cf95b4f0 216static struct decoded_insn
217{
218 u_char itype;
219 u_char opcode;
220 u_char opcode2;
221 u_char rs1;
222 u_char rs2;
223 u_char rt1;
224 u_char rt2;
53dc27f6 225 u_char use_lt1:1;
cf95b4f0 226 u_char bt:1;
cf95b4f0 227 u_char ooo:1;
228 u_char is_ds:1;
fe807a8a 229 u_char is_jump:1;
230 u_char is_ujump:1;
37387d8b 231 u_char is_load:1;
232 u_char is_store:1;
cf95b4f0 233} dops[MAXBLOCK];
234
398d6924 235 static u_char *out;
104df9d3 236 static struct ht_entry hash_table[65536];
93c0345b 237 static struct block_info *blocks[PAGE_COUNT];
b7ad2f2c 238 static struct jump_info *jumps[PAGE_COUNT];
e2b5e7aa 239 static u_int start;
240 static u_int *source;
bedfea38 241 static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
242 static uint64_t gte_rt[MAXBLOCK];
243 static uint64_t gte_unneeded[MAXBLOCK];
ffb0b9e0 244 static u_int smrv[32]; // speculated MIPS register values
 245 static u_int smrv_strong; // mask of regs that are likely to have correct values
246 static u_int smrv_weak; // same, but somewhat less likely
247 static u_int smrv_strong_next; // same, but after current insn executes
248 static u_int smrv_weak_next;
e2b5e7aa 249 static int imm[MAXBLOCK];
250 static u_int ba[MAXBLOCK];
e2b5e7aa 251 static uint64_t unneeded_reg[MAXBLOCK];
e2b5e7aa 252 static uint64_t branch_unneeded_reg[MAXBLOCK];
6cc8d23c 253 // see 'struct regstat' for a description
2330734f 254 static signed char regmap_pre[MAXBLOCK][HOST_REGS];
40fca85b 255 // contains 'real' consts at [i] insn, but may differ from what's actually
256 // loaded in host reg as 'final' value is always loaded, see get_final_value()
257 static uint32_t current_constmap[HOST_REGS];
258 static uint32_t constmap[MAXBLOCK][HOST_REGS];
956f3129 259 static struct regstat regs[MAXBLOCK];
260 static struct regstat branch_regs[MAXBLOCK];
e2b5e7aa 261 static signed char minimum_free_regs[MAXBLOCK];
e2b5e7aa 262 static int ccadj[MAXBLOCK];
263 static int slen;
df4dc2b1 264 static void *instr_addr[MAXBLOCK];
643aeae3 265 static struct link_entry link_addr[MAXBLOCK];
e2b5e7aa 266 static int linkcount;
b14b6a8f 267 static struct code_stub stubs[MAXBLOCK*3];
e2b5e7aa 268 static int stubcount;
269 static u_int literals[1024][2];
270 static int literalcount;
271 static int is_delayslot;
e2b5e7aa 272 static char shadow[1048576] __attribute__((aligned(16)));
273 static void *copy;
93c0345b 274 static u_int expirep;
e2b5e7aa 275 static u_int stop_after_jal;
7f94b097 276 static u_int f1_hack;
ece032e6 277#ifdef STAT_PRINT
278 static int stat_bc_direct;
279 static int stat_bc_pre;
280 static int stat_bc_restore;
104df9d3 281 static int stat_ht_lookups;
ece032e6 282 static int stat_jump_in_lookups;
283 static int stat_restore_tries;
284 static int stat_restore_compares;
285 static int stat_inv_addr_calls;
286 static int stat_inv_hits;
104df9d3 287 static int stat_blocks;
288 static int stat_links;
ece032e6 289 #define stat_inc(s) s++
104df9d3 290 #define stat_dec(s) s--
291 #define stat_clear(s) s = 0
ece032e6 292#else
293 #define stat_inc(s)
104df9d3 294 #define stat_dec(s)
295 #define stat_clear(s)
ece032e6 296#endif
e2b5e7aa 297
298 int new_dynarec_hacks;
d62c125a 299 int new_dynarec_hacks_pergame;
32631e6a 300 int new_dynarec_hacks_old;
e2b5e7aa 301 int new_dynarec_did_compile;
687b4580 302
d62c125a 303 #define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x))
304
687b4580 305 extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
306 extern int last_count; // last absolute target, often = next_interupt
307 extern int pcaddr;
308 extern int pending_exception;
309 extern int branch_target;
37387d8b 310 extern uintptr_t ram_offset;
d1e4ebd9 311 extern uintptr_t mini_ht[32][2];
57871462 312
313 /* registers that may be allocated */
314 /* 1-31 gpr */
7c3a5182 315#define LOREG 32 // lo
316#define HIREG 33 // hi
00fa9369 317//#define FSREG 34 // FPU status (FCSR)
57871462 318#define CSREG 35 // Coprocessor status
319#define CCREG 36 // Cycle count
320#define INVCP 37 // Pointer to invalid_code
1edfcc68 321//#define MMREG 38 // Pointer to memory_map
37387d8b 322#define ROREG 39 // ram offset (if rdram!=0x80000000)
619e5ded 323#define TEMPREG 40
324#define FTEMP 40 // FPU temporary register
325#define PTEMP 41 // Prefetch temporary register
1edfcc68 326//#define TLREG 42 // TLB mapping offset
619e5ded 327#define RHASH 43 // Return address hash
328#define RHTBL 44 // Return address hash table address
329#define RTEMP 45 // JR/JALR address register
330#define MAXREG 45
331#define AGEN1 46 // Address generation temporary register
1edfcc68 332//#define AGEN2 47 // Address generation temporary register
333//#define MGEN1 48 // Maptable address generation temporary register
334//#define MGEN2 49 // Maptable address generation temporary register
619e5ded 335#define BTREG 50 // Branch target temporary register
57871462 336
337 /* instruction types */
338#define NOP 0 // No operation
339#define LOAD 1 // Load
340#define STORE 2 // Store
341#define LOADLR 3 // Unaligned load
342#define STORELR 4 // Unaligned store
9f51b4b9 343#define MOV 5 // Move
57871462 344#define ALU 6 // Arithmetic/logic
345#define MULTDIV 7 // Multiply/divide
346#define SHIFT 8 // Shift by register
347#define SHIFTIMM 9// Shift by immediate
348#define IMM16 10 // 16-bit immediate
349#define RJUMP 11 // Unconditional jump to register
350#define UJUMP 12 // Unconditional jump
351#define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
352#define SJUMP 14 // Conditional branch (regimm format)
353#define COP0 15 // Coprocessor 0
354#define COP1 16 // Coprocessor 1
355#define C1LS 17 // Coprocessor 1 load/store
ad49de89 356//#define FJUMP 18 // Conditional branch (floating point)
00fa9369 357//#define FLOAT 19 // Floating point unit
358//#define FCONV 20 // Convert integer to float
359//#define FCOMP 21 // Floating point compare (sets FSREG)
d1150cd6 360#define SYSCALL 22// SYSCALL,BREAK
57871462 361#define OTHER 23 // Other
4bdc30ab 362//#define SPAN 24 // Branch/delay slot spans 2 pages
57871462 363#define NI 25 // Not implemented
7139f3c8 364#define HLECALL 26// PCSX fake opcodes for HLE
b9b61529 365#define COP2 27 // Coprocessor 2 move
366#define C2LS 28 // Coprocessor 2 load/store
367#define C2OP 29 // Coprocessor 2 operation
1e973cb0 368#define INTCALL 30// Call interpreter to handle rare corner cases
57871462 369
57871462 370 /* branch codes */
371#define TAKEN 1
372#define NOTTAKEN 2
373#define NULLDS 3
374
7c3a5182 375#define DJT_1 (void *)1l // no function, just a label in assem_debug log
376#define DJT_2 (void *)2l
377
57871462 378// asm linkage
57871462 379void dyna_linker();
57871462 380void cc_interrupt();
381void fp_exception();
382void fp_exception_ds();
d1150cd6 383void jump_syscall (u_int u0, u_int u1, u_int pc);
384void jump_syscall_ds(u_int u0, u_int u1, u_int pc);
385void jump_break (u_int u0, u_int u1, u_int pc);
386void jump_break_ds(u_int u0, u_int u1, u_int pc);
3968e69e 387void jump_to_new_pc();
81dbbf4c 388void call_gteStall();
7139f3c8 389void new_dyna_leave();
57871462 390
104df9d3 391void *ndrc_get_addr_ht_param(u_int vaddr, int can_compile);
392void *ndrc_get_addr_ht(u_int vaddr);
393void ndrc_invalidate_addr(u_int addr);
394void ndrc_add_jump_out(u_int vaddr, void *src);
395
396static int new_recompile_block(u_int addr);
397static void invalidate_block(struct block_info *block);
398d6924 398
57871462 399// Needed by assembler
2330734f 400static void wb_register(signed char r, const signed char regmap[], uint64_t dirty);
401static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty);
402static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr);
403static void load_all_regs(const signed char i_regmap[]);
404static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[]);
e2b5e7aa 405static void load_regs_entry(int t);
2330734f 406static void load_all_consts(const signed char regmap[], u_int dirty, int i);
81dbbf4c 407static u_int get_host_reglist(const signed char *regmap);
e2b5e7aa 408
e2b5e7aa 409static int get_final_value(int hr, int i, int *value);
b14b6a8f 410static void add_stub(enum stub_type type, void *addr, void *retaddr,
411 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
412static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
81dbbf4c 413 int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
643aeae3 414static void add_to_linker(void *addr, u_int target, int ext);
37387d8b 415static void *emit_fastpath_cmp_jump(int i, const struct regstat *i_regs,
416 int addr, int *offset_reg, int *addr_reg_override);
687b4580 417static void *get_direct_memhandler(void *table, u_int addr,
418 enum stub_type type, uintptr_t *addr_host);
32631e6a 419static void cop2_do_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
687b4580 420static void pass_args(int a0, int a1);
2a014d73 421static void emit_far_jump(const void *f);
422static void emit_far_call(const void *f);
57871462 423
9c67c98f 424#ifdef VITA
425#include <psp2/kernel/sysmem.h>
426static int sceBlock;
427// note: this interacts with RetroArch's Vita bootstrap code: bootstrap/vita/sbrk.c
428extern int getVMBlock();
429int _newlib_vm_size_user = sizeof(*ndrc);
430#endif
431
d148d265 432static void mprotect_w_x(void *start, void *end, int is_x)
433{
434#ifdef NO_WRITE_EXEC
1e212a25 435 #if defined(VITA)
436 // *Open* enables write on all memory that was
437 // allocated by sceKernelAllocMemBlockForVM()?
438 if (is_x)
439 sceKernelCloseVMDomain();
440 else
441 sceKernelOpenVMDomain();
3039c914 442 #elif defined(HAVE_LIBNX)
443 Result rc;
444 if (is_x)
445 rc = jitTransitionToExecutable(&g_jit);
446 else
447 rc = jitTransitionToWritable(&g_jit);
448 if (R_FAILED(rc))
449 SysPrintf("jitTransition %d %08x\n", is_x, rc);
450 #elif defined(NDRC_WRITE_OFFSET)
451 // separated rx and rw areas are always available
1e212a25 452 #else
d148d265 453 u_long mstart = (u_long)start & ~4095ul;
454 u_long mend = (u_long)end;
455 if (mprotect((void *)mstart, mend - mstart,
456 PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
457 SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
1e212a25 458 #endif
d148d265 459#endif
460}
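// W^X summary: on NO_WRITE_EXEC platforms the cache is toggled between
// writable and executable around code generation; Vita does it by
// opening/closing the VM domain, libnx via jitTransitionToWritable()/
// jitTransitionToExecutable() on g_jit, NDRC_WRITE_OFFSET builds do nothing
// here (the rw alias is always mapped), and everything else uses a
// page-aligned mprotect() call.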
461
3039c914 462static void *start_tcache_write(void *start, void *end)
d148d265 463{
464 mprotect_w_x(start, end, 0);
3039c914 465 return (char *)start + ndrc_write_ofs;
d148d265 466}
467
468static void end_tcache_write(void *start, void *end)
469{
919981d0 470#if defined(__arm__) || defined(__aarch64__)
d148d265 471 size_t len = (char *)end - (char *)start;
472 #if defined(__BLACKBERRY_QNX__)
473 msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
474 #elif defined(__MACH__)
475 sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
476 #elif defined(VITA)
1e212a25 477 sceKernelSyncVMDomain(sceBlock, start, len);
478 #elif defined(_3DS)
479 ctr_flush_invalidate_cache();
3039c914 480 #elif defined(HAVE_LIBNX)
481 // handled in mprotect_w_x()
919981d0 482 #elif defined(__aarch64__)
483 // as of 2021, __clear_cache() is still broken on arm64
484 // so here is a custom one :(
485 clear_cache_arm64(start, end);
d148d265 486 #else
487 __clear_cache(start, end);
488 #endif
489 (void)len;
490#endif
491
492 mprotect_w_x(start, end, 1);
493}
494
495static void *start_block(void)
496{
497 u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
2a014d73 498 if (end > ndrc->translation_cache + sizeof(ndrc->translation_cache))
499 end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
d148d265 500 start_tcache_write(out, end);
501 return out;
502}
503
504static void end_block(void *start)
505{
506 end_tcache_write(start, out);
507}
508
919981d0 509// also takes care of w^x mappings when patching code
510static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
511
512static void mark_clear_cache(void *target)
513{
514 uintptr_t offset = (u_char *)target - ndrc->translation_cache;
515 u_int mask = 1u << ((offset >> 12) & 31);
516 if (!(needs_clear_cache[offset >> 17] & mask)) {
517 char *start = (char *)((uintptr_t)target & ~4095l);
518 start_tcache_write(start, start + 4095);
519 needs_clear_cache[offset >> 17] |= mask;
520 }
521}
522
523// Clearing the cache is rather slow on ARM Linux, so mark the areas
524// that need to be cleared, and then only clear these areas once.
525static void do_clear_cache(void)
526{
527 int i, j;
528 for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
529 {
530 u_int bitmap = needs_clear_cache[i];
531 if (!bitmap)
532 continue;
533 for (j = 0; j < 32; j++)
534 {
535 u_char *start, *end;
93c0345b 536 if (!(bitmap & (1u << j)))
919981d0 537 continue;
538
539 start = ndrc->translation_cache + i*131072 + j*4096;
540 end = start + 4095;
541 for (j++; j < 32; j++) {
93c0345b 542 if (!(bitmap & (1u << j)))
919981d0 543 break;
544 end += 4096;
545 }
546 end_tcache_write(start, end);
547 }
548 needs_clear_cache[i] = 0;
549 }
550}
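// needs_clear_cache[] granularity: bit j of entry i covers the 4 KB page at
// offset i*128 KB + j*4 KB of the translation cache (mark_clear_cache() sets
// a bit whenever code in that page is patched).  do_clear_cache() flushes
// maximal runs of consecutive marked pages with a single end_tcache_write()
// call, which also restores execute permission via mprotect_w_x().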
551
57871462 552//#define DEBUG_CYCLE_COUNT 1
553
b6e87b2b 554#define NO_CYCLE_PENALTY_THR 12
555
26bd3dad 556int cycle_multiplier = CYCLE_MULT_DEFAULT; // 100 for 1.0
a3203cf4 557int cycle_multiplier_override;
32631e6a 558int cycle_multiplier_old;
24058131 559static int cycle_multiplier_active;
4e9dcd7f 560
561static int CLOCK_ADJUST(int x)
562{
24058131 563 int m = cycle_multiplier_active;
564 int s = (x >> 31) | 1;
a3203cf4 565 return (x * m + s * 50) / 100;
4e9dcd7f 566}
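// cycle_multiplier_active is a percentage (100 == 1.0x), and the s*50 term
// rounds the division to nearest instead of truncating toward zero, e.g.:
//   m=100: CLOCK_ADJUST(7)  == (700 + 50) / 100  == 7
//   m=200: CLOCK_ADJUST(10) == (2000 + 50) / 100 == 20
//   m=200: CLOCK_ADJUST(-3) == (-600 - 50) / 100 == -6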
567
4919de1e 568static int ds_writes_rjump_rs(int i)
569{
cf95b4f0 570 return dops[i].rs1 != 0 && (dops[i].rs1 == dops[i+1].rt1 || dops[i].rs1 == dops[i+1].rt2);
4919de1e 571}
572
104df9d3 573// psx addr mirror masking (for invalidation)
574static u_int pmmask(u_int vaddr)
575{
576 vaddr &= ~0xe0000000;
577 if (vaddr < 0x01000000)
578 vaddr &= ~0x00e00000; // RAM mirrors
579 return vaddr;
580}
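// Example: pmmask(0xa0801234) first drops the segment bits (KSEG1 ->
// 0x00801234), then, being below 0x01000000, folds the 8 MB of RAM mirrors
// onto the first 2 MB, giving 0x00001234.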
581
94d23bb9 582static u_int get_page(u_int vaddr)
57871462 583{
104df9d3 584 u_int page = pmmask(vaddr) >> 12;
93c0345b 585 if (page >= PAGE_COUNT / 2)
586 page = PAGE_COUNT / 2 + (page & (PAGE_COUNT / 2 - 1));
94d23bb9 587 return page;
588}
589
104df9d3 590// get a page for looking for a block that has vaddr
 591// (needed because the block may start in the previous page)
592static u_int get_page_prev(u_int vaddr)
d25604ca 593{
104df9d3 594 assert(MAXBLOCK <= (1 << 12));
595 u_int page = get_page(vaddr);
596 if (page & 511)
597 page--;
598 return page;
d25604ca 599}
94d23bb9 600
df4dc2b1 601static struct ht_entry *hash_table_get(u_int vaddr)
602{
603 return &hash_table[((vaddr>>16)^vaddr)&0xFFFF];
604}
605
104df9d3 606static void hash_table_add(u_int vaddr, void *tcaddr)
df4dc2b1 607{
104df9d3 608 struct ht_entry *ht_bin = hash_table_get(vaddr);
609 assert(tcaddr);
df4dc2b1 610 ht_bin->vaddr[1] = ht_bin->vaddr[0];
611 ht_bin->tcaddr[1] = ht_bin->tcaddr[0];
612 ht_bin->vaddr[0] = vaddr;
613 ht_bin->tcaddr[0] = tcaddr;
614}
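// Each of the 65536 buckets selected by ((vaddr>>16)^vaddr)&0xFFFF acts as a
// tiny 2-way cache: a new mapping always lands in slot 0, the previous
// slot-0 entry is demoted to slot 1, and the old slot-1 entry is dropped,
// so slot 0 is always the most recently added translation for that bucket.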
615
104df9d3 616static void hash_table_remove(int vaddr)
617{
618 //printf("remove hash: %x\n",vaddr);
619 struct ht_entry *ht_bin = hash_table_get(vaddr);
620 if (ht_bin->vaddr[1] == vaddr) {
621 ht_bin->vaddr[1] = -1;
622 ht_bin->tcaddr[1] = NULL;
623 }
624 if (ht_bin->vaddr[0] == vaddr) {
625 ht_bin->vaddr[0] = ht_bin->vaddr[1];
626 ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
627 ht_bin->vaddr[1] = -1;
628 ht_bin->tcaddr[1] = NULL;
629 }
630}
631
632static void mark_invalid_code(u_int vaddr, u_int len, char invalid)
398d6924 633{
ab4377be 634 u_int vaddr_m = vaddr & 0x1fffffff;
398d6924 635 u_int i, j;
ab4377be 636 for (i = vaddr_m & ~0xfff; i < vaddr_m + len; i += 0x1000) {
398d6924 637 // ram mirrors, but should not hurt bios
638 for (j = 0; j < 0x800000; j += 0x200000) {
639 invalid_code[(i|j) >> 12] =
640 invalid_code[(i|j|0x80000000u) >> 12] =
104df9d3 641 invalid_code[(i|j|0xa0000000u) >> 12] = invalid;
398d6924 642 }
643 }
882a08fc 644 if (!invalid && vaddr + len > inv_code_start && vaddr <= inv_code_end)
104df9d3 645 inv_code_start = inv_code_end = ~0;
398d6924 646}
647
93c0345b 648static int doesnt_expire_soon(u_char *tcaddr)
df4dc2b1 649{
93c0345b 650 u_int diff = (u_int)(tcaddr - out) & ((1u << TARGET_SIZE_2) - 1u);
651 return diff > EXPIRITY_OFFSET + MAX_OUTPUT_BLOCK_SIZE;
df4dc2b1 652}
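// The translation cache is reused as a ring buffer; 'diff' is the forward
// distance from the current output pointer to tcaddr, so entries that sit
// within EXPIRITY_OFFSET + MAX_OUTPUT_BLOCK_SIZE ahead of 'out' are next in
// line to be expired and are treated as if they were already gone.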
653
104df9d3 654static void *try_restore_block(u_int vaddr, u_int start_page, u_int end_page)
398d6924 655{
104df9d3 656 void *found_clean = NULL;
657 u_int i, page;
398d6924 658
ece032e6 659 stat_inc(stat_restore_tries);
104df9d3 660 for (page = start_page; page <= end_page; page++) {
661 struct block_info *block;
662 for (block = blocks[page]; block != NULL; block = block->next) {
663 if (vaddr < block->start)
664 break;
665 if (!block->is_dirty || vaddr >= block->start + block->len)
666 continue;
667 for (i = 0; i < block->jump_in_cnt; i++)
668 if (block->jump_in[i].vaddr == vaddr)
669 break;
670 if (i == block->jump_in_cnt)
671 continue;
672 assert(block->source && block->copy);
673 stat_inc(stat_restore_compares);
674 if (memcmp(block->source, block->copy, block->len))
675 continue;
398d6924 676
3280e616 677 block->is_dirty = block->inv_near_misses = 0;
104df9d3 678 found_clean = block->jump_in[i].addr;
679 hash_table_add(vaddr, found_clean);
680 mark_invalid_code(block->start, block->len, 0);
681 stat_inc(stat_bc_restore);
682 inv_debug("INV: restored %08x %p (%d)\n", vaddr, found_clean, block->jump_in_cnt);
683 return found_clean;
398d6924 684 }
398d6924 685 }
104df9d3 686 return NULL;
398d6924 687}
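// A block invalidated by a RAM write is only marked dirty, not freed; if a
// later jump lands on one of its entry points and a memcmp() shows the
// source bytes still match the saved copy, the old translation is reused
// and re-registered in the hash table instead of being recompiled.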
688
94d23bb9 689// Get address from virtual address
690// This is called from the recompiled JR/JALR instructions
104df9d3 691static void noinline *get_addr(u_int vaddr, int can_compile)
94d23bb9 692{
104df9d3 693 u_int start_page = get_page_prev(vaddr);
694 u_int i, page, end_page = get_page(vaddr);
695 void *found_clean = NULL;
398d6924 696
ece032e6 697 stat_inc(stat_jump_in_lookups);
104df9d3 698 for (page = start_page; page <= end_page; page++) {
699 const struct block_info *block;
700 for (block = blocks[page]; block != NULL; block = block->next) {
701 if (vaddr < block->start)
702 break;
703 if (block->is_dirty || vaddr >= block->start + block->len)
704 continue;
705 for (i = 0; i < block->jump_in_cnt; i++)
706 if (block->jump_in[i].vaddr == vaddr)
707 break;
708 if (i == block->jump_in_cnt)
709 continue;
710 found_clean = block->jump_in[i].addr;
711 hash_table_add(vaddr, found_clean);
712 return found_clean;
57871462 713 }
57871462 714 }
104df9d3 715 found_clean = try_restore_block(vaddr, start_page, end_page);
716 if (found_clean)
717 return found_clean;
718
719 if (!can_compile)
720 return NULL;
398d6924 721
722 int r = new_recompile_block(vaddr);
723 if (r == 0)
104df9d3 724 return ndrc_get_addr_ht(vaddr);
df4dc2b1 725
b4ab351d 726 // generate an address error
57871462 727 Status|=2;
b4ab351d 728 Cause=(vaddr<<31)|(4<<2);
57871462 729 EPC=(vaddr&1)?vaddr-5:vaddr;
730 BadVAddr=(vaddr&~1);
104df9d3 731 return ndrc_get_addr_ht(0x80000080);
57871462 732}
104df9d3 733
57871462 734// Look up address in hash table first
104df9d3 735void *ndrc_get_addr_ht_param(u_int vaddr, int can_compile)
57871462 736{
df4dc2b1 737 const struct ht_entry *ht_bin = hash_table_get(vaddr);
104df9d3 738 stat_inc(stat_ht_lookups);
df4dc2b1 739 if (ht_bin->vaddr[0] == vaddr) return ht_bin->tcaddr[0];
740 if (ht_bin->vaddr[1] == vaddr) return ht_bin->tcaddr[1];
104df9d3 741 return get_addr(vaddr, can_compile);
742}
743
744void *ndrc_get_addr_ht(u_int vaddr)
745{
746 return ndrc_get_addr_ht_param(vaddr, 1);
57871462 747}
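// Lookup order for a jump target: the 2-entry hash bucket, then the sorted
// per-page block lists (get_addr), then try_restore_block() for dirty but
// unmodified blocks, and finally new_recompile_block().  If even compilation
// fails, an address error exception is synthesized and execution continues
// at the exception vector 0x80000080.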
748
6cc8d23c 749static void clear_all_regs(signed char regmap[])
57871462 750{
6cc8d23c 751 memset(regmap, -1, sizeof(regmap[0]) * HOST_REGS);
57871462 752}
753
53358c1d 754// get_reg: get allocated host reg from mips reg
755// returns -1 if no such mips reg was allocated
cdc2da64 756#if defined(__arm__) && defined(HAVE_ARMV6) && HOST_REGS == 13 && EXCLUDE_REG == 11
757
758extern signed char get_reg(const signed char regmap[], signed char r);
759
760#else
761
9de8a0c3 762static signed char get_reg(const signed char regmap[], signed char r)
57871462 763{
764 int hr;
9de8a0c3 765 for (hr = 0; hr < HOST_REGS; hr++) {
766 if (hr == EXCLUDE_REG)
767 continue;
768 if (regmap[hr] == r)
769 return hr;
770 }
771 return -1;
772}
773
cdc2da64 774#endif
775
53358c1d 776// get reg as mask bit (1 << hr)
777static u_int get_regm(const signed char regmap[], signed char r)
778{
779 return (1u << (get_reg(regmap, r) & 31)) & ~(1u << 31);
780}
781
9de8a0c3 782static signed char get_reg_temp(const signed char regmap[])
783{
784 int hr;
785 for (hr = 0; hr < HOST_REGS; hr++) {
786 if (hr == EXCLUDE_REG)
787 continue;
788 if (regmap[hr] == (signed char)-1)
789 return hr;
790 }
57871462 791 return -1;
792}
793
794// Find a register that is available for two consecutive cycles
d1e4ebd9 795static signed char get_reg2(signed char regmap1[], const signed char regmap2[], int r)
57871462 796{
797 int hr;
798 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
799 return -1;
800}
801
53dc27f6 802// reverse reg map: mips -> host
803#define RRMAP_SIZE 64
804static void make_rregs(const signed char regmap[], signed char rrmap[RRMAP_SIZE],
805 u_int *regs_can_change)
806{
807 u_int r, hr, hr_can_change = 0;
808 memset(rrmap, -1, RRMAP_SIZE);
809 for (hr = 0; hr < HOST_REGS; )
810 {
811 r = regmap[hr];
812 rrmap[r & (RRMAP_SIZE - 1)] = hr;
813 // only add mips $1-$31+$lo, others shifted out
814 hr_can_change |= (uint64_t)1 << (hr + ((r - 1) & 32));
815 hr++;
816 if (hr == EXCLUDE_REG)
817 hr++;
818 }
819 hr_can_change |= 1u << (rrmap[33] & 31);
820 hr_can_change |= 1u << (rrmap[CCREG] & 31);
821 hr_can_change &= ~(1u << 31);
822 *regs_can_change = hr_can_change;
823}
824
825// same as get_reg, but takes rrmap
826static signed char get_rreg(signed char rrmap[RRMAP_SIZE], signed char r)
827{
828 assert(0 <= r && r < RRMAP_SIZE);
829 return rrmap[r];
830}
831
9de8a0c3 832static int count_free_regs(const signed char regmap[])
57871462 833{
834 int count=0;
835 int hr;
836 for(hr=0;hr<HOST_REGS;hr++)
837 {
838 if(hr!=EXCLUDE_REG) {
839 if(regmap[hr]<0) count++;
840 }
841 }
842 return count;
843}
844
9de8a0c3 845static void dirty_reg(struct regstat *cur, signed char reg)
57871462 846{
847 int hr;
9de8a0c3 848 if (!reg) return;
849 hr = get_reg(cur->regmap, reg);
850 if (hr >= 0)
851 cur->dirty |= 1<<hr;
57871462 852}
853
40fca85b 854static void set_const(struct regstat *cur, signed char reg, uint32_t value)
57871462 855{
856 int hr;
9de8a0c3 857 if (!reg) return;
858 hr = get_reg(cur->regmap, reg);
859 if (hr >= 0) {
860 cur->isconst |= 1<<hr;
861 current_constmap[hr] = value;
57871462 862 }
863}
864
40fca85b 865static void clear_const(struct regstat *cur, signed char reg)
57871462 866{
867 int hr;
9de8a0c3 868 if (!reg) return;
869 hr = get_reg(cur->regmap, reg);
870 if (hr >= 0)
871 cur->isconst &= ~(1<<hr);
57871462 872}
873
9de8a0c3 874static int is_const(const struct regstat *cur, signed char reg)
57871462 875{
876 int hr;
9de8a0c3 877 if (reg < 0) return 0;
878 if (!reg) return 1;
879 hr = get_reg(cur->regmap, reg);
880 if (hr >= 0)
881 return (cur->isconst>>hr)&1;
57871462 882 return 0;
883}
40fca85b 884
9de8a0c3 885static uint32_t get_const(const struct regstat *cur, signed char reg)
57871462 886{
887 int hr;
9de8a0c3 888 if (!reg) return 0;
889 hr = get_reg(cur->regmap, reg);
890 if (hr >= 0)
891 return current_constmap[hr];
892
893 SysPrintf("Unknown constant in r%d\n", reg);
7c3a5182 894 abort();
57871462 895}
896
897// Least soon needed registers
898// Look at the next ten instructions and see which registers
899// will be used. Try not to reallocate these.
4149788d 900static void lsn(u_char hsn[], int i, int *preferred_reg)
57871462 901{
902 int j;
903 int b=-1;
904 for(j=0;j<9;j++)
905 {
906 if(i+j>=slen) {
907 j=slen-i-1;
908 break;
909 }
fe807a8a 910 if (dops[i+j].is_ujump)
57871462 911 {
 912 // Don't go past an unconditional jump
913 j++;
914 break;
915 }
916 }
917 for(;j>=0;j--)
918 {
cf95b4f0 919 if(dops[i+j].rs1) hsn[dops[i+j].rs1]=j;
920 if(dops[i+j].rs2) hsn[dops[i+j].rs2]=j;
921 if(dops[i+j].rt1) hsn[dops[i+j].rt1]=j;
922 if(dops[i+j].rt2) hsn[dops[i+j].rt2]=j;
923 if(dops[i+j].itype==STORE || dops[i+j].itype==STORELR) {
57871462 924 // Stores can allocate zero
cf95b4f0 925 hsn[dops[i+j].rs1]=j;
926 hsn[dops[i+j].rs2]=j;
57871462 927 }
37387d8b 928 if (ram_offset && (dops[i+j].is_load || dops[i+j].is_store))
929 hsn[ROREG] = j;
57871462 930 // On some architectures stores need invc_ptr
931 #if defined(HOST_IMM8)
37387d8b 932 if (dops[i+j].is_store)
933 hsn[INVCP] = j;
57871462 934 #endif
cf95b4f0 935 if(i+j>=0&&(dops[i+j].itype==UJUMP||dops[i+j].itype==CJUMP||dops[i+j].itype==SJUMP))
57871462 936 {
937 hsn[CCREG]=j;
938 b=j;
939 }
940 }
941 if(b>=0)
942 {
943 if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
944 {
945 // Follow first branch
946 int t=(ba[i+b]-start)>>2;
947 j=7-b;if(t+j>=slen) j=slen-t-1;
948 for(;j>=0;j--)
949 {
cf95b4f0 950 if(dops[t+j].rs1) if(hsn[dops[t+j].rs1]>j+b+2) hsn[dops[t+j].rs1]=j+b+2;
951 if(dops[t+j].rs2) if(hsn[dops[t+j].rs2]>j+b+2) hsn[dops[t+j].rs2]=j+b+2;
952 //if(dops[t+j].rt1) if(hsn[dops[t+j].rt1]>j+b+2) hsn[dops[t+j].rt1]=j+b+2;
953 //if(dops[t+j].rt2) if(hsn[dops[t+j].rt2]>j+b+2) hsn[dops[t+j].rt2]=j+b+2;
57871462 954 }
955 }
956 // TODO: preferred register based on backward branch
957 }
958 // Delay slot should preferably not overwrite branch conditions or cycle count
fe807a8a 959 if (i > 0 && dops[i-1].is_jump) {
cf95b4f0 960 if(dops[i-1].rs1) if(hsn[dops[i-1].rs1]>1) hsn[dops[i-1].rs1]=1;
961 if(dops[i-1].rs2) if(hsn[dops[i-1].rs2]>1) hsn[dops[i-1].rs2]=1;
57871462 962 hsn[CCREG]=1;
963 // ...or hash tables
964 hsn[RHASH]=1;
965 hsn[RHTBL]=1;
966 }
967 // Coprocessor load/store needs FTEMP, even if not declared
37387d8b 968 if(dops[i].itype==C2LS) {
57871462 969 hsn[FTEMP]=0;
970 }
971 // Load L/R also uses FTEMP as a temporary register
cf95b4f0 972 if(dops[i].itype==LOADLR) {
57871462 973 hsn[FTEMP]=0;
974 }
b7918751 975 // Also SWL/SWR/SDL/SDR
cf95b4f0 976 if(dops[i].opcode==0x2a||dops[i].opcode==0x2e||dops[i].opcode==0x2c||dops[i].opcode==0x2d) {
57871462 977 hsn[FTEMP]=0;
978 }
57871462 979 // Don't remove the miniht registers
cf95b4f0 980 if(dops[i].itype==UJUMP||dops[i].itype==RJUMP)
57871462 981 {
982 hsn[RHASH]=0;
983 hsn[RHTBL]=0;
984 }
985}
986
987// We only want to allocate registers if we're going to use them again soon
4149788d 988static int needed_again(int r, int i)
57871462 989{
990 int j;
991 int b=-1;
992 int rn=10;
9f51b4b9 993
fe807a8a 994 if (i > 0 && dops[i-1].is_ujump)
57871462 995 {
996 if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
997 return 0; // Don't need any registers if exiting the block
998 }
999 for(j=0;j<9;j++)
1000 {
1001 if(i+j>=slen) {
1002 j=slen-i-1;
1003 break;
1004 }
fe807a8a 1005 if (dops[i+j].is_ujump)
57871462 1006 {
 1007 // Don't go past an unconditional jump
1008 j++;
1009 break;
1010 }
cf95b4f0 1011 if(dops[i+j].itype==SYSCALL||dops[i+j].itype==HLECALL||dops[i+j].itype==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
57871462 1012 {
1013 break;
1014 }
1015 }
1016 for(;j>=1;j--)
1017 {
cf95b4f0 1018 if(dops[i+j].rs1==r) rn=j;
1019 if(dops[i+j].rs2==r) rn=j;
57871462 1020 if((unneeded_reg[i+j]>>r)&1) rn=10;
cf95b4f0 1021 if(i+j>=0&&(dops[i+j].itype==UJUMP||dops[i+j].itype==CJUMP||dops[i+j].itype==SJUMP))
57871462 1022 {
1023 b=j;
1024 }
1025 }
b7217e13 1026 if(rn<10) return 1;
581335b0 1027 (void)b;
57871462 1028 return 0;
1029}
1030
1031// Try to match register allocations at the end of a loop with those
1032// at the beginning
4149788d 1033static int loop_reg(int i, int r, int hr)
57871462 1034{
1035 int j,k;
1036 for(j=0;j<9;j++)
1037 {
1038 if(i+j>=slen) {
1039 j=slen-i-1;
1040 break;
1041 }
fe807a8a 1042 if (dops[i+j].is_ujump)
57871462 1043 {
 1044 // Don't go past an unconditional jump
1045 j++;
1046 break;
1047 }
1048 }
1049 k=0;
1050 if(i>0){
cf95b4f0 1051 if(dops[i-1].itype==UJUMP||dops[i-1].itype==CJUMP||dops[i-1].itype==SJUMP)
57871462 1052 k--;
1053 }
1054 for(;k<j;k++)
1055 {
00fa9369 1056 assert(r < 64);
1057 if((unneeded_reg[i+k]>>r)&1) return hr;
cf95b4f0 1058 if(i+k>=0&&(dops[i+k].itype==UJUMP||dops[i+k].itype==CJUMP||dops[i+k].itype==SJUMP))
57871462 1059 {
1060 if(ba[i+k]>=start && ba[i+k]<(start+i*4))
1061 {
1062 int t=(ba[i+k]-start)>>2;
1063 int reg=get_reg(regs[t].regmap_entry,r);
1064 if(reg>=0) return reg;
1065 //reg=get_reg(regs[t+1].regmap_entry,r);
1066 //if(reg>=0) return reg;
1067 }
1068 }
1069 }
1070 return hr;
1071}
1072
1073
1074// Allocate every register, preserving source/target regs
4149788d 1075static void alloc_all(struct regstat *cur,int i)
57871462 1076{
1077 int hr;
9f51b4b9 1078
57871462 1079 for(hr=0;hr<HOST_REGS;hr++) {
1080 if(hr!=EXCLUDE_REG) {
9de8a0c3 1081 if((cur->regmap[hr]!=dops[i].rs1)&&(cur->regmap[hr]!=dops[i].rs2)&&
1082 (cur->regmap[hr]!=dops[i].rt1)&&(cur->regmap[hr]!=dops[i].rt2))
57871462 1083 {
1084 cur->regmap[hr]=-1;
1085 cur->dirty&=~(1<<hr);
1086 }
1087 // Don't need zeros
9de8a0c3 1088 if(cur->regmap[hr]==0)
57871462 1089 {
1090 cur->regmap[hr]=-1;
1091 cur->dirty&=~(1<<hr);
1092 }
1093 }
1094 }
1095}
1096
d1e4ebd9 1097#ifndef NDEBUG
1098static int host_tempreg_in_use;
1099
1100static void host_tempreg_acquire(void)
1101{
1102 assert(!host_tempreg_in_use);
1103 host_tempreg_in_use = 1;
1104}
1105
1106static void host_tempreg_release(void)
1107{
1108 host_tempreg_in_use = 0;
1109}
1110#else
1111static void host_tempreg_acquire(void) {}
1112static void host_tempreg_release(void) {}
1113#endif
1114
32631e6a 1115#ifdef ASSEM_PRINT
8062d65a 1116extern void gen_interupt();
1117extern void do_insn_cmp();
d1e4ebd9 1118#define FUNCNAME(f) { f, " " #f }
8062d65a 1119static const struct {
d1e4ebd9 1120 void *addr;
8062d65a 1121 const char *name;
1122} function_names[] = {
1123 FUNCNAME(cc_interrupt),
1124 FUNCNAME(gen_interupt),
104df9d3 1125 FUNCNAME(ndrc_get_addr_ht),
8062d65a 1126 FUNCNAME(jump_handler_read8),
1127 FUNCNAME(jump_handler_read16),
1128 FUNCNAME(jump_handler_read32),
1129 FUNCNAME(jump_handler_write8),
1130 FUNCNAME(jump_handler_write16),
1131 FUNCNAME(jump_handler_write32),
104df9d3 1132 FUNCNAME(ndrc_invalidate_addr),
3968e69e 1133 FUNCNAME(jump_to_new_pc),
d1150cd6 1134 FUNCNAME(jump_break),
1135 FUNCNAME(jump_break_ds),
1136 FUNCNAME(jump_syscall),
1137 FUNCNAME(jump_syscall_ds),
81dbbf4c 1138 FUNCNAME(call_gteStall),
8062d65a 1139 FUNCNAME(new_dyna_leave),
1140 FUNCNAME(pcsx_mtc0),
1141 FUNCNAME(pcsx_mtc0_ds),
32631e6a 1142#ifdef DRC_DBG
8062d65a 1143 FUNCNAME(do_insn_cmp),
32631e6a 1144#endif
8062d65a 1145};
1146
d1e4ebd9 1147static const char *func_name(const void *a)
8062d65a 1148{
1149 int i;
1150 for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++)
1151 if (function_names[i].addr == a)
1152 return function_names[i].name;
1153 return "";
1154}
1155#else
1156#define func_name(x) ""
1157#endif
1158
57871462 1159#ifdef __i386__
1160#include "assem_x86.c"
1161#endif
1162#ifdef __x86_64__
1163#include "assem_x64.c"
1164#endif
1165#ifdef __arm__
1166#include "assem_arm.c"
1167#endif
be516ebe 1168#ifdef __aarch64__
1169#include "assem_arm64.c"
1170#endif
57871462 1171
2a014d73 1172static void *get_trampoline(const void *f)
1173{
1174 size_t i;
1175
1176 for (i = 0; i < ARRAY_SIZE(ndrc->tramp.f); i++) {
1177 if (ndrc->tramp.f[i] == f || ndrc->tramp.f[i] == NULL)
1178 break;
1179 }
1180 if (i == ARRAY_SIZE(ndrc->tramp.f)) {
1181 SysPrintf("trampoline table is full, last func %p\n", f);
1182 abort();
1183 }
1184 if (ndrc->tramp.f[i] == NULL) {
3039c914 1185 const void **d = start_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
1186 *d = f;
2a014d73 1187 end_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
1188 }
1189 return &ndrc->tramp.ops[i];
1190}
1191
1192static void emit_far_jump(const void *f)
1193{
1194 if (can_jump_or_call(f)) {
1195 emit_jmp(f);
1196 return;
1197 }
1198
1199 f = get_trampoline(f);
1200 emit_jmp(f);
1201}
1202
1203static void emit_far_call(const void *f)
1204{
1205 if (can_jump_or_call(f)) {
1206 emit_call(f);
1207 return;
1208 }
1209
1210 f = get_trampoline(f);
1211 emit_call(f);
1212}
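// emit_far_jump()/emit_far_call() fall back to a trampoline slot (see
// get_trampoline() above) whenever can_jump_or_call() reports the target is
// out of direct branch range; the trampoline lives inside the ndrc mapping,
// so the generated branch itself always stays short-range.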
1213
57871462 1214// Check if an address is already compiled
1215// but don't return addresses which are about to expire from the cache
4149788d 1216static void *check_addr(u_int vaddr)
57871462 1217{
df4dc2b1 1218 struct ht_entry *ht_bin = hash_table_get(vaddr);
1219 size_t i;
b14b6a8f 1220 for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
df4dc2b1 1221 if (ht_bin->vaddr[i] == vaddr)
93c0345b 1222 if (doesnt_expire_soon(ht_bin->tcaddr[i]))
104df9d3 1223 return ht_bin->tcaddr[i];
57871462 1224 }
104df9d3 1225
1226 // refactor to get_addr_nocompile?
1227 u_int start_page = get_page_prev(vaddr);
1228 u_int page, end_page = get_page(vaddr);
1229
1230 stat_inc(stat_jump_in_lookups);
1231 for (page = start_page; page <= end_page; page++) {
1232 const struct block_info *block;
1233 for (block = blocks[page]; block != NULL; block = block->next) {
1234 if (vaddr < block->start)
1235 break;
1236 if (block->is_dirty || vaddr >= block->start + block->len)
1237 continue;
1238 if (!doesnt_expire_soon(ndrc->translation_cache + block->tc_offs))
1239 continue;
1240 for (i = 0; i < block->jump_in_cnt; i++)
1241 if (block->jump_in[i].vaddr == vaddr)
1242 break;
1243 if (i == block->jump_in_cnt)
1244 continue;
1245
1246 // Update existing entry with current address
1247 void *addr = block->jump_in[i].addr;
1248 if (ht_bin->vaddr[0] == vaddr) {
1249 ht_bin->tcaddr[0] = addr;
1250 return addr;
1251 }
1252 if (ht_bin->vaddr[1] == vaddr) {
1253 ht_bin->tcaddr[1] = addr;
1254 return addr;
1255 }
1256 // Insert into hash table with low priority.
1257 // Don't evict existing entries, as they are probably
1258 // addresses that are being accessed frequently.
1259 if (ht_bin->vaddr[0] == -1) {
1260 ht_bin->vaddr[0] = vaddr;
1261 ht_bin->tcaddr[0] = addr;
57871462 1262 }
104df9d3 1263 else if (ht_bin->vaddr[1] == -1) {
1264 ht_bin->vaddr[1] = vaddr;
1265 ht_bin->tcaddr[1] = addr;
1266 }
1267 return addr;
57871462 1268 }
57871462 1269 }
104df9d3 1270 return NULL;
57871462 1271}
1272
104df9d3 1273static void blocks_clear(struct block_info **head)
1274{
1275 struct block_info *cur, *next;
1276
1277 if ((cur = *head)) {
1278 *head = NULL;
1279 while (cur) {
1280 next = cur->next;
1281 free(cur);
1282 cur = next;
1283 }
1284 }
1285}
1286
93c0345b 1287static int blocks_remove_matching_addrs(struct block_info **head,
1288 u_int base_offs, int shift)
104df9d3 1289{
1290 struct block_info *next;
93c0345b 1291 int hit = 0;
104df9d3 1292 while (*head) {
93c0345b 1293 if ((((*head)->tc_offs ^ base_offs) >> shift) == 0) {
1294 inv_debug("EXP: rm block %08x (tc_offs %zx)\n", (*head)->start, (*head)->tc_offs);
104df9d3 1295 invalidate_block(*head);
1296 next = (*head)->next;
1297 free(*head);
1298 *head = next;
1299 stat_dec(stat_blocks);
93c0345b 1300 hit = 1;
104df9d3 1301 }
1302 else
1303 {
1304 head = &((*head)->next);
1305 }
1306 }
93c0345b 1307 return hit;
104df9d3 1308}
57871462 1309
1310// This is called when we write to a compiled block (see do_invstub)
b7ad2f2c 1311static void unlink_jumps_vaddr_range(u_int start, u_int end)
57871462 1312{
104df9d3 1313 u_int page, start_page = get_page(start), end_page = get_page(end - 1);
b7ad2f2c 1314 int i;
104df9d3 1315
1316 for (page = start_page; page <= end_page; page++) {
b7ad2f2c 1317 struct jump_info *ji = jumps[page];
1318 if (ji == NULL)
1319 continue;
1320 for (i = 0; i < ji->count; ) {
1321 if (ji->e[i].target_vaddr < start || ji->e[i].target_vaddr >= end) {
1322 i++;
104df9d3 1323 continue;
1324 }
b7ad2f2c 1325
1326 inv_debug("INV: rm link to %08x (tc_offs %zx)\n", ji->e[i].target_vaddr,
1327 (u_char *)ji->e[i].stub - ndrc->translation_cache);
1328 void *host_addr = find_extjump_insn(ji->e[i].stub);
104df9d3 1329 mark_clear_cache(host_addr);
b7ad2f2c 1330 set_jump_target(host_addr, ji->e[i].stub); // point back to dyna_linker stub
104df9d3 1331
104df9d3 1332 stat_dec(stat_links);
b7ad2f2c 1333 ji->count--;
1334 if (i < ji->count) {
1335 ji->e[i] = ji->e[ji->count];
1336 continue;
1337 }
1338 i++;
1339 }
1340 }
1341}
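// Unlinking: every direct branch that was patched to jump straight into
// another block has a record in jumps[page]; when its target range is
// invalidated, find_extjump_insn() locates the patched branch and
// set_jump_target() points it back at the dyna_linker stub so the link is
// re-resolved (or recompiled) on the next execution.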
1342
1343static void unlink_jumps_tc_range(struct jump_info *ji, u_int base_offs, int shift)
1344{
1345 int i;
1346 if (ji == NULL)
1347 return;
1348 for (i = 0; i < ji->count; ) {
1349 u_int tc_offs = (u_char *)ji->e[i].stub - ndrc->translation_cache;
1350 if (((tc_offs ^ base_offs) >> shift) != 0) {
1351 i++;
1352 continue;
1353 }
1354
1355 inv_debug("EXP: rm link to %08x (tc_offs %zx)\n", ji->e[i].target_vaddr, tc_offs);
1356 stat_dec(stat_links);
1357 ji->count--;
1358 if (i < ji->count) {
1359 ji->e[i] = ji->e[ji->count];
1360 continue;
104df9d3 1361 }
b7ad2f2c 1362 i++;
57871462 1363 }
104df9d3 1364}
9f51b4b9 1365
104df9d3 1366static void invalidate_block(struct block_info *block)
1367{
1368 u_int i;
f76eeef9 1369
104df9d3 1370 block->is_dirty = 1;
b7ad2f2c 1371 unlink_jumps_vaddr_range(block->start, block->start + block->len);
104df9d3 1372 for (i = 0; i < block->jump_in_cnt; i++)
1373 hash_table_remove(block->jump_in[i].vaddr);
57871462 1374}
9be4ba64 1375
104df9d3 1376static int invalidate_range(u_int start, u_int end,
1377 u32 *inv_start_ret, u32 *inv_end_ret)
9be4ba64 1378{
3280e616 1379 struct block_info *last_block = NULL;
104df9d3 1380 u_int start_page = get_page_prev(start);
1381 u_int end_page = get_page(end - 1);
1382 u_int start_m = pmmask(start);
ab4377be 1383 u_int end_m = pmmask(end - 1);
104df9d3 1384 u_int inv_start, inv_end;
1385 u_int blk_start_m, blk_end_m;
1386 u_int page;
1387 int hit = 0;
1388
1389 // additional area without code (to supplement invalid_code[]), [start, end)
1390 // avoids excessive ndrc_invalidate_addr() calls
1391 inv_start = start_m & ~0xfff;
1392 inv_end = end_m | 0xfff;
1393
1394 for (page = start_page; page <= end_page; page++) {
1395 struct block_info *block;
1396 for (block = blocks[page]; block != NULL; block = block->next) {
1397 if (block->is_dirty)
1398 continue;
3280e616 1399 last_block = block;
104df9d3 1400 blk_end_m = pmmask(block->start + block->len);
1401 if (blk_end_m <= start_m) {
1402 inv_start = max(inv_start, blk_end_m);
1403 continue;
1404 }
1405 blk_start_m = pmmask(block->start);
1406 if (end_m <= blk_start_m) {
1407 inv_end = min(inv_end, blk_start_m - 1);
1408 continue;
9be4ba64 1409 }
104df9d3 1410 if (!block->source) // "hack" block - leave it alone
1411 continue;
1412
1413 hit++;
1414 invalidate_block(block);
1415 stat_inc(stat_inv_hits);
9be4ba64 1416 }
9be4ba64 1417 }
104df9d3 1418
3280e616 1419 if (!hit && last_block && last_block->source) {
1420 // could be some leftover unused block, uselessly trapping writes
1421 last_block->inv_near_misses++;
1422 if (last_block->inv_near_misses > 128) {
1423 invalidate_block(last_block);
1424 stat_inc(stat_inv_hits);
1425 hit++;
1426 }
1427 }
104df9d3 1428 if (hit) {
1429 do_clear_cache();
1430#ifdef USE_MINI_HT
1431 memset(mini_ht, -1, sizeof(mini_ht));
1432#endif
1433 }
3280e616 1434
104df9d3 1435 if (inv_start <= (start_m & ~0xfff) && inv_end >= (start_m | 0xfff))
1436 // the whole page is empty now
1437 mark_invalid_code(start, 1, 1);
1438
1439 if (inv_start_ret) *inv_start_ret = inv_start | (start & 0xe0000000);
1440 if (inv_end_ret) *inv_end_ret = inv_end | (end & 0xe0000000);
1441 return hit;
9be4ba64 1442}
1443
104df9d3 1444void new_dynarec_invalidate_range(unsigned int start, unsigned int end)
1445{
1446 invalidate_range(start, end, NULL, NULL);
1447}
1448
1449void ndrc_invalidate_addr(u_int addr)
57871462 1450{
9be4ba64 1451 // this check is done by the caller
1452 //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
104df9d3 1453 int ret = invalidate_range(addr, addr + 4, &inv_code_start, &inv_code_end);
1454 if (ret)
1455 inv_debug("INV ADDR: %08x hit %d blocks\n", addr, ret);
1456 else
1457 inv_debug("INV ADDR: %08x miss, inv %08x-%08x\n", addr, inv_code_start, inv_code_end);
ece032e6 1458 stat_inc(stat_inv_addr_calls);
57871462 1459}
9be4ba64 1460
dd3a91a1 1461// This is called when loading a save state.
1462// Anything could have changed, so invalidate everything.
104df9d3 1463void new_dynarec_invalidate_all_pages(void)
57871462 1464{
104df9d3 1465 struct block_info *block;
581335b0 1466 u_int page;
104df9d3 1467 for (page = 0; page < ARRAY_SIZE(blocks); page++) {
1468 for (block = blocks[page]; block != NULL; block = block->next) {
1469 if (block->is_dirty)
1470 continue;
1471 if (!block->source) // hack block?
1472 continue;
1473 invalidate_block(block);
1474 }
1475 }
1476
57871462 1477 #ifdef USE_MINI_HT
93c0345b 1478 memset(mini_ht, -1, sizeof(mini_ht));
57871462 1479 #endif
919981d0 1480 do_clear_cache();
57871462 1481}
1482
d1e4ebd9 1483static void do_invstub(int n)
1484{
1485 literal_pool(20);
882a08fc 1486 u_int reglist = stubs[n].a;
d1e4ebd9 1487 set_jump_target(stubs[n].addr, out);
1488 save_regs(reglist);
882a08fc 1489 if (stubs[n].b != 0)
1490 emit_mov(stubs[n].b, 0);
1491 emit_readword(&inv_code_start, 1);
1492 emit_readword(&inv_code_end, 2);
1493 emit_cmp(0, 1);
1494 emit_cmpcs(2, 0);
1495 void *jaddr = out;
1496 emit_jc(0);
104df9d3 1497 emit_far_call(ndrc_invalidate_addr);
882a08fc 1498 set_jump_target(jaddr, out);
d1e4ebd9 1499 restore_regs(reglist);
1500 emit_jmp(stubs[n].retaddr); // return address
1501}
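// do_invstub() assembles the slow path taken by store instructions: the
// written address is compared against the cached [inv_code_start, inv_code_end]
// range known to contain no compiled code, and only addresses outside it
// trigger a far call to ndrc_invalidate_addr(), which in turn widens that
// range on a miss.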
1502
57871462 1503// Add an entry to jump_out after making a link
104df9d3 1504// src should point to code emitted by emit_extjump()
b7ad2f2c 1505void ndrc_add_jump_out(u_int vaddr, void *src)
57871462 1506{
b7ad2f2c 1507 inv_debug("ndrc_add_jump_out: %p -> %x\n", src, vaddr);
1508 u_int page = get_page(vaddr);
1509 struct jump_info *ji;
1510
104df9d3 1511 stat_inc(stat_links);
b7ad2f2c 1512 check_extjump2(src);
1513 ji = jumps[page];
1514 if (ji == NULL) {
1515 ji = malloc(sizeof(*ji) + sizeof(ji->e[0]) * 16);
1516 ji->alloc = 16;
1517 ji->count = 0;
1518 }
1519 else if (ji->count >= ji->alloc) {
1520 ji->alloc += 16;
1521 ji = realloc(ji, sizeof(*ji) + sizeof(ji->e[0]) * ji->alloc);
1522 }
1523 jumps[page] = ji;
1524 ji->e[ji->count].target_vaddr = vaddr;
1525 ji->e[ji->count].stub = src;
1526 ji->count++;
57871462 1527}
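// Outgoing links are kept in per-page growable arrays (struct jump_info),
// extended 16 entries at a time; unlink_jumps_vaddr_range() and
// unlink_jumps_tc_range() above consume these records when blocks are
// invalidated or expired.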
1528
8062d65a 1529/* Register allocation */
1530
1531// Note: registers are allocated clean (unmodified state)
1532// if you intend to modify the register, you must call dirty_reg().
1533static void alloc_reg(struct regstat *cur,int i,signed char reg)
1534{
1535 int r,hr;
b7ec323c 1536 int preferred_reg = PREFERRED_REG_FIRST
1537 + reg % (PREFERRED_REG_LAST - PREFERRED_REG_FIRST + 1);
1538 if (reg == CCREG) preferred_reg = HOST_CCREG;
1539 if (reg == PTEMP || reg == FTEMP) preferred_reg = 12;
1540 assert(PREFERRED_REG_FIRST != EXCLUDE_REG && EXCLUDE_REG != HOST_REGS);
53358c1d 1541 assert(reg >= 0);
8062d65a 1542
1543 // Don't allocate unused registers
1544 if((cur->u>>reg)&1) return;
1545
1546 // see if it's already allocated
53358c1d 1547 if (get_reg(cur->regmap, reg) >= 0)
1548 return;
8062d65a 1549
1550 // Keep the same mapping if the register was already allocated in a loop
1551 preferred_reg = loop_reg(i,reg,preferred_reg);
1552
1553 // Try to allocate the preferred register
1554 if(cur->regmap[preferred_reg]==-1) {
1555 cur->regmap[preferred_reg]=reg;
1556 cur->dirty&=~(1<<preferred_reg);
1557 cur->isconst&=~(1<<preferred_reg);
1558 return;
1559 }
1560 r=cur->regmap[preferred_reg];
1561 assert(r < 64);
1562 if((cur->u>>r)&1) {
1563 cur->regmap[preferred_reg]=reg;
1564 cur->dirty&=~(1<<preferred_reg);
1565 cur->isconst&=~(1<<preferred_reg);
1566 return;
1567 }
1568
1569 // Clear any unneeded registers
1570 // We try to keep the mapping consistent, if possible, because it
1571 // makes branches easier (especially loops). So we try to allocate
1572 // first (see above) before removing old mappings. If this is not
1573 // possible then go ahead and clear out the registers that are no
1574 // longer needed.
1575 for(hr=0;hr<HOST_REGS;hr++)
1576 {
1577 r=cur->regmap[hr];
1578 if(r>=0) {
1579 assert(r < 64);
1580 if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
1581 }
1582 }
b7ec323c 1583
8062d65a 1584 // Try to allocate any available register, but prefer
1585 // registers that have not been used recently.
b7ec323c 1586 if (i > 0) {
1587 for (hr = PREFERRED_REG_FIRST; ; ) {
1588 if (cur->regmap[hr] < 0) {
1589 int oldreg = regs[i-1].regmap[hr];
1590 if (oldreg < 0 || (oldreg != dops[i-1].rs1 && oldreg != dops[i-1].rs2
1591 && oldreg != dops[i-1].rt1 && oldreg != dops[i-1].rt2))
1592 {
8062d65a 1593 cur->regmap[hr]=reg;
1594 cur->dirty&=~(1<<hr);
1595 cur->isconst&=~(1<<hr);
1596 return;
1597 }
1598 }
b7ec323c 1599 hr++;
1600 if (hr == EXCLUDE_REG)
1601 hr++;
1602 if (hr == HOST_REGS)
1603 hr = 0;
1604 if (hr == PREFERRED_REG_FIRST)
1605 break;
8062d65a 1606 }
1607 }
b7ec323c 1608
8062d65a 1609 // Try to allocate any available register
b7ec323c 1610 for (hr = PREFERRED_REG_FIRST; ; ) {
1611 if (cur->regmap[hr] < 0) {
8062d65a 1612 cur->regmap[hr]=reg;
1613 cur->dirty&=~(1<<hr);
1614 cur->isconst&=~(1<<hr);
1615 return;
1616 }
b7ec323c 1617 hr++;
1618 if (hr == EXCLUDE_REG)
1619 hr++;
1620 if (hr == HOST_REGS)
1621 hr = 0;
1622 if (hr == PREFERRED_REG_FIRST)
1623 break;
8062d65a 1624 }
1625
1626 // Ok, now we have to evict someone
1627 // Pick a register we hopefully won't need soon
1628 u_char hsn[MAXREG+1];
1629 memset(hsn,10,sizeof(hsn));
1630 int j;
1631 lsn(hsn,i,&preferred_reg);
1632 //printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
1633 //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1634 if(i>0) {
1635 // Don't evict the cycle count at entry points, otherwise the entry
1636 // stub will have to write it.
cf95b4f0 1637 if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
fe807a8a 1638 if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
8062d65a 1639 for(j=10;j>=3;j--)
1640 {
1641 // Alloc preferred register if available
1642 if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
1643 for(hr=0;hr<HOST_REGS;hr++) {
1644 // Evict both parts of a 64-bit register
9de8a0c3 1645 if(cur->regmap[hr]==r) {
8062d65a 1646 cur->regmap[hr]=-1;
1647 cur->dirty&=~(1<<hr);
1648 cur->isconst&=~(1<<hr);
1649 }
1650 }
1651 cur->regmap[preferred_reg]=reg;
1652 return;
1653 }
1654 for(r=1;r<=MAXREG;r++)
1655 {
cf95b4f0 1656 if(hsn[r]==j&&r!=dops[i-1].rs1&&r!=dops[i-1].rs2&&r!=dops[i-1].rt1&&r!=dops[i-1].rt2) {
8062d65a 1657 for(hr=0;hr<HOST_REGS;hr++) {
1658 if(hr!=HOST_CCREG||j<hsn[CCREG]) {
1659 if(cur->regmap[hr]==r) {
1660 cur->regmap[hr]=reg;
1661 cur->dirty&=~(1<<hr);
1662 cur->isconst&=~(1<<hr);
1663 return;
1664 }
1665 }
1666 }
1667 }
1668 }
1669 }
1670 }
1671 for(j=10;j>=0;j--)
1672 {
1673 for(r=1;r<=MAXREG;r++)
1674 {
1675 if(hsn[r]==j) {
8062d65a 1676 for(hr=0;hr<HOST_REGS;hr++) {
1677 if(cur->regmap[hr]==r) {
1678 cur->regmap[hr]=reg;
1679 cur->dirty&=~(1<<hr);
1680 cur->isconst&=~(1<<hr);
1681 return;
1682 }
1683 }
1684 }
1685 }
1686 }
7c3a5182 1687 SysPrintf("This shouldn't happen (alloc_reg)");abort();
8062d65a 1688}
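// alloc_reg() preference order: keep an existing mapping, then the
// reg-number-derived preferred host register, then (roughly) any free
// register the previous instruction did not just use, then any free
// register at all, and finally evict whichever register lsn() predicts is
// needed furthest in the future, while avoiding eviction of the cycle
// count at block entry points.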
1689
1690// Allocate a temporary register. This is done without regard to
1691// dirty status or whether the register we request is on the unneeded list
1692// Note: This will only allocate one register, even if called multiple times
1693static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
1694{
1695 int r,hr;
1696 int preferred_reg = -1;
1697
1698 // see if it's already allocated
1699 for(hr=0;hr<HOST_REGS;hr++)
1700 {
1701 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;
1702 }
1703
1704 // Try to allocate any available register
1705 for(hr=HOST_REGS-1;hr>=0;hr--) {
1706 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1707 cur->regmap[hr]=reg;
1708 cur->dirty&=~(1<<hr);
1709 cur->isconst&=~(1<<hr);
1710 return;
1711 }
1712 }
1713
1714 // Find an unneeded register
1715 for(hr=HOST_REGS-1;hr>=0;hr--)
1716 {
1717 r=cur->regmap[hr];
1718 if(r>=0) {
1719 assert(r < 64);
1720 if((cur->u>>r)&1) {
1721 if(i==0||((unneeded_reg[i-1]>>r)&1)) {
1722 cur->regmap[hr]=reg;
1723 cur->dirty&=~(1<<hr);
1724 cur->isconst&=~(1<<hr);
1725 return;
1726 }
1727 }
1728 }
1729 }
1730
1731 // Ok, now we have to evict someone
1732 // Pick a register we hopefully won't need soon
1733 // TODO: we might want to follow unconditional jumps here
1734 // TODO: get rid of dupe code and make this into a function
1735 u_char hsn[MAXREG+1];
1736 memset(hsn,10,sizeof(hsn));
1737 int j;
1738 lsn(hsn,i,&preferred_reg);
1739 //printf("hsn: %d %d %d %d %d %d %d\n",hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1740 if(i>0) {
1741 // Don't evict the cycle count at entry points, otherwise the entry
1742 // stub will have to write it.
cf95b4f0 1743 if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
fe807a8a 1744 if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
8062d65a 1745 for(j=10;j>=3;j--)
1746 {
1747 for(r=1;r<=MAXREG;r++)
1748 {
cf95b4f0 1749 if(hsn[r]==j&&r!=dops[i-1].rs1&&r!=dops[i-1].rs2&&r!=dops[i-1].rt1&&r!=dops[i-1].rt2) {
8062d65a 1750 for(hr=0;hr<HOST_REGS;hr++) {
1751 if(hr!=HOST_CCREG||hsn[CCREG]>2) {
1752 if(cur->regmap[hr]==r) {
1753 cur->regmap[hr]=reg;
1754 cur->dirty&=~(1<<hr);
1755 cur->isconst&=~(1<<hr);
1756 return;
1757 }
1758 }
1759 }
1760 }
1761 }
1762 }
1763 }
1764 for(j=10;j>=0;j--)
1765 {
1766 for(r=1;r<=MAXREG;r++)
1767 {
1768 if(hsn[r]==j) {
8062d65a 1769 for(hr=0;hr<HOST_REGS;hr++) {
1770 if(cur->regmap[hr]==r) {
1771 cur->regmap[hr]=reg;
1772 cur->dirty&=~(1<<hr);
1773 cur->isconst&=~(1<<hr);
1774 return;
1775 }
1776 }
1777 }
1778 }
1779 }
7c3a5182 1780 SysPrintf("This shouldn't happen");abort();
8062d65a 1781}
1782
ad49de89 1783static void mov_alloc(struct regstat *current,int i)
57871462 1784{
cf95b4f0 1785 if (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG) {
9a3ccfeb 1786 alloc_cc(current,i); // for stalls
1787 dirty_reg(current,CCREG);
32631e6a 1788 }
1789
57871462 1790 // Note: Don't need to actually alloc the source registers
cf95b4f0 1791 //alloc_reg(current,i,dops[i].rs1);
1792 alloc_reg(current,i,dops[i].rt1);
ad49de89 1793
cf95b4f0 1794 clear_const(current,dops[i].rs1);
1795 clear_const(current,dops[i].rt1);
1796 dirty_reg(current,dops[i].rt1);
57871462 1797}
1798
ad49de89 1799static void shiftimm_alloc(struct regstat *current,int i)
57871462 1800{
cf95b4f0 1801 if(dops[i].opcode2<=0x3) // SLL/SRL/SRA
57871462 1802 {
cf95b4f0 1803 if(dops[i].rt1) {
1804 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
53dc27f6 1805 else dops[i].use_lt1=!!dops[i].rs1;
cf95b4f0 1806 alloc_reg(current,i,dops[i].rt1);
1807 dirty_reg(current,dops[i].rt1);
1808 if(is_const(current,dops[i].rs1)) {
1809 int v=get_const(current,dops[i].rs1);
1810 if(dops[i].opcode2==0x00) set_const(current,dops[i].rt1,v<<imm[i]);
1811 if(dops[i].opcode2==0x02) set_const(current,dops[i].rt1,(u_int)v>>imm[i]);
1812 if(dops[i].opcode2==0x03) set_const(current,dops[i].rt1,v>>imm[i]);
dc49e339 1813 }
cf95b4f0 1814 else clear_const(current,dops[i].rt1);
57871462 1815 }
1816 }
dc49e339 1817 else
1818 {
cf95b4f0 1819 clear_const(current,dops[i].rs1);
1820 clear_const(current,dops[i].rt1);
dc49e339 1821 }
1822
cf95b4f0 1823 if(dops[i].opcode2>=0x38&&dops[i].opcode2<=0x3b) // DSLL/DSRL/DSRA
57871462 1824 {
9c45ca93 1825 assert(0);
57871462 1826 }
cf95b4f0 1827 if(dops[i].opcode2==0x3c) // DSLL32
57871462 1828 {
9c45ca93 1829 assert(0);
57871462 1830 }
cf95b4f0 1831 if(dops[i].opcode2==0x3e) // DSRL32
57871462 1832 {
9c45ca93 1833 assert(0);
57871462 1834 }
cf95b4f0 1835 if(dops[i].opcode2==0x3f) // DSRA32
57871462 1836 {
9c45ca93 1837 assert(0);
57871462 1838 }
1839}
1840
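// SLLV/SRLV/SRAV: the shift amount comes from rs2, so rs1, rs2 and rt1
// all need host registers. When rt1 aliases rs2 an extra temporary is
// reserved, presumably so the shift amount isn't clobbered before use.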
ad49de89 1841static void shift_alloc(struct regstat *current,int i)
57871462 1842{
cf95b4f0 1843 if(dops[i].rt1) {
1844 if(dops[i].opcode2<=0x07) // SLLV/SRLV/SRAV
57871462 1845 {
cf95b4f0 1846 if(dops[i].rs1) alloc_reg(current,i,dops[i].rs1);
1847 if(dops[i].rs2) alloc_reg(current,i,dops[i].rs2);
1848 alloc_reg(current,i,dops[i].rt1);
1849 if(dops[i].rt1==dops[i].rs2) {
e1190b87 1850 alloc_reg_temp(current,i,-1);
1851 minimum_free_regs[i]=1;
1852 }
57871462 1853 } else { // DSLLV/DSRLV/DSRAV
00fa9369 1854 assert(0);
57871462 1855 }
cf95b4f0 1856 clear_const(current,dops[i].rs1);
1857 clear_const(current,dops[i].rs2);
1858 clear_const(current,dops[i].rt1);
1859 dirty_reg(current,dops[i].rt1);
57871462 1860 }
1861}
1862
ad49de89 1863static void alu_alloc(struct regstat *current,int i)
57871462 1864{
cf95b4f0 1865 if(dops[i].opcode2>=0x20&&dops[i].opcode2<=0x23) { // ADD/ADDU/SUB/SUBU
1866 if(dops[i].rt1) {
1867 if(dops[i].rs1&&dops[i].rs2) {
1868 alloc_reg(current,i,dops[i].rs1);
1869 alloc_reg(current,i,dops[i].rs2);
57871462 1870 }
1871 else {
cf95b4f0 1872 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1873 if(dops[i].rs2&&needed_again(dops[i].rs2,i)) alloc_reg(current,i,dops[i].rs2);
57871462 1874 }
cf95b4f0 1875 alloc_reg(current,i,dops[i].rt1);
57871462 1876 }
57871462 1877 }
cf95b4f0 1878 if(dops[i].opcode2==0x2a||dops[i].opcode2==0x2b) { // SLT/SLTU
1879 if(dops[i].rt1) {
1880 alloc_reg(current,i,dops[i].rs1);
1881 alloc_reg(current,i,dops[i].rs2);
1882 alloc_reg(current,i,dops[i].rt1);
57871462 1883 }
57871462 1884 }
cf95b4f0 1885 if(dops[i].opcode2>=0x24&&dops[i].opcode2<=0x27) { // AND/OR/XOR/NOR
1886 if(dops[i].rt1) {
1887 if(dops[i].rs1&&dops[i].rs2) {
1888 alloc_reg(current,i,dops[i].rs1);
1889 alloc_reg(current,i,dops[i].rs2);
57871462 1890 }
1891 else
1892 {
cf95b4f0 1893 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1894 if(dops[i].rs2&&needed_again(dops[i].rs2,i)) alloc_reg(current,i,dops[i].rs2);
57871462 1895 }
cf95b4f0 1896 alloc_reg(current,i,dops[i].rt1);
57871462 1897 }
1898 }
cf95b4f0 1899 if(dops[i].opcode2>=0x2c&&dops[i].opcode2<=0x2f) { // DADD/DADDU/DSUB/DSUBU
00fa9369 1900 assert(0);
57871462 1901 }
cf95b4f0 1902 clear_const(current,dops[i].rs1);
1903 clear_const(current,dops[i].rs2);
1904 clear_const(current,dops[i].rt1);
1905 dirty_reg(current,dops[i].rt1);
57871462 1906}
1907
ad49de89 1908static void imm16_alloc(struct regstat *current,int i)
57871462 1909{
cf95b4f0 1910 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
53dc27f6 1911 else dops[i].use_lt1=!!dops[i].rs1;
cf95b4f0 1912 if(dops[i].rt1) alloc_reg(current,i,dops[i].rt1);
1913 if(dops[i].opcode==0x18||dops[i].opcode==0x19) { // DADDI/DADDIU
00fa9369 1914 assert(0);
57871462 1915 }
cf95b4f0 1916 else if(dops[i].opcode==0x0a||dops[i].opcode==0x0b) { // SLTI/SLTIU
1917 clear_const(current,dops[i].rs1);
1918 clear_const(current,dops[i].rt1);
57871462 1919 }
cf95b4f0 1920 else if(dops[i].opcode>=0x0c&&dops[i].opcode<=0x0e) { // ANDI/ORI/XORI
1921 if(is_const(current,dops[i].rs1)) {
1922 int v=get_const(current,dops[i].rs1);
1923 if(dops[i].opcode==0x0c) set_const(current,dops[i].rt1,v&imm[i]);
1924 if(dops[i].opcode==0x0d) set_const(current,dops[i].rt1,v|imm[i]);
1925 if(dops[i].opcode==0x0e) set_const(current,dops[i].rt1,v^imm[i]);
57871462 1926 }
cf95b4f0 1927 else clear_const(current,dops[i].rt1);
57871462 1928 }
cf95b4f0 1929 else if(dops[i].opcode==0x08||dops[i].opcode==0x09) { // ADDI/ADDIU
1930 if(is_const(current,dops[i].rs1)) {
1931 int v=get_const(current,dops[i].rs1);
1932 set_const(current,dops[i].rt1,v+imm[i]);
57871462 1933 }
cf95b4f0 1934 else clear_const(current,dops[i].rt1);
57871462 1935 }
1936 else {
cf95b4f0 1937 set_const(current,dops[i].rt1,imm[i]<<16); // LUI
57871462 1938 }
cf95b4f0 1939 dirty_reg(current,dops[i].rt1);
57871462 1940}
1941
ad49de89 1942static void load_alloc(struct regstat *current,int i)
57871462 1943{
cf95b4f0 1944 clear_const(current,dops[i].rt1);
1945 //if(dops[i].rs1!=dops[i].rt1&&needed_again(dops[i].rs1,i)) clear_const(current,dops[i].rs1); // Does this help or hurt?
1946 if(!dops[i].rs1) current->u&=~1LL; // Allow allocating r0 if it's the source register
37387d8b 1947 if (needed_again(dops[i].rs1, i))
1948 alloc_reg(current, i, dops[i].rs1);
1949 if (ram_offset)
1950 alloc_reg(current, i, ROREG);
cf95b4f0 1951 if(dops[i].rt1&&!((current->u>>dops[i].rt1)&1)) {
1952 alloc_reg(current,i,dops[i].rt1);
1953 assert(get_reg(current->regmap,dops[i].rt1)>=0);
1954 if(dops[i].opcode==0x27||dops[i].opcode==0x37) // LWU/LD
57871462 1955 {
ad49de89 1956 assert(0);
57871462 1957 }
cf95b4f0 1958 else if(dops[i].opcode==0x1A||dops[i].opcode==0x1B) // LDL/LDR
57871462 1959 {
ad49de89 1960 assert(0);
57871462 1961 }
cf95b4f0 1962 dirty_reg(current,dops[i].rt1);
57871462 1963 // LWL/LWR need a temporary register for the old value
cf95b4f0 1964 if(dops[i].opcode==0x22||dops[i].opcode==0x26)
57871462 1965 {
1966 alloc_reg(current,i,FTEMP);
1967 alloc_reg_temp(current,i,-1);
e1190b87 1968 minimum_free_regs[i]=1;
57871462 1969 }
1970 }
1971 else
1972 {
373d1d07 1973 // Load to r0 or unneeded register (dummy load)
57871462 1974 // but we still need a register to calculate the address
cf95b4f0 1975 if(dops[i].opcode==0x22||dops[i].opcode==0x26)
535d208a 1976 {
1977 alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1978 }
57871462 1979 alloc_reg_temp(current,i,-1);
e1190b87 1980 minimum_free_regs[i]=1;
cf95b4f0 1981 if(dops[i].opcode==0x1A||dops[i].opcode==0x1B) // LDL/LDR
535d208a 1982 {
ad49de89 1983 assert(0);
535d208a 1984 }
57871462 1985 }
1986}
1987
4149788d 1988static void store_alloc(struct regstat *current,int i)
57871462 1989{
cf95b4f0 1990 clear_const(current,dops[i].rs2);
1991 if(!(dops[i].rs2)) current->u&=~1LL; // Allow allocating r0 if necessary
1992 if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1993 alloc_reg(current,i,dops[i].rs2);
1994 if(dops[i].opcode==0x2c||dops[i].opcode==0x2d||dops[i].opcode==0x3f) { // 64-bit SDL/SDR/SD
ad49de89 1995 assert(0);
57871462 1996 }
37387d8b 1997 if (ram_offset)
1998 alloc_reg(current, i, ROREG);
57871462 1999 #if defined(HOST_IMM8)
2000 // On CPUs without 32-bit immediates we need a pointer to invalid_code
37387d8b 2001 alloc_reg(current, i, INVCP);
57871462 2002 #endif
cf95b4f0 2003 if(dops[i].opcode==0x2a||dops[i].opcode==0x2e||dops[i].opcode==0x2c||dops[i].opcode==0x2d) { // SWL/SWL/SDL/SDR
57871462 2004 alloc_reg(current,i,FTEMP);
2005 }
2006 // We need a temporary register for address generation
2007 alloc_reg_temp(current,i,-1);
e1190b87 2008 minimum_free_regs[i]=1;
57871462 2009}
2010
4149788d 2011static void c1ls_alloc(struct regstat *current,int i)
57871462 2012{
cf95b4f0 2013 clear_const(current,dops[i].rt1);
57871462 2014 alloc_reg(current,i,CSREG); // Status
57871462 2015}
2016
4149788d 2017static void c2ls_alloc(struct regstat *current,int i)
b9b61529 2018{
cf95b4f0 2019 clear_const(current,dops[i].rt1);
2020 if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
b9b61529 2021 alloc_reg(current,i,FTEMP);
37387d8b 2022 if (ram_offset)
2023 alloc_reg(current, i, ROREG);
b9b61529 2024 #if defined(HOST_IMM8)
2025 // On CPUs without 32-bit immediates we need a pointer to invalid_code
37387d8b 2026 if (dops[i].opcode == 0x3a) // SWC2
b9b61529 2027 alloc_reg(current,i,INVCP);
2028 #endif
2029 // We need a temporary register for address generation
2030 alloc_reg_temp(current,i,-1);
e1190b87 2031 minimum_free_regs[i]=1;
b9b61529 2032}
2033
57871462 2034#ifndef multdiv_alloc
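// Default MULT/DIV allocation (arch-specific code may override this,
// hence the #ifndef). Results go to HI/LO; the cycle counter is
// allocated so the stall emulation can model mult/div latency.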
4149788d 2035static void multdiv_alloc(struct regstat *current,int i)
57871462 2036{
2037 // case 0x18: MULT
2038 // case 0x19: MULTU
2039 // case 0x1A: DIV
2040 // case 0x1B: DIVU
2041 // case 0x1C: DMULT
2042 // case 0x1D: DMULTU
2043 // case 0x1E: DDIV
2044 // case 0x1F: DDIVU
cf95b4f0 2045 clear_const(current,dops[i].rs1);
2046 clear_const(current,dops[i].rs2);
32631e6a 2047 alloc_cc(current,i); // for stalls
cf95b4f0 2048 if(dops[i].rs1&&dops[i].rs2)
57871462 2049 {
cf95b4f0 2050 if((dops[i].opcode2&4)==0) // 32-bit
57871462 2051 {
2052 current->u&=~(1LL<<HIREG);
2053 current->u&=~(1LL<<LOREG);
2054 alloc_reg(current,i,HIREG);
2055 alloc_reg(current,i,LOREG);
cf95b4f0 2056 alloc_reg(current,i,dops[i].rs1);
2057 alloc_reg(current,i,dops[i].rs2);
57871462 2058 dirty_reg(current,HIREG);
2059 dirty_reg(current,LOREG);
2060 }
2061 else // 64-bit
2062 {
00fa9369 2063 assert(0);
57871462 2064 }
2065 }
2066 else
2067 {
2068 // Multiply by zero is zero.
2069 // MIPS does not have a divide by zero exception.
 2070 // The result is undefined, so we return zero.
2071 alloc_reg(current,i,HIREG);
2072 alloc_reg(current,i,LOREG);
57871462 2073 dirty_reg(current,HIREG);
2074 dirty_reg(current,LOREG);
2075 }
2076}
2077#endif
2078
4149788d 2079static void cop0_alloc(struct regstat *current,int i)
57871462 2080{
cf95b4f0 2081 if(dops[i].opcode2==0) // MFC0
57871462 2082 {
cf95b4f0 2083 if(dops[i].rt1) {
2084 clear_const(current,dops[i].rt1);
57871462 2085 alloc_all(current,i);
cf95b4f0 2086 alloc_reg(current,i,dops[i].rt1);
2087 dirty_reg(current,dops[i].rt1);
57871462 2088 }
2089 }
cf95b4f0 2090 else if(dops[i].opcode2==4) // MTC0
57871462 2091 {
cf95b4f0 2092 if(dops[i].rs1){
2093 clear_const(current,dops[i].rs1);
2094 alloc_reg(current,i,dops[i].rs1);
57871462 2095 alloc_all(current,i);
2096 }
2097 else {
2098 alloc_all(current,i); // FIXME: Keep r0
2099 current->u&=~1LL;
2100 alloc_reg(current,i,0);
2101 }
2102 }
2103 else
2104 {
55a695d9 2105 // RFE
cf95b4f0 2106 assert(dops[i].opcode2==0x10);
57871462 2107 alloc_all(current,i);
2108 }
e1190b87 2109 minimum_free_regs[i]=HOST_REGS;
57871462 2110}
2111
81dbbf4c 2112static void cop2_alloc(struct regstat *current,int i)
57871462 2113{
cf95b4f0 2114 if (dops[i].opcode2 < 3) // MFC2/CFC2
57871462 2115 {
81dbbf4c 2116 alloc_cc(current,i); // for stalls
2117 dirty_reg(current,CCREG);
cf95b4f0 2118 if(dops[i].rt1){
2119 clear_const(current,dops[i].rt1);
2120 alloc_reg(current,i,dops[i].rt1);
2121 dirty_reg(current,dops[i].rt1);
57871462 2122 }
57871462 2123 }
cf95b4f0 2124 else if (dops[i].opcode2 > 3) // MTC2/CTC2
57871462 2125 {
cf95b4f0 2126 if(dops[i].rs1){
2127 clear_const(current,dops[i].rs1);
2128 alloc_reg(current,i,dops[i].rs1);
57871462 2129 }
2130 else {
2131 current->u&=~1LL;
2132 alloc_reg(current,i,0);
57871462 2133 }
2134 }
81dbbf4c 2135 alloc_reg_temp(current,i,-1);
e1190b87 2136 minimum_free_regs[i]=1;
57871462 2137}
00fa9369 2138
4149788d 2139static void c2op_alloc(struct regstat *current,int i)
b9b61529 2140{
81dbbf4c 2141 alloc_cc(current,i); // for stalls
2142 dirty_reg(current,CCREG);
b9b61529 2143 alloc_reg_temp(current,i,-1);
2144}
57871462 2145
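// SYSCALL exits through the exception handler: keep the cycle count
// live, take over every host register (alloc_all) and drop any cached
// constants, since nothing survives across the exception.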
4149788d 2146static void syscall_alloc(struct regstat *current,int i)
57871462 2147{
2148 alloc_cc(current,i);
2149 dirty_reg(current,CCREG);
2150 alloc_all(current,i);
e1190b87 2151 minimum_free_regs[i]=HOST_REGS;
57871462 2152 current->isconst=0;
2153}
2154
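// Dispatch register allocation for the instruction sitting in a branch
// delay slot, based on its decoded itype.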
4149788d 2155static void delayslot_alloc(struct regstat *current,int i)
57871462 2156{
cf95b4f0 2157 switch(dops[i].itype) {
57871462 2158 case UJUMP:
2159 case CJUMP:
2160 case SJUMP:
2161 case RJUMP:
57871462 2162 case SYSCALL:
7139f3c8 2163 case HLECALL:
57871462 2164 case IMM16:
2165 imm16_alloc(current,i);
2166 break;
2167 case LOAD:
2168 case LOADLR:
2169 load_alloc(current,i);
2170 break;
2171 case STORE:
2172 case STORELR:
2173 store_alloc(current,i);
2174 break;
2175 case ALU:
2176 alu_alloc(current,i);
2177 break;
2178 case SHIFT:
2179 shift_alloc(current,i);
2180 break;
2181 case MULTDIV:
2182 multdiv_alloc(current,i);
2183 break;
2184 case SHIFTIMM:
2185 shiftimm_alloc(current,i);
2186 break;
2187 case MOV:
2188 mov_alloc(current,i);
2189 break;
2190 case COP0:
2191 cop0_alloc(current,i);
2192 break;
2193 case COP1:
81dbbf4c 2194 break;
b9b61529 2195 case COP2:
81dbbf4c 2196 cop2_alloc(current,i);
57871462 2197 break;
2198 case C1LS:
2199 c1ls_alloc(current,i);
2200 break;
b9b61529 2201 case C2LS:
2202 c2ls_alloc(current,i);
2203 break;
b9b61529 2204 case C2OP:
2205 c2op_alloc(current,i);
2206 break;
57871462 2207 }
2208}
2209
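// Queue an out-of-line stub (slow path) to be emitted after the main
// block: 'addr' is the branch location to patch, 'retaddr' is where the
// stub returns to, and a..e carry stub-type specific arguments
// (see add_stub_r for the common load/store case).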
b14b6a8f 2210static void add_stub(enum stub_type type, void *addr, void *retaddr,
2211 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
2212{
d1e4ebd9 2213 assert(stubcount < ARRAY_SIZE(stubs));
b14b6a8f 2214 stubs[stubcount].type = type;
2215 stubs[stubcount].addr = addr;
2216 stubs[stubcount].retaddr = retaddr;
2217 stubs[stubcount].a = a;
2218 stubs[stubcount].b = b;
2219 stubs[stubcount].c = c;
2220 stubs[stubcount].d = d;
2221 stubs[stubcount].e = e;
57871462 2222 stubcount++;
2223}
2224
b14b6a8f 2225static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
81dbbf4c 2226 int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist)
b14b6a8f 2227{
2228 add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
2229}
2230
57871462 2231// Write out a single register
2330734f 2232static void wb_register(signed char r, const signed char regmap[], uint64_t dirty)
57871462 2233{
2234 int hr;
2235 for(hr=0;hr<HOST_REGS;hr++) {
2236 if(hr!=EXCLUDE_REG) {
9de8a0c3 2237 if(regmap[hr]==r) {
57871462 2238 if((dirty>>hr)&1) {
ad49de89 2239 assert(regmap[hr]<64);
2240 emit_storereg(r,hr);
57871462 2241 }
2242 }
2243 }
2244 }
2245}
2246
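// Write back registers that were dirty before this point but are no
// longer marked dirty, so their values aren't silently dropped;
// unneeded registers (bit set in u) are skipped.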
8062d65a 2247static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
2248{
2249 //if(dirty_pre==dirty) return;
53358c1d 2250 int hr, r;
2251 for (hr = 0; hr < HOST_REGS; hr++) {
2252 r = pre[hr];
2253 if (r < 1 || r > 33 || ((u >> r) & 1))
2254 continue;
2255 if (((dirty_pre & ~dirty) >> hr) & 1)
2256 emit_storereg(r, hr);
8062d65a 2257 }
2258}
2259
687b4580 2260// trashes r2
2261static void pass_args(int a0, int a1)
2262{
2263 if(a0==1&&a1==0) {
2264 // must swap
2265 emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
2266 }
2267 else if(a0!=0&&a1==0) {
2268 emit_mov(a1,1);
2269 if (a0>=0) emit_mov(a0,0);
2270 }
2271 else {
2272 if(a0>=0&&a0!=0) emit_mov(a0,0);
2273 if(a1>=0&&a1!=1) emit_mov(a1,1);
2274 }
2275}
2276
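// ADD/ADDU/SUB/SUBU, SLT/SLTU and the logical ops. Operands that are r0
// collapse into zeroing/negate/move forms, and a source that lost its
// host register is reloaded with emit_loadreg().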
2330734f 2277static void alu_assemble(int i, const struct regstat *i_regs)
57871462 2278{
cf95b4f0 2279 if(dops[i].opcode2>=0x20&&dops[i].opcode2<=0x23) { // ADD/ADDU/SUB/SUBU
2280 if(dops[i].rt1) {
57871462 2281 signed char s1,s2,t;
cf95b4f0 2282 t=get_reg(i_regs->regmap,dops[i].rt1);
57871462 2283 if(t>=0) {
cf95b4f0 2284 s1=get_reg(i_regs->regmap,dops[i].rs1);
2285 s2=get_reg(i_regs->regmap,dops[i].rs2);
2286 if(dops[i].rs1&&dops[i].rs2) {
57871462 2287 assert(s1>=0);
2288 assert(s2>=0);
cf95b4f0 2289 if(dops[i].opcode2&2) emit_sub(s1,s2,t);
57871462 2290 else emit_add(s1,s2,t);
2291 }
cf95b4f0 2292 else if(dops[i].rs1) {
57871462 2293 if(s1>=0) emit_mov(s1,t);
cf95b4f0 2294 else emit_loadreg(dops[i].rs1,t);
57871462 2295 }
cf95b4f0 2296 else if(dops[i].rs2) {
57871462 2297 if(s2>=0) {
cf95b4f0 2298 if(dops[i].opcode2&2) emit_neg(s2,t);
57871462 2299 else emit_mov(s2,t);
2300 }
2301 else {
cf95b4f0 2302 emit_loadreg(dops[i].rs2,t);
2303 if(dops[i].opcode2&2) emit_neg(t,t);
57871462 2304 }
2305 }
2306 else emit_zeroreg(t);
2307 }
2308 }
2309 }
cf95b4f0 2310 if(dops[i].opcode2>=0x2c&&dops[i].opcode2<=0x2f) { // DADD/DADDU/DSUB/DSUBU
00fa9369 2311 assert(0);
57871462 2312 }
cf95b4f0 2313 if(dops[i].opcode2==0x2a||dops[i].opcode2==0x2b) { // SLT/SLTU
2314 if(dops[i].rt1) {
ad49de89 2315 signed char s1l,s2l,t;
57871462 2316 {
cf95b4f0 2317 t=get_reg(i_regs->regmap,dops[i].rt1);
57871462 2318 //assert(t>=0);
2319 if(t>=0) {
cf95b4f0 2320 s1l=get_reg(i_regs->regmap,dops[i].rs1);
2321 s2l=get_reg(i_regs->regmap,dops[i].rs2);
2322 if(dops[i].rs2==0) // rx<r0
57871462 2323 {
cf95b4f0 2324 if(dops[i].opcode2==0x2a&&dops[i].rs1!=0) { // SLT
06e425d7 2325 assert(s1l>=0);
57871462 2326 emit_shrimm(s1l,31,t);
06e425d7 2327 }
2328 else // SLTU (unsigned can not be less than zero, 0<0)
57871462 2329 emit_zeroreg(t);
2330 }
cf95b4f0 2331 else if(dops[i].rs1==0) // r0<rx
57871462 2332 {
2333 assert(s2l>=0);
cf95b4f0 2334 if(dops[i].opcode2==0x2a) // SLT
57871462 2335 emit_set_gz32(s2l,t);
2336 else // SLTU (set if not zero)
2337 emit_set_nz32(s2l,t);
2338 }
2339 else{
2340 assert(s1l>=0);assert(s2l>=0);
cf95b4f0 2341 if(dops[i].opcode2==0x2a) // SLT
57871462 2342 emit_set_if_less32(s1l,s2l,t);
2343 else // SLTU
2344 emit_set_if_carry32(s1l,s2l,t);
2345 }
2346 }
2347 }
2348 }
2349 }
cf95b4f0 2350 if(dops[i].opcode2>=0x24&&dops[i].opcode2<=0x27) { // AND/OR/XOR/NOR
2351 if(dops[i].rt1) {
ad49de89 2352 signed char s1l,s2l,tl;
cf95b4f0 2353 tl=get_reg(i_regs->regmap,dops[i].rt1);
57871462 2354 {
57871462 2355 if(tl>=0) {
cf95b4f0 2356 s1l=get_reg(i_regs->regmap,dops[i].rs1);
2357 s2l=get_reg(i_regs->regmap,dops[i].rs2);
2358 if(dops[i].rs1&&dops[i].rs2) {
57871462 2359 assert(s1l>=0);
2360 assert(s2l>=0);
cf95b4f0 2361 if(dops[i].opcode2==0x24) { // AND
57871462 2362 emit_and(s1l,s2l,tl);
2363 } else
cf95b4f0 2364 if(dops[i].opcode2==0x25) { // OR
57871462 2365 emit_or(s1l,s2l,tl);
2366 } else
cf95b4f0 2367 if(dops[i].opcode2==0x26) { // XOR
57871462 2368 emit_xor(s1l,s2l,tl);
2369 } else
cf95b4f0 2370 if(dops[i].opcode2==0x27) { // NOR
57871462 2371 emit_or(s1l,s2l,tl);
2372 emit_not(tl,tl);
2373 }
2374 }
2375 else
2376 {
cf95b4f0 2377 if(dops[i].opcode2==0x24) { // AND
57871462 2378 emit_zeroreg(tl);
2379 } else
cf95b4f0 2380 if(dops[i].opcode2==0x25||dops[i].opcode2==0x26) { // OR/XOR
2381 if(dops[i].rs1){
57871462 2382 if(s1l>=0) emit_mov(s1l,tl);
cf95b4f0 2383 else emit_loadreg(dops[i].rs1,tl); // CHECK: regmap_entry?
57871462 2384 }
2385 else
cf95b4f0 2386 if(dops[i].rs2){
57871462 2387 if(s2l>=0) emit_mov(s2l,tl);
cf95b4f0 2388 else emit_loadreg(dops[i].rs2,tl); // CHECK: regmap_entry?
57871462 2389 }
2390 else emit_zeroreg(tl);
2391 } else
cf95b4f0 2392 if(dops[i].opcode2==0x27) { // NOR
2393 if(dops[i].rs1){
57871462 2394 if(s1l>=0) emit_not(s1l,tl);
2395 else {
cf95b4f0 2396 emit_loadreg(dops[i].rs1,tl);
57871462 2397 emit_not(tl,tl);
2398 }
2399 }
2400 else
cf95b4f0 2401 if(dops[i].rs2){
57871462 2402 if(s2l>=0) emit_not(s2l,tl);
2403 else {
cf95b4f0 2404 emit_loadreg(dops[i].rs2,tl);
57871462 2405 emit_not(tl,tl);
2406 }
2407 }
2408 else emit_movimm(-1,tl);
2409 }
2410 }
2411 }
2412 }
2413 }
2414 }
2415}
2416
2330734f 2417static void imm16_assemble(int i, const struct regstat *i_regs)
57871462 2418{
cf95b4f0 2419 if (dops[i].opcode==0x0f) { // LUI
2420 if(dops[i].rt1) {
57871462 2421 signed char t;
cf95b4f0 2422 t=get_reg(i_regs->regmap,dops[i].rt1);
57871462 2423 //assert(t>=0);
2424 if(t>=0) {
2425 if(!((i_regs->isconst>>t)&1))
2426 emit_movimm(imm[i]<<16,t);
2427 }
2428 }
2429 }
cf95b4f0 2430 if(dops[i].opcode==0x08||dops[i].opcode==0x09) { // ADDI/ADDIU
2431 if(dops[i].rt1) {
57871462 2432 signed char s,t;
cf95b4f0 2433 t=get_reg(i_regs->regmap,dops[i].rt1);
2434 s=get_reg(i_regs->regmap,dops[i].rs1);
2435 if(dops[i].rs1) {
57871462 2436 //assert(t>=0);
2437 //assert(s>=0);
2438 if(t>=0) {
2439 if(!((i_regs->isconst>>t)&1)) {
2440 if(s<0) {
cf95b4f0 2441 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
57871462 2442 emit_addimm(t,imm[i],t);
2443 }else{
2444 if(!((i_regs->wasconst>>s)&1))
2445 emit_addimm(s,imm[i],t);
2446 else
2447 emit_movimm(constmap[i][s]+imm[i],t);
2448 }
2449 }
2450 }
2451 } else {
2452 if(t>=0) {
2453 if(!((i_regs->isconst>>t)&1))
2454 emit_movimm(imm[i],t);
2455 }
2456 }
2457 }
2458 }
cf95b4f0 2459 if(dops[i].opcode==0x18||dops[i].opcode==0x19) { // DADDI/DADDIU
2460 if(dops[i].rt1) {
7c3a5182 2461 signed char sl,tl;
cf95b4f0 2462 tl=get_reg(i_regs->regmap,dops[i].rt1);
2463 sl=get_reg(i_regs->regmap,dops[i].rs1);
57871462 2464 if(tl>=0) {
cf95b4f0 2465 if(dops[i].rs1) {
57871462 2466 assert(sl>=0);
7c3a5182 2467 emit_addimm(sl,imm[i],tl);
57871462 2468 } else {
2469 emit_movimm(imm[i],tl);
57871462 2470 }
2471 }
2472 }
2473 }
cf95b4f0 2474 else if(dops[i].opcode==0x0a||dops[i].opcode==0x0b) { // SLTI/SLTIU
2475 if(dops[i].rt1) {
2476 //assert(dops[i].rs1!=0); // r0 might be valid, but it's probably a bug
ad49de89 2477 signed char sl,t;
cf95b4f0 2478 t=get_reg(i_regs->regmap,dops[i].rt1);
2479 sl=get_reg(i_regs->regmap,dops[i].rs1);
57871462 2480 //assert(t>=0);
2481 if(t>=0) {
cf95b4f0 2482 if(dops[i].rs1>0) {
2483 if(dops[i].opcode==0x0a) { // SLTI
57871462 2484 if(sl<0) {
cf95b4f0 2485 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
57871462 2486 emit_slti32(t,imm[i],t);
2487 }else{
2488 emit_slti32(sl,imm[i],t);
2489 }
2490 }
2491 else { // SLTIU
2492 if(sl<0) {
cf95b4f0 2493 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
57871462 2494 emit_sltiu32(t,imm[i],t);
2495 }else{
2496 emit_sltiu32(sl,imm[i],t);
2497 }
2498 }
57871462 2499 }else{
2500 // SLTI(U) with r0 is just stupid,
2501 // nonetheless examples can be found
cf95b4f0 2502 if(dops[i].opcode==0x0a) // SLTI
57871462 2503 if(0<imm[i]) emit_movimm(1,t);
2504 else emit_zeroreg(t);
2505 else // SLTIU
2506 {
2507 if(imm[i]) emit_movimm(1,t);
2508 else emit_zeroreg(t);
2509 }
2510 }
2511 }
2512 }
2513 }
cf95b4f0 2514 else if(dops[i].opcode>=0x0c&&dops[i].opcode<=0x0e) { // ANDI/ORI/XORI
2515 if(dops[i].rt1) {
7c3a5182 2516 signed char sl,tl;
cf95b4f0 2517 tl=get_reg(i_regs->regmap,dops[i].rt1);
2518 sl=get_reg(i_regs->regmap,dops[i].rs1);
57871462 2519 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
cf95b4f0 2520 if(dops[i].opcode==0x0c) //ANDI
57871462 2521 {
cf95b4f0 2522 if(dops[i].rs1) {
57871462 2523 if(sl<0) {
cf95b4f0 2524 if(i_regs->regmap_entry[tl]!=dops[i].rs1) emit_loadreg(dops[i].rs1,tl);
57871462 2525 emit_andimm(tl,imm[i],tl);
2526 }else{
2527 if(!((i_regs->wasconst>>sl)&1))
2528 emit_andimm(sl,imm[i],tl);
2529 else
2530 emit_movimm(constmap[i][sl]&imm[i],tl);
2531 }
2532 }
2533 else
2534 emit_zeroreg(tl);
57871462 2535 }
2536 else
2537 {
cf95b4f0 2538 if(dops[i].rs1) {
57871462 2539 if(sl<0) {
cf95b4f0 2540 if(i_regs->regmap_entry[tl]!=dops[i].rs1) emit_loadreg(dops[i].rs1,tl);
57871462 2541 }
cf95b4f0 2542 if(dops[i].opcode==0x0d) { // ORI
581335b0 2543 if(sl<0) {
2544 emit_orimm(tl,imm[i],tl);
2545 }else{
2546 if(!((i_regs->wasconst>>sl)&1))
2547 emit_orimm(sl,imm[i],tl);
2548 else
2549 emit_movimm(constmap[i][sl]|imm[i],tl);
2550 }
57871462 2551 }
cf95b4f0 2552 if(dops[i].opcode==0x0e) { // XORI
581335b0 2553 if(sl<0) {
2554 emit_xorimm(tl,imm[i],tl);
2555 }else{
2556 if(!((i_regs->wasconst>>sl)&1))
2557 emit_xorimm(sl,imm[i],tl);
2558 else
2559 emit_movimm(constmap[i][sl]^imm[i],tl);
2560 }
57871462 2561 }
2562 }
2563 else {
2564 emit_movimm(imm[i],tl);
57871462 2565 }
2566 }
2567 }
2568 }
2569 }
2570}
2571
2330734f 2572static void shiftimm_assemble(int i, const struct regstat *i_regs)
57871462 2573{
cf95b4f0 2574 if(dops[i].opcode2<=0x3) // SLL/SRL/SRA
57871462 2575 {
cf95b4f0 2576 if(dops[i].rt1) {
57871462 2577 signed char s,t;
cf95b4f0 2578 t=get_reg(i_regs->regmap,dops[i].rt1);
2579 s=get_reg(i_regs->regmap,dops[i].rs1);
57871462 2580 //assert(t>=0);
dc49e339 2581 if(t>=0&&!((i_regs->isconst>>t)&1)){
cf95b4f0 2582 if(dops[i].rs1==0)
57871462 2583 {
2584 emit_zeroreg(t);
2585 }
2586 else
2587 {
cf95b4f0 2588 if(s<0&&i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
57871462 2589 if(imm[i]) {
cf95b4f0 2590 if(dops[i].opcode2==0) // SLL
57871462 2591 {
2592 emit_shlimm(s<0?t:s,imm[i],t);
2593 }
cf95b4f0 2594 if(dops[i].opcode2==2) // SRL
57871462 2595 {
2596 emit_shrimm(s<0?t:s,imm[i],t);
2597 }
cf95b4f0 2598 if(dops[i].opcode2==3) // SRA
57871462 2599 {
2600 emit_sarimm(s<0?t:s,imm[i],t);
2601 }
2602 }else{
2603 // Shift by zero
2604 if(s>=0 && s!=t) emit_mov(s,t);
2605 }
2606 }
2607 }
cf95b4f0 2608 //emit_storereg(dops[i].rt1,t); //DEBUG
57871462 2609 }
2610 }
cf95b4f0 2611 if(dops[i].opcode2>=0x38&&dops[i].opcode2<=0x3b) // DSLL/DSRL/DSRA
57871462 2612 {
9c45ca93 2613 assert(0);
57871462 2614 }
cf95b4f0 2615 if(dops[i].opcode2==0x3c) // DSLL32
57871462 2616 {
9c45ca93 2617 assert(0);
57871462 2618 }
cf95b4f0 2619 if(dops[i].opcode2==0x3e) // DSRL32
57871462 2620 {
9c45ca93 2621 assert(0);
57871462 2622 }
cf95b4f0 2623 if(dops[i].opcode2==0x3f) // DSRA32
57871462 2624 {
9c45ca93 2625 assert(0);
57871462 2626 }
2627}
2628
2629#ifndef shift_assemble
2330734f 2630static void shift_assemble(int i, const struct regstat *i_regs)
57871462 2631{
3968e69e 2632 signed char s,t,shift;
cf95b4f0 2633 if (dops[i].rt1 == 0)
3968e69e 2634 return;
cf95b4f0 2635 assert(dops[i].opcode2<=0x07); // SLLV/SRLV/SRAV
2636 t = get_reg(i_regs->regmap, dops[i].rt1);
2637 s = get_reg(i_regs->regmap, dops[i].rs1);
2638 shift = get_reg(i_regs->regmap, dops[i].rs2);
3968e69e 2639 if (t < 0)
2640 return;
2641
cf95b4f0 2642 if(dops[i].rs1==0)
3968e69e 2643 emit_zeroreg(t);
cf95b4f0 2644 else if(dops[i].rs2==0) {
3968e69e 2645 assert(s>=0);
2646 if(s!=t) emit_mov(s,t);
2647 }
2648 else {
2649 host_tempreg_acquire();
2650 emit_andimm(shift,31,HOST_TEMPREG);
cf95b4f0 2651 switch(dops[i].opcode2) {
3968e69e 2652 case 4: // SLLV
2653 emit_shl(s,HOST_TEMPREG,t);
2654 break;
2655 case 6: // SRLV
2656 emit_shr(s,HOST_TEMPREG,t);
2657 break;
2658 case 7: // SRAV
2659 emit_sar(s,HOST_TEMPREG,t);
2660 break;
2661 default:
2662 assert(0);
2663 }
2664 host_tempreg_release();
2665 }
57871462 2666}
3968e69e 2667
57871462 2668#endif
2669
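// Rough classification of a guest address into one of the PSX RAM
// mirrors or the scratchpad, used below to pick the cheapest fast-path
// address transform for loads/stores.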
8062d65a 2670enum {
2671 MTYPE_8000 = 0,
2672 MTYPE_8020,
2673 MTYPE_0000,
2674 MTYPE_A000,
2675 MTYPE_1F80,
2676};
2677
2678static int get_ptr_mem_type(u_int a)
2679{
2680 if(a < 0x00200000) {
2681 if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
2682 // return wrong, must use memhandler for BIOS self-test to pass
2683 // 007 does similar stuff from a00 mirror, weird stuff
2684 return MTYPE_8000;
2685 return MTYPE_0000;
2686 }
2687 if(0x1f800000 <= a && a < 0x1f801000)
2688 return MTYPE_1F80;
2689 if(0x80200000 <= a && a < 0x80800000)
2690 return MTYPE_8020;
2691 if(0xa0000000 <= a && a < 0xa0200000)
2692 return MTYPE_A000;
2693 return MTYPE_8000;
2694}
2695
37387d8b 2696static int get_ro_reg(const struct regstat *i_regs, int host_tempreg_free)
2697{
2698 int r = get_reg(i_regs->regmap, ROREG);
2699 if (r < 0 && host_tempreg_free) {
2700 host_tempreg_acquire();
2701 emit_loadreg(ROREG, r = HOST_TEMPREG);
2702 }
2703 if (r < 0)
2704 abort();
2705 return r;
2706}
2707
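// Emit the inline fast-path check for a load/store: known RAM mirrors
// are first rebased to the canonical mapping (via HOST_TEMPREG, reported
// through *addr_reg_override), then the address is range-checked so the
// out-of-range case can branch to a slow-path stub. The branch location
// is returned for patching; *offset_reg reports which register holds
// ram_offset, if one is needed.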
2708static void *emit_fastpath_cmp_jump(int i, const struct regstat *i_regs,
2709 int addr, int *offset_reg, int *addr_reg_override)
8062d65a 2710{
2711 void *jaddr = NULL;
37387d8b 2712 int type = 0;
2713 int mr = dops[i].rs1;
2714 *offset_reg = -1;
8062d65a 2715 if(((smrv_strong|smrv_weak)>>mr)&1) {
2716 type=get_ptr_mem_type(smrv[mr]);
2717 //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
2718 }
2719 else {
2720 // use the mirror we are running on
2721 type=get_ptr_mem_type(start);
2722 //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type);
2723 }
2724
2725 if(type==MTYPE_8020) { // RAM 80200000+ mirror
d1e4ebd9 2726 host_tempreg_acquire();
8062d65a 2727 emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2728 addr=*addr_reg_override=HOST_TEMPREG;
2729 type=0;
2730 }
2731 else if(type==MTYPE_0000) { // RAM 0 mirror
d1e4ebd9 2732 host_tempreg_acquire();
8062d65a 2733 emit_orimm(addr,0x80000000,HOST_TEMPREG);
2734 addr=*addr_reg_override=HOST_TEMPREG;
2735 type=0;
2736 }
2737 else if(type==MTYPE_A000) { // RAM A mirror
d1e4ebd9 2738 host_tempreg_acquire();
8062d65a 2739 emit_andimm(addr,~0x20000000,HOST_TEMPREG);
2740 addr=*addr_reg_override=HOST_TEMPREG;
2741 type=0;
2742 }
2743 else if(type==MTYPE_1F80) { // scratchpad
2744 if (psxH == (void *)0x1f800000) {
d1e4ebd9 2745 host_tempreg_acquire();
3968e69e 2746 emit_xorimm(addr,0x1f800000,HOST_TEMPREG);
8062d65a 2747 emit_cmpimm(HOST_TEMPREG,0x1000);
d1e4ebd9 2748 host_tempreg_release();
8062d65a 2749 jaddr=out;
2750 emit_jc(0);
2751 }
2752 else {
2753 // do the usual RAM check, jump will go to the right handler
2754 type=0;
2755 }
2756 }
2757
37387d8b 2758 if (type == 0) // need ram check
8062d65a 2759 {
2760 emit_cmpimm(addr,RAM_SIZE);
37387d8b 2761 jaddr = out;
8062d65a 2762 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2763 // Hint to branch predictor that the branch is unlikely to be taken
37387d8b 2764 if (dops[i].rs1 >= 28)
8062d65a 2765 emit_jno_unlikely(0);
2766 else
2767 #endif
2768 emit_jno(0);
37387d8b 2769 if (ram_offset != 0)
2770 *offset_reg = get_ro_reg(i_regs, 0);
8062d65a 2771 }
2772
2773 return jaddr;
2774}
2775
687b4580 2776// return memhandler, or get directly accessable address and return 0
2777static void *get_direct_memhandler(void *table, u_int addr,
2778 enum stub_type type, uintptr_t *addr_host)
2779{
c979e8c2 2780 uintptr_t msb = 1ull << (sizeof(uintptr_t)*8 - 1);
687b4580 2781 uintptr_t l1, l2 = 0;
2782 l1 = ((uintptr_t *)table)[addr>>12];
c979e8c2 2783 if (!(l1 & msb)) {
687b4580 2784 uintptr_t v = l1 << 1;
2785 *addr_host = v + addr;
2786 return NULL;
2787 }
2788 else {
2789 l1 <<= 1;
2790 if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
2791 l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
2792 else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
c979e8c2 2793 l2 = ((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
687b4580 2794 else
c979e8c2 2795 l2 = ((uintptr_t *)l1)[(addr&0xfff)/4];
2796 if (!(l2 & msb)) {
687b4580 2797 uintptr_t v = l2 << 1;
2798 *addr_host = v + (addr&0xfff);
2799 return NULL;
2800 }
2801 return (void *)(l2 << 1);
2802 }
2803}
2804
81dbbf4c 2805static u_int get_host_reglist(const signed char *regmap)
2806{
2807 u_int reglist = 0, hr;
2808 for (hr = 0; hr < HOST_REGS; hr++) {
2809 if (hr != EXCLUDE_REG && regmap[hr] >= 0)
2810 reglist |= 1 << hr;
2811 }
2812 return reglist;
2813}
2814
2815static u_int reglist_exclude(u_int reglist, int r1, int r2)
2816{
2817 if (r1 >= 0)
2818 reglist &= ~(1u << r1);
2819 if (r2 >= 0)
2820 reglist &= ~(1u << r2);
2821 return reglist;
2822}
2823
e3c6bdb5 2824// find a temp caller-saved register not in reglist (so assumed to be free)
2825static int reglist_find_free(u_int reglist)
2826{
2827 u_int free_regs = ~reglist & CALLER_SAVE_REGS;
2828 if (free_regs == 0)
2829 return -1;
2830 return __builtin_ctz(free_regs);
2831}
2832
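// Small load/store helpers: when a ram_offset register is active they
// use the dual-indexed forms, otherwise plain indexed accesses. The
// store variants can temporarily bias the base address by 'ofs' and
// optionally restore it afterwards.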
37387d8b 2833static void do_load_word(int a, int rt, int offset_reg)
2834{
2835 if (offset_reg >= 0)
2836 emit_ldr_dualindexed(offset_reg, a, rt);
2837 else
2838 emit_readword_indexed(0, a, rt);
2839}
2840
 2841 static void do_store_word(int a, int ofs, int rt, int offset_reg, int preserve_a)
2842{
2843 if (offset_reg < 0) {
2844 emit_writeword_indexed(rt, ofs, a);
2845 return;
2846 }
2847 if (ofs != 0)
2848 emit_addimm(a, ofs, a);
2849 emit_str_dualindexed(offset_reg, a, rt);
 2850 if (ofs != 0 && preserve_a)
2851 emit_addimm(a, -ofs, a);
2852}
2853
 2854 static void do_store_hword(int a, int ofs, int rt, int offset_reg, int preserve_a)
2855{
2856 if (offset_reg < 0) {
2857 emit_writehword_indexed(rt, ofs, a);
2858 return;
2859 }
2860 if (ofs != 0)
2861 emit_addimm(a, ofs, a);
2862 emit_strh_dualindexed(offset_reg, a, rt);
 2863 if (ofs != 0 && preserve_a)
2864 emit_addimm(a, -ofs, a);
2865}
2866
2867static void do_store_byte(int a, int rt, int offset_reg)
2868{
2869 if (offset_reg >= 0)
2870 emit_strb_dualindexed(offset_reg, a, rt);
2871 else
2872 emit_writebyte_indexed(rt, 0, a);
2873}
2874
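// LB/LH/LW/LBU/LHU. Constant addresses known to miss RAM go straight to
// inline_readstub(); otherwise the fast path is emitted and a LOADx_STUB
// is queued for the slow case. Loads to r0 still perform the access
// (the target could be an I/O FIFO) but discard the result.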
2330734f 2875static void load_assemble(int i, const struct regstat *i_regs, int ccadj_)
57871462 2876{
7c3a5182 2877 int s,tl,addr;
57871462 2878 int offset;
b14b6a8f 2879 void *jaddr=0;
5bf843dc 2880 int memtarget=0,c=0;
37387d8b 2881 int offset_reg = -1;
2882 int fastio_reg_override = -1;
81dbbf4c 2883 u_int reglist=get_host_reglist(i_regs->regmap);
cf95b4f0 2884 tl=get_reg(i_regs->regmap,dops[i].rt1);
2885 s=get_reg(i_regs->regmap,dops[i].rs1);
57871462 2886 offset=imm[i];
57871462 2887 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2888 if(s>=0) {
2889 c=(i_regs->wasconst>>s)&1;
af4ee1fe 2890 if (c) {
2891 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 2892 }
57871462 2893 }
57871462 2894 //printf("load_assemble: c=%d\n",c);
643aeae3 2895 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
57871462 2896 // FIXME: Even if the load is a NOP, we should check for pagefaults...
581335b0 2897 if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
cf95b4f0 2898 ||dops[i].rt1==0) {
5bf843dc 2899 // could be FIFO, must perform the read
f18c0f46 2900 // ||dummy read
5bf843dc 2901 assem_debug("(forced read)\n");
9de8a0c3 2902 tl=get_reg_temp(i_regs->regmap);
5bf843dc 2903 assert(tl>=0);
5bf843dc 2904 }
2905 if(offset||s<0||c) addr=tl;
2906 else addr=s;
9de8a0c3 2907 //if(tl<0) tl=get_reg_temp(i_regs->regmap);
535d208a 2908 if(tl>=0) {
2909 //printf("load_assemble: c=%d\n",c);
643aeae3 2910 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
535d208a 2911 assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2912 reglist&=~(1<<tl);
1edfcc68 2913 if(!c) {
1edfcc68 2914 #ifdef R29_HACK
2915 // Strmnnrmn's speed hack
cf95b4f0 2916 if(dops[i].rs1!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
1edfcc68 2917 #endif
2918 {
37387d8b 2919 jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
2920 &offset_reg, &fastio_reg_override);
535d208a 2921 }
1edfcc68 2922 }
37387d8b 2923 else if (ram_offset && memtarget) {
2924 offset_reg = get_ro_reg(i_regs, 0);
535d208a 2925 }
cf95b4f0 2926 int dummy=(dops[i].rt1==0)||(tl!=get_reg(i_regs->regmap,dops[i].rt1)); // ignore loads to r0 and unneeded reg
37387d8b 2927 switch (dops[i].opcode) {
2928 case 0x20: // LB
535d208a 2929 if(!c||memtarget) {
2930 if(!dummy) {
37387d8b 2931 int a = tl;
2932 if (!c) a = addr;
2933 if (fastio_reg_override >= 0)
2934 a = fastio_reg_override;
b1570849 2935
37387d8b 2936 if (offset_reg >= 0)
2937 emit_ldrsb_dualindexed(offset_reg, a, tl);
2938 else
2939 emit_movsbl_indexed(0, a, tl);
57871462 2940 }
535d208a 2941 if(jaddr)
2330734f 2942 add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
57871462 2943 }
535d208a 2944 else
2330734f 2945 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
37387d8b 2946 break;
2947 case 0x21: // LH
535d208a 2948 if(!c||memtarget) {
2949 if(!dummy) {
37387d8b 2950 int a = tl;
2951 if (!c) a = addr;
2952 if (fastio_reg_override >= 0)
2953 a = fastio_reg_override;
2954 if (offset_reg >= 0)
2955 emit_ldrsh_dualindexed(offset_reg, a, tl);
2956 else
2957 emit_movswl_indexed(0, a, tl);
57871462 2958 }
535d208a 2959 if(jaddr)
2330734f 2960 add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
57871462 2961 }
535d208a 2962 else
2330734f 2963 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
37387d8b 2964 break;
2965 case 0x23: // LW
535d208a 2966 if(!c||memtarget) {
2967 if(!dummy) {
37387d8b 2968 int a = addr;
2969 if (fastio_reg_override >= 0)
2970 a = fastio_reg_override;
2971 do_load_word(a, tl, offset_reg);
57871462 2972 }
535d208a 2973 if(jaddr)
2330734f 2974 add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
57871462 2975 }
535d208a 2976 else
2330734f 2977 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
37387d8b 2978 break;
2979 case 0x24: // LBU
535d208a 2980 if(!c||memtarget) {
2981 if(!dummy) {
37387d8b 2982 int a = tl;
2983 if (!c) a = addr;
2984 if (fastio_reg_override >= 0)
2985 a = fastio_reg_override;
b1570849 2986
37387d8b 2987 if (offset_reg >= 0)
2988 emit_ldrb_dualindexed(offset_reg, a, tl);
2989 else
2990 emit_movzbl_indexed(0, a, tl);
57871462 2991 }
535d208a 2992 if(jaddr)
2330734f 2993 add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
57871462 2994 }
535d208a 2995 else
2330734f 2996 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
37387d8b 2997 break;
2998 case 0x25: // LHU
535d208a 2999 if(!c||memtarget) {
3000 if(!dummy) {
37387d8b 3001 int a = tl;
3002 if(!c) a = addr;
3003 if (fastio_reg_override >= 0)
3004 a = fastio_reg_override;
3005 if (offset_reg >= 0)
3006 emit_ldrh_dualindexed(offset_reg, a, tl);
3007 else
3008 emit_movzwl_indexed(0, a, tl);
57871462 3009 }
535d208a 3010 if(jaddr)
2330734f 3011 add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
57871462 3012 }
535d208a 3013 else
2330734f 3014 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
37387d8b 3015 break;
3016 case 0x27: // LWU
3017 case 0x37: // LD
3018 default:
9c45ca93 3019 assert(0);
57871462 3020 }
535d208a 3021 }
37387d8b 3022 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
d1e4ebd9 3023 host_tempreg_release();
57871462 3024}
3025
3026#ifndef loadlr_assemble
2330734f 3027static void loadlr_assemble(int i, const struct regstat *i_regs, int ccadj_)
57871462 3028{
3968e69e 3029 int s,tl,temp,temp2,addr;
3030 int offset;
3031 void *jaddr=0;
3032 int memtarget=0,c=0;
37387d8b 3033 int offset_reg = -1;
3034 int fastio_reg_override = -1;
81dbbf4c 3035 u_int reglist=get_host_reglist(i_regs->regmap);
cf95b4f0 3036 tl=get_reg(i_regs->regmap,dops[i].rt1);
3037 s=get_reg(i_regs->regmap,dops[i].rs1);
9de8a0c3 3038 temp=get_reg_temp(i_regs->regmap);
3968e69e 3039 temp2=get_reg(i_regs->regmap,FTEMP);
3040 addr=get_reg(i_regs->regmap,AGEN1+(i&1));
3041 assert(addr<0);
3042 offset=imm[i];
3968e69e 3043 reglist|=1<<temp;
3044 if(offset||s<0||c) addr=temp2;
3045 else addr=s;
3046 if(s>=0) {
3047 c=(i_regs->wasconst>>s)&1;
3048 if(c) {
3049 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3050 }
3051 }
3052 if(!c) {
3053 emit_shlimm(addr,3,temp);
cf95b4f0 3054 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
3968e69e 3055 emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR
3056 }else{
3057 emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
3058 }
37387d8b 3059 jaddr = emit_fastpath_cmp_jump(i, i_regs, temp2,
3060 &offset_reg, &fastio_reg_override);
3968e69e 3061 }
3062 else {
37387d8b 3063 if (ram_offset && memtarget) {
3064 offset_reg = get_ro_reg(i_regs, 0);
3968e69e 3065 }
cf95b4f0 3066 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
3968e69e 3067 emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
3068 }else{
3069 emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR
3070 }
3071 }
cf95b4f0 3072 if (dops[i].opcode==0x22||dops[i].opcode==0x26) { // LWL/LWR
3968e69e 3073 if(!c||memtarget) {
37387d8b 3074 int a = temp2;
3075 if (fastio_reg_override >= 0)
3076 a = fastio_reg_override;
3077 do_load_word(a, temp2, offset_reg);
3078 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3079 host_tempreg_release();
2330734f 3080 if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj_,reglist);
3968e69e 3081 }
3082 else
2330734f 3083 inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj_,reglist);
cf95b4f0 3084 if(dops[i].rt1) {
3968e69e 3085 assert(tl>=0);
3086 emit_andimm(temp,24,temp);
cf95b4f0 3087 if (dops[i].opcode==0x22) // LWL
3968e69e 3088 emit_xorimm(temp,24,temp);
3089 host_tempreg_acquire();
3090 emit_movimm(-1,HOST_TEMPREG);
cf95b4f0 3091 if (dops[i].opcode==0x26) {
3968e69e 3092 emit_shr(temp2,temp,temp2);
3093 emit_bic_lsr(tl,HOST_TEMPREG,temp,tl);
3094 }else{
3095 emit_shl(temp2,temp,temp2);
3096 emit_bic_lsl(tl,HOST_TEMPREG,temp,tl);
3097 }
3098 host_tempreg_release();
3099 emit_or(temp2,tl,tl);
3100 }
cf95b4f0 3101 //emit_storereg(dops[i].rt1,tl); // DEBUG
3968e69e 3102 }
cf95b4f0 3103 if (dops[i].opcode==0x1A||dops[i].opcode==0x1B) { // LDL/LDR
3968e69e 3104 assert(0);
3105 }
57871462 3106}
3107#endif
3108
2330734f 3109static void store_assemble(int i, const struct regstat *i_regs, int ccadj_)
57871462 3110{
9c45ca93 3111 int s,tl;
57871462 3112 int addr,temp;
3113 int offset;
b14b6a8f 3114 void *jaddr=0;
37387d8b 3115 enum stub_type type=0;
666a299d 3116 int memtarget=0,c=0;
57871462 3117 int agr=AGEN1+(i&1);
37387d8b 3118 int offset_reg = -1;
3119 int fastio_reg_override = -1;
81dbbf4c 3120 u_int reglist=get_host_reglist(i_regs->regmap);
cf95b4f0 3121 tl=get_reg(i_regs->regmap,dops[i].rs2);
3122 s=get_reg(i_regs->regmap,dops[i].rs1);
57871462 3123 temp=get_reg(i_regs->regmap,agr);
9de8a0c3 3124 if(temp<0) temp=get_reg_temp(i_regs->regmap);
57871462 3125 offset=imm[i];
3126 if(s>=0) {
3127 c=(i_regs->wasconst>>s)&1;
af4ee1fe 3128 if(c) {
3129 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 3130 }
57871462 3131 }
3132 assert(tl>=0);
3133 assert(temp>=0);
57871462 3134 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3135 if(offset||s<0||c) addr=temp;
3136 else addr=s;
37387d8b 3137 if (!c) {
3138 jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
3139 &offset_reg, &fastio_reg_override);
1edfcc68 3140 }
37387d8b 3141 else if (ram_offset && memtarget) {
3142 offset_reg = get_ro_reg(i_regs, 0);
57871462 3143 }
3144
37387d8b 3145 switch (dops[i].opcode) {
3146 case 0x28: // SB
57871462 3147 if(!c||memtarget) {
37387d8b 3148 int a = temp;
3149 if (!c) a = addr;
3150 if (fastio_reg_override >= 0)
3151 a = fastio_reg_override;
3152 do_store_byte(a, tl, offset_reg);
3153 }
3154 type = STOREB_STUB;
3155 break;
3156 case 0x29: // SH
57871462 3157 if(!c||memtarget) {
37387d8b 3158 int a = temp;
3159 if (!c) a = addr;
3160 if (fastio_reg_override >= 0)
3161 a = fastio_reg_override;
3162 do_store_hword(a, 0, tl, offset_reg, 1);
3163 }
3164 type = STOREH_STUB;
3165 break;
3166 case 0x2B: // SW
dadf55f2 3167 if(!c||memtarget) {
37387d8b 3168 int a = addr;
3169 if (fastio_reg_override >= 0)
3170 a = fastio_reg_override;
3171 do_store_word(a, 0, tl, offset_reg, 1);
3172 }
3173 type = STOREW_STUB;
3174 break;
3175 case 0x3F: // SD
3176 default:
9c45ca93 3177 assert(0);
57871462 3178 }
37387d8b 3179 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
d1e4ebd9 3180 host_tempreg_release();
b96d3df7 3181 if(jaddr) {
3182 // PCSX store handlers don't check invcode again
3183 reglist|=1<<addr;
2330734f 3184 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
b96d3df7 3185 jaddr=0;
3186 }
cf95b4f0 3187 if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
57871462 3188 if(!c||memtarget) {
3189 #ifdef DESTRUCTIVE_SHIFT
3190 // The x86 shift operation is 'destructive'; it overwrites the
3191 // source register, so we need to make a copy first and use that.
3192 addr=temp;
3193 #endif
3194 #if defined(HOST_IMM8)
3195 int ir=get_reg(i_regs->regmap,INVCP);
3196 assert(ir>=0);
3197 emit_cmpmem_indexedsr12_reg(ir,addr,1);
3198 #else
643aeae3 3199 emit_cmpmem_indexedsr12_imm(invalid_code,addr,1);
57871462 3200 #endif
882a08fc 3201 #ifdef INVALIDATE_USE_COND_CALL
0bbd1454 3202 emit_callne(invalidate_addr_reg[addr]);
3203 #else
b14b6a8f 3204 void *jaddr2 = out;
57871462 3205 emit_jne(0);
b14b6a8f 3206 add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),addr,0,0,0);
0bbd1454 3207 #endif
57871462 3208 }
3209 }
7a518516 3210 u_int addr_val=constmap[i][s]+offset;
3eaa7048 3211 if(jaddr) {
2330734f 3212 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3eaa7048 3213 } else if(c&&!memtarget) {
2330734f 3214 inline_writestub(type,i,addr_val,i_regs->regmap,dops[i].rs2,ccadj_,reglist);
7a518516 3215 }
3216 // basic current block modification detection..
3217 // not looking back as that should be in mips cache already
3968e69e 3218 // (see Spyro2 title->attract mode)
7a518516 3219 if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
c43b5311 3220 SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
7a518516 3221 assert(i_regs->regmap==regs[i].regmap); // not delay slot
3222 if(i_regs->regmap==regs[i].regmap) {
ad49de89 3223 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3224 wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty);
7a518516 3225 emit_movimm(start+i*4+4,0);
643aeae3 3226 emit_writeword(0,&pcaddr);
d1e4ebd9 3227 emit_addimm(HOST_CCREG,2,HOST_CCREG);
104df9d3 3228 emit_far_call(ndrc_get_addr_ht);
d1e4ebd9 3229 emit_jmpreg(0);
7a518516 3230 }
3eaa7048 3231 }
57871462 3232}
3233
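// SWL/SWR (unaligned stores). The low two address bits select one of
// four cases at run time; each case stores the byte/halfword/word pieces
// the instruction contributes, rotating the source register as needed
// and restoring it afterwards.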
2330734f 3234static void storelr_assemble(int i, const struct regstat *i_regs, int ccadj_)
57871462 3235{
9c45ca93 3236 int s,tl;
57871462 3237 int temp;
57871462 3238 int offset;
b14b6a8f 3239 void *jaddr=0;
37387d8b 3240 void *case1, *case23, *case3;
df4dc2b1 3241 void *done0, *done1, *done2;
af4ee1fe 3242 int memtarget=0,c=0;
fab5d06d 3243 int agr=AGEN1+(i&1);
37387d8b 3244 int offset_reg = -1;
81dbbf4c 3245 u_int reglist=get_host_reglist(i_regs->regmap);
cf95b4f0 3246 tl=get_reg(i_regs->regmap,dops[i].rs2);
3247 s=get_reg(i_regs->regmap,dops[i].rs1);
fab5d06d 3248 temp=get_reg(i_regs->regmap,agr);
9de8a0c3 3249 if(temp<0) temp=get_reg_temp(i_regs->regmap);
57871462 3250 offset=imm[i];
3251 if(s>=0) {
3252 c=(i_regs->isconst>>s)&1;
af4ee1fe 3253 if(c) {
3254 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
af4ee1fe 3255 }
57871462 3256 }
3257 assert(tl>=0);
535d208a 3258 assert(temp>=0);
1edfcc68 3259 if(!c) {
3260 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3261 if(!offset&&s!=temp) emit_mov(s,temp);
b14b6a8f 3262 jaddr=out;
1edfcc68 3263 emit_jno(0);
3264 }
3265 else
3266 {
cf95b4f0 3267 if(!memtarget||!dops[i].rs1) {
b14b6a8f 3268 jaddr=out;
535d208a 3269 emit_jmp(0);
57871462 3270 }
535d208a 3271 }
37387d8b 3272 if (ram_offset)
3273 offset_reg = get_ro_reg(i_regs, 0);
535d208a 3274
cf95b4f0 3275 if (dops[i].opcode==0x2C||dops[i].opcode==0x2D) { // SDL/SDR
9c45ca93 3276 assert(0);
535d208a 3277 }
57871462 3278
535d208a 3279 emit_testimm(temp,2);
37387d8b 3280 case23=out;
535d208a 3281 emit_jne(0);
3282 emit_testimm(temp,1);
df4dc2b1 3283 case1=out;
535d208a 3284 emit_jne(0);
3285 // 0
37387d8b 3286 if (dops[i].opcode == 0x2A) { // SWL
3287 // Write msb into least significant byte
3288 if (dops[i].rs2) emit_rorimm(tl, 24, tl);
3289 do_store_byte(temp, tl, offset_reg);
3290 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
535d208a 3291 }
37387d8b 3292 else if (dops[i].opcode == 0x2E) { // SWR
3293 // Write entire word
3294 do_store_word(temp, 0, tl, offset_reg, 1);
535d208a 3295 }
37387d8b 3296 done0 = out;
535d208a 3297 emit_jmp(0);
3298 // 1
df4dc2b1 3299 set_jump_target(case1, out);
37387d8b 3300 if (dops[i].opcode == 0x2A) { // SWL
3301 // Write two msb into two least significant bytes
3302 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
3303 do_store_hword(temp, -1, tl, offset_reg, 0);
3304 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
535d208a 3305 }
37387d8b 3306 else if (dops[i].opcode == 0x2E) { // SWR
3307 // Write 3 lsb into three most significant bytes
3308 do_store_byte(temp, tl, offset_reg);
3309 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3310 do_store_hword(temp, 1, tl, offset_reg, 0);
3311 if (dops[i].rs2) emit_rorimm(tl, 24, tl);
535d208a 3312 }
df4dc2b1 3313 done1=out;
535d208a 3314 emit_jmp(0);
37387d8b 3315 // 2,3
3316 set_jump_target(case23, out);
535d208a 3317 emit_testimm(temp,1);
37387d8b 3318 case3 = out;
535d208a 3319 emit_jne(0);
37387d8b 3320 // 2
cf95b4f0 3321 if (dops[i].opcode==0x2A) { // SWL
37387d8b 3322 // Write 3 msb into three least significant bytes
3323 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3324 do_store_hword(temp, -2, tl, offset_reg, 1);
3325 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
3326 do_store_byte(temp, tl, offset_reg);
3327 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
535d208a 3328 }
37387d8b 3329 else if (dops[i].opcode == 0x2E) { // SWR
3330 // Write two lsb into two most significant bytes
3331 do_store_hword(temp, 0, tl, offset_reg, 1);
535d208a 3332 }
37387d8b 3333 done2 = out;
535d208a 3334 emit_jmp(0);
3335 // 3
df4dc2b1 3336 set_jump_target(case3, out);
37387d8b 3337 if (dops[i].opcode == 0x2A) { // SWL
3338 do_store_word(temp, -3, tl, offset_reg, 0);
535d208a 3339 }
37387d8b 3340 else if (dops[i].opcode == 0x2E) { // SWR
3341 do_store_byte(temp, tl, offset_reg);
535d208a 3342 }
df4dc2b1 3343 set_jump_target(done0, out);
3344 set_jump_target(done1, out);
3345 set_jump_target(done2, out);
37387d8b 3346 if (offset_reg == HOST_TEMPREG)
3347 host_tempreg_release();
535d208a 3348 if(!c||!memtarget)
2330734f 3349 add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj_,reglist);
cf95b4f0 3350 if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
57871462 3351 #if defined(HOST_IMM8)
3352 int ir=get_reg(i_regs->regmap,INVCP);
3353 assert(ir>=0);
3354 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3355 #else
643aeae3 3356 emit_cmpmem_indexedsr12_imm(invalid_code,temp,1);
57871462 3357 #endif
882a08fc 3358 #ifdef INVALIDATE_USE_COND_CALL
535d208a 3359 emit_callne(invalidate_addr_reg[temp]);
3360 #else
b14b6a8f 3361 void *jaddr2 = out;
57871462 3362 emit_jne(0);
b14b6a8f 3363 add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),temp,0,0,0);
535d208a 3364 #endif
57871462 3365 }
57871462 3366}
3367
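// MFC0 just reads the mirrored cop0 register array. MTC0 calls out to
// pcsx_mtc0(); writes to SR/CAUSE (12/13) may unmask a pending
// interrupt, so the cycle count is synced and an immediate exception
// exit is emitted when one is raised.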
2330734f 3368static void cop0_assemble(int i, const struct regstat *i_regs, int ccadj_)
8062d65a 3369{
cf95b4f0 3370 if(dops[i].opcode2==0) // MFC0
8062d65a 3371 {
cf95b4f0 3372 signed char t=get_reg(i_regs->regmap,dops[i].rt1);
8062d65a 3373 u_int copr=(source[i]>>11)&0x1f;
3374 //assert(t>=0); // Why does this happen? OOT is weird
cf95b4f0 3375 if(t>=0&&dops[i].rt1!=0) {
8062d65a 3376 emit_readword(&reg_cop0[copr],t);
3377 }
3378 }
cf95b4f0 3379 else if(dops[i].opcode2==4) // MTC0
8062d65a 3380 {
cf95b4f0 3381 signed char s=get_reg(i_regs->regmap,dops[i].rs1);
8062d65a 3382 char copr=(source[i]>>11)&0x1f;
3383 assert(s>=0);
cf95b4f0 3384 wb_register(dops[i].rs1,i_regs->regmap,i_regs->dirty);
8062d65a 3385 if(copr==9||copr==11||copr==12||copr==13) {
3386 emit_readword(&last_count,HOST_TEMPREG);
3387 emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
3388 emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
2330734f 3389 emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
8062d65a 3390 emit_writeword(HOST_CCREG,&Count);
3391 }
3392 // What a mess. The status register (12) can enable interrupts,
3393 // so needs a special case to handle a pending interrupt.
3394 // The interrupt must be taken immediately, because a subsequent
3395 // instruction might disable interrupts again.
3396 if(copr==12||copr==13) {
3397 if (is_delayslot) {
3398 // burn cycles to cause cc_interrupt, which will
3399 // reschedule next_interupt. Relies on CCREG from above.
3400 assem_debug("MTC0 DS %d\n", copr);
3401 emit_writeword(HOST_CCREG,&last_count);
3402 emit_movimm(0,HOST_CCREG);
3403 emit_storereg(CCREG,HOST_CCREG);
cf95b4f0 3404 emit_loadreg(dops[i].rs1,1);
8062d65a 3405 emit_movimm(copr,0);
2a014d73 3406 emit_far_call(pcsx_mtc0_ds);
cf95b4f0 3407 emit_loadreg(dops[i].rs1,s);
8062d65a 3408 return;
3409 }
3410 emit_movimm(start+i*4+4,HOST_TEMPREG);
3411 emit_writeword(HOST_TEMPREG,&pcaddr);
3412 emit_movimm(0,HOST_TEMPREG);
3413 emit_writeword(HOST_TEMPREG,&pending_exception);
3414 }
8062d65a 3415 if(s==HOST_CCREG)
cf95b4f0 3416 emit_loadreg(dops[i].rs1,1);
8062d65a 3417 else if(s!=1)
3418 emit_mov(s,1);
3419 emit_movimm(copr,0);
2a014d73 3420 emit_far_call(pcsx_mtc0);
8062d65a 3421 if(copr==9||copr==11||copr==12||copr==13) {
3422 emit_readword(&Count,HOST_CCREG);
3423 emit_readword(&next_interupt,HOST_TEMPREG);
2330734f 3424 emit_addimm(HOST_CCREG,-ccadj_,HOST_CCREG);
8062d65a 3425 emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3426 emit_writeword(HOST_TEMPREG,&last_count);
3427 emit_storereg(CCREG,HOST_CCREG);
3428 }
3429 if(copr==12||copr==13) {
3430 assert(!is_delayslot);
3431 emit_readword(&pending_exception,14);
3432 emit_test(14,14);
d1e4ebd9 3433 void *jaddr = out;
3434 emit_jeq(0);
3435 emit_readword(&pcaddr, 0);
3436 emit_addimm(HOST_CCREG,2,HOST_CCREG);
104df9d3 3437 emit_far_call(ndrc_get_addr_ht);
d1e4ebd9 3438 emit_jmpreg(0);
3439 set_jump_target(jaddr, out);
8062d65a 3440 }
cf95b4f0 3441 emit_loadreg(dops[i].rs1,s);
8062d65a 3442 }
3443 else
3444 {
cf95b4f0 3445 assert(dops[i].opcode2==0x10);
8062d65a 3446 //if((source[i]&0x3f)==0x10) // RFE
3447 {
3448 emit_readword(&Status,0);
3449 emit_andimm(0,0x3c,1);
3450 emit_andimm(0,~0xf,0);
3451 emit_orrshr_imm(1,2,0);
3452 emit_writeword(0,&Status);
3453 }
3454 }
3455}
3456
2330734f 3457static void cop1_unusable(int i, const struct regstat *i_regs)
8062d65a 3458{
 3459 // XXX: should just do the exception instead
3460 //if(!cop1_usable)
3461 {
3462 void *jaddr=out;
3463 emit_jmp(0);
3464 add_stub_r(FP_STUB,jaddr,out,i,0,i_regs,is_delayslot,0);
3465 }
3466}
3467
2330734f 3468static void cop1_assemble(int i, const struct regstat *i_regs)
8062d65a 3469{
3470 cop1_unusable(i, i_regs);
3471}
3472
2330734f 3473static void c1ls_assemble(int i, const struct regstat *i_regs)
57871462 3474{
3d624f89 3475 cop1_unusable(i, i_regs);
57871462 3476}
3477
8062d65a 3478// FP_STUB
3479static void do_cop1stub(int n)
3480{
3481 literal_pool(256);
3482 assem_debug("do_cop1stub %x\n",start+stubs[n].a*4);
3483 set_jump_target(stubs[n].addr, out);
3484 int i=stubs[n].a;
3485// int rs=stubs[n].b;
3486 struct regstat *i_regs=(struct regstat *)stubs[n].c;
3487 int ds=stubs[n].d;
3488 if(!ds) {
3489 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3490 //if(i_regs!=&regs[i]) printf("oops: regs[i]=%x i_regs=%x",(int)&regs[i],(int)i_regs);
3491 }
3492 //else {printf("fp exception in delay slot\n");}
3493 wb_dirtys(i_regs->regmap_entry,i_regs->wasdirty);
3494 if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
3495 emit_movimm(start+(i-ds)*4,EAX); // Get PC
2330734f 3496 emit_addimm(HOST_CCREG,ccadj[i],HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
2a014d73 3497 emit_far_jump(ds?fp_exception_ds:fp_exception);
8062d65a 3498}
3499
e3c6bdb5 3500static int cop2_is_stalling_op(int i, int *cycles)
3501{
cf95b4f0 3502 if (dops[i].opcode == 0x3a) { // SWC2
e3c6bdb5 3503 *cycles = 0;
3504 return 1;
3505 }
cf95b4f0 3506 if (dops[i].itype == COP2 && (dops[i].opcode2 == 0 || dops[i].opcode2 == 2)) { // MFC2/CFC2
e3c6bdb5 3507 *cycles = 0;
3508 return 1;
3509 }
cf95b4f0 3510 if (dops[i].itype == C2OP) {
e3c6bdb5 3511 *cycles = gte_cycletab[source[i] & 0x3f];
3512 return 1;
3513 }
3514 // ... what about MTC2/CTC2/LWC2?
3515 return 0;
3516}
3517
3518#if 0
3519static void log_gte_stall(int stall, u_int cycle)
3520{
3521 if ((u_int)stall <= 44)
3522 printf("x stall %2d %u\n", stall, cycle + last_count);
e3c6bdb5 3523}
3524
3525static void emit_log_gte_stall(int i, int stall, u_int reglist)
3526{
3527 save_regs(reglist);
3528 if (stall > 0)
3529 emit_movimm(stall, 0);
3530 else
3531 emit_mov(HOST_TEMPREG, 0);
2330734f 3532 emit_addimm(HOST_CCREG, ccadj[i], 1);
e3c6bdb5 3533 emit_far_call(log_gte_stall);
3534 restore_regs(reglist);
3535}
3536#endif
3537
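// GTE (COP2) stall accounting. Walk back to the previous GTE op: if the
// elapsed cycles are known at assembly time the remaining stall is added
// as a constant, otherwise a runtime compare against psxRegs.gteBusyCycle
// is emitted. For ops with a nonzero latency the finish time is also
// recorded so a later op can stall on it.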
32631e6a 3538static void cop2_do_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist)
81dbbf4c 3539{
e3c6bdb5 3540 int j = i, other_gte_op_cycles = -1, stall = -MAXBLOCK, cycles_passed;
3541 int rtmp = reglist_find_free(reglist);
3542
32631e6a 3543 if (HACK_ENABLED(NDHACK_NO_STALLS))
81dbbf4c 3544 return;
81dbbf4c 3545 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
3546 // happens occasionally... cc evicted? Don't bother then
3547 //printf("no cc %08x\n", start + i*4);
3548 return;
3549 }
cf95b4f0 3550 if (!dops[i].bt) {
e3c6bdb5 3551 for (j = i - 1; j >= 0; j--) {
cf95b4f0 3552 //if (dops[j].is_ds) break;
3553 if (cop2_is_stalling_op(j, &other_gte_op_cycles) || dops[j].bt)
e3c6bdb5 3554 break;
2330734f 3555 if (j > 0 && ccadj[j - 1] > ccadj[j])
3556 break;
e3c6bdb5 3557 }
32631e6a 3558 j = max(j, 0);
e3c6bdb5 3559 }
2330734f 3560 cycles_passed = ccadj[i] - ccadj[j];
e3c6bdb5 3561 if (other_gte_op_cycles >= 0)
3562 stall = other_gte_op_cycles - cycles_passed;
3563 else if (cycles_passed >= 44)
3564 stall = 0; // can't stall
3565 if (stall == -MAXBLOCK && rtmp >= 0) {
3566 // unknown stall, do the expensive runtime check
32631e6a 3567 assem_debug("; cop2_do_stall_check\n");
e3c6bdb5 3568#if 0 // too slow
3569 save_regs(reglist);
3570 emit_movimm(gte_cycletab[op], 0);
2330734f 3571 emit_addimm(HOST_CCREG, ccadj[i], 1);
e3c6bdb5 3572 emit_far_call(call_gteStall);
3573 restore_regs(reglist);
3574#else
3575 host_tempreg_acquire();
3576 emit_readword(&psxRegs.gteBusyCycle, rtmp);
2330734f 3577 emit_addimm(rtmp, -ccadj[i], rtmp);
e3c6bdb5 3578 emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
3579 emit_cmpimm(HOST_TEMPREG, 44);
3580 emit_cmovb_reg(rtmp, HOST_CCREG);
3581 //emit_log_gte_stall(i, 0, reglist);
3582 host_tempreg_release();
3583#endif
3584 }
3585 else if (stall > 0) {
3586 //emit_log_gte_stall(i, stall, reglist);
3587 emit_addimm(HOST_CCREG, stall, HOST_CCREG);
3588 }
3589
3590 // save gteBusyCycle, if needed
3591 if (gte_cycletab[op] == 0)
3592 return;
3593 other_gte_op_cycles = -1;
3594 for (j = i + 1; j < slen; j++) {
3595 if (cop2_is_stalling_op(j, &other_gte_op_cycles))
3596 break;
fe807a8a 3597 if (dops[j].is_jump) {
e3c6bdb5 3598 // check ds
3599 if (j + 1 < slen && cop2_is_stalling_op(j + 1, &other_gte_op_cycles))
3600 j++;
3601 break;
3602 }
3603 }
3604 if (other_gte_op_cycles >= 0)
3605 // will handle stall when assembling that op
3606 return;
2330734f 3607 cycles_passed = ccadj[min(j, slen -1)] - ccadj[i];
e3c6bdb5 3608 if (cycles_passed >= 44)
3609 return;
3610 assem_debug("; save gteBusyCycle\n");
3611 host_tempreg_acquire();
3612#if 0
3613 emit_readword(&last_count, HOST_TEMPREG);
3614 emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG);
2330734f 3615 emit_addimm(HOST_TEMPREG, ccadj[i], HOST_TEMPREG);
e3c6bdb5 3616 emit_addimm(HOST_TEMPREG, gte_cycletab[op], HOST_TEMPREG);
3617 emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
3618#else
2330734f 3619 emit_addimm(HOST_CCREG, ccadj[i] + gte_cycletab[op], HOST_TEMPREG);
e3c6bdb5 3620 emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
3621#endif
3622 host_tempreg_release();
81dbbf4c 3623}
3624
32631e6a 3625static int is_mflohi(int i)
3626{
cf95b4f0 3627 return (dops[i].itype == MOV && (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG));
32631e6a 3628}
3629
3630static int check_multdiv(int i, int *cycles)
3631{
cf95b4f0 3632 if (dops[i].itype != MULTDIV)
32631e6a 3633 return 0;
cf95b4f0 3634 if (dops[i].opcode2 == 0x18 || dops[i].opcode2 == 0x19) // MULT(U)
32631e6a 3635 *cycles = 11; // approx from 7 11 14
3636 else
3637 *cycles = 37;
3638 return 1;
3639}
3640
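// Emitted after a MULT/DIV: if no MFHI/MFLO follows within this block (which
// would resolve the stall itself in multdiv_do_stall), record the cycle at
// which the result becomes available in psxRegs.muldivBusyCycle.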
2330734f 3641static void multdiv_prepare_stall(int i, const struct regstat *i_regs, int ccadj_)
32631e6a 3642{
3643 int j, found = 0, c = 0;
3644 if (HACK_ENABLED(NDHACK_NO_STALLS))
3645 return;
3646 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
3647 // happens occasionally... cc evicted? Don't bother then
3648 return;
3649 }
3650 for (j = i + 1; j < slen; j++) {
cf95b4f0 3651 if (dops[j].bt)
32631e6a 3652 break;
3653 if ((found = is_mflohi(j)))
3654 break;
fe807a8a 3655 if (dops[j].is_jump) {
32631e6a 3656 // check ds
3657 if (j + 1 < slen && (found = is_mflohi(j + 1)))
3658 j++;
3659 break;
3660 }
3661 }
3662 if (found)
3663 // handle all in multdiv_do_stall()
3664 return;
3665 check_multdiv(i, &c);
3666 assert(c > 0);
3667 assem_debug("; muldiv prepare stall %d\n", c);
3668 host_tempreg_acquire();
2330734f 3669 emit_addimm(HOST_CCREG, ccadj_ + c, HOST_TEMPREG);
32631e6a 3670 emit_writeword(HOST_TEMPREG, &psxRegs.muldivBusyCycle);
3671 host_tempreg_release();
3672}
3673
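// Emitted at MFHI/MFLO: look back for the MULT/DIV in this block and add the
// statically known remaining stall to HOST_CCREG, or fall back to a runtime
// compare against psxRegs.muldivBusyCycle (37 being the worst-case DIV time).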
3674static void multdiv_do_stall(int i, const struct regstat *i_regs)
3675{
3676 int j, known_cycles = 0;
3677 u_int reglist = get_host_reglist(i_regs->regmap);
9de8a0c3 3678 int rtmp = get_reg_temp(i_regs->regmap);
32631e6a 3679 if (rtmp < 0)
3680 rtmp = reglist_find_free(reglist);
3681 if (HACK_ENABLED(NDHACK_NO_STALLS))
3682 return;
3683 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG || rtmp < 0) {
3684 // happens occasionally... cc evicted? Don't bother then
3685 //printf("no cc/rtmp %08x\n", start + i*4);
3686 return;
3687 }
cf95b4f0 3688 if (!dops[i].bt) {
32631e6a 3689 for (j = i - 1; j >= 0; j--) {
cf95b4f0 3690 if (dops[j].is_ds) break;
2330734f 3691 if (check_multdiv(j, &known_cycles))
32631e6a 3692 break;
3693 if (is_mflohi(j))
3694 // already handled by this op
3695 return;
2330734f 3696 if (dops[j].bt || (j > 0 && ccadj[j - 1] > ccadj[j]))
3697 break;
32631e6a 3698 }
3699 j = max(j, 0);
3700 }
3701 if (known_cycles > 0) {
2330734f 3702 known_cycles -= ccadj[i] - ccadj[j];
32631e6a 3703 assem_debug("; muldiv stall resolved %d\n", known_cycles);
3704 if (known_cycles > 0)
3705 emit_addimm(HOST_CCREG, known_cycles, HOST_CCREG);
3706 return;
3707 }
3708 assem_debug("; muldiv stall unresolved\n");
3709 host_tempreg_acquire();
3710 emit_readword(&psxRegs.muldivBusyCycle, rtmp);
2330734f 3711 emit_addimm(rtmp, -ccadj[i], rtmp);
32631e6a 3712 emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
3713 emit_cmpimm(HOST_TEMPREG, 37);
3714 emit_cmovb_reg(rtmp, HOST_CCREG);
3715 //emit_log_gte_stall(i, 0, reglist);
3716 host_tempreg_release();
3717}
3718
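// Read GTE data register 'copr' into host reg tl, applying the read quirks:
// sign-extend the signed 16-bit regs, mask the unsigned 16-bit ones, mirror
// SXY2 for reads of reg 15 (SXYP), and compose regs 28/29 (IRGB/ORGB).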
8062d65a 3719static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
3720{
3721 switch (copr) {
3722 case 1:
3723 case 3:
3724 case 5:
3725 case 8:
3726 case 9:
3727 case 10:
3728 case 11:
3729 emit_readword(&reg_cop2d[copr],tl);
3730 emit_signextend16(tl,tl);
3731 emit_writeword(tl,&reg_cop2d[copr]); // hmh
3732 break;
3733 case 7:
3734 case 16:
3735 case 17:
3736 case 18:
3737 case 19:
3738 emit_readword(&reg_cop2d[copr],tl);
3739 emit_andimm(tl,0xffff,tl);
3740 emit_writeword(tl,&reg_cop2d[copr]);
3741 break;
3742 case 15:
3743 emit_readword(&reg_cop2d[14],tl); // SXY2
3744 emit_writeword(tl,&reg_cop2d[copr]);
3745 break;
3746 case 28:
3747 case 29:
3968e69e 3748 c2op_mfc2_29_assemble(tl,temp);
8062d65a 3749 break;
3750 default:
3751 emit_readword(&reg_cop2d[copr],tl);
3752 break;
3753 }
3754}
3755
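// Write host reg sl to GTE data register 'copr', emulating write side effects:
// reg 15 (SXYP) pushes the SXY fifo, reg 28 (IRGB) unpacks the 5:5:5 colour
// into IR1-IR3, reg 30 (LZCS) also computes the leading zero/one count for
// LZCR, and reg 31 (LZCR) is read-only.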
3756static void cop2_put_dreg(u_int copr,signed char sl,signed char temp)
3757{
3758 switch (copr) {
3759 case 15:
3760 emit_readword(&reg_cop2d[13],temp); // SXY1
3761 emit_writeword(sl,&reg_cop2d[copr]);
3762 emit_writeword(temp,&reg_cop2d[12]); // SXY0
3763 emit_readword(&reg_cop2d[14],temp); // SXY2
3764 emit_writeword(sl,&reg_cop2d[14]);
3765 emit_writeword(temp,&reg_cop2d[13]); // SXY1
3766 break;
3767 case 28:
3768 emit_andimm(sl,0x001f,temp);
3769 emit_shlimm(temp,7,temp);
3770 emit_writeword(temp,&reg_cop2d[9]);
3771 emit_andimm(sl,0x03e0,temp);
3772 emit_shlimm(temp,2,temp);
3773 emit_writeword(temp,&reg_cop2d[10]);
3774 emit_andimm(sl,0x7c00,temp);
3775 emit_shrimm(temp,3,temp);
3776 emit_writeword(temp,&reg_cop2d[11]);
3777 emit_writeword(sl,&reg_cop2d[28]);
3778 break;
3779 case 30:
3968e69e 3780 emit_xorsar_imm(sl,sl,31,temp);
be516ebe 3781#if defined(HAVE_ARMV5) || defined(__aarch64__)
8062d65a 3782 emit_clz(temp,temp);
3783#else
3784 emit_movs(temp,HOST_TEMPREG);
3785 emit_movimm(0,temp);
3786 emit_jeq((int)out+4*4);
3787 emit_addpl_imm(temp,1,temp);
3788 emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG);
3789 emit_jns((int)out-2*4);
3790#endif
3791 emit_writeword(sl,&reg_cop2d[30]);
3792 emit_writeword(temp,&reg_cop2d[31]);
3793 break;
3794 case 31:
3795 break;
3796 default:
3797 emit_writeword(sl,&reg_cop2d[copr]);
3798 break;
3799 }
3800}
3801
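// LWC2/SWC2: generate the address, do the GTE stall check, then either take
// the direct fast path or go through a LOADW/STOREW stub. SWC2 also runs the
// self-modifying-code check on the written address; LWC2 finally stores the
// loaded word into the GTE data register.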
2330734f 3802static void c2ls_assemble(int i, const struct regstat *i_regs, int ccadj_)
b9b61529 3803{
3804 int s,tl;
3805 int ar;
3806 int offset;
1fd1aceb 3807 int memtarget=0,c=0;
b14b6a8f 3808 void *jaddr2=NULL;
3809 enum stub_type type;
b9b61529 3810 int agr=AGEN1+(i&1);
37387d8b 3811 int offset_reg = -1;
3812 int fastio_reg_override = -1;
81dbbf4c 3813 u_int reglist=get_host_reglist(i_regs->regmap);
b9b61529 3814 u_int copr=(source[i]>>16)&0x1f;
cf95b4f0 3815 s=get_reg(i_regs->regmap,dops[i].rs1);
b9b61529 3816 tl=get_reg(i_regs->regmap,FTEMP);
3817 offset=imm[i];
cf95b4f0 3818 assert(dops[i].rs1>0);
b9b61529 3819 assert(tl>=0);
b9b61529 3820
b9b61529 3821 if(i_regs->regmap[HOST_CCREG]==CCREG)
3822 reglist&=~(1<<HOST_CCREG);
3823
3824 // get the address
cf95b4f0 3825 if (dops[i].opcode==0x3a) { // SWC2
b9b61529 3826 ar=get_reg(i_regs->regmap,agr);
9de8a0c3 3827 if(ar<0) ar=get_reg_temp(i_regs->regmap);
b9b61529 3828 reglist|=1<<ar;
3829 } else { // LWC2
3830 ar=tl;
3831 }
1fd1aceb 3832 if(s>=0) c=(i_regs->wasconst>>s)&1;
3833 memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
b9b61529 3834 if (!offset&&!c&&s>=0) ar=s;
3835 assert(ar>=0);
3836
32631e6a 3837 cop2_do_stall_check(0, i, i_regs, reglist);
3838
cf95b4f0 3839 if (dops[i].opcode==0x3a) { // SWC2
3968e69e 3840 cop2_get_dreg(copr,tl,-1);
1fd1aceb 3841 type=STOREW_STUB;
b9b61529 3842 }
1fd1aceb 3843 else
b9b61529 3844 type=LOADW_STUB;
1fd1aceb 3845
3846 if(c&&!memtarget) {
b14b6a8f 3847 jaddr2=out;
1fd1aceb 3848 emit_jmp(0); // inline_readstub/inline_writestub?
b9b61529 3849 }
1fd1aceb 3850 else {
3851 if(!c) {
37387d8b 3852 jaddr2 = emit_fastpath_cmp_jump(i, i_regs, ar,
3853 &offset_reg, &fastio_reg_override);
3854 }
3855 else if (ram_offset && memtarget) {
3856 offset_reg = get_ro_reg(i_regs, 0);
3857 }
3858 switch (dops[i].opcode) {
3859 case 0x32: { // LWC2
3860 int a = ar;
3861 if (fastio_reg_override >= 0)
3862 a = fastio_reg_override;
3863 do_load_word(a, tl, offset_reg);
3864 break;
1fd1aceb 3865 }
37387d8b 3866 case 0x3a: { // SWC2
1fd1aceb 3867 #ifdef DESTRUCTIVE_SHIFT
3868 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3869 #endif
37387d8b 3870 int a = ar;
3871 if (fastio_reg_override >= 0)
3872 a = fastio_reg_override;
3873 do_store_word(a, 0, tl, offset_reg, 1);
3874 break;
3875 }
3876 default:
3877 assert(0);
1fd1aceb 3878 }
b9b61529 3879 }
37387d8b 3880 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
d1e4ebd9 3881 host_tempreg_release();
b9b61529 3882 if(jaddr2)
2330734f 3883 add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj_,reglist);
cf95b4f0 3884 if(dops[i].opcode==0x3a) // SWC2
3885 if(!(i_regs->waswritten&(1<<dops[i].rs1)) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
b9b61529 3886#if defined(HOST_IMM8)
3887 int ir=get_reg(i_regs->regmap,INVCP);
3888 assert(ir>=0);
3889 emit_cmpmem_indexedsr12_reg(ir,ar,1);
3890#else
643aeae3 3891 emit_cmpmem_indexedsr12_imm(invalid_code,ar,1);
b9b61529 3892#endif
882a08fc 3893 #ifdef INVALIDATE_USE_COND_CALL
0bbd1454 3894 emit_callne(invalidate_addr_reg[ar]);
3895 #else
b14b6a8f 3896 void *jaddr3 = out;
b9b61529 3897 emit_jne(0);
b14b6a8f 3898 add_stub(INVCODE_STUB,jaddr3,out,reglist|(1<<HOST_CCREG),ar,0,0,0);
0bbd1454 3899 #endif
b9b61529 3900 }
cf95b4f0 3901 if (dops[i].opcode==0x32) { // LWC2
d1e4ebd9 3902 host_tempreg_acquire();
b9b61529 3903 cop2_put_dreg(copr,tl,HOST_TEMPREG);
d1e4ebd9 3904 host_tempreg_release();
b9b61529 3905 }
3906}
3907
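// MFC2/CFC2/MTC2/CTC2: moves between guest GPRs and GTE data/control regs,
// with a stall check before the move and the usual write quirks for CTC2.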
81dbbf4c 3908static void cop2_assemble(int i, const struct regstat *i_regs)
8062d65a 3909{
81dbbf4c 3910 u_int copr = (source[i]>>11) & 0x1f;
9de8a0c3 3911 signed char temp = get_reg_temp(i_regs->regmap);
81dbbf4c 3912
32631e6a 3913 if (!HACK_ENABLED(NDHACK_NO_STALLS)) {
3914 u_int reglist = reglist_exclude(get_host_reglist(i_regs->regmap), temp, -1);
cf95b4f0 3915 if (dops[i].opcode2 == 0 || dops[i].opcode2 == 2) { // MFC2/CFC2
3916 signed char tl = get_reg(i_regs->regmap, dops[i].rt1);
32631e6a 3917 reglist = reglist_exclude(reglist, tl, -1);
81dbbf4c 3918 }
32631e6a 3919 cop2_do_stall_check(0, i, i_regs, reglist);
81dbbf4c 3920 }
cf95b4f0 3921 if (dops[i].opcode2==0) { // MFC2
3922 signed char tl=get_reg(i_regs->regmap,dops[i].rt1);
3923 if(tl>=0&&dops[i].rt1!=0)
8062d65a 3924 cop2_get_dreg(copr,tl,temp);
3925 }
cf95b4f0 3926 else if (dops[i].opcode2==4) { // MTC2
3927 signed char sl=get_reg(i_regs->regmap,dops[i].rs1);
8062d65a 3928 cop2_put_dreg(copr,sl,temp);
3929 }
cf95b4f0 3930 else if (dops[i].opcode2==2) // CFC2
8062d65a 3931 {
cf95b4f0 3932 signed char tl=get_reg(i_regs->regmap,dops[i].rt1);
3933 if(tl>=0&&dops[i].rt1!=0)
8062d65a 3934 emit_readword(&reg_cop2c[copr],tl);
3935 }
cf95b4f0 3936 else if (dops[i].opcode2==6) // CTC2
8062d65a 3937 {
cf95b4f0 3938 signed char sl=get_reg(i_regs->regmap,dops[i].rs1);
8062d65a 3939 switch(copr) {
3940 case 4:
3941 case 12:
3942 case 20:
3943 case 26:
3944 case 27:
3945 case 29:
3946 case 30:
3947 emit_signextend16(sl,temp);
3948 break;
3949 case 31:
3968e69e 3950 c2op_ctc2_31_assemble(sl,temp);
8062d65a 3951 break;
3952 default:
3953 temp=sl;
3954 break;
3955 }
3956 emit_writeword(temp,&reg_cop2c[copr]);
3957 assert(sl>=0);
3958 }
3959}
3960
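// Slow path for SWL/SWR: save host regs, pass the address, data and cycle
// count to the jump_handle_swl/swr C helpers, then restore and return.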
3968e69e 3961static void do_unalignedwritestub(int n)
3962{
3963 assem_debug("do_unalignedwritestub %x\n",start+stubs[n].a*4);
3964 literal_pool(256);
3965 set_jump_target(stubs[n].addr, out);
3966
3967 int i=stubs[n].a;
3968 struct regstat *i_regs=(struct regstat *)stubs[n].c;
3969 int addr=stubs[n].b;
3970 u_int reglist=stubs[n].e;
3971 signed char *i_regmap=i_regs->regmap;
3972 int temp2=get_reg(i_regmap,FTEMP);
3973 int rt;
cf95b4f0 3974 rt=get_reg(i_regmap,dops[i].rs2);
3968e69e 3975 assert(rt>=0);
3976 assert(addr>=0);
cf95b4f0 3977 assert(dops[i].opcode==0x2a||dops[i].opcode==0x2e); // SWL/SWR only implemented
3968e69e 3978 reglist|=(1<<addr);
3979 reglist&=~(1<<temp2);
3980
3968e69e 3981 // don't bother with it and call write handler
3982 save_regs(reglist);
3983 pass_args(addr,rt);
3984 int cc=get_reg(i_regmap,CCREG);
3985 if(cc<0)
3986 emit_loadreg(CCREG,2);
2330734f 3987 emit_addimm(cc<0?2:cc,(int)stubs[n].d+1,2);
cf95b4f0 3988 emit_far_call((dops[i].opcode==0x2a?jump_handle_swl:jump_handle_swr));
2330734f 3989 emit_addimm(0,-((int)stubs[n].d+1),cc<0?2:cc);
3968e69e 3990 if(cc<0)
3991 emit_storereg(CCREG,2);
3992 restore_regs(reglist);
3993 emit_jmp(stubs[n].retaddr); // return address
3968e69e 3994}
3995
57871462 3996#ifndef multdiv_assemble
3997void multdiv_assemble(int i,struct regstat *i_regs)
3998{
3999 printf("Need multdiv_assemble for this architecture.\n");
7c3a5182 4000 abort();
57871462 4001}
4002#endif
4003
2330734f 4004static void mov_assemble(int i, const struct regstat *i_regs)
57871462 4005{
cf95b4f0 4006 //if(dops[i].opcode2==0x10||dops[i].opcode2==0x12) { // MFHI/MFLO
4007 //if(dops[i].opcode2==0x11||dops[i].opcode2==0x13) { // MTHI/MTLO
4008 if(dops[i].rt1) {
7c3a5182 4009 signed char sl,tl;
cf95b4f0 4010 tl=get_reg(i_regs->regmap,dops[i].rt1);
57871462 4011 //assert(tl>=0);
4012 if(tl>=0) {
cf95b4f0 4013 sl=get_reg(i_regs->regmap,dops[i].rs1);
57871462 4014 if(sl>=0) emit_mov(sl,tl);
cf95b4f0 4015 else emit_loadreg(dops[i].rs1,tl);
57871462 4016 }
4017 }
cf95b4f0 4018 if (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG) // MFHI/MFLO
32631e6a 4019 multdiv_do_stall(i, i_regs);
57871462 4020}
4021
3968e69e 4022// call interpreter, exception handler, things that change pc/regs/cycles ...
2330734f 4023static void call_c_cpu_handler(int i, const struct regstat *i_regs, int ccadj_, u_int pc, void *func)
57871462 4024{
4025 signed char ccreg=get_reg(i_regs->regmap,CCREG);
4026 assert(ccreg==HOST_CCREG);
4027 assert(!is_delayslot);
581335b0 4028 (void)ccreg;
3968e69e 4029
4030 emit_movimm(pc,3); // Get PC
4031 emit_readword(&last_count,2);
4032 emit_writeword(3,&psxRegs.pc);
2330734f 4033 emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
3968e69e 4034 emit_add(2,HOST_CCREG,2);
4035 emit_writeword(2,&psxRegs.cycle);
2a014d73 4036 emit_far_call(func);
4037 emit_far_jump(jump_to_new_pc);
3968e69e 4038}
4039
2330734f 4040static void syscall_assemble(int i, const struct regstat *i_regs, int ccadj_)
3968e69e 4041{
d1150cd6 4042 // 'break' tends to be littered around to catch things like
4043 // division by 0 and is almost never executed, so don't emit much code here
4044 void *func = (dops[i].opcode2 == 0x0C)
4045 ? (is_delayslot ? jump_syscall_ds : jump_syscall)
4046 : (is_delayslot ? jump_break_ds : jump_break);
2acc46cd 4047 assert(get_reg(i_regs->regmap, CCREG) == HOST_CCREG);
d1150cd6 4048 emit_movimm(start + i*4, 2); // pc
4049 emit_addimm(HOST_CCREG, ccadj_ + CLOCK_ADJUST(1), HOST_CCREG);
4050 emit_far_jump(func);
7139f3c8 4051}
4052
2330734f 4053static void hlecall_assemble(int i, const struct regstat *i_regs, int ccadj_)
7139f3c8 4054{
3968e69e 4055 void *hlefunc = psxNULL;
dd79da89 4056 uint32_t hleCode = source[i] & 0x03ffffff;
3968e69e 4057 if (hleCode < ARRAY_SIZE(psxHLEt))
4058 hlefunc = psxHLEt[hleCode];
4059
2330734f 4060 call_c_cpu_handler(i, i_regs, ccadj_, start + i*4+4, hlefunc);
57871462 4061}
4062
2330734f 4063static void intcall_assemble(int i, const struct regstat *i_regs, int ccadj_)
1e973cb0 4064{
2330734f 4065 call_c_cpu_handler(i, i_regs, ccadj_, start + i*4, execI);
1e973cb0 4066}
4067
8062d65a 4068static void speculate_mov(int rs,int rt)
4069{
4070 if(rt!=0) {
4071 smrv_strong_next|=1<<rt;
4072 smrv[rt]=smrv[rs];
4073 }
4074}
4075
4076static void speculate_mov_weak(int rs,int rt)
4077{
4078 if(rt!=0) {
4079 smrv_weak_next|=1<<rt;
4080 smrv[rt]=smrv[rs];
4081 }
4082}
4083
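// Track guessed runtime values of guest registers ("strong" and "weak"
// guesses), seeded from the register file at block start and propagated
// across moves, ALU ops and known constants; presumably used later to pick
// a likely memory region when assembling loads/stores.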
4084static void speculate_register_values(int i)
4085{
4086 if(i==0) {
4087 memcpy(smrv,psxRegs.GPR.r,sizeof(smrv));
4088 // gp,sp are likely to stay the same throughout the block
4089 smrv_strong_next=(1<<28)|(1<<29)|(1<<30);
4090 smrv_weak_next=~smrv_strong_next;
4091 //printf(" llr %08x\n", smrv[4]);
4092 }
4093 smrv_strong=smrv_strong_next;
4094 smrv_weak=smrv_weak_next;
cf95b4f0 4095 switch(dops[i].itype) {
8062d65a 4096 case ALU:
cf95b4f0 4097 if ((smrv_strong>>dops[i].rs1)&1) speculate_mov(dops[i].rs1,dops[i].rt1);
4098 else if((smrv_strong>>dops[i].rs2)&1) speculate_mov(dops[i].rs2,dops[i].rt1);
4099 else if((smrv_weak>>dops[i].rs1)&1) speculate_mov_weak(dops[i].rs1,dops[i].rt1);
4100 else if((smrv_weak>>dops[i].rs2)&1) speculate_mov_weak(dops[i].rs2,dops[i].rt1);
8062d65a 4101 else {
cf95b4f0 4102 smrv_strong_next&=~(1<<dops[i].rt1);
4103 smrv_weak_next&=~(1<<dops[i].rt1);
8062d65a 4104 }
4105 break;
4106 case SHIFTIMM:
cf95b4f0 4107 smrv_strong_next&=~(1<<dops[i].rt1);
4108 smrv_weak_next&=~(1<<dops[i].rt1);
8062d65a 4109 // fallthrough
4110 case IMM16:
cf95b4f0 4111 if(dops[i].rt1&&is_const(&regs[i],dops[i].rt1)) {
4112 int value,hr=get_reg(regs[i].regmap,dops[i].rt1);
8062d65a 4113 if(hr>=0) {
4114 if(get_final_value(hr,i,&value))
cf95b4f0 4115 smrv[dops[i].rt1]=value;
4116 else smrv[dops[i].rt1]=constmap[i][hr];
4117 smrv_strong_next|=1<<dops[i].rt1;
8062d65a 4118 }
4119 }
4120 else {
cf95b4f0 4121 if ((smrv_strong>>dops[i].rs1)&1) speculate_mov(dops[i].rs1,dops[i].rt1);
4122 else if((smrv_weak>>dops[i].rs1)&1) speculate_mov_weak(dops[i].rs1,dops[i].rt1);
8062d65a 4123 }
4124 break;
4125 case LOAD:
cf95b4f0 4126 if(start<0x2000&&(dops[i].rt1==26||(smrv[dops[i].rt1]>>24)==0xa0)) {
8062d65a 4127 // special case for BIOS
cf95b4f0 4128 smrv[dops[i].rt1]=0xa0000000;
4129 smrv_strong_next|=1<<dops[i].rt1;
8062d65a 4130 break;
4131 }
4132 // fallthrough
4133 case SHIFT:
4134 case LOADLR:
4135 case MOV:
cf95b4f0 4136 smrv_strong_next&=~(1<<dops[i].rt1);
4137 smrv_weak_next&=~(1<<dops[i].rt1);
8062d65a 4138 break;
4139 case COP0:
4140 case COP2:
cf95b4f0 4141 if(dops[i].opcode2==0||dops[i].opcode2==2) { // MFC/CFC
4142 smrv_strong_next&=~(1<<dops[i].rt1);
4143 smrv_weak_next&=~(1<<dops[i].rt1);
8062d65a 4144 }
4145 break;
4146 case C2LS:
cf95b4f0 4147 if (dops[i].opcode==0x32) { // LWC2
4148 smrv_strong_next&=~(1<<dops[i].rt1);
4149 smrv_weak_next&=~(1<<dops[i].rt1);
8062d65a 4150 }
4151 break;
4152 }
4153#if 0
4154 int r=4;
4155 printf("x %08x %08x %d %d c %08x %08x\n",smrv[r],start+i*4,
4156 ((smrv_strong>>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst);
4157#endif
4158}
4159
2330734f 4160static void ujump_assemble(int i, const struct regstat *i_regs);
4161static void rjump_assemble(int i, const struct regstat *i_regs);
4162static void cjump_assemble(int i, const struct regstat *i_regs);
4163static void sjump_assemble(int i, const struct regstat *i_regs);
2330734f 4164
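// Per-instruction dispatcher: emit code for instruction i with register state
// i_regs and cycle offset ccadj_; returns 1 for jump/branch types, whose
// delay slot is handled by the branch assembler itself.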
4165static int assemble(int i, const struct regstat *i_regs, int ccadj_)
57871462 4166{
2330734f 4167 int ds = 0;
4168 switch (dops[i].itype) {
57871462 4169 case ALU:
2330734f 4170 alu_assemble(i, i_regs);
4171 break;
57871462 4172 case IMM16:
2330734f 4173 imm16_assemble(i, i_regs);
4174 break;
57871462 4175 case SHIFT:
2330734f 4176 shift_assemble(i, i_regs);
4177 break;
57871462 4178 case SHIFTIMM:
2330734f 4179 shiftimm_assemble(i, i_regs);
4180 break;
57871462 4181 case LOAD:
2330734f 4182 load_assemble(i, i_regs, ccadj_);
4183 break;
57871462 4184 case LOADLR:
2330734f 4185 loadlr_assemble(i, i_regs, ccadj_);
4186 break;
57871462 4187 case STORE:
2330734f 4188 store_assemble(i, i_regs, ccadj_);
4189 break;
57871462 4190 case STORELR:
2330734f 4191 storelr_assemble(i, i_regs, ccadj_);
4192 break;
57871462 4193 case COP0:
2330734f 4194 cop0_assemble(i, i_regs, ccadj_);
4195 break;
57871462 4196 case COP1:
2330734f 4197 cop1_assemble(i, i_regs);
4198 break;
57871462 4199 case C1LS:
2330734f 4200 c1ls_assemble(i, i_regs);
4201 break;
b9b61529 4202 case COP2:
2330734f 4203 cop2_assemble(i, i_regs);
4204 break;
b9b61529 4205 case C2LS:
2330734f 4206 c2ls_assemble(i, i_regs, ccadj_);
4207 break;
b9b61529 4208 case C2OP:
2330734f 4209 c2op_assemble(i, i_regs);
4210 break;
57871462 4211 case MULTDIV:
2330734f 4212 multdiv_assemble(i, i_regs);
4213 multdiv_prepare_stall(i, i_regs, ccadj_);
32631e6a 4214 break;
57871462 4215 case MOV:
2330734f 4216 mov_assemble(i, i_regs);
4217 break;
4218 case SYSCALL:
4219 syscall_assemble(i, i_regs, ccadj_);
4220 break;
4221 case HLECALL:
4222 hlecall_assemble(i, i_regs, ccadj_);
4223 break;
4224 case INTCALL:
4225 intcall_assemble(i, i_regs, ccadj_);
4226 break;
4227 case UJUMP:
4228 ujump_assemble(i, i_regs);
4229 ds = 1;
4230 break;
4231 case RJUMP:
4232 rjump_assemble(i, i_regs);
4233 ds = 1;
4234 break;
4235 case CJUMP:
4236 cjump_assemble(i, i_regs);
4237 ds = 1;
4238 break;
4239 case SJUMP:
4240 sjump_assemble(i, i_regs);
4241 ds = 1;
4242 break;
24058131 4243 case NOP:
2330734f 4244 case OTHER:
4245 case NI:
4246 // not handled, just skip
4247 break;
4248 default:
4249 assert(0);
4250 }
4251 return ds;
4252}
4253
4254static void ds_assemble(int i, const struct regstat *i_regs)
4255{
4256 speculate_register_values(i);
4257 is_delayslot = 1;
4258 switch (dops[i].itype) {
57871462 4259 case SYSCALL:
7139f3c8 4260 case HLECALL:
1e973cb0 4261 case INTCALL:
57871462 4262 case UJUMP:
4263 case RJUMP:
4264 case CJUMP:
4265 case SJUMP:
c43b5311 4266 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
2330734f 4267 break;
4268 default:
4269 assemble(i, i_regs, ccadj[i]);
57871462 4270 }
2330734f 4271 is_delayslot = 0;
57871462 4272}
4273
4274// Is the branch target a valid internal jump?
ad49de89 4275static int internal_branch(int addr)
57871462 4276{
4277 if(addr&1) return 0; // Indirect (register) jump
4278 if(addr>=start && addr<start+slen*4-4)
4279 {
71e490c5 4280 return 1;
57871462 4281 }
4282 return 0;
4283}
4284
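// Write back dirty guest registers that are about to be dropped from the
// register map (pre -> entry), then move values whose host register changes.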
ad49de89 4285static void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t u)
57871462 4286{
4287 int hr;
4288 for(hr=0;hr<HOST_REGS;hr++) {
4289 if(hr!=EXCLUDE_REG) {
4290 if(pre[hr]!=entry[hr]) {
4291 if(pre[hr]>=0) {
4292 if((dirty>>hr)&1) {
4293 if(get_reg(entry,pre[hr])<0) {
00fa9369 4294 assert(pre[hr]<64);
4295 if(!((u>>pre[hr])&1))
4296 emit_storereg(pre[hr],hr);
57871462 4297 }
4298 }
4299 }
4300 }
4301 }
4302 }
4303 // Move from one register to another (no writeback)
4304 for(hr=0;hr<HOST_REGS;hr++) {
4305 if(hr!=EXCLUDE_REG) {
4306 if(pre[hr]!=entry[hr]) {
9de8a0c3 4307 if(pre[hr]>=0&&pre[hr]<TEMPREG) {
57871462 4308 int nr;
4309 if((nr=get_reg(entry,pre[hr]))>=0) {
4310 emit_mov(hr,nr);
4311 }
4312 }
4313 }
4314 }
4315 }
4316}
57871462 4317
4318// Load the specified registers
4319// This only loads the registers given as arguments because
4320// we don't want to load things that will be overwritten
53358c1d 4321static inline void load_reg(signed char entry[], signed char regmap[], int rs)
57871462 4322{
53358c1d 4323 int hr = get_reg(regmap, rs);
4324 if (hr >= 0 && entry[hr] != regmap[hr])
4325 emit_loadreg(regmap[hr], hr);
4326}
4327
4328static void load_regs(signed char entry[], signed char regmap[], int rs1, int rs2)
4329{
4330 load_reg(entry, regmap, rs1);
4331 if (rs1 != rs2)
4332 load_reg(entry, regmap, rs2);
57871462 4333}
4334
4335// Load registers prior to the start of a loop
4336// so that they are not loaded within the loop
4337static void loop_preload(signed char pre[],signed char entry[])
4338{
4339 int hr;
53358c1d 4340 for (hr = 0; hr < HOST_REGS; hr++) {
4341 int r = entry[hr];
4342 if (r >= 0 && pre[hr] != r && get_reg(pre, r) < 0) {
4343 assem_debug("loop preload:\n");
4344 if (r < TEMPREG)
4345 emit_loadreg(r, hr);
57871462 4346 }
4347 }
4348}
4349
4350// Generate address for load/store instruction
b9b61529 4351// goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
4149788d 4352static void address_generation(int i, const struct regstat *i_regs, signed char entry[])
57871462 4353{
37387d8b 4354 if (dops[i].is_load || dops[i].is_store) {
5194fb95 4355 int ra=-1;
57871462 4356 int agr=AGEN1+(i&1);
cf95b4f0 4357 if(dops[i].itype==LOAD) {
4358 ra=get_reg(i_regs->regmap,dops[i].rt1);
9de8a0c3 4359 if(ra<0) ra=get_reg_temp(i_regs->regmap);
535d208a 4360 assert(ra>=0);
57871462 4361 }
cf95b4f0 4362 if(dops[i].itype==LOADLR) {
57871462 4363 ra=get_reg(i_regs->regmap,FTEMP);
4364 }
cf95b4f0 4365 if(dops[i].itype==STORE||dops[i].itype==STORELR) {
57871462 4366 ra=get_reg(i_regs->regmap,agr);
9de8a0c3 4367 if(ra<0) ra=get_reg_temp(i_regs->regmap);
57871462 4368 }
37387d8b 4369 if(dops[i].itype==C2LS) {
cf95b4f0 4370 if ((dops[i].opcode&0x3b)==0x31||(dops[i].opcode&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
57871462 4371 ra=get_reg(i_regs->regmap,FTEMP);
1fd1aceb 4372 else { // SWC1/SDC1/SWC2/SDC2
57871462 4373 ra=get_reg(i_regs->regmap,agr);
9de8a0c3 4374 if(ra<0) ra=get_reg_temp(i_regs->regmap);
57871462 4375 }
4376 }
cf95b4f0 4377 int rs=get_reg(i_regs->regmap,dops[i].rs1);
57871462 4378 if(ra>=0) {
4379 int offset=imm[i];
4380 int c=(i_regs->wasconst>>rs)&1;
cf95b4f0 4381 if(dops[i].rs1==0) {
57871462 4382 // Using r0 as a base address
57871462 4383 if(!entry||entry[ra]!=agr) {
cf95b4f0 4384 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
57871462 4385 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
cf95b4f0 4386 }else if (dops[i].opcode==0x1a||dops[i].opcode==0x1b) {
57871462 4387 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4388 }else{
4389 emit_movimm(offset,ra);
4390 }
4391 } // else did it in the previous cycle
4392 }
4393 else if(rs<0) {
cf95b4f0 4394 if(!entry||entry[ra]!=dops[i].rs1)
4395 emit_loadreg(dops[i].rs1,ra);
4396 //if(!entry||entry[ra]!=dops[i].rs1)
57871462 4397 // printf("poor load scheduling!\n");
4398 }
4399 else if(c) {
cf95b4f0 4400 if(dops[i].rs1!=dops[i].rt1||dops[i].itype!=LOAD) {
57871462 4401 if(!entry||entry[ra]!=agr) {
cf95b4f0 4402 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
57871462 4403 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
cf95b4f0 4404 }else if (dops[i].opcode==0x1a||dops[i].opcode==0x1b) {
57871462 4405 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4406 }else{
57871462 4407 emit_movimm(constmap[i][rs]+offset,ra);
8575a877 4408 regs[i].loadedconst|=1<<ra;
57871462 4409 }
4410 } // else did it in the previous cycle
4411 } // else load_consts already did it
4412 }
cf95b4f0 4413 if(offset&&!c&&dops[i].rs1) {
57871462 4414 if(rs>=0) {
4415 emit_addimm(rs,offset,ra);
4416 }else{
4417 emit_addimm(ra,offset,ra);
4418 }
4419 }
4420 }
4421 }
4422 // Preload constants for next instruction
37387d8b 4423 if (dops[i+1].is_load || dops[i+1].is_store) {
57871462 4424 int agr,ra;
57871462 4425 // Actual address
4426 agr=AGEN1+((i+1)&1);
4427 ra=get_reg(i_regs->regmap,agr);
4428 if(ra>=0) {
cf95b4f0 4429 int rs=get_reg(regs[i+1].regmap,dops[i+1].rs1);
57871462 4430 int offset=imm[i+1];
4431 int c=(regs[i+1].wasconst>>rs)&1;
cf95b4f0 4432 if(c&&(dops[i+1].rs1!=dops[i+1].rt1||dops[i+1].itype!=LOAD)) {
4433 if (dops[i+1].opcode==0x22||dops[i+1].opcode==0x26) {
57871462 4434 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
cf95b4f0 4435 }else if (dops[i+1].opcode==0x1a||dops[i+1].opcode==0x1b) {
57871462 4436 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4437 }else{
57871462 4438 emit_movimm(constmap[i+1][rs]+offset,ra);
8575a877 4439 regs[i+1].loadedconst|=1<<ra;
57871462 4440 }
4441 }
cf95b4f0 4442 else if(dops[i+1].rs1==0) {
57871462 4443 // Using r0 as a base address
cf95b4f0 4444 if (dops[i+1].opcode==0x22||dops[i+1].opcode==0x26) {
57871462 4445 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
cf95b4f0 4446 }else if (dops[i+1].opcode==0x1a||dops[i+1].opcode==0x1b) {
57871462 4447 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4448 }else{
4449 emit_movimm(offset,ra);
4450 }
4451 }
4452 }
4453 }
4454}
4455
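// Follow a constant held in host reg hr forward while consecutive
// instructions keep the same mapping, returning the value that actually has
// to be materialized (folding the immediate of an upcoming load from the
// same register); returns 0 if the value turns out to be unneeded.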
e2b5e7aa 4456static int get_final_value(int hr, int i, int *value)
57871462 4457{
4458 int reg=regs[i].regmap[hr];
4459 while(i<slen-1) {
4460 if(regs[i+1].regmap[hr]!=reg) break;
4461 if(!((regs[i+1].isconst>>hr)&1)) break;
cf95b4f0 4462 if(dops[i+1].bt) break;
57871462 4463 i++;
4464 }
4465 if(i<slen-1) {
fe807a8a 4466 if (dops[i].is_jump) {
57871462 4467 *value=constmap[i][hr];
4468 return 1;
4469 }
cf95b4f0 4470 if(!dops[i+1].bt) {
fe807a8a 4471 if (dops[i+1].is_jump) {
57871462 4472 // Load in delay slot, out-of-order execution
cf95b4f0 4473 if(dops[i+2].itype==LOAD&&dops[i+2].rs1==reg&&dops[i+2].rt1==reg&&((regs[i+1].wasconst>>hr)&1))
57871462 4474 {
57871462 4475 // Precompute load address
4476 *value=constmap[i][hr]+imm[i+2];
4477 return 1;
4478 }
4479 }
cf95b4f0 4480 if(dops[i+1].itype==LOAD&&dops[i+1].rs1==reg&&dops[i+1].rt1==reg)
57871462 4481 {
57871462 4482 // Precompute load address
4483 *value=constmap[i][hr]+imm[i+1];
643aeae3 4484 //printf("c=%x imm=%lx\n",(long)constmap[i][hr],imm[i+1]);
57871462 4485 return 1;
4486 }
4487 }
4488 }
4489 *value=constmap[i][hr];
643aeae3 4490 //printf("c=%lx\n",(long)constmap[i][hr]);
57871462 4491 if(i==slen-1) return 1;
00fa9369 4492 assert(reg < 64);
4493 return !((unneeded_reg[i+1]>>reg)&1);
57871462 4494}
4495
4496// Load registers with known constants
ad49de89 4497static void load_consts(signed char pre[],signed char regmap[],int i)
57871462 4498{
8575a877 4499 int hr,hr2;
4500 // propagate loaded constant flags
cf95b4f0 4501 if(i==0||dops[i].bt)
8575a877 4502 regs[i].loadedconst=0;
4503 else {
4504 for(hr=0;hr<HOST_REGS;hr++) {
4505 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
4506 &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
4507 {
4508 regs[i].loadedconst|=1<<hr;
4509 }
4510 }
4511 }
57871462 4512 // Load 32-bit regs
4513 for(hr=0;hr<HOST_REGS;hr++) {
4514 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4515 //if(entry[hr]!=regmap[hr]) {
8575a877 4516 if(!((regs[i].loadedconst>>hr)&1)) {
ad49de89 4517 assert(regmap[hr]<64);
4518 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>0) {
8575a877 4519 int value,similar=0;
57871462 4520 if(get_final_value(hr,i,&value)) {
8575a877 4521 // see if some other register has similar value
4522 for(hr2=0;hr2<HOST_REGS;hr2++) {
4523 if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
4524 if(is_similar_value(value,constmap[i][hr2])) {
4525 similar=1;
4526 break;
4527 }
4528 }
4529 }
4530 if(similar) {
4531 int value2;
4532 if(get_final_value(hr2,i,&value2)) // is this needed?
4533 emit_movimm_from(value2,hr2,value,hr);
4534 else
4535 emit_movimm(value,hr);
4536 }
4537 else if(value==0) {
57871462 4538 emit_zeroreg(hr);
4539 }
4540 else {
4541 emit_movimm(value,hr);
4542 }
4543 }
8575a877 4544 regs[i].loadedconst|=1<<hr;
57871462 4545 }
4546 }
4547 }
4548 }
57871462 4549}
ad49de89 4550
2330734f 4551static void load_all_consts(const signed char regmap[], u_int dirty, int i)
57871462 4552{
4553 int hr;
4554 // Load 32-bit regs
4555 for(hr=0;hr<HOST_REGS;hr++) {
4556 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
ad49de89 4557 assert(regmap[hr] < 64);
4558 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>0) {
57871462 4559 int value=constmap[i][hr];
4560 if(value==0) {
4561 emit_zeroreg(hr);
4562 }
4563 else {
4564 emit_movimm(value,hr);
4565 }
4566 }
4567 }
4568 }
57871462 4569}
4570
4571// Write out all dirty registers (except cycle count)
2330734f 4572static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty)
57871462 4573{
4574 int hr;
4575 for(hr=0;hr<HOST_REGS;hr++) {
4576 if(hr!=EXCLUDE_REG) {
4577 if(i_regmap[hr]>0) {
4578 if(i_regmap[hr]!=CCREG) {
4579 if((i_dirty>>hr)&1) {
00fa9369 4580 assert(i_regmap[hr]<64);
4581 emit_storereg(i_regmap[hr],hr);
57871462 4582 }
4583 }
4584 }
4585 }
4586 }
4587}
ad49de89 4588
57871462 4589// Write out dirty registers that we need to reload (pair with load_needed_regs)
4590// This writes the registers not written by store_regs_bt
2330734f 4591static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr)
57871462 4592{
4593 int hr;
4594 int t=(addr-start)>>2;
4595 for(hr=0;hr<HOST_REGS;hr++) {
4596 if(hr!=EXCLUDE_REG) {
4597 if(i_regmap[hr]>0) {
4598 if(i_regmap[hr]!=CCREG) {
ad49de89 4599 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1)) {
57871462 4600 if((i_dirty>>hr)&1) {
00fa9369 4601 assert(i_regmap[hr]<64);
4602 emit_storereg(i_regmap[hr],hr);
57871462 4603 }
4604 }
4605 }
4606 }
4607 }
4608 }
4609}
4610
4611// Load all registers (except cycle count)
2330734f 4612static void load_all_regs(const signed char i_regmap[])
57871462 4613{
4614 int hr;
4615 for(hr=0;hr<HOST_REGS;hr++) {
4616 if(hr!=EXCLUDE_REG) {
4617 if(i_regmap[hr]==0) {
4618 emit_zeroreg(hr);
4619 }
4620 else
9de8a0c3 4621 if(i_regmap[hr]>0 && i_regmap[hr]<TEMPREG && i_regmap[hr]!=CCREG)
57871462 4622 {
4623 emit_loadreg(i_regmap[hr],hr);
4624 }
4625 }
4626 }
4627}
4628
4629// Load all current registers also needed by next instruction
2330734f 4630static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[])
57871462 4631{
4632 int hr;
4633 for(hr=0;hr<HOST_REGS;hr++) {
4634 if(hr!=EXCLUDE_REG) {
4635 if(get_reg(next_regmap,i_regmap[hr])>=0) {
4636 if(i_regmap[hr]==0) {
4637 emit_zeroreg(hr);
4638 }
4639 else
9de8a0c3 4640 if(i_regmap[hr]>0 && i_regmap[hr]<TEMPREG && i_regmap[hr]!=CCREG)
57871462 4641 {
4642 emit_loadreg(i_regmap[hr],hr);
4643 }
4644 }
4645 }
4646 }
4647}
4648
4649// Load all regs, storing cycle count if necessary
2330734f 4650static void load_regs_entry(int t)
57871462 4651{
4652 int hr;
cf95b4f0 4653 if(dops[t].is_ds) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
2330734f 4654 else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t],HOST_CCREG);
57871462 4655 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4656 emit_storereg(CCREG,HOST_CCREG);
4657 }
4658 // Load 32-bit regs
4659 for(hr=0;hr<HOST_REGS;hr++) {
ea3d2e6e 4660 if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
57871462 4661 if(regs[t].regmap_entry[hr]==0) {
4662 emit_zeroreg(hr);
4663 }
4664 else if(regs[t].regmap_entry[hr]!=CCREG)
4665 {
4666 emit_loadreg(regs[t].regmap_entry[hr],hr);
4667 }
4668 }
4669 }
57871462 4670}
4671
4672// Store dirty registers prior to branch
4149788d 4673static void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
57871462 4674{
ad49de89 4675 if(internal_branch(addr))
57871462 4676 {
4677 int t=(addr-start)>>2;
4678 int hr;
4679 for(hr=0;hr<HOST_REGS;hr++) {
4680 if(hr!=EXCLUDE_REG) {
4681 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
ad49de89 4682 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1)) {
57871462 4683 if((i_dirty>>hr)&1) {
00fa9369 4684 assert(i_regmap[hr]<64);
4685 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4686 emit_storereg(i_regmap[hr],hr);
57871462 4687 }
4688 }
4689 }
4690 }
4691 }
4692 }
4693 else
4694 {
4695 // Branch out of this block, write out all dirty regs
ad49de89 4696 wb_dirtys(i_regmap,i_dirty);
57871462 4697 }
4698}
4699
4700// Load all needed registers for branch target
ad49de89 4701static void load_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
57871462 4702{
4703 //if(addr>=start && addr<(start+slen*4))
ad49de89 4704 if(internal_branch(addr))
57871462 4705 {
4706 int t=(addr-start)>>2;
4707 int hr;
4708 // Store the cycle count before loading something else
4709 if(i_regmap[HOST_CCREG]!=CCREG) {
4710 assert(i_regmap[HOST_CCREG]==-1);
4711 }
4712 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4713 emit_storereg(CCREG,HOST_CCREG);
4714 }
4715 // Load 32-bit regs
4716 for(hr=0;hr<HOST_REGS;hr++) {
ea3d2e6e 4717 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
00fa9369 4718 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
57871462 4719 if(regs[t].regmap_entry[hr]==0) {
4720 emit_zeroreg(hr);
4721 }
4722 else if(regs[t].regmap_entry[hr]!=CCREG)
4723 {
4724 emit_loadreg(regs[t].regmap_entry[hr],hr);
4725 }
4726 }
4727 }
4728 }
57871462 4729 }
4730}
4731
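// Check whether the register state at a branch already matches the entry
// state of the target block, so the branch can jump there directly without
// extra writeback/reload; delay slots never match.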
ad49de89 4732static int match_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
57871462 4733{
4734 if(addr>=start && addr<start+slen*4-4)
4735 {
4736 int t=(addr-start)>>2;
4737 int hr;
4738 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4739 for(hr=0;hr<HOST_REGS;hr++)
4740 {
4741 if(hr!=EXCLUDE_REG)
4742 {
4743 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4744 {
ea3d2e6e 4745 if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
57871462 4746 {
4747 return 0;
4748 }
9f51b4b9 4749 else
57871462 4750 if((i_dirty>>hr)&1)
4751 {
ea3d2e6e 4752 if(i_regmap[hr]<TEMPREG)
57871462 4753 {
4754 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4755 return 0;
4756 }
ea3d2e6e 4757 else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
57871462 4758 {
00fa9369 4759 assert(0);
57871462 4760 }
4761 }
4762 }
4763 else // Same register but is it 32-bit or dirty?
4764 if(i_regmap[hr]>=0)
4765 {
4766 if(!((regs[t].dirty>>hr)&1))
4767 {
4768 if((i_dirty>>hr)&1)
4769 {
4770 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4771 {
4772 //printf("%x: dirty no match\n",addr);
4773 return 0;
4774 }
4775 }
4776 }
57871462 4777 }
4778 }
4779 }
57871462 4780 // Delay slots are not valid branch targets
fe807a8a 4781 //if(t>0&&(dops[t-1].is_jump) return 0;
57871462 4782 // Delay slots require additional processing, so do not match
cf95b4f0 4783 if(dops[t].is_ds) return 0;
57871462 4784 }
4785 else
4786 {
4787 int hr;
4788 for(hr=0;hr<HOST_REGS;hr++)
4789 {
4790 if(hr!=EXCLUDE_REG)
4791 {
4792 if(i_regmap[hr]>=0)
4793 {
4794 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4795 {
4796 if((i_dirty>>hr)&1)
4797 {
4798 return 0;
4799 }
4800 }
4801 }
4802 }
4803 }
4804 }
4805 return 1;
4806}
4807
dd114d7d 4808#ifdef DRC_DBG
2330734f 4809static void drc_dbg_emit_do_cmp(int i, int ccadj_)
dd114d7d 4810{
4811 extern void do_insn_cmp();
3968e69e 4812 //extern int cycle;
81dbbf4c 4813 u_int hr, reglist = get_host_reglist(regs[i].regmap);
dd114d7d 4814
40fca85b 4815 assem_debug("//do_insn_cmp %08x\n", start+i*4);
dd114d7d 4816 save_regs(reglist);
40fca85b 4817 // write out changed consts to match the interpreter
cf95b4f0 4818 if (i > 0 && !dops[i].bt) {
40fca85b 4819 for (hr = 0; hr < HOST_REGS; hr++) {
2330734f 4820 int reg = regs[i].regmap_entry[hr]; // regs[i-1].regmap[hr];
40fca85b 4821 if (hr == EXCLUDE_REG || reg < 0)
4822 continue;
4823 if (!((regs[i-1].isconst >> hr) & 1))
4824 continue;
4825 if (i > 1 && reg == regs[i-2].regmap[hr] && constmap[i-1][hr] == constmap[i-2][hr])
4826 continue;
4827 emit_movimm(constmap[i-1][hr],0);
4828 emit_storereg(reg, 0);
4829 }
4830 }
dd114d7d 4831 emit_movimm(start+i*4,0);
643aeae3 4832 emit_writeword(0,&pcaddr);
2330734f 4833 int cc = get_reg(regs[i].regmap_entry, CCREG);
4834 if (cc < 0)
4835 emit_loadreg(CCREG, cc = 0);
4836 emit_addimm(cc, ccadj_, 0);
4837 emit_writeword(0, &psxRegs.cycle);
2a014d73 4838 emit_far_call(do_insn_cmp);
643aeae3 4839 //emit_readword(&cycle,0);
dd114d7d 4840 //emit_addimm(0,2,0);
643aeae3 4841 //emit_writeword(0,&cycle);
3968e69e 4842 (void)get_reg2;
dd114d7d 4843 restore_regs(reglist);
40fca85b 4844 assem_debug("\\\\do_insn_cmp\n");
dd114d7d 4845}
4846#else
2330734f 4847#define drc_dbg_emit_do_cmp(x,y)
dd114d7d 4848#endif
4849
57871462 4850// Used when a branch jumps into the delay slot of another branch
7c3a5182 4851static void ds_assemble_entry(int i)
57871462 4852{
2330734f 4853 int t = (ba[i] - start) >> 2;
4854 int ccadj_ = -CLOCK_ADJUST(1);
df4dc2b1 4855 if (!instr_addr[t])
4856 instr_addr[t] = out;
57871462 4857 assem_debug("Assemble delay slot at %x\n",ba[i]);
4858 assem_debug("<->\n");
2330734f 4859 drc_dbg_emit_do_cmp(t, ccadj_);
57871462 4860 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
ad49de89 4861 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty);
cf95b4f0 4862 load_regs(regs[t].regmap_entry,regs[t].regmap,dops[t].rs1,dops[t].rs2);
57871462 4863 address_generation(t,&regs[t],regs[t].regmap_entry);
37387d8b 4864 if (ram_offset && (dops[t].is_load || dops[t].is_store))
53358c1d 4865 load_reg(regs[t].regmap_entry,regs[t].regmap,ROREG);
37387d8b 4866 if (dops[t].is_store)
53358c1d 4867 load_reg(regs[t].regmap_entry,regs[t].regmap,INVCP);
57871462 4868 is_delayslot=0;
2330734f 4869 switch (dops[t].itype) {
57871462 4870 case SYSCALL:
7139f3c8 4871 case HLECALL:
1e973cb0 4872 case INTCALL:
57871462 4873 case UJUMP:
4874 case RJUMP:
4875 case CJUMP:
4876 case SJUMP:
c43b5311 4877 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
2330734f 4878 break;
4879 default:
4880 assemble(t, &regs[t], ccadj_);
57871462 4881 }
ad49de89 4882 store_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
4883 load_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
4884 if(internal_branch(ba[i]+4))
57871462 4885 assem_debug("branch: internal\n");
4886 else
4887 assem_debug("branch: external\n");
ad49de89 4888 assert(internal_branch(ba[i]+4));
4889 add_to_linker(out,ba[i]+4,internal_branch(ba[i]+4));
57871462 4890 emit_jmp(0);
4891}
4892
d1e4ebd9 4893// Load 2 immediates optimizing for small code size
4894static void emit_mov2imm_compact(int imm1,u_int rt1,int imm2,u_int rt2)
4895{
4896 emit_movimm(imm1,rt1);
4897 emit_movimm_from(imm1,rt1,imm2,rt2);
4898}
4899
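// Emit the cycle-count test before a branch: add the accumulated cycles to
// HOST_CCREG (or only compare, when the add is folded into an internal
// target) and branch to a CC_STUB once the counter goes non-negative so
// events/interrupts can run; *adj returns the cycles already accounted for
// at an internal target. A branch to itself with a nop delay slot is treated
// as an idle loop and fast-forwarded to the next event.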
2330734f 4900static void do_cc(int i, const signed char i_regmap[], int *adj,
4901 int addr, int taken, int invert)
57871462 4902{
2330734f 4903 int count, count_plus2;
b14b6a8f 4904 void *jaddr;
4905 void *idle=NULL;
b6e87b2b 4906 int t=0;
cf95b4f0 4907 if(dops[i].itype==RJUMP)
57871462 4908 {
4909 *adj=0;
4910 }
4911 //if(ba[i]>=start && ba[i]<(start+slen*4))
ad49de89 4912 if(internal_branch(ba[i]))
57871462 4913 {
b6e87b2b 4914 t=(ba[i]-start)>>2;
2330734f 4915 if(dops[t].is_ds) *adj=-CLOCK_ADJUST(1); // Branch into delay slot adds an extra cycle
57871462 4916 else *adj=ccadj[t];
4917 }
4918 else
4919 {
4920 *adj=0;
4921 }
2330734f 4922 count = ccadj[i];
4923 count_plus2 = count + CLOCK_ADJUST(2);
57871462 4924 if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4925 // Idle loop
4926 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
b14b6a8f 4927 idle=out;
57871462 4928 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4929 emit_andimm(HOST_CCREG,3,HOST_CCREG);
b14b6a8f 4930 jaddr=out;
57871462 4931 emit_jmp(0);
4932 }
4933 else if(*adj==0||invert) {
2330734f 4934 int cycles = count_plus2;
b6e87b2b 4935 // faster loop HACK
bb4f300c 4936#if 0
b6e87b2b 4937 if (t&&*adj) {
4938 int rel=t-i;
4939 if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
2330734f 4940 cycles=*adj+count+2-*adj;
b6e87b2b 4941 }
bb4f300c 4942#endif
2330734f 4943 emit_addimm_and_set_flags(cycles, HOST_CCREG);
4944 jaddr = out;
57871462 4945 emit_jns(0);
4946 }
4947 else
4948 {
2330734f 4949 emit_cmpimm(HOST_CCREG, -count_plus2);
4950 jaddr = out;
57871462 4951 emit_jns(0);
4952 }
2330734f 4953 add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:count_plus2,i,addr,taken,0);
57871462 4954}
4955
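// Out-of-line continuation of the cycle-count check: write back dirty regs,
// store the return PC to pcaddr (recomputing the taken/not-taken target when
// the return address depends on the branch outcome), call cc_interrupt, then
// reload registers and resume.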
b14b6a8f 4956static void do_ccstub(int n)
57871462 4957{
4958 literal_pool(256);
d1e4ebd9 4959 assem_debug("do_ccstub %x\n",start+(u_int)stubs[n].b*4);
b14b6a8f 4960 set_jump_target(stubs[n].addr, out);
4961 int i=stubs[n].b;
4962 if(stubs[n].d==NULLDS) {
57871462 4963 // Delay slot instruction is nullified ("likely" branch)
ad49de89 4964 wb_dirtys(regs[i].regmap,regs[i].dirty);
57871462 4965 }
b14b6a8f 4966 else if(stubs[n].d!=TAKEN) {
ad49de89 4967 wb_dirtys(branch_regs[i].regmap,branch_regs[i].dirty);
57871462 4968 }
4969 else {
ad49de89 4970 if(internal_branch(ba[i]))
4971 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 4972 }
b14b6a8f 4973 if(stubs[n].c!=-1)
57871462 4974 {
4975 // Save PC as return address
b14b6a8f 4976 emit_movimm(stubs[n].c,EAX);
643aeae3 4977 emit_writeword(EAX,&pcaddr);
57871462 4978 }
4979 else
4980 {
4981 // Return address depends on which way the branch goes
cf95b4f0 4982 if(dops[i].itype==CJUMP||dops[i].itype==SJUMP)
57871462 4983 {
cf95b4f0 4984 int s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
4985 int s2l=get_reg(branch_regs[i].regmap,dops[i].rs2);
4986 if(dops[i].rs1==0)
57871462 4987 {
ad49de89 4988 s1l=s2l;
4989 s2l=-1;
57871462 4990 }
cf95b4f0 4991 else if(dops[i].rs2==0)
57871462 4992 {
ad49de89 4993 s2l=-1;
57871462 4994 }
4995 assert(s1l>=0);
4996 #ifdef DESTRUCTIVE_WRITEBACK
cf95b4f0 4997 if(dops[i].rs1) {
ad49de89 4998 if((branch_regs[i].dirty>>s1l)&1)
cf95b4f0 4999 emit_loadreg(dops[i].rs1,s1l);
9f51b4b9 5000 }
57871462 5001 else {
ad49de89 5002 if((branch_regs[i].dirty>>s1l)&1)
cf95b4f0 5003 emit_loadreg(dops[i].rs2,s1l);
57871462 5004 }
5005 if(s2l>=0)
ad49de89 5006 if((branch_regs[i].dirty>>s2l)&1)
cf95b4f0 5007 emit_loadreg(dops[i].rs2,s2l);
57871462 5008 #endif
5009 int hr=0;
5194fb95 5010 int addr=-1,alt=-1,ntaddr=-1;
57871462 5011 while(hr<HOST_REGS)
5012 {
5013 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
9de8a0c3 5014 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5015 branch_regs[i].regmap[hr]!=dops[i].rs2 )
57871462 5016 {
5017 addr=hr++;break;
5018 }
5019 hr++;
5020 }
5021 while(hr<HOST_REGS)
5022 {
5023 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
9de8a0c3 5024 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5025 branch_regs[i].regmap[hr]!=dops[i].rs2 )
57871462 5026 {
5027 alt=hr++;break;
5028 }
5029 hr++;
5030 }
cf95b4f0 5031 if((dops[i].opcode&0x2E)==6) // BLEZ/BGTZ needs another register
57871462 5032 {
5033 while(hr<HOST_REGS)
5034 {
5035 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
9de8a0c3 5036 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5037 branch_regs[i].regmap[hr]!=dops[i].rs2 )
57871462 5038 {
5039 ntaddr=hr;break;
5040 }
5041 hr++;
5042 }
5043 assert(hr<HOST_REGS);
5044 }
cf95b4f0 5045 if((dops[i].opcode&0x2f)==4) // BEQ
57871462 5046 {
5047 #ifdef HAVE_CMOV_IMM
ad49de89 5048 if(s2l>=0) emit_cmp(s1l,s2l);
5049 else emit_test(s1l,s1l);
5050 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5051 #else
5052 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5053 if(s2l>=0) emit_cmp(s1l,s2l);
5054 else emit_test(s1l,s1l);
5055 emit_cmovne_reg(alt,addr);
57871462 5056 #endif
57871462 5057 }
cf95b4f0 5058 if((dops[i].opcode&0x2f)==5) // BNE
57871462 5059 {
5060 #ifdef HAVE_CMOV_IMM
ad49de89 5061 if(s2l>=0) emit_cmp(s1l,s2l);
5062 else emit_test(s1l,s1l);
5063 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5064 #else
5065 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5066 if(s2l>=0) emit_cmp(s1l,s2l);
5067 else emit_test(s1l,s1l);
5068 emit_cmovne_reg(alt,addr);
57871462 5069 #endif
57871462 5070 }
cf95b4f0 5071 if((dops[i].opcode&0x2f)==6) // BLEZ
57871462 5072 {
5073 //emit_movimm(ba[i],alt);
5074 //emit_movimm(start+i*4+8,addr);
5075 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5076 emit_cmpimm(s1l,1);
57871462 5077 emit_cmovl_reg(alt,addr);
57871462 5078 }
cf95b4f0 5079 if((dops[i].opcode&0x2f)==7) // BGTZ
57871462 5080 {
5081 //emit_movimm(ba[i],addr);
5082 //emit_movimm(start+i*4+8,ntaddr);
5083 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
5084 emit_cmpimm(s1l,1);
57871462 5085 emit_cmovl_reg(ntaddr,addr);
57871462 5086 }
cf95b4f0 5087 if((dops[i].opcode==1)&&(dops[i].opcode2&0x2D)==0) // BLTZ
57871462 5088 {
5089 //emit_movimm(ba[i],alt);
5090 //emit_movimm(start+i*4+8,addr);
5091 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
ad49de89 5092 emit_test(s1l,s1l);
57871462 5093 emit_cmovs_reg(alt,addr);
5094 }
cf95b4f0 5095 if((dops[i].opcode==1)&&(dops[i].opcode2&0x2D)==1) // BGEZ
57871462 5096 {
5097 //emit_movimm(ba[i],addr);
5098 //emit_movimm(start+i*4+8,alt);
5099 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
ad49de89 5100 emit_test(s1l,s1l);
57871462 5101 emit_cmovs_reg(alt,addr);
5102 }
cf95b4f0 5103 if(dops[i].opcode==0x11 && dops[i].opcode2==0x08 ) {
57871462 5104 if(source[i]&0x10000) // BC1T
5105 {
5106 //emit_movimm(ba[i],alt);
5107 //emit_movimm(start+i*4+8,addr);
5108 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5109 emit_testimm(s1l,0x800000);
5110 emit_cmovne_reg(alt,addr);
5111 }
5112 else // BC1F
5113 {
5114 //emit_movimm(ba[i],addr);
5115 //emit_movimm(start+i*4+8,alt);
5116 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5117 emit_testimm(s1l,0x800000);
5118 emit_cmovne_reg(alt,addr);
5119 }
5120 }
643aeae3 5121 emit_writeword(addr,&pcaddr);
57871462 5122 }
5123 else
cf95b4f0 5124 if(dops[i].itype==RJUMP)
57871462 5125 {
cf95b4f0 5126 int r=get_reg(branch_regs[i].regmap,dops[i].rs1);
4919de1e 5127 if (ds_writes_rjump_rs(i)) {
57871462 5128 r=get_reg(branch_regs[i].regmap,RTEMP);
5129 }
643aeae3 5130 emit_writeword(r,&pcaddr);
57871462 5131 }
7c3a5182 5132 else {SysPrintf("Unknown branch type in do_ccstub\n");abort();}
57871462 5133 }
5134 // Update cycle count
5135 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
2330734f 5136 if(stubs[n].a) emit_addimm(HOST_CCREG,(int)stubs[n].a,HOST_CCREG);
2a014d73 5137 emit_far_call(cc_interrupt);
2330734f 5138 if(stubs[n].a) emit_addimm(HOST_CCREG,-(int)stubs[n].a,HOST_CCREG);
b14b6a8f 5139 if(stubs[n].d==TAKEN) {
ad49de89 5140 if(internal_branch(ba[i]))
57871462 5141 load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
cf95b4f0 5142 else if(dops[i].itype==RJUMP) {
57871462 5143 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
643aeae3 5144 emit_readword(&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
57871462 5145 else
cf95b4f0 5146 emit_loadreg(dops[i].rs1,get_reg(branch_regs[i].regmap,dops[i].rs1));
57871462 5147 }
b14b6a8f 5148 }else if(stubs[n].d==NOTTAKEN) {
57871462 5149 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5150 else load_all_regs(branch_regs[i].regmap);
b14b6a8f 5151 }else if(stubs[n].d==NULLDS) {
57871462 5152 // Delay slot instruction is nullified ("likely" branch)
5153 if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
5154 else load_all_regs(regs[i].regmap);
5155 }else{
5156 load_all_regs(branch_regs[i].regmap);
5157 }
d1e4ebd9 5158 if (stubs[n].retaddr)
5159 emit_jmp(stubs[n].retaddr);
5160 else
5161 do_jump_vaddr(stubs[n].e);
57871462 5162}
5163
104df9d3 5164static void add_to_linker(void *addr, u_int target, int is_internal)
57871462 5165{
643aeae3 5166 assert(linkcount < ARRAY_SIZE(link_addr));
5167 link_addr[linkcount].addr = addr;
5168 link_addr[linkcount].target = target;
104df9d3 5169 link_addr[linkcount].internal = is_internal;
57871462 5170 linkcount++;
5171}
5172
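// Write the JAL return address (PC+8) into the link register; with
// USE_MINI_HT it is also inserted into the mini hash table used to speed up
// the eventual 'jr $ra'.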
eba830cd 5173static void ujump_assemble_write_ra(int i)
5174{
5175 int rt;
5176 unsigned int return_address;
5177 rt=get_reg(branch_regs[i].regmap,31);
5178 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5179 //assert(rt>=0);
5180 return_address=start+i*4+8;
5181 if(rt>=0) {
5182 #ifdef USE_MINI_HT
cf95b4f0 5183 if(internal_branch(return_address)&&dops[i+1].rt1!=31) {
eba830cd 5184 int temp=-1; // note: must be ds-safe
5185 #ifdef HOST_TEMPREG
5186 temp=HOST_TEMPREG;
5187 #endif
5188 if(temp>=0) do_miniht_insert(return_address,rt,temp);
5189 else emit_movimm(return_address,rt);
5190 }
5191 else
5192 #endif
5193 {
5194 #ifdef REG_PREFETCH
9f51b4b9 5195 if(temp>=0)
eba830cd 5196 {
643aeae3 5197 if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
eba830cd 5198 }
5199 #endif
5200 emit_movimm(return_address,rt); // PC into link register
5201 #ifdef IMM_PREFETCH
df4dc2b1 5202 emit_prefetch(hash_table_get(return_address));
eba830cd 5203 #endif
5204 }
5205 }
5206}
5207
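// J/JAL: assemble the delay slot, write back registers not live across the
// jump, emit the cycle-count check, then either fall into the target's
// delay-slot entry or add a linker record and emit the jump.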
2330734f 5208static void ujump_assemble(int i, const struct regstat *i_regs)
57871462 5209{
eba830cd 5210 int ra_done=0;
57871462 5211 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5212 address_generation(i+1,i_regs,regs[i].regmap_entry);
5213 #ifdef REG_PREFETCH
5214 int temp=get_reg(branch_regs[i].regmap,PTEMP);
cf95b4f0 5215 if(dops[i].rt1==31&&temp>=0)
57871462 5216 {
581335b0 5217 signed char *i_regmap=i_regs->regmap;
57871462 5218 int return_address=start+i*4+8;
9f51b4b9 5219 if(get_reg(branch_regs[i].regmap,31)>0)
643aeae3 5220 if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
57871462 5221 }
5222 #endif
cf95b4f0 5223 if(dops[i].rt1==31&&(dops[i].rt1==dops[i+1].rs1||dops[i].rt1==dops[i+1].rs2)) {
eba830cd 5224 ujump_assemble_write_ra(i); // writeback ra for DS
5225 ra_done=1;
57871462 5226 }
4ef8f67d 5227 ds_assemble(i+1,i_regs);
5228 uint64_t bc_unneeded=branch_regs[i].u;
cf95b4f0 5229 bc_unneeded|=1|(1LL<<dops[i].rt1);
ad49de89 5230 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
53358c1d 5231 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
cf95b4f0 5232 if(!ra_done&&dops[i].rt1==31)
eba830cd 5233 ujump_assemble_write_ra(i);
57871462 5234 int cc,adj;
5235 cc=get_reg(branch_regs[i].regmap,CCREG);
5236 assert(cc==HOST_CCREG);
ad49de89 5237 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5238 #ifdef REG_PREFETCH
cf95b4f0 5239 if(dops[i].rt1==31&&temp>=0) emit_prefetchreg(temp);
57871462 5240 #endif
5241 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
2330734f 5242 if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
ad49de89 5243 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5244 if(internal_branch(ba[i]))
57871462 5245 assem_debug("branch: internal\n");
5246 else
5247 assem_debug("branch: external\n");
cf95b4f0 5248 if (internal_branch(ba[i]) && dops[(ba[i]-start)>>2].is_ds) {
57871462 5249 ds_assemble_entry(i);
5250 }
5251 else {
ad49de89 5252 add_to_linker(out,ba[i],internal_branch(ba[i]));
57871462 5253 emit_jmp(0);
5254 }
5255}
5256
eba830cd 5257static void rjump_assemble_write_ra(int i)
5258{
5259 int rt,return_address;
cf95b4f0 5260 assert(dops[i+1].rt1!=dops[i].rt1);
5261 assert(dops[i+1].rt2!=dops[i].rt1);
5262 rt=get_reg(branch_regs[i].regmap,dops[i].rt1);
eba830cd 5263 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5264 assert(rt>=0);
5265 return_address=start+i*4+8;
5266 #ifdef REG_PREFETCH
9f51b4b9 5267 if(temp>=0)
eba830cd 5268 {
643aeae3 5269 if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
eba830cd 5270 }
5271 #endif
5272 emit_movimm(return_address,rt); // PC into link register
5273 #ifdef IMM_PREFETCH
df4dc2b1 5274 emit_prefetch(hash_table_get(return_address));
eba830cd 5275 #endif
5276}
5277
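// JR/JALR: if the delay slot writes the jump register, the target address is
// first copied to RTEMP; after the delay slot and the cycle-count check the
// jump goes through do_jump_vaddr (or the mini hash table for 'jr $ra' when
// USE_MINI_HT is enabled).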
2330734f 5278static void rjump_assemble(int i, const struct regstat *i_regs)
57871462 5279{
57871462 5280 int temp;
581335b0 5281 int rs,cc;
eba830cd 5282 int ra_done=0;
cf95b4f0 5283 rs=get_reg(branch_regs[i].regmap,dops[i].rs1);
57871462 5284 assert(rs>=0);
4919de1e 5285 if (ds_writes_rjump_rs(i)) {
57871462 5286 // Delay slot abuse, make a copy of the branch address register
5287 temp=get_reg(branch_regs[i].regmap,RTEMP);
5288 assert(temp>=0);
5289 assert(regs[i].regmap[temp]==RTEMP);
5290 emit_mov(rs,temp);
5291 rs=temp;
5292 }
5293 address_generation(i+1,i_regs,regs[i].regmap_entry);
5294 #ifdef REG_PREFETCH
cf95b4f0 5295 if(dops[i].rt1==31)
57871462 5296 {
5297 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
581335b0 5298 signed char *i_regmap=i_regs->regmap;
57871462 5299 int return_address=start+i*4+8;
643aeae3 5300 if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
57871462 5301 }
5302 }
5303 #endif
5304 #ifdef USE_MINI_HT
cf95b4f0 5305 if(dops[i].rs1==31) {
57871462 5306 int rh=get_reg(regs[i].regmap,RHASH);
5307 if(rh>=0) do_preload_rhash(rh);
5308 }
5309 #endif
cf95b4f0 5310 if(dops[i].rt1!=0&&(dops[i].rt1==dops[i+1].rs1||dops[i].rt1==dops[i+1].rs2)) {
eba830cd 5311 rjump_assemble_write_ra(i);
5312 ra_done=1;
57871462 5313 }
d5910d5d 5314 ds_assemble(i+1,i_regs);
5315 uint64_t bc_unneeded=branch_regs[i].u;
cf95b4f0 5316 bc_unneeded|=1|(1LL<<dops[i].rt1);
5317 bc_unneeded&=~(1LL<<dops[i].rs1);
ad49de89 5318 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
cf95b4f0 5319 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,CCREG);
5320 if(!ra_done&&dops[i].rt1!=0)
eba830cd 5321 rjump_assemble_write_ra(i);
57871462 5322 cc=get_reg(branch_regs[i].regmap,CCREG);
5323 assert(cc==HOST_CCREG);
581335b0 5324 (void)cc;
57871462 5325 #ifdef USE_MINI_HT
5326 int rh=get_reg(branch_regs[i].regmap,RHASH);
5327 int ht=get_reg(branch_regs[i].regmap,RHTBL);
cf95b4f0 5328 if(dops[i].rs1==31) {
57871462 5329 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5330 do_preload_rhtbl(ht);
5331 do_rhash(rs,rh);
5332 }
5333 #endif
ad49de89 5334 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
57871462 5335 #ifdef DESTRUCTIVE_WRITEBACK
ad49de89 5336 if((branch_regs[i].dirty>>rs)&1) {
cf95b4f0 5337 if(dops[i].rs1!=dops[i+1].rt1&&dops[i].rs1!=dops[i+1].rt2) {
5338 emit_loadreg(dops[i].rs1,rs);
57871462 5339 }
5340 }
5341 #endif
5342 #ifdef REG_PREFETCH
cf95b4f0 5343 if(dops[i].rt1==31&&temp>=0) emit_prefetchreg(temp);
57871462 5344 #endif
5345 #ifdef USE_MINI_HT
cf95b4f0 5346 if(dops[i].rs1==31) {
57871462 5347 do_miniht_load(ht,rh);
5348 }
5349 #endif
5350 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5351 //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5352 //assert(adj==0);
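  // (as I read the cc handling elsewhere in this file: the count kept in
  // HOST_CCREG stays negative while cycle budget remains; once this add makes
  // it non-negative an event is due, so the jns below diverts to the CC_STUB)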
2330734f 5353 emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
d1e4ebd9 5354 add_stub(CC_STUB,out,NULL,0,i,-1,TAKEN,rs);
55a695d9 5355 if(dops[i+1].itype==COP0 && dops[i+1].opcode2==0x10)
911f2d55 5356 // special case for RFE
5357 emit_jmp(0);
5358 else
71e490c5 5359 emit_jns(0);
ad49de89 5360 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
57871462 5361 #ifdef USE_MINI_HT
cf95b4f0 5362 if(dops[i].rs1==31) {
57871462 5363 do_miniht_jump(rs,rh,ht);
5364 }
5365 else
5366 #endif
5367 {
d1e4ebd9 5368 do_jump_vaddr(rs);
57871462 5369 }
57871462 5370 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
cf95b4f0 5371 if(dops[i].rt1!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
57871462 5372 #endif
5373}
5374
2330734f 5375static void cjump_assemble(int i, const struct regstat *i_regs)
57871462 5376{
2330734f 5377 const signed char *i_regmap = i_regs->regmap;
57871462 5378 int cc;
5379 int match;
ad49de89 5380 match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5381 assem_debug("match=%d\n",match);
ad49de89 5382 int s1l,s2l;
57871462 5383 int unconditional=0,nop=0;
57871462 5384 int invert=0;
ad49de89 5385 int internal=internal_branch(ba[i]);
57871462 5386 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
57871462 5387 if(!match) invert=1;
5388 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5389 if(i>(ba[i]-start)>>2) invert=1;
5390 #endif
3968e69e 5391 #ifdef __aarch64__
5392 invert=1; // because of near cond. branches
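  // (AArch64 B.cond only reaches +/-1MB, too short for links across the
  // translation cache, so branch around an unconditional B instead)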
5393 #endif
9f51b4b9 5394
cf95b4f0 5395 if(dops[i].ooo) {
5396 s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
5397 s2l=get_reg(branch_regs[i].regmap,dops[i].rs2);
57871462 5398 }
5399 else {
cf95b4f0 5400 s1l=get_reg(i_regmap,dops[i].rs1);
5401 s2l=get_reg(i_regmap,dops[i].rs2);
57871462 5402 }
cf95b4f0 5403 if(dops[i].rs1==0&&dops[i].rs2==0)
57871462 5404 {
cf95b4f0 5405 if(dops[i].opcode&1) nop=1;
57871462 5406 else unconditional=1;
cf95b4f0 5407 //assert(dops[i].opcode!=5);
5408 //assert(dops[i].opcode!=7);
5409 //assert(dops[i].opcode!=0x15);
5410 //assert(dops[i].opcode!=0x17);
57871462 5411 }
cf95b4f0 5412 else if(dops[i].rs1==0)
57871462 5413 {
ad49de89 5414 s1l=s2l;
5415 s2l=-1;
57871462 5416 }
cf95b4f0 5417 else if(dops[i].rs2==0)
57871462 5418 {
ad49de89 5419 s2l=-1;
57871462 5420 }
5421
cf95b4f0 5422 if(dops[i].ooo) {
57871462 5423 // Out of order execution (delay slot first)
5424 //printf("OOOE\n");
5425 address_generation(i+1,i_regs,regs[i].regmap_entry);
5426 ds_assemble(i+1,i_regs);
5427 int adj;
5428 uint64_t bc_unneeded=branch_regs[i].u;
cf95b4f0 5429 bc_unneeded&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
57871462 5430 bc_unneeded|=1;
ad49de89 5431 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
cf95b4f0 5432 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,dops[i].rs2);
53358c1d 5433 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
57871462 5434 cc=get_reg(branch_regs[i].regmap,CCREG);
5435 assert(cc==HOST_CCREG);
9f51b4b9 5436 if(unconditional)
ad49de89 5437 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5438 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5439 //assem_debug("cycle count (adj)\n");
5440 if(unconditional) {
5441 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5442 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
2330734f 5443 if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
ad49de89 5444 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5445 if(internal)
5446 assem_debug("branch: internal\n");
5447 else
5448 assem_debug("branch: external\n");
cf95b4f0 5449 if (internal && dops[(ba[i]-start)>>2].is_ds) {
57871462 5450 ds_assemble_entry(i);
5451 }
5452 else {
643aeae3 5453 add_to_linker(out,ba[i],internal);
57871462 5454 emit_jmp(0);
5455 }
5456 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5457 if(((u_int)out)&7) emit_addnop(0);
5458 #endif
5459 }
5460 }
5461 else if(nop) {
2330734f 5462 emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
b14b6a8f 5463 void *jaddr=out;
57871462 5464 emit_jns(0);
b14b6a8f 5465 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
57871462 5466 }
5467 else {
df4dc2b1 5468 void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
57871462 5469 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
2330734f 5470 if(adj&&!invert) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
9f51b4b9 5471
57871462 5472 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5473 assert(s1l>=0);
cf95b4f0 5474 if(dops[i].opcode==4) // BEQ
57871462 5475 {
5476 if(s2l>=0) emit_cmp(s1l,s2l);
5477 else emit_test(s1l,s1l);
5478 if(invert){
df4dc2b1 5479 nottaken=out;
7c3a5182 5480 emit_jne(DJT_1);
57871462 5481 }else{
643aeae3 5482 add_to_linker(out,ba[i],internal);
57871462 5483 emit_jeq(0);
5484 }
5485 }
cf95b4f0 5486 if(dops[i].opcode==5) // BNE
57871462 5487 {
5488 if(s2l>=0) emit_cmp(s1l,s2l);
5489 else emit_test(s1l,s1l);
5490 if(invert){
df4dc2b1 5491 nottaken=out;
7c3a5182 5492 emit_jeq(DJT_1);
57871462 5493 }else{
643aeae3 5494 add_to_linker(out,ba[i],internal);
57871462 5495 emit_jne(0);
5496 }
5497 }
cf95b4f0 5498 if(dops[i].opcode==6) // BLEZ
57871462 5499 {
5500 emit_cmpimm(s1l,1);
5501 if(invert){
df4dc2b1 5502 nottaken=out;
7c3a5182 5503 emit_jge(DJT_1);
57871462 5504 }else{
643aeae3 5505 add_to_linker(out,ba[i],internal);
57871462 5506 emit_jl(0);
5507 }
5508 }
cf95b4f0 5509 if(dops[i].opcode==7) // BGTZ
57871462 5510 {
5511 emit_cmpimm(s1l,1);
5512 if(invert){
df4dc2b1 5513 nottaken=out;
7c3a5182 5514 emit_jl(DJT_1);
57871462 5515 }else{
643aeae3 5516 add_to_linker(out,ba[i],internal);
57871462 5517 emit_jge(0);
5518 }
5519 }
5520 if(invert) {
df4dc2b1 5521 if(taken) set_jump_target(taken, out);
57871462 5522 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
cf95b4f0 5523 if (match && (!internal || !dops[(ba[i]-start)>>2].is_ds)) {
57871462 5524 if(adj) {
2330734f 5525 emit_addimm(cc,-adj,cc);
643aeae3 5526 add_to_linker(out,ba[i],internal);
57871462 5527 }else{
5528 emit_addnop(13);
643aeae3 5529 add_to_linker(out,ba[i],internal*2);
57871462 5530 }
5531 emit_jmp(0);
5532 }else
5533 #endif
5534 {
2330734f 5535 if(adj) emit_addimm(cc,-adj,cc);
ad49de89 5536 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5537 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5538 if(internal)
5539 assem_debug("branch: internal\n");
5540 else
5541 assem_debug("branch: external\n");
cf95b4f0 5542 if (internal && dops[(ba[i] - start) >> 2].is_ds) {
57871462 5543 ds_assemble_entry(i);
5544 }
5545 else {
643aeae3 5546 add_to_linker(out,ba[i],internal);
57871462 5547 emit_jmp(0);
5548 }
5549 }
df4dc2b1 5550 set_jump_target(nottaken, out);
57871462 5551 }
5552
df4dc2b1 5553 if(nottaken1) set_jump_target(nottaken1, out);
57871462 5554 if(adj) {
2330734f 5555 if(!invert) emit_addimm(cc,adj,cc);
57871462 5556 }
5557 } // (!unconditional)
5558 } // if(ooo)
5559 else
5560 {
5561 // In-order execution (branch first)
df4dc2b1 5562 void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
57871462 5563 if(!unconditional&&!nop) {
57871462 5564 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5565 assert(s1l>=0);
cf95b4f0 5566 if((dops[i].opcode&0x2f)==4) // BEQ
57871462 5567 {
5568 if(s2l>=0) emit_cmp(s1l,s2l);
5569 else emit_test(s1l,s1l);
df4dc2b1 5570 nottaken=out;
7c3a5182 5571 emit_jne(DJT_2);
57871462 5572 }
cf95b4f0 5573 if((dops[i].opcode&0x2f)==5) // BNE
57871462 5574 {
5575 if(s2l>=0) emit_cmp(s1l,s2l);
5576 else emit_test(s1l,s1l);
df4dc2b1 5577 nottaken=out;
7c3a5182 5578 emit_jeq(DJT_2);
57871462 5579 }
cf95b4f0 5580 if((dops[i].opcode&0x2f)==6) // BLEZ
57871462 5581 {
5582 emit_cmpimm(s1l,1);
df4dc2b1 5583 nottaken=out;
7c3a5182 5584 emit_jge(DJT_2);
57871462 5585 }
cf95b4f0 5586 if((dops[i].opcode&0x2f)==7) // BGTZ
57871462 5587 {
5588 emit_cmpimm(s1l,1);
df4dc2b1 5589 nottaken=out;
7c3a5182 5590 emit_jl(DJT_2);
57871462 5591 }
5592 } // if(!unconditional)
5593 int adj;
5594 uint64_t ds_unneeded=branch_regs[i].u;
cf95b4f0 5595 ds_unneeded&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
57871462 5596 ds_unneeded|=1;
57871462 5597 // branch taken
5598 if(!nop) {
df4dc2b1 5599 if(taken) set_jump_target(taken, out);
57871462 5600 assem_debug("1:\n");
ad49de89 5601 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
57871462 5602 // load regs
cf95b4f0 5603 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
57871462 5604 address_generation(i+1,&branch_regs[i],0);
37387d8b 5605 if (ram_offset)
53358c1d 5606 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
ad49de89 5607 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
57871462 5608 ds_assemble(i+1,&branch_regs[i]);
5609 cc=get_reg(branch_regs[i].regmap,CCREG);
5610 if(cc==-1) {
5611 emit_loadreg(CCREG,cc=HOST_CCREG);
5612 // CHECK: Is the following instruction (fall thru) allocated ok?
5613 }
5614 assert(cc==HOST_CCREG);
ad49de89 5615 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5616 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5617 assem_debug("cycle count (adj)\n");
2330734f 5618 if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
ad49de89 5619 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5620 if(internal)
5621 assem_debug("branch: internal\n");
5622 else
5623 assem_debug("branch: external\n");
cf95b4f0 5624 if (internal && dops[(ba[i] - start) >> 2].is_ds) {
57871462 5625 ds_assemble_entry(i);
5626 }
5627 else {
643aeae3 5628 add_to_linker(out,ba[i],internal);
57871462 5629 emit_jmp(0);
5630 }
5631 }
5632 // branch not taken
57871462 5633 if(!unconditional) {
df4dc2b1 5634 if(nottaken1) set_jump_target(nottaken1, out);
5635 set_jump_target(nottaken, out);
57871462 5636 assem_debug("2:\n");
fe807a8a 5637 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
37387d8b 5638 // load regs
fe807a8a 5639 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
5640 address_generation(i+1,&branch_regs[i],0);
37387d8b 5641 if (ram_offset)
53358c1d 5642 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
37387d8b 5643 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
fe807a8a 5644 ds_assemble(i+1,&branch_regs[i]);
57871462 5645 cc=get_reg(branch_regs[i].regmap,CCREG);
fe807a8a 5646 if (cc == -1) {
57871462 5647 // Cycle count isn't in a register, temporarily load it then write it out
5648 emit_loadreg(CCREG,HOST_CCREG);
2330734f 5649 emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
b14b6a8f 5650 void *jaddr=out;
57871462 5651 emit_jns(0);
b14b6a8f 5652 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
57871462 5653 emit_storereg(CCREG,HOST_CCREG);
5654 }
5655 else{
5656 cc=get_reg(i_regmap,CCREG);
5657 assert(cc==HOST_CCREG);
2330734f 5658 emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
b14b6a8f 5659 void *jaddr=out;
57871462 5660 emit_jns(0);
fe807a8a 5661 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
57871462 5662 }
5663 }
5664 }
5665}
5666
2330734f 5667static void sjump_assemble(int i, const struct regstat *i_regs)
57871462 5668{
2330734f 5669 const signed char *i_regmap = i_regs->regmap;
57871462 5670 int cc;
5671 int match;
ad49de89 5672 match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
2acc46cd 5673 assem_debug("smatch=%d ooo=%d\n", match, dops[i].ooo);
ad49de89 5674 int s1l;
57871462 5675 int unconditional=0,nevertaken=0;
57871462 5676 int invert=0;
ad49de89 5677 int internal=internal_branch(ba[i]);
57871462 5678 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
57871462 5679 if(!match) invert=1;
5680 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5681 if(i>(ba[i]-start)>>2) invert=1;
5682 #endif
3968e69e 5683 #ifdef __aarch64__
5684 invert=1; // because of near cond. branches
5685 #endif
57871462 5686
cf95b4f0 5687 //if(dops[i].opcode2>=0x10) return; // FIXME (BxxZAL)
5688 //assert(dops[i].opcode2<0x10||dops[i].rs1==0); // FIXME (BxxZAL)
57871462 5689
cf95b4f0 5690 if(dops[i].ooo) {
5691 s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
57871462 5692 }
5693 else {
cf95b4f0 5694 s1l=get_reg(i_regmap,dops[i].rs1);
57871462 5695 }
cf95b4f0 5696 if(dops[i].rs1==0)
57871462 5697 {
cf95b4f0 5698 if(dops[i].opcode2&1) unconditional=1;
57871462 5699 else nevertaken=1;
5700 // These are never taken (r0 is never less than zero)
cf95b4f0 5701 //assert(dops[i].opcode2!=0);
5702 //assert(dops[i].opcode2!=2);
5703 //assert(dops[i].opcode2!=0x10);
5704 //assert(dops[i].opcode2!=0x12);
57871462 5705 }
57871462 5706
cf95b4f0 5707 if(dops[i].ooo) {
57871462 5708 // Out of order execution (delay slot first)
5709 //printf("OOOE\n");
5710 address_generation(i+1,i_regs,regs[i].regmap_entry);
5711 ds_assemble(i+1,i_regs);
5712 int adj;
5713 uint64_t bc_unneeded=branch_regs[i].u;
cf95b4f0 5714 bc_unneeded&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
57871462 5715 bc_unneeded|=1;
ad49de89 5716 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
cf95b4f0 5717 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,dops[i].rs1);
53358c1d 5718 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
cf95b4f0 5719 if(dops[i].rt1==31) {
57871462 5720 int rt,return_address;
57871462 5721 rt=get_reg(branch_regs[i].regmap,31);
5722 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5723 if(rt>=0) {
5724 // Save the PC even if the branch is not taken
5725 return_address=start+i*4+8;
5726 emit_movimm(return_address,rt); // PC into link register
5727 #ifdef IMM_PREFETCH
df4dc2b1 5728 if(!nevertaken) emit_prefetch(hash_table_get(return_address));
57871462 5729 #endif
5730 }
5731 }
5732 cc=get_reg(branch_regs[i].regmap,CCREG);
5733 assert(cc==HOST_CCREG);
9f51b4b9 5734 if(unconditional)
ad49de89 5735 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5736 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5737 assem_debug("cycle count (adj)\n");
5738 if(unconditional) {
5739 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5740 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
2330734f 5741 if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
ad49de89 5742 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5743 if(internal)
5744 assem_debug("branch: internal\n");
5745 else
5746 assem_debug("branch: external\n");
cf95b4f0 5747 if (internal && dops[(ba[i] - start) >> 2].is_ds) {
57871462 5748 ds_assemble_entry(i);
5749 }
5750 else {
643aeae3 5751 add_to_linker(out,ba[i],internal);
57871462 5752 emit_jmp(0);
5753 }
5754 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5755 if(((u_int)out)&7) emit_addnop(0);
5756 #endif
5757 }
5758 }
5759 else if(nevertaken) {
2330734f 5760 emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
b14b6a8f 5761 void *jaddr=out;
57871462 5762 emit_jns(0);
b14b6a8f 5763 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
57871462 5764 }
5765 else {
df4dc2b1 5766 void *nottaken = NULL;
57871462 5767 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
2330734f 5768 if(adj&&!invert) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
57871462 5769 {
5770 assert(s1l>=0);
cf95b4f0 5771 if((dops[i].opcode2&0xf)==0) // BLTZ/BLTZAL
57871462 5772 {
5773 emit_test(s1l,s1l);
5774 if(invert){
df4dc2b1 5775 nottaken=out;
7c3a5182 5776 emit_jns(DJT_1);
57871462 5777 }else{
643aeae3 5778 add_to_linker(out,ba[i],internal);
57871462 5779 emit_js(0);
5780 }
5781 }
cf95b4f0 5782 if((dops[i].opcode2&0xf)==1) // BGEZ/BGEZAL
57871462 5783 {
5784 emit_test(s1l,s1l);
5785 if(invert){
df4dc2b1 5786 nottaken=out;
7c3a5182 5787 emit_js(DJT_1);
57871462 5788 }else{
643aeae3 5789 add_to_linker(out,ba[i],internal);
57871462 5790 emit_jns(0);
5791 }
5792 }
ad49de89 5793 }
9f51b4b9 5794
57871462 5795 if(invert) {
5796 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
cf95b4f0 5797 if (match && (!internal || !dops[(ba[i] - start) >> 2].is_ds)) {
57871462 5798 if(adj) {
2330734f 5799 emit_addimm(cc,-adj,cc);
643aeae3 5800 add_to_linker(out,ba[i],internal);
57871462 5801 }else{
5802 emit_addnop(13);
643aeae3 5803 add_to_linker(out,ba[i],internal*2);
57871462 5804 }
5805 emit_jmp(0);
5806 }else
5807 #endif
5808 {
2330734f 5809 if(adj) emit_addimm(cc,-adj,cc);
ad49de89 5810 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5811 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5812 if(internal)
5813 assem_debug("branch: internal\n");
5814 else
5815 assem_debug("branch: external\n");
cf95b4f0 5816 if (internal && dops[(ba[i] - start) >> 2].is_ds) {
57871462 5817 ds_assemble_entry(i);
5818 }
5819 else {
643aeae3 5820 add_to_linker(out,ba[i],internal);
57871462 5821 emit_jmp(0);
5822 }
5823 }
df4dc2b1 5824 set_jump_target(nottaken, out);
57871462 5825 }
5826
5827 if(adj) {
2330734f 5828 if(!invert) emit_addimm(cc,adj,cc);
57871462 5829 }
5830 } // (!unconditional)
5831 } // if(ooo)
5832 else
5833 {
5834 // In-order execution (branch first)
5835 //printf("IOE\n");
df4dc2b1 5836 void *nottaken = NULL;
cf95b4f0 5837 if(dops[i].rt1==31) {
a6491170 5838 int rt,return_address;
a6491170 5839 rt=get_reg(branch_regs[i].regmap,31);
5840 if(rt>=0) {
5841 // Save the PC even if the branch is not taken
5842 return_address=start+i*4+8;
5843 emit_movimm(return_address,rt); // PC into link register
5844 #ifdef IMM_PREFETCH
df4dc2b1 5845 emit_prefetch(hash_table_get(return_address));
a6491170 5846 #endif
5847 }
5848 }
57871462 5849 if(!unconditional) {
5850 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
57871462 5851 assert(s1l>=0);
cf95b4f0 5852 if((dops[i].opcode2&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
57871462 5853 {
5854 emit_test(s1l,s1l);
df4dc2b1 5855 nottaken=out;
7c3a5182 5856 emit_jns(DJT_1);
57871462 5857 }
cf95b4f0 5858 if((dops[i].opcode2&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
57871462 5859 {
5860 emit_test(s1l,s1l);
df4dc2b1 5861 nottaken=out;
7c3a5182 5862 emit_js(DJT_1);
57871462 5863 }
57871462 5864 } // if(!unconditional)
5865 int adj;
5866 uint64_t ds_unneeded=branch_regs[i].u;
cf95b4f0 5867 ds_unneeded&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
57871462 5868 ds_unneeded|=1;
57871462 5869 // branch taken
5870 if(!nevertaken) {
5871 //assem_debug("1:\n");
ad49de89 5872 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
57871462 5873 // load regs
cf95b4f0 5874 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
57871462 5875 address_generation(i+1,&branch_regs[i],0);
37387d8b 5876 if (ram_offset)
53358c1d 5877 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
ad49de89 5878 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
57871462 5879 ds_assemble(i+1,&branch_regs[i]);
5880 cc=get_reg(branch_regs[i].regmap,CCREG);
5881 if(cc==-1) {
5882 emit_loadreg(CCREG,cc=HOST_CCREG);
5883 // CHECK: Is the following instruction (fall thru) allocated ok?
5884 }
5885 assert(cc==HOST_CCREG);
ad49de89 5886 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5887 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5888 assem_debug("cycle count (adj)\n");
2330734f 5889 if(adj) emit_addimm(cc, ccadj[i] + CLOCK_ADJUST(2) - adj, cc);
ad49de89 5890 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
57871462 5891 if(internal)
5892 assem_debug("branch: internal\n");
5893 else
5894 assem_debug("branch: external\n");
cf95b4f0 5895 if (internal && dops[(ba[i] - start) >> 2].is_ds) {
57871462 5896 ds_assemble_entry(i);
5897 }
5898 else {
643aeae3 5899 add_to_linker(out,ba[i],internal);
57871462 5900 emit_jmp(0);
5901 }
5902 }
5903 // branch not taken
57871462 5904 if(!unconditional) {
df4dc2b1 5905 set_jump_target(nottaken, out);
57871462 5906 assem_debug("1:\n");
fe807a8a 5907 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5908 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
5909 address_generation(i+1,&branch_regs[i],0);
5a18ce2e 5910 if (ram_offset)
53358c1d 5911 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
5a18ce2e 5912 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
fe807a8a 5913 ds_assemble(i+1,&branch_regs[i]);
57871462 5914 cc=get_reg(branch_regs[i].regmap,CCREG);
fe807a8a 5915 if (cc == -1) {
57871462 5916 // Cycle count isn't in a register, temporarily load it then write it out
5917 emit_loadreg(CCREG,HOST_CCREG);
2330734f 5918 emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), HOST_CCREG);
b14b6a8f 5919 void *jaddr=out;
57871462 5920 emit_jns(0);
b14b6a8f 5921 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
57871462 5922 emit_storereg(CCREG,HOST_CCREG);
5923 }
5924 else{
5925 cc=get_reg(i_regmap,CCREG);
5926 assert(cc==HOST_CCREG);
2330734f 5927 emit_addimm_and_set_flags(ccadj[i] + CLOCK_ADJUST(2), cc);
b14b6a8f 5928 void *jaddr=out;
57871462 5929 emit_jns(0);
fe807a8a 5930 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
57871462 5931 }
5932 }
5933 }
5934}
5935
670c0f22 5936static void check_regmap(signed char *regmap)
5937{
5938#ifndef NDEBUG
5939 int i,j;
5940 for (i = 0; i < HOST_REGS; i++) {
5941 if (regmap[i] < 0)
5942 continue;
5943 for (j = i + 1; j < HOST_REGS; j++)
5944 assert(regmap[i] != regmap[j]);
5945 }
5946#endif
5947}
5948
4600ba03 5949#ifdef DISASM
2acc46cd 5950#include <inttypes.h>
53dc27f6 5951static char insn[MAXBLOCK][10];
5952
5953#define set_mnemonic(i_, n_) \
5954 strcpy(insn[i_], n_)
5955
2acc46cd 5956void print_regmap(const char *name, const signed char *regmap)
5957{
5958 char buf[5];
5959 int i, l;
5960 fputs(name, stdout);
5961 for (i = 0; i < HOST_REGS; i++) {
5962 l = 0;
5963 if (regmap[i] >= 0)
5964 l = snprintf(buf, sizeof(buf), "$%d", regmap[i]);
5965 for (; l < 3; l++)
5966 buf[l] = ' ';
5967 buf[l] = 0;
5968 printf(" r%d=%s", i, buf);
5969 }
5970 fputs("\n", stdout);
5971}
5972
57871462 5973 /* disassembly */
5974void disassemble_inst(int i)
5975{
cf95b4f0 5976 if (dops[i].bt) printf("*"); else printf(" ");
5977 switch(dops[i].itype) {
57871462 5978 case UJUMP:
5979 printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
5980 case CJUMP:
cf95b4f0 5981 printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2,i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
57871462 5982 case SJUMP:
cf95b4f0 5983 printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
57871462 5984 case RJUMP:
cf95b4f0 5985 if (dops[i].opcode==0x9&&dops[i].rt1!=31)
5986 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1);
5067f341 5987 else
cf95b4f0 5988 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
5067f341 5989 break;
57871462 5990 case IMM16:
cf95b4f0 5991 if(dops[i].opcode==0xf) //LUI
5992 printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],dops[i].rt1,imm[i]&0xffff);
57871462 5993 else
cf95b4f0 5994 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
57871462 5995 break;
5996 case LOAD:
5997 case LOADLR:
cf95b4f0 5998 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
57871462 5999 break;
6000 case STORE:
6001 case STORELR:
cf95b4f0 6002 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rs2,dops[i].rs1,imm[i]);
57871462 6003 break;
6004 case ALU:
6005 case SHIFT:
cf95b4f0 6006 printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,dops[i].rs2);
57871462 6007 break;
6008 case MULTDIV:
cf95b4f0 6009 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2);
57871462 6010 break;
6011 case SHIFTIMM:
cf95b4f0 6012 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,imm[i]);
57871462 6013 break;
6014 case MOV:
cf95b4f0 6015 if((dops[i].opcode2&0x1d)==0x10)
6016 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rt1);
6017 else if((dops[i].opcode2&0x1d)==0x11)
6018 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
57871462 6019 else
6020 printf (" %x: %s\n",start+i*4,insn[i]);
6021 break;
6022 case COP0:
cf95b4f0 6023 if(dops[i].opcode2==0)
6024 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC0
6025 else if(dops[i].opcode2==4)
6026 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC0
57871462 6027 else printf (" %x: %s\n",start+i*4,insn[i]);
6028 break;
6029 case COP1:
cf95b4f0 6030 if(dops[i].opcode2<3)
6031 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC1
6032 else if(dops[i].opcode2>3)
6033 printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC1
57871462 6034 else printf (" %x: %s\n",start+i*4,insn[i]);
6035 break;
b9b61529 6036 case COP2:
cf95b4f0 6037 if(dops[i].opcode2<3)
6038 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC2
6039 else if(dops[i].opcode2>3)
6040 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC2
b9b61529 6041 else printf (" %x: %s\n",start+i*4,insn[i]);
6042 break;
57871462 6043 case C1LS:
cf95b4f0 6044 printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,dops[i].rs1,imm[i]);
57871462 6045 break;
b9b61529 6046 case C2LS:
cf95b4f0 6047 printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,dops[i].rs1,imm[i]);
b9b61529 6048 break;
1e973cb0 6049 case INTCALL:
6050 printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
6051 break;
57871462 6052 default:
6053 //printf (" %s %8x\n",insn[i],source[i]);
6054 printf (" %x: %s\n",start+i*4,insn[i]);
6055 }
2acc46cd 6056 return;
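  // (unreachable by default - drop the return above to also dump the regmaps)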
6057 printf("D: %"PRIu64" WD: %"PRIu64" U: %"PRIu64"\n",
6058 regs[i].dirty, regs[i].wasdirty, unneeded_reg[i]);
6059 print_regmap("pre: ", regmap_pre[i]);
6060 print_regmap("entry: ", regs[i].regmap_entry);
6061 print_regmap("map: ", regs[i].regmap);
6062 if (dops[i].is_jump) {
6063 print_regmap("bentry:", branch_regs[i].regmap_entry);
6064 print_regmap("bmap: ", branch_regs[i].regmap);
6065 }
57871462 6066}
4600ba03 6067#else
53dc27f6 6068#define set_mnemonic(i_, n_)
4600ba03 6069static void disassemble_inst(int i) {}
6070#endif // DISASM
57871462 6071
d848b60a 6072#define DRC_TEST_VAL 0x74657374
6073
be516ebe 6074static void new_dynarec_test(void)
d848b60a 6075{
be516ebe 6076 int (*testfunc)(void);
d148d265 6077 void *beginning;
be516ebe 6078 int ret[2];
6079 size_t i;
d148d265 6080
687b4580 6081 // check structure linkage
7c3a5182 6082 if ((u_char *)rcnts - (u_char *)&psxRegs != sizeof(psxRegs))
687b4580 6083 {
7c3a5182 6084 SysPrintf("linkage_arm* miscompilation/breakage detected.\n");
687b4580 6085 }
6086
761fdd0a 6087 SysPrintf("testing if we can run recompiled code @%p...\n", out);
3039c914 6088 ((volatile u_int *)(out + ndrc_write_ofs))[0]++; // make the cache dirty
be516ebe 6089
6090 for (i = 0; i < ARRAY_SIZE(ret); i++) {
2a014d73 6091 out = ndrc->translation_cache;
be516ebe 6092 beginning = start_block();
6093 emit_movimm(DRC_TEST_VAL + i, 0); // test
6094 emit_ret();
6095 literal_pool(0);
6096 end_block(beginning);
6097 testfunc = beginning;
6098 ret[i] = testfunc();
6099 }
6100
6101 if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
d848b60a 6102 SysPrintf("test passed.\n");
6103 else
be516ebe 6104 SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
2a014d73 6105 out = ndrc->translation_cache;
d848b60a 6106}
6107
dc990066 6108// clear the state completely, instead of just marking
6109// things invalid like invalidate_all_pages() does
919981d0 6110void new_dynarec_clear_full(void)
57871462 6111{
57871462 6112 int n;
2a014d73 6113 out = ndrc->translation_cache;
35775df7 6114 memset(invalid_code,1,sizeof(invalid_code));
6115 memset(hash_table,0xff,sizeof(hash_table));
57871462 6116 memset(mini_ht,-1,sizeof(mini_ht));
dc990066 6117 memset(shadow,0,sizeof(shadow));
57871462 6118 copy=shadow;
93c0345b 6119 expirep = EXPIRITY_OFFSET;
57871462 6120 pending_exception=0;
6121 literalcount=0;
57871462 6122 stop_after_jal=0;
9be4ba64 6123 inv_code_start=inv_code_end=~0;
7f94b097 6124 hack_addr=0;
39b71d9a 6125 f1_hack=0;
93c0345b 6126 for (n = 0; n < ARRAY_SIZE(blocks); n++)
6127 blocks_clear(&blocks[n]);
b7ad2f2c 6128 for (n = 0; n < ARRAY_SIZE(jumps); n++) {
6129 free(jumps[n]);
6130 jumps[n] = NULL;
6131 }
104df9d3 6132 stat_clear(stat_blocks);
6133 stat_clear(stat_links);
32631e6a 6134
6135 cycle_multiplier_old = cycle_multiplier;
6136 new_dynarec_hacks_old = new_dynarec_hacks;
dc990066 6137}
6138
919981d0 6139void new_dynarec_init(void)
dc990066 6140{
66ea165f 6141 SysPrintf("Init new dynarec, ndrc size %x\n", (int)sizeof(*ndrc));
1e212a25 6142
0aeb0cb9 6143#ifdef _3DS
6144 check_rosalina();
6145#endif
2a014d73 6146#ifdef BASE_ADDR_DYNAMIC
1e212a25 6147 #ifdef VITA
0aeb0cb9 6148 sceBlock = getVMBlock(); //sceKernelAllocMemBlockForVM("code", sizeof(*ndrc));
66ea165f 6149 if (sceBlock <= 0)
6150 SysPrintf("sceKernelAllocMemBlockForVM failed: %x\n", sceBlock);
2a014d73 6151 int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&ndrc);
1e212a25 6152 if (ret < 0)
66ea165f 6153 SysPrintf("sceKernelGetMemBlockBase failed: %x\n", ret);
0aeb0cb9 6154 sceKernelOpenVMDomain();
6155 sceClibPrintf("translation_cache = 0x%08lx\n ", (long)ndrc->translation_cache);
6156 #elif defined(_MSC_VER)
6157 ndrc = VirtualAlloc(NULL, sizeof(*ndrc), MEM_COMMIT | MEM_RESERVE,
6158 PAGE_EXECUTE_READWRITE);
3039c914 6159 #elif defined(HAVE_LIBNX)
6160 Result rc = jitCreate(&g_jit, sizeof(*ndrc));
6161 if (R_FAILED(rc))
6162 SysPrintf("jitCreate failed: %08x\n", rc);
6163 SysPrintf("jitCreate: RX: %p RW: %p type: %d\n", g_jit.rx_addr, g_jit.rw_addr, g_jit.type);
6164 ndrc = g_jit.rx_addr;
6165 ndrc_write_ofs = (char *)g_jit.rw_addr - (char *)ndrc;
1e212a25 6166 #else
2a014d73 6167 uintptr_t desired_addr = 0;
3039c914 6168 int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
6169 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
6170 int fd = -1;
2a014d73 6171 #ifdef __ELF__
6172 extern char _end;
6173 desired_addr = ((uintptr_t)&_end + 0xffffff) & ~0xffffffl;
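  // round the hint up to the next 16MB boundary past the program image,
  // presumably to keep the translation cache within branch range of this code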
6174 #endif
3039c914 6175 #ifdef NDRC_WRITE_OFFSET
6176 // mostly for testing
6177 fd = open("/dev/shm/pcsxr", O_CREAT | O_RDWR, 0600);
6178 ftruncate(fd, sizeof(*ndrc));
6179 void *mw = mmap(NULL, sizeof(*ndrc), PROT_READ | PROT_WRITE,
6180 (flags = MAP_SHARED), fd, 0);
6181 assert(mw != MAP_FAILED);
6182 prot = PROT_READ | PROT_EXEC;
6183 #endif
6184 ndrc = mmap((void *)desired_addr, sizeof(*ndrc), prot, flags, fd, 0);
2a014d73 6185 if (ndrc == MAP_FAILED) {
d848b60a 6186 SysPrintf("mmap() failed: %s\n", strerror(errno));
1e212a25 6187 abort();
d848b60a 6188 }
3039c914 6189 #ifdef NDRC_WRITE_OFFSET
6190 ndrc_write_ofs = (char *)mw - (char *)ndrc;
6191 #endif
1e212a25 6192 #endif
6193#else
6194 #ifndef NO_WRITE_EXEC
bdeade46 6195 // not all systems allow execute in data segment by default
761fdd0a 6196 // size must be 4K aligned for 3DS?
6197 if (mprotect(ndrc, sizeof(*ndrc),
2a014d73 6198 PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
d848b60a 6199 SysPrintf("mprotect() failed: %s\n", strerror(errno));
1e212a25 6200 #endif
dc990066 6201#endif
2a014d73 6202 out = ndrc->translation_cache;
2573466a 6203 cycle_multiplier=200;
dc990066 6204 new_dynarec_clear_full();
6205#ifdef HOST_IMM8
6206 // Copy this into local area so we don't have to put it in every literal pool
6207 invc_ptr=invalid_code;
6208#endif
57871462 6209 arch_init();
d848b60a 6210 new_dynarec_test();
01d26796 6211 ram_offset=(uintptr_t)rdram-0x80000000;
b105cf4f 6212 if (ram_offset!=0)
c43b5311 6213 SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
398d6924 6214 SysPrintf("Mapped (RAM/scrp/ROM/LUTs/TC):\n");
6215 SysPrintf("%p/%p/%p/%p/%p\n", psxM, psxH, psxR, mem_rtab, out);
57871462 6216}
6217
919981d0 6218void new_dynarec_cleanup(void)
57871462 6219{
6220 int n;
2a014d73 6221#ifdef BASE_ADDR_DYNAMIC
1e212a25 6222 #ifdef VITA
66ea165f 6223 // sceBlock is managed by retroarch's bootstrap code
9c67c98f 6224 //sceKernelFreeMemBlock(sceBlock);
6225 //sceBlock = -1;
3039c914 6226 #elif defined(HAVE_LIBNX)
6227 jitClose(&g_jit);
6228 ndrc = NULL;
1e212a25 6229 #else
2a014d73 6230 if (munmap(ndrc, sizeof(*ndrc)) < 0)
1e212a25 6231 SysPrintf("munmap() failed\n");
3039c914 6232 ndrc = NULL;
bdeade46 6233 #endif
1e212a25 6234#endif
93c0345b 6235 for (n = 0; n < ARRAY_SIZE(blocks); n++)
6236 blocks_clear(&blocks[n]);
b7ad2f2c 6237 for (n = 0; n < ARRAY_SIZE(jumps); n++) {
6238 free(jumps[n]);
6239 jumps[n] = NULL;
6240 }
104df9d3 6241 stat_clear(stat_blocks);
6242 stat_clear(stat_links);
57871462 6243 #ifdef ROM_COPY
c43b5311 6244 if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
57871462 6245 #endif
ece032e6 6246 new_dynarec_print_stats();
57871462 6247}
6248
03f55e6b 6249static u_int *get_source_start(u_int addr, u_int *limit)
57871462 6250{
03f55e6b 6251 if (addr < 0x00200000 ||
a3203cf4 6252 (0xa0000000 <= addr && addr < 0xa0200000))
6253 {
03f55e6b 6254 // used for BIOS calls mostly?
6255 *limit = (addr&0xa0000000)|0x00200000;
01d26796 6256 return (u_int *)(rdram + (addr&0x1fffff));
03f55e6b 6257 }
6258 else if (!Config.HLE && (
6259 /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
a3203cf4 6260 (0xbfc00000 <= addr && addr < 0xbfc80000)))
6261 {
6262 // BIOS. The multiplier should be much higher as it's uncached 8bit mem,
6263 // but timings in PCSX are too tied to the interpreter's BIAS
d62c125a 6264 if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
24058131 6265 cycle_multiplier_active = 200;
a3203cf4 6266
03f55e6b 6267 *limit = (addr & 0xfff00000) | 0x80000;
01d26796 6268 return (u_int *)((u_char *)psxR + (addr&0x7ffff));
03f55e6b 6269 }
6270 else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
6271 *limit = (addr & 0x80600000) + 0x00200000;
01d26796 6272 return (u_int *)(rdram + (addr&0x1fffff));
03f55e6b 6273 }
581335b0 6274 return NULL;
03f55e6b 6275}
6276
6277static u_int scan_for_ret(u_int addr)
6278{
6279 u_int limit = 0;
6280 u_int *mem;
6281
6282 mem = get_source_start(addr, &limit);
6283 if (mem == NULL)
6284 return addr;
6285
6286 if (limit > addr + 0x1000)
6287 limit = addr + 0x1000;
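  // 0x03e00008 is SPECIAL/JR with rs=31, i.e. (31<<21)|0x08 == "jr $ra";
  // the +8 below skips past it and its delay slot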
6288 for (; addr < limit; addr += 4, mem++) {
6289 if (*mem == 0x03e00008) // jr $ra
6290 return addr + 8;
57871462 6291 }
581335b0 6292 return addr;
03f55e6b 6293}
6294
6295struct savestate_block {
6296 uint32_t addr;
6297 uint32_t regflags;
6298};
6299
6300static int addr_cmp(const void *p1_, const void *p2_)
6301{
6302 const struct savestate_block *p1 = p1_, *p2 = p2_;
6303 return p1->addr - p2->addr;
6304}
6305
6306int new_dynarec_save_blocks(void *save, int size)
6307{
104df9d3 6308 struct savestate_block *sblocks = save;
6309 int maxcount = size / sizeof(sblocks[0]);
03f55e6b 6310 struct savestate_block tmp_blocks[1024];
104df9d3 6311 struct block_info *block;
03f55e6b 6312 int p, s, d, o, bcnt;
6313 u_int addr;
6314
6315 o = 0;
104df9d3 6316 for (p = 0; p < ARRAY_SIZE(blocks); p++) {
03f55e6b 6317 bcnt = 0;
104df9d3 6318 for (block = blocks[p]; block != NULL; block = block->next) {
6319 if (block->is_dirty)
6320 continue;
6321 tmp_blocks[bcnt].addr = block->start;
6322 tmp_blocks[bcnt].regflags = block->reg_sv_flags;
03f55e6b 6323 bcnt++;
6324 }
6325 if (bcnt < 1)
6326 continue;
6327 qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
6328
6329 addr = tmp_blocks[0].addr;
6330 for (s = d = 0; s < bcnt; s++) {
6331 if (tmp_blocks[s].addr < addr)
6332 continue;
6333 if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
6334 tmp_blocks[d++] = tmp_blocks[s];
6335 addr = scan_for_ret(tmp_blocks[s].addr);
6336 }
6337
6338 if (o + d > maxcount)
6339 d = maxcount - o;
104df9d3 6340 memcpy(&sblocks[o], tmp_blocks, d * sizeof(sblocks[0]));
03f55e6b 6341 o += d;
6342 }
6343
104df9d3 6344 return o * sizeof(sblocks[0]);
03f55e6b 6345}
6346
6347void new_dynarec_load_blocks(const void *save, int size)
6348{
104df9d3 6349 const struct savestate_block *sblocks = save;
6350 int count = size / sizeof(sblocks[0]);
6351 struct block_info *block;
03f55e6b 6352 u_int regs_save[32];
104df9d3 6353 u_int page;
03f55e6b 6354 uint32_t f;
6355 int i, b;
6356
104df9d3 6357 // restore clean blocks, if any
6358 for (page = 0, b = i = 0; page < ARRAY_SIZE(blocks); page++) {
6359 for (block = blocks[page]; block != NULL; block = block->next, b++) {
6360 if (!block->is_dirty)
6361 continue;
6362 assert(block->source && block->copy);
6363 if (memcmp(block->source, block->copy, block->len))
6364 continue;
6365
6366 // see try_restore_block
6367 block->is_dirty = 0;
6368 mark_invalid_code(block->start, block->len, 0);
6369 i++;
6370 }
6371 }
6372 inv_debug("load_blocks: %d/%d clean blocks\n", i, b);
03f55e6b 6373
6374 // change GPRs for speculation to at least partially work..
6375 memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
6376 for (i = 1; i < 32; i++)
6377 psxRegs.GPR.r[i] = 0x80000000;
6378
6379 for (b = 0; b < count; b++) {
104df9d3 6380 for (f = sblocks[b].regflags, i = 0; f; f >>= 1, i++) {
03f55e6b 6381 if (f & 1)
6382 psxRegs.GPR.r[i] = 0x1f800000;
6383 }
6384
104df9d3 6385 ndrc_get_addr_ht(sblocks[b].addr);
03f55e6b 6386
104df9d3 6387 for (f = sblocks[b].regflags, i = 0; f; f >>= 1, i++) {
03f55e6b 6388 if (f & 1)
6389 psxRegs.GPR.r[i] = 0x80000000;
6390 }
6391 }
6392
6393 memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
6394}
6395
ece032e6 6396void new_dynarec_print_stats(void)
6397{
6398#ifdef STAT_PRINT
104df9d3 6399 printf("cc %3d,%3d,%3d lu%6d,%3d,%3d c%3d inv%3d,%3d tc_offs %zu b %u,%u\n",
ece032e6 6400 stat_bc_pre, stat_bc_direct, stat_bc_restore,
104df9d3 6401 stat_ht_lookups, stat_jump_in_lookups, stat_restore_tries,
6402 stat_restore_compares, stat_inv_addr_calls, stat_inv_hits,
6403 out - ndrc->translation_cache, stat_blocks, stat_links);
ece032e6 6404 stat_bc_direct = stat_bc_pre = stat_bc_restore =
104df9d3 6405 stat_ht_lookups = stat_jump_in_lookups = stat_restore_tries =
6406 stat_restore_compares = stat_inv_addr_calls = stat_inv_hits = 0;
ece032e6 6407#endif
6408}
6409
7f94b097 6410static int apply_hacks(void)
24058131 6411{
6412 int i;
6413 if (HACK_ENABLED(NDHACK_NO_COMPAT_HACKS))
7f94b097 6414 return 0;
24058131 6415 /* special hack(s) */
6416 for (i = 0; i < slen - 4; i++)
6417 {
6418 // lui a4, 0xf200; jal <rcnt_read>; addu a0, 2; slti v0, 28224
6419 if (source[i] == 0x3c04f200 && dops[i+1].itype == UJUMP
6420 && source[i+2] == 0x34840002 && dops[i+3].opcode == 0x0a
6421 && imm[i+3] == 0x6e40 && dops[i+3].rs1 == 2)
6422 {
6423 SysPrintf("PE2 hack @%08x\n", start + (i+3)*4);
6424 dops[i + 3].itype = NOP;
6425 }
6426 }
6427 i = slen;
6428 if (i > 10 && source[i-1] == 0 && source[i-2] == 0x03e00008
6429 && source[i-4] == 0x8fbf0018 && source[i-6] == 0x00c0f809
6430 && dops[i-7].itype == STORE)
6431 {
6432 i = i-8;
6433 if (dops[i].itype == IMM16)
6434 i--;
6435 // swl r2, 15(r6); swr r2, 12(r6); sw r6, *; jalr r6
6436 if (dops[i].itype == STORELR && dops[i].rs1 == 6
6437 && dops[i-1].itype == STORELR && dops[i-1].rs1 == 6)
6438 {
7f94b097 6439 SysPrintf("F1 hack from %08x, old dst %08x\n", start, hack_addr);
6440 f1_hack = 1;
6441 return 1;
24058131 6442 }
6443 }
7f94b097 6444 return 0;
24058131 6445}
6446
4149788d 6447static noinline void pass1_disassemble(u_int pagelimit)
03f55e6b 6448{
4149788d 6449 int i, j, done = 0, ni_count = 0;
57871462 6450 unsigned int type,op,op2;
6451
7ebfcedf 6452 for (i = 0; !done; i++)
6453 {
6454 memset(&dops[i], 0, sizeof(dops[i]));
cf95b4f0 6455 op2=0;
e1190b87 6456 minimum_free_regs[i]=0;
cf95b4f0 6457 dops[i].opcode=op=source[i]>>26;
57871462 6458 switch(op)
6459 {
53dc27f6 6460 case 0x00: set_mnemonic(i, "special"); type=NI;
57871462 6461 op2=source[i]&0x3f;
6462 switch(op2)
6463 {
53dc27f6 6464 case 0x00: set_mnemonic(i, "SLL"); type=SHIFTIMM; break;
6465 case 0x02: set_mnemonic(i, "SRL"); type=SHIFTIMM; break;
6466 case 0x03: set_mnemonic(i, "SRA"); type=SHIFTIMM; break;
6467 case 0x04: set_mnemonic(i, "SLLV"); type=SHIFT; break;
6468 case 0x06: set_mnemonic(i, "SRLV"); type=SHIFT; break;
6469 case 0x07: set_mnemonic(i, "SRAV"); type=SHIFT; break;
6470 case 0x08: set_mnemonic(i, "JR"); type=RJUMP; break;
6471 case 0x09: set_mnemonic(i, "JALR"); type=RJUMP; break;
6472 case 0x0C: set_mnemonic(i, "SYSCALL"); type=SYSCALL; break;
6473 case 0x0D: set_mnemonic(i, "BREAK"); type=SYSCALL; break;
6474 case 0x0F: set_mnemonic(i, "SYNC"); type=OTHER; break;
6475 case 0x10: set_mnemonic(i, "MFHI"); type=MOV; break;
6476 case 0x11: set_mnemonic(i, "MTHI"); type=MOV; break;
6477 case 0x12: set_mnemonic(i, "MFLO"); type=MOV; break;
6478 case 0x13: set_mnemonic(i, "MTLO"); type=MOV; break;
6479 case 0x18: set_mnemonic(i, "MULT"); type=MULTDIV; break;
6480 case 0x19: set_mnemonic(i, "MULTU"); type=MULTDIV; break;
6481 case 0x1A: set_mnemonic(i, "DIV"); type=MULTDIV; break;
6482 case 0x1B: set_mnemonic(i, "DIVU"); type=MULTDIV; break;
6483 case 0x20: set_mnemonic(i, "ADD"); type=ALU; break;
6484 case 0x21: set_mnemonic(i, "ADDU"); type=ALU; break;
6485 case 0x22: set_mnemonic(i, "SUB"); type=ALU; break;
6486 case 0x23: set_mnemonic(i, "SUBU"); type=ALU; break;
6487 case 0x24: set_mnemonic(i, "AND"); type=ALU; break;
6488 case 0x25: set_mnemonic(i, "OR"); type=ALU; break;
6489 case 0x26: set_mnemonic(i, "XOR"); type=ALU; break;
6490 case 0x27: set_mnemonic(i, "NOR"); type=ALU; break;
6491 case 0x2A: set_mnemonic(i, "SLT"); type=ALU; break;
6492 case 0x2B: set_mnemonic(i, "SLTU"); type=ALU; break;
6493 case 0x30: set_mnemonic(i, "TGE"); type=NI; break;
6494 case 0x31: set_mnemonic(i, "TGEU"); type=NI; break;
6495 case 0x32: set_mnemonic(i, "TLT"); type=NI; break;
6496 case 0x33: set_mnemonic(i, "TLTU"); type=NI; break;
6497 case 0x34: set_mnemonic(i, "TEQ"); type=NI; break;
6498 case 0x36: set_mnemonic(i, "TNE"); type=NI; break;
71e490c5 6499#if 0
53dc27f6 6500 case 0x14: set_mnemonic(i, "DSLLV"); type=SHIFT; break;
6501 case 0x16: set_mnemonic(i, "DSRLV"); type=SHIFT; break;
6502 case 0x17: set_mnemonic(i, "DSRAV"); type=SHIFT; break;
6503 case 0x1C: set_mnemonic(i, "DMULT"); type=MULTDIV; break;
6504 case 0x1D: set_mnemonic(i, "DMULTU"); type=MULTDIV; break;
6505 case 0x1E: set_mnemonic(i, "DDIV"); type=MULTDIV; break;
6506 case 0x1F: set_mnemonic(i, "DDIVU"); type=MULTDIV; break;
6507 case 0x2C: set_mnemonic(i, "DADD"); type=ALU; break;
6508 case 0x2D: set_mnemonic(i, "DADDU"); type=ALU; break;
6509 case 0x2E: set_mnemonic(i, "DSUB"); type=ALU; break;
6510 case 0x2F: set_mnemonic(i, "DSUBU"); type=ALU; break;
6511 case 0x38: set_mnemonic(i, "DSLL"); type=SHIFTIMM; break;
6512 case 0x3A: set_mnemonic(i, "DSRL"); type=SHIFTIMM; break;
6513 case 0x3B: set_mnemonic(i, "DSRA"); type=SHIFTIMM; break;
6514 case 0x3C: set_mnemonic(i, "DSLL32"); type=SHIFTIMM; break;
6515 case 0x3E: set_mnemonic(i, "DSRL32"); type=SHIFTIMM; break;
6516 case 0x3F: set_mnemonic(i, "DSRA32"); type=SHIFTIMM; break;
7f2607ea 6517#endif
57871462 6518 }
6519 break;
53dc27f6 6520 case 0x01: set_mnemonic(i, "regimm"); type=NI;
57871462 6521 op2=(source[i]>>16)&0x1f;
6522 switch(op2)
6523 {
53dc27f6 6524 case 0x00: set_mnemonic(i, "BLTZ"); type=SJUMP; break;
6525 case 0x01: set_mnemonic(i, "BGEZ"); type=SJUMP; break;
6526 //case 0x02: set_mnemonic(i, "BLTZL"); type=SJUMP; break;
6527 //case 0x03: set_mnemonic(i, "BGEZL"); type=SJUMP; break;
6528 //case 0x08: set_mnemonic(i, "TGEI"); type=NI; break;
6529 //case 0x09: set_mnemonic(i, "TGEIU"); type=NI; break;
6530 //case 0x0A: set_mnemonic(i, "TLTI"); type=NI; break;
6531 //case 0x0B: set_mnemonic(i, "TLTIU"); type=NI; break;
6532 //case 0x0C: set_mnemonic(i, "TEQI"); type=NI; break;
6533 //case 0x0E: set_mnemonic(i, "TNEI"); type=NI; break;
6534 case 0x10: set_mnemonic(i, "BLTZAL"); type=SJUMP; break;
6535 case 0x11: set_mnemonic(i, "BGEZAL"); type=SJUMP; break;
6536 //case 0x12: set_mnemonic(i, "BLTZALL"); type=SJUMP; break;
6537 //case 0x13: set_mnemonic(i, "BGEZALL"); type=SJUMP; break;
57871462 6538 }
6539 break;
53dc27f6 6540 case 0x02: set_mnemonic(i, "J"); type=UJUMP; break;
6541 case 0x03: set_mnemonic(i, "JAL"); type=UJUMP; break;
6542 case 0x04: set_mnemonic(i, "BEQ"); type=CJUMP; break;
6543 case 0x05: set_mnemonic(i, "BNE"); type=CJUMP; break;
6544 case 0x06: set_mnemonic(i, "BLEZ"); type=CJUMP; break;
6545 case 0x07: set_mnemonic(i, "BGTZ"); type=CJUMP; break;
6546 case 0x08: set_mnemonic(i, "ADDI"); type=IMM16; break;
6547 case 0x09: set_mnemonic(i, "ADDIU"); type=IMM16; break;
6548 case 0x0A: set_mnemonic(i, "SLTI"); type=IMM16; break;
6549 case 0x0B: set_mnemonic(i, "SLTIU"); type=IMM16; break;
6550 case 0x0C: set_mnemonic(i, "ANDI"); type=IMM16; break;
6551 case 0x0D: set_mnemonic(i, "ORI"); type=IMM16; break;
6552 case 0x0E: set_mnemonic(i, "XORI"); type=IMM16; break;
6553 case 0x0F: set_mnemonic(i, "LUI"); type=IMM16; break;
6554 case 0x10: set_mnemonic(i, "cop0"); type=NI;
57871462 6555 op2=(source[i]>>21)&0x1f;
6556 switch(op2)
6557 {
53dc27f6 6558 case 0x00: set_mnemonic(i, "MFC0"); type=COP0; break;
6559 case 0x02: set_mnemonic(i, "CFC0"); type=COP0; break;
6560 case 0x04: set_mnemonic(i, "MTC0"); type=COP0; break;
6561 case 0x06: set_mnemonic(i, "CTC0"); type=COP0; break;
6562 case 0x10: set_mnemonic(i, "RFE"); type=COP0; break;
57871462 6563 }
6564 break;
53dc27f6 6565 case 0x11: set_mnemonic(i, "cop1"); type=COP1;
57871462 6566 op2=(source[i]>>21)&0x1f;
57871462 6567 break;
71e490c5 6568#if 0
53dc27f6 6569 case 0x14: set_mnemonic(i, "BEQL"); type=CJUMP; break;
6570 case 0x15: set_mnemonic(i, "BNEL"); type=CJUMP; break;
6571 case 0x16: set_mnemonic(i, "BLEZL"); type=CJUMP; break;
6572 case 0x17: set_mnemonic(i, "BGTZL"); type=CJUMP; break;
6573 case 0x18: set_mnemonic(i, "DADDI"); type=IMM16; break;
6574 case 0x19: set_mnemonic(i, "DADDIU"); type=IMM16; break;
6575 case 0x1A: set_mnemonic(i, "LDL"); type=LOADLR; break;
6576 case 0x1B: set_mnemonic(i, "LDR"); type=LOADLR; break;
996cc15d 6577#endif
53dc27f6 6578 case 0x20: set_mnemonic(i, "LB"); type=LOAD; break;
6579 case 0x21: set_mnemonic(i, "LH"); type=LOAD; break;
6580 case 0x22: set_mnemonic(i, "LWL"); type=LOADLR; break;
6581 case 0x23: set_mnemonic(i, "LW"); type=LOAD; break;
6582 case 0x24: set_mnemonic(i, "LBU"); type=LOAD; break;
6583 case 0x25: set_mnemonic(i, "LHU"); type=LOAD; break;
6584 case 0x26: set_mnemonic(i, "LWR"); type=LOADLR; break;
71e490c5 6585#if 0
53dc27f6 6586 case 0x27: set_mnemonic(i, "LWU"); type=LOAD; break;
64bd6f82 6587#endif
53dc27f6 6588 case 0x28: set_mnemonic(i, "SB"); type=STORE; break;
6589 case 0x29: set_mnemonic(i, "SH"); type=STORE; break;
6590 case 0x2A: set_mnemonic(i, "SWL"); type=STORELR; break;
6591 case 0x2B: set_mnemonic(i, "SW"); type=STORE; break;
71e490c5 6592#if 0
53dc27f6 6593 case 0x2C: set_mnemonic(i, "SDL"); type=STORELR; break;
6594 case 0x2D: set_mnemonic(i, "SDR"); type=STORELR; break;
996cc15d 6595#endif
53dc27f6 6596 case 0x2E: set_mnemonic(i, "SWR"); type=STORELR; break;
6597 case 0x2F: set_mnemonic(i, "CACHE"); type=NOP; break;
6598 case 0x30: set_mnemonic(i, "LL"); type=NI; break;
6599 case 0x31: set_mnemonic(i, "LWC1"); type=C1LS; break;
71e490c5 6600#if 0
53dc27f6 6601 case 0x34: set_mnemonic(i, "LLD"); type=NI; break;
6602 case 0x35: set_mnemonic(i, "LDC1"); type=C1LS; break;
6603 case 0x37: set_mnemonic(i, "LD"); type=LOAD; break;
996cc15d 6604#endif
53dc27f6 6605 case 0x38: set_mnemonic(i, "SC"); type=NI; break;
6606 case 0x39: set_mnemonic(i, "SWC1"); type=C1LS; break;
71e490c5 6607#if 0
53dc27f6 6608 case 0x3C: set_mnemonic(i, "SCD"); type=NI; break;
6609 case 0x3D: set_mnemonic(i, "SDC1"); type=C1LS; break;
6610 case 0x3F: set_mnemonic(i, "SD"); type=STORE; break;
996cc15d 6611#endif
53dc27f6 6612 case 0x12: set_mnemonic(i, "COP2"); type=NI;
b9b61529 6613 op2=(source[i]>>21)&0x1f;
be516ebe 6614 //if (op2 & 0x10)
bedfea38 6615 if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
c7abc864 6616 if (gte_handlers[source[i]&0x3f]!=NULL) {
53dc27f6 6617#ifdef DISASM
bedfea38 6618 if (gte_regnames[source[i]&0x3f]!=NULL)
6619 strcpy(insn[i],gte_regnames[source[i]&0x3f]);
6620 else
6621 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
53dc27f6 6622#endif
c7abc864 6623 type=C2OP;
6624 }
6625 }
6626 else switch(op2)
b9b61529 6627 {
53dc27f6 6628 case 0x00: set_mnemonic(i, "MFC2"); type=COP2; break;
6629 case 0x02: set_mnemonic(i, "CFC2"); type=COP2; break;
6630 case 0x04: set_mnemonic(i, "MTC2"); type=COP2; break;
6631 case 0x06: set_mnemonic(i, "CTC2"); type=COP2; break;
b9b61529 6632 }
6633 break;
53dc27f6 6634 case 0x32: set_mnemonic(i, "LWC2"); type=C2LS; break;
6635 case 0x3A: set_mnemonic(i, "SWC2"); type=C2LS; break;
6636 case 0x3B: set_mnemonic(i, "HLECALL"); type=HLECALL; break;
6637 default: set_mnemonic(i, "???"); type=NI;
4149788d 6638 SysPrintf("NI %08x @%08x (%08x)\n", source[i], start + i*4, start);
90ae6d4e 6639 break;
57871462 6640 }
cf95b4f0 6641 dops[i].itype=type;
6642 dops[i].opcode2=op2;
57871462 6643 /* Get registers/immediates */
53dc27f6 6644 dops[i].use_lt1=0;
bedfea38 6645 gte_rs[i]=gte_rt[i]=0;
57871462 6646 switch(type) {
6647 case LOAD:
cf95b4f0 6648 dops[i].rs1=(source[i]>>21)&0x1f;
6649 dops[i].rs2=0;
6650 dops[i].rt1=(source[i]>>16)&0x1f;
6651 dops[i].rt2=0;
57871462 6652 imm[i]=(short)source[i];
6653 break;
6654 case STORE:
6655 case STORELR:
cf95b4f0 6656 dops[i].rs1=(source[i]>>21)&0x1f;
6657 dops[i].rs2=(source[i]>>16)&0x1f;
6658 dops[i].rt1=0;
6659 dops[i].rt2=0;
57871462 6660 imm[i]=(short)source[i];
57871462 6661 break;
6662 case LOADLR:
6663 // LWL/LWR only load part of the register,
6664 // therefore the target register must be treated as a source too
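 // (e.g. an unaligned word load is an LWL/LWR pair where each half writes only
 // some byte lanes of rt, so the lanes it does not write must be preserved)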
cf95b4f0 6665 dops[i].rs1=(source[i]>>21)&0x1f;
6666 dops[i].rs2=(source[i]>>16)&0x1f;
6667 dops[i].rt1=(source[i]>>16)&0x1f;
6668 dops[i].rt2=0;
57871462 6669 imm[i]=(short)source[i];
57871462 6670 break;
6671 case IMM16:
cf95b4f0 6672 if (op==0x0f) dops[i].rs1=0; // LUI instruction has no source register
6673 else dops[i].rs1=(source[i]>>21)&0x1f;
6674 dops[i].rs2=0;
6675 dops[i].rt1=(source[i]>>16)&0x1f;
6676 dops[i].rt2=0;
57871462 6677 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
6678 imm[i]=(unsigned short)source[i];
6679 }else{
6680 imm[i]=(short)source[i];
6681 }
57871462 6682 break;
6683 case UJUMP:
cf95b4f0 6684 dops[i].rs1=0;
6685 dops[i].rs2=0;
6686 dops[i].rt1=0;
6687 dops[i].rt2=0;
57871462 6688 // The JAL instruction writes to r31.
6689 if (op&1) {
cf95b4f0 6690 dops[i].rt1=31;
57871462 6691 }
cf95b4f0 6692 dops[i].rs2=CCREG;
57871462 6693 break;
6694 case RJUMP:
cf95b4f0 6695 dops[i].rs1=(source[i]>>21)&0x1f;
6696 dops[i].rs2=0;
6697 dops[i].rt1=0;
6698 dops[i].rt2=0;
5067f341 6699 // The JALR instruction writes to rd.
57871462 6700 if (op2&1) {
cf95b4f0 6701 dops[i].rt1=(source[i]>>11)&0x1f;
57871462 6702 }
cf95b4f0 6703 dops[i].rs2=CCREG;
57871462 6704 break;
6705 case CJUMP:
cf95b4f0 6706 dops[i].rs1=(source[i]>>21)&0x1f;
6707 dops[i].rs2=(source[i]>>16)&0x1f;
6708 dops[i].rt1=0;
6709 dops[i].rt2=0;
57871462 6710 if(op&2) { // BGTZ/BLEZ
cf95b4f0 6711 dops[i].rs2=0;
57871462 6712 }
57871462 6713 break;
6714 case SJUMP:
cf95b4f0 6715 dops[i].rs1=(source[i]>>21)&0x1f;
6716 dops[i].rs2=CCREG;
6717 dops[i].rt1=0;
6718 dops[i].rt2=0;
57871462 6719 if(op2&0x10) { // BxxAL
cf95b4f0 6720 dops[i].rt1=31;
57871462 6721 // NOTE: If the branch is not taken, r31 is still overwritten
6722 }
57871462 6723 break;
57871462 6724 case ALU:
cf95b4f0 6725 dops[i].rs1=(source[i]>>21)&0x1f; // source
6726 dops[i].rs2=(source[i]>>16)&0x1f; // second operand
6727 dops[i].rt1=(source[i]>>11)&0x1f; // destination
6728 dops[i].rt2=0;
57871462 6729 break;
6730 case MULTDIV:
cf95b4f0 6731 dops[i].rs1=(source[i]>>21)&0x1f; // source
6732 dops[i].rs2=(source[i]>>16)&0x1f; // divisor
6733 dops[i].rt1=HIREG;
6734 dops[i].rt2=LOREG;
57871462 6735 break;
6736 case MOV:
cf95b4f0 6737 dops[i].rs1=0;
6738 dops[i].rs2=0;
6739 dops[i].rt1=0;
6740 dops[i].rt2=0;
6741 if(op2==0x10) dops[i].rs1=HIREG; // MFHI
6742 if(op2==0x11) dops[i].rt1=HIREG; // MTHI
6743 if(op2==0x12) dops[i].rs1=LOREG; // MFLO
6744 if(op2==0x13) dops[i].rt1=LOREG; // MTLO
6745 if((op2&0x1d)==0x10) dops[i].rt1=(source[i]>>11)&0x1f; // MFxx
6746 if((op2&0x1d)==0x11) dops[i].rs1=(source[i]>>21)&0x1f; // MTxx
57871462 6747 break;
6748 case SHIFT:
cf95b4f0 6749 dops[i].rs1=(source[i]>>16)&0x1f; // target of shift
6750 dops[i].rs2=(source[i]>>21)&0x1f; // shift amount
6751 dops[i].rt1=(source[i]>>11)&0x1f; // destination
6752 dops[i].rt2=0;
57871462 6753 break;
6754 case SHIFTIMM:
cf95b4f0 6755 dops[i].rs1=(source[i]>>16)&0x1f;
6756 dops[i].rs2=0;
6757 dops[i].rt1=(source[i]>>11)&0x1f;
6758 dops[i].rt2=0;
57871462 6759 imm[i]=(source[i]>>6)&0x1f;
6760 // DSxx32 instructions
6761 if(op2>=0x3c) imm[i]|=0x20;
57871462 6762 break;
6763 case COP0:
cf95b4f0 6764 dops[i].rs1=0;
6765 dops[i].rs2=0;
6766 dops[i].rt1=0;
6767 dops[i].rt2=0;
6768 if(op2==0||op2==2) dops[i].rt1=(source[i]>>16)&0x1F; // MFC0/CFC0
6769 if(op2==4||op2==6) dops[i].rs1=(source[i]>>16)&0x1F; // MTC0/CTC0
6770 if(op2==4&&((source[i]>>11)&0x1f)==12) dops[i].rt2=CSREG; // Status
6771 if(op2==16) if((source[i]&0x3f)==0x18) dops[i].rs2=CCREG; // ERET
57871462 6772 break;
6773 case COP1:
cf95b4f0 6774 dops[i].rs1=0;
6775 dops[i].rs2=0;
6776 dops[i].rt1=0;
6777 dops[i].rt2=0;
6778 if(op2<3) dops[i].rt1=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
6779 if(op2>3) dops[i].rs1=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
6780 dops[i].rs2=CSREG;
57871462 6781 break;
bedfea38 6782 case COP2:
cf95b4f0 6783 dops[i].rs1=0;
6784 dops[i].rs2=0;
6785 dops[i].rt1=0;
6786 dops[i].rt2=0;
6787 if(op2<3) dops[i].rt1=(source[i]>>16)&0x1F; // MFC2/CFC2
6788 if(op2>3) dops[i].rs1=(source[i]>>16)&0x1F; // MTC2/CTC2
6789 dops[i].rs2=CSREG;
bedfea38 6790 int gr=(source[i]>>11)&0x1F;
6791 switch(op2)
6792 {
6793 case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
6794 case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
0ff8c62c 6795 case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
bedfea38 6796 case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
6797 }
6798 break;
57871462 6799 case C1LS:
cf95b4f0 6800 dops[i].rs1=(source[i]>>21)&0x1F;
6801 dops[i].rs2=CSREG;
6802 dops[i].rt1=0;
6803 dops[i].rt2=0;
57871462 6804 imm[i]=(short)source[i];
6805 break;
b9b61529 6806 case C2LS:
cf95b4f0 6807 dops[i].rs1=(source[i]>>21)&0x1F;
6808 dops[i].rs2=0;
6809 dops[i].rt1=0;
6810 dops[i].rt2=0;
b9b61529 6811 imm[i]=(short)source[i];
bedfea38 6812 if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
6813 else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
6814 break;
6815 case C2OP:
cf95b4f0 6816 dops[i].rs1=0;
6817 dops[i].rs2=0;
6818 dops[i].rt1=0;
6819 dops[i].rt2=0;
2167bef6 6820 gte_rs[i]=gte_reg_reads[source[i]&0x3f];
6821 gte_rt[i]=gte_reg_writes[source[i]&0x3f];
6822 gte_rt[i]|=1ll<<63; // every op changes flags
587a5b1c 6823 if((source[i]&0x3f)==GTE_MVMVA) {
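 // MVMVA reads one of V0/V1/V2 or (v==3) IR1-IR3 depending on bits 16:15,
 // so replace the table's generic guess: data regs 2*v..2*v+1 for V0-V2,
 // regs 9-11 for IR1-IR3 (standard GTE data register numbering assumed)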
6824 int v = (source[i] >> 15) & 3;
6825 gte_rs[i]&=~0xe3fll;
6826 if(v==3) gte_rs[i]|=0xe00ll;
6827 else gte_rs[i]|=3ll<<(v*2);
6828 }
b9b61529 6829 break;
57871462 6830 case SYSCALL:
7139f3c8 6831 case HLECALL:
1e973cb0 6832 case INTCALL:
cf95b4f0 6833 dops[i].rs1=CCREG;
6834 dops[i].rs2=0;
6835 dops[i].rt1=0;
6836 dops[i].rt2=0;
57871462 6837 break;
6838 default:
cf95b4f0 6839 dops[i].rs1=0;
6840 dops[i].rs2=0;
6841 dops[i].rt1=0;
6842 dops[i].rt2=0;
57871462 6843 }
6844 /* Calculate branch target addresses */
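  // UJUMP (J/JAL): upper 4 bits of the delay-slot PC OR'd with the 26-bit
  // instruction index shifted left by 2; CJUMP/SJUMP: delay-slot PC plus the
  // sign-extended 16-bit offset times 4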
6845 if(type==UJUMP)
6846 ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
cf95b4f0 6847 else if(type==CJUMP&&dops[i].rs1==dops[i].rs2&&(op&1))
57871462 6848 ba[i]=start+i*4+8; // Ignore never taken branch
cf95b4f0 6849 else if(type==SJUMP&&dops[i].rs1==0&&!(op2&1))
57871462 6850 ba[i]=start+i*4+8; // Ignore never taken branch
ad49de89 6851 else if(type==CJUMP||type==SJUMP)
57871462 6852 ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
6853 else ba[i]=-1;
4919de1e 6854
6855 /* simplify always (not)taken branches */
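  // BEQ rN,rN is always taken and is turned into an unconditional UJUMP;
  // BNE rN,rN is never taken (ba[] above already points past the delay slot);
  // BGEZ/BGEZAL-class branches on $zero are always taken and become UJUMP too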
cf95b4f0 6856 if (type == CJUMP && dops[i].rs1 == dops[i].rs2) {
6857 dops[i].rs1 = dops[i].rs2 = 0;
4919de1e 6858 if (!(op & 1)) {
cf95b4f0 6859 dops[i].itype = type = UJUMP;
6860 dops[i].rs2 = CCREG;
4919de1e 6861 }
6862 }
cf95b4f0 6863 else if (type == SJUMP && dops[i].rs1 == 0 && (op2 & 1))
6864 dops[i].itype = type = UJUMP;
4919de1e 6865
fe807a8a 6866 dops[i].is_jump = (dops[i].itype == RJUMP || dops[i].itype == UJUMP || dops[i].itype == CJUMP || dops[i].itype == SJUMP);
6867 dops[i].is_ujump = (dops[i].itype == RJUMP || dops[i].itype == UJUMP); // || (source[i] >> 16) == 0x1000 // beq r0,r0
37387d8b 6868 dops[i].is_load = (dops[i].itype == LOAD || dops[i].itype == LOADLR || op == 0x32); // LWC2
6869 dops[i].is_store = (dops[i].itype == STORE || dops[i].itype == STORELR || op == 0x3a); // SWC2
fe807a8a 6870
4919de1e 6871 /* messy cases to just pass over to the interpreter */
fe807a8a 6872 if (i > 0 && dops[i-1].is_jump) {
3e535354 6873 int do_in_intrp=0;
6874 // branch in delay slot?
fe807a8a 6875 if (dops[i].is_jump) {
3e535354 6876 // can't handle this; end the block at the first branch and call the interpreter if it's hit
4149788d 6877 SysPrintf("branch in delay slot @%08x (%08x)\n", start + i*4, start);
3e535354 6878 do_in_intrp=1;
6879 }
6880 // basic load delay detection
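    // On the R3000A the instruction executed right after a load (or an
    // MFC-style coprocessor move handled here) typically still sees the
    // register's old value; if the branch target reads the register loaded
    // in the delay slot, that behavior matters, so fall back to the interpreter.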
cf95b4f0 6881 else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&dops[i].rt1!=0) {
3e535354 6882 int t=(ba[i-1]-start)/4;
cf95b4f0 6883 if(0 <= t && t < i &&(dops[i].rt1==dops[t].rs1||dops[i].rt1==dops[t].rs2)&&dops[t].itype!=CJUMP&&dops[t].itype!=SJUMP) {
3e535354 6884 // jump target wants DS result - potential load delay effect
4149788d 6885 SysPrintf("load delay @%08x (%08x)\n", start + i*4, start);
3e535354 6886 do_in_intrp=1;
cf95b4f0 6887 dops[t+1].bt=1; // expected return from interpreter
3e535354 6888 }
cf95b4f0 6889 else if(i>=2&&dops[i-2].rt1==2&&dops[i].rt1==2&&dops[i].rs1!=2&&dops[i].rs2!=2&&dops[i-1].rs1!=2&&dops[i-1].rs2!=2&&
fe807a8a 6890 !(i>=3&&dops[i-3].is_jump)) {
3e535354 6891 // v0 overwrite like this is a sign of trouble, bail out
4149788d 6892 SysPrintf("v0 overwrite @%08x (%08x)\n", start + i*4, start);
3e535354 6893 do_in_intrp=1;
6894 }
6895 }
7ebfcedf 6896 if (do_in_intrp) {
6897 memset(&dops[i-1], 0, sizeof(dops[i-1]));
6898 dops[i-1].itype = INTCALL;
6899 dops[i-1].rs1 = CCREG;
6900 ba[i-1] = -1;
6901 done = 2;
3e535354 6902 i--; // don't compile the DS
26869094 6903 }
3e535354 6904 }
4919de1e 6905
3e535354 6906 /* Is this the end of the block? */
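    // After an unconditional jump that is not a call (rt1 == 0): if the target
    // is a short forward jump, scan up to 64 instructions from it looking for
    // a conditional branch that comes back to the instruction after the delay
    // slot; if found, keep compiling through the skipped code, otherwise end
    // the block (done = 2).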
fe807a8a 6907 if (i > 0 && dops[i-1].is_ujump) {
0787af86 6908 if (dops[i-1].rt1 == 0) { // not jal
6909 int found_bbranch = 0, t = (ba[i-1] - start) / 4;
6910 if ((u_int)(t - i) < 64 && start + (t+64)*4 < pagelimit) {
6911 // scan for a branch back to i+1
6912 for (j = t; j < t + 64; j++) {
6913 int tmpop = source[j] >> 26;
6914 if (tmpop == 1 || ((tmpop & ~3) == 4)) {
6915 int t2 = j + 1 + (int)(signed short)source[j];
6916 if (t2 == i + 1) {
6917 //printf("blk expand %08x<-%08x\n", start + (i+1)*4, start + j*4);
6918 found_bbranch = 1;
6919 break;
6920 }
6921 }
6922 }
6923 }
6924 if (!found_bbranch)
6925 done = 2;
57871462 6926 }
6927 else {
6928 if(stop_after_jal) done=1;
6929 // Stop on BREAK
6930 if((source[i+1]&0xfc00003f)==0x0d) done=1;
6931 }
6932 // Don't recompile stuff that's already compiled
6933 if(check_addr(start+i*4+4)) done=1;
6934 // Don't get too close to the limit
6935 if(i>MAXBLOCK/2) done=1;
6936 }
d1150cd6 6937 if (dops[i].itype == SYSCALL || dops[i].itype == HLECALL || dops[i].itype == INTCALL)
6938 done = stop_after_jal ? 1 : 2;
6939 if (done == 2) {
1e973cb0 6940 // Does the block continue due to a branch?
6941 for(j=i-1;j>=0;j--)
6942 {
2a706964 6943 if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
1e973cb0 6944 if(ba[j]==start+i*4+4) done=j=0;
6945 if(ba[j]==start+i*4+8) done=j=0;
6946 }
6947 }
75dec299 6948 //assert(i<MAXBLOCK-1);
57871462 6949 if(start+i*4==pagelimit-4) done=1;
6950 assert(start+i*4<pagelimit);
6951 if (i==MAXBLOCK-1) done=1;
6952 // Stop if we're compiling junk
b4ab351d 6953 if(dops[i].itype == NI && (++ni_count > 8 || dops[i].opcode == 0x11)) {
57871462 6954 done=stop_after_jal=1;
c43b5311 6955 SysPrintf("Disabled speculative precompilation\n");
57871462 6956 }
6957 }
4bdc30ab 6958 while (i > 0 && dops[i-1].is_jump)
6959 i--;
6960 assert(i > 0);
6961 assert(!dops[i-1].is_jump);
6962 slen = i;
4149788d 6963}
6964
6965// Basic liveness analysis for MIPS registers
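// Works backwards through the block: bit r of u set means MIPS register r is
// dead ("unneeded") past this point.  Writing a register sets its bit, reading
// one clears it, and bit 0 ($zero) is always set.  gte_u tracks the same
// property for GTE data (bits 0-31) and control (bits 32-63) registers.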
6966static noinline void pass2_unneeded_regs(int istart,int iend,int r)
6967{
6968 int i;
6969 uint64_t u,gte_u,b,gte_b;
6970 uint64_t temp_u,temp_gte_u=0;
6971 uint64_t gte_u_unknown=0;
6972 if (HACK_ENABLED(NDHACK_GTE_UNNEEDED))
6973 gte_u_unknown=~0ll;
6974 if(iend==slen-1) {
6975 u=1;
6976 gte_u=gte_u_unknown;
6977 }else{
6978 //u=unneeded_reg[iend+1];
6979 u=1;
6980 gte_u=gte_unneeded[iend+1];
6981 }
6982
6983 for (i=iend;i>=istart;i--)
6984 {
6985 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6986 if(dops[i].is_jump)
6987 {
6988 // If subroutine call, flag return address as a possible branch target
6989 if(dops[i].rt1==31 && i<slen-2) dops[i+2].bt=1;
6990
6991 if(ba[i]<start || ba[i]>=(start+slen*4))
6992 {
6993 // Branch out of this block, flush all regs
6994 u=1;
6995 gte_u=gte_u_unknown;
6996 branch_unneeded_reg[i]=u;
6997 // Merge in delay slot
6998 u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
6999 u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7000 u|=1;
7001 gte_u|=gte_rt[i+1];
7002 gte_u&=~gte_rs[i+1];
7003 }
7004 else
7005 {
7006 // Internal branch, flag target
7007 dops[(ba[i]-start)>>2].bt=1;
7008 if(ba[i]<=start+i*4) {
7009 // Backward branch
7010 if(dops[i].is_ujump)
7011 {
7012 // Unconditional branch
7013 temp_u=1;
7014 temp_gte_u=0;
7015 } else {
7016 // Conditional branch (not taken case)
7017 temp_u=unneeded_reg[i+2];
7018 temp_gte_u&=gte_unneeded[i+2];
7019 }
7020 // Merge in delay slot
7021 temp_u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7022 temp_u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7023 temp_u|=1;
7024 temp_gte_u|=gte_rt[i+1];
7025 temp_gte_u&=~gte_rs[i+1];
7026 temp_u|=(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2);
7027 temp_u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7028 temp_u|=1;
7029 temp_gte_u|=gte_rt[i];
7030 temp_gte_u&=~gte_rs[i];
7031 unneeded_reg[i]=temp_u;
7032 gte_unneeded[i]=temp_gte_u;
7033 // Only go three levels deep. This recursion can take an
7034 // excessive amount of time if there are a lot of nested loops.
7035 if(r<2) {
7036 pass2_unneeded_regs((ba[i]-start)>>2,i-1,r+1);
7037 }else{
7038 unneeded_reg[(ba[i]-start)>>2]=1;
7039 gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
7040 }
7041 } /*else*/ if(1) {
7042 if (dops[i].is_ujump)
7043 {
7044 // Unconditional branch
7045 u=unneeded_reg[(ba[i]-start)>>2];
7046 gte_u=gte_unneeded[(ba[i]-start)>>2];
7047 branch_unneeded_reg[i]=u;
7048 // Merge in delay slot
7049 u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7050 u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7051 u|=1;
7052 gte_u|=gte_rt[i+1];
7053 gte_u&=~gte_rs[i+1];
7054 } else {
7055 // Conditional branch
7056 b=unneeded_reg[(ba[i]-start)>>2];
7057 gte_b=gte_unneeded[(ba[i]-start)>>2];
7058 branch_unneeded_reg[i]=b;
7059 // Branch delay slot
7060 b|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7061 b&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7062 b|=1;
7063 gte_b|=gte_rt[i+1];
7064 gte_b&=~gte_rs[i+1];
7065 u&=b;
7066 gte_u&=gte_b;
7067 if(i<slen-1) {
7068 branch_unneeded_reg[i]&=unneeded_reg[i+2];
7069 } else {
7070 branch_unneeded_reg[i]=1;
7071 }
7072 }
7073 }
7074 }
7075 }
7076 else if(dops[i].itype==SYSCALL||dops[i].itype==HLECALL||dops[i].itype==INTCALL)
7077 {
7078 // SYSCALL instruction (software interrupt)
7079 u=1;
7080 }
55a695d9 7081 else if(dops[i].itype==COP0 && dops[i].opcode2==0x10)
4149788d 7082 {
55a695d9 7083 // RFE
4149788d 7084 u=1;
7085 }
7086 //u=1; // DEBUG
7087 // Written registers are unneeded
7088 u|=1LL<<dops[i].rt1;
7089 u|=1LL<<dops[i].rt2;
7090 gte_u|=gte_rt[i];
7091 // Accessed registers are needed
7092 u&=~(1LL<<dops[i].rs1);
7093 u&=~(1LL<<dops[i].rs2);
7094 gte_u&=~gte_rs[i];
7095 if(gte_rs[i]&&dops[i].rt1&&(unneeded_reg[i+1]&(1ll<<dops[i].rt1)))
7096 gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
7097 // Source-target dependencies
7098 // R0 is always unneeded
7099 u|=1;
7100 // Save it
7101 unneeded_reg[i]=u;
7102 gte_unneeded[i]=gte_u;
7103 /*
7104 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
7105 printf("U:");
7106 int r;
7107 for(r=1;r<=CCREG;r++) {
7108 if((unneeded_reg[i]>>r)&1) {
7109 if(r==HIREG) printf(" HI");
7110 else if(r==LOREG) printf(" LO");
7111 else printf(" r%d",r);
7112 }
7113 }
7114 printf("\n");
7115 */
7116 }
7117}
57871462 7118
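// Pass 3: forward scan that assigns host registers to every instruction.
// Keeps a running allocation in 'current', always allocates the cycle counter
// (CCREG), allocates delay slots together with their branches, and records in
// regmap_entry the mapping that branches into each instruction must provide.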
4149788d 7119static noinline void pass3_register_alloc(u_int addr)
7120{
57871462 7121 struct regstat current; // Current register allocations/status
6cc8d23c 7122 clear_all_regs(current.regmap_entry);
57871462 7123 clear_all_regs(current.regmap);
6cc8d23c 7124 current.wasdirty = current.dirty = 0;
7125 current.u = unneeded_reg[0];
7126 alloc_reg(&current, 0, CCREG);
7127 dirty_reg(&current, CCREG);
7128 current.wasconst = 0;
7129 current.isconst = 0;
7130 current.loadedconst = 0;
7131 current.waswritten = 0;
57871462 7132 int ds=0;
7133 int cc=0;
4149788d 7134 int hr;
7135 int i, j;
6ebf4adf 7136
4149788d 7137 if (addr & 1) {
57871462 7138 // First instruction is delay slot
7139 cc=-1;
cf95b4f0 7140 dops[1].bt=1;
57871462 7141 ds=1;
7142 unneeded_reg[0]=1;
57871462 7143 current.regmap[HOST_BTREG]=BTREG;
7144 }
9f51b4b9 7145
57871462 7146 for(i=0;i<slen;i++)
7147 {
cf95b4f0 7148 if(dops[i].bt)
57871462 7149 {
57871462 7150 for(hr=0;hr<HOST_REGS;hr++)
7151 {
7152 // Is this really necessary?
7153 if(current.regmap[hr]==0) current.regmap[hr]=-1;
7154 }
7155 current.isconst=0;
27727b63 7156 current.waswritten=0;
57871462 7157 }
24385cae 7158
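    // regmap_pre[i]: host-reg -> guest-reg mapping inherited from the previous
    // instruction; regs[i].regmap_entry (filled in below) is the mapping a
    // branch jumping to instruction i is expected to provide.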
57871462 7159 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
7160 regs[i].wasconst=current.isconst;
57871462 7161 regs[i].wasdirty=current.dirty;
6cc8d23c 7162 regs[i].dirty=0;
7163 regs[i].u=0;
7164 regs[i].isconst=0;
8575a877 7165 regs[i].loadedconst=0;
fe807a8a 7166 if (!dops[i].is_jump) {
57871462 7167 if(i+1<slen) {
cf95b4f0 7168 current.u=unneeded_reg[i+1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
57871462 7169 current.u|=1;
57871462 7170 } else {
7171 current.u=1;
57871462 7172 }
7173 } else {
7174 if(i+1<slen) {
cf95b4f0 7175 current.u=branch_unneeded_reg[i]&~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7176 current.u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
57871462 7177 current.u|=1;
7ebfcedf 7178 } else {
7179 SysPrintf("oops, branch at end of block with no delay slot @%08x\n", start + i*4);
7180 abort();
7181 }
57871462 7182 }
cf95b4f0 7183 dops[i].is_ds=ds;
57871462 7184 if(ds) {
7185 ds=0; // Skip delay slot, already allocated as part of branch
7186 // ...but we need to alloc it in case something jumps here
7187 if(i+1<slen) {
7188 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
57871462 7189 }else{
7190 current.u=branch_unneeded_reg[i-1];
57871462 7191 }
cf95b4f0 7192 current.u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
57871462 7193 current.u|=1;
57871462 7194 struct regstat temp;
7195 memcpy(&temp,&current,sizeof(current));
7196 temp.wasdirty=temp.dirty;
57871462 7197 // TODO: Take into account unconditional branches, as below
7198 delayslot_alloc(&temp,i);
7199 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
7200 regs[i].wasdirty=temp.wasdirty;
57871462 7201 regs[i].dirty=temp.dirty;
57871462 7202 regs[i].isconst=0;
7203 regs[i].wasconst=0;
7204 current.isconst=0;
7205 // Create entry (branch target) regmap
7206 for(hr=0;hr<HOST_REGS;hr++)
7207 {
7208 int r=temp.regmap[hr];
7209 if(r>=0) {
7210 if(r!=regmap_pre[i][hr]) {
7211 regs[i].regmap_entry[hr]=-1;
7212 }
7213 else
7214 {
7c3a5182 7215 assert(r < 64);
57871462 7216 if((current.u>>r)&1) {
7217 regs[i].regmap_entry[hr]=-1;
7218 regs[i].regmap[hr]=-1;
7219 // Don't clear regs in the delay slot as the branch might need them
7220 //current.regmap[hr]=-1;
7221 }else
7222 regs[i].regmap_entry[hr]=r;
57871462 7223 }
7224 } else {
7225 // First instruction expects CCREG to be allocated
9f51b4b9 7226 if(i==0&&hr==HOST_CCREG)
57871462 7227 regs[i].regmap_entry[hr]=CCREG;
7228 else
7229 regs[i].regmap_entry[hr]=-1;
7230 }
7231 }
7232 }
7233 else { // Not delay slot
cf95b4f0 7234 switch(dops[i].itype) {
57871462 7235 case UJUMP:
7236 //current.isconst=0; // DEBUG
7237 //current.wasconst=0; // DEBUG
7238 //regs[i].wasconst=0; // DEBUG
cf95b4f0 7239 clear_const(&current,dops[i].rt1);
57871462 7240 alloc_cc(&current,i);
7241 dirty_reg(&current,CCREG);
cf95b4f0 7242 if (dops[i].rt1==31) {
57871462 7243 alloc_reg(&current,i,31);
7244 dirty_reg(&current,31);
cf95b4f0 7245 //assert(dops[i+1].rs1!=31&&dops[i+1].rs2!=31);
7246 //assert(dops[i+1].rt1!=dops[i].rt1);
57871462 7247 #ifdef REG_PREFETCH
7248 alloc_reg(&current,i,PTEMP);
7249 #endif
57871462 7250 }
cf95b4f0 7251 dops[i].ooo=1;
269bb29a 7252 delayslot_alloc(&current,i+1);
57871462 7253 //current.isconst=0; // DEBUG
7254 ds=1;
7255 //printf("i=%d, isconst=%x\n",i,current.isconst);
7256 break;
7257 case RJUMP:
7258 //current.isconst=0;
7259 //current.wasconst=0;
7260 //regs[i].wasconst=0;
cf95b4f0 7261 clear_const(&current,dops[i].rs1);
7262 clear_const(&current,dops[i].rt1);
57871462 7263 alloc_cc(&current,i);
7264 dirty_reg(&current,CCREG);
4919de1e 7265 if (!ds_writes_rjump_rs(i)) {
cf95b4f0 7266 alloc_reg(&current,i,dops[i].rs1);
7267 if (dops[i].rt1!=0) {
7268 alloc_reg(&current,i,dops[i].rt1);
7269 dirty_reg(&current,dops[i].rt1);
7270 assert(dops[i+1].rs1!=dops[i].rt1&&dops[i+1].rs2!=dops[i].rt1);
7271 assert(dops[i+1].rt1!=dops[i].rt1);
57871462 7272 #ifdef REG_PREFETCH
7273 alloc_reg(&current,i,PTEMP);
7274 #endif
7275 }
7276 #ifdef USE_MINI_HT
cf95b4f0 7277 if(dops[i].rs1==31) { // JALR
57871462 7278 alloc_reg(&current,i,RHASH);
57871462 7279 alloc_reg(&current,i,RHTBL);
57871462 7280 }
7281 #endif
7282 delayslot_alloc(&current,i+1);
7283 } else {
7284 // The delay slot overwrites our source register,
7285 // allocate a temporary register to hold the old value.
7286 current.isconst=0;
7287 current.wasconst=0;
7288 regs[i].wasconst=0;
7289 delayslot_alloc(&current,i+1);
7290 current.isconst=0;
7291 alloc_reg(&current,i,RTEMP);
7292 }
7293 //current.isconst=0; // DEBUG
cf95b4f0 7294 dops[i].ooo=1;
57871462 7295 ds=1;
7296 break;
7297 case CJUMP:
7298 //current.isconst=0;
7299 //current.wasconst=0;
7300 //regs[i].wasconst=0;
cf95b4f0 7301 clear_const(&current,dops[i].rs1);
7302 clear_const(&current,dops[i].rs2);
7303 if((dops[i].opcode&0x3E)==4) // BEQ/BNE
57871462 7304 {
7305 alloc_cc(&current,i);
7306 dirty_reg(&current,CCREG);
cf95b4f0 7307 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7308 if(dops[i].rs2) alloc_reg(&current,i,dops[i].rs2);
7309 if((dops[i].rs1&&(dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2))||
7310 (dops[i].rs2&&(dops[i].rs2==dops[i+1].rt1||dops[i].rs2==dops[i+1].rt2))) {
57871462 7311 // The delay slot overwrites one of our conditions.
7312 // Allocate the branch condition registers instead.
57871462 7313 current.isconst=0;
7314 current.wasconst=0;
7315 regs[i].wasconst=0;
cf95b4f0 7316 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7317 if(dops[i].rs2) alloc_reg(&current,i,dops[i].rs2);
57871462 7318 }
e1190b87 7319 else
7320 {
cf95b4f0 7321 dops[i].ooo=1;
e1190b87 7322 delayslot_alloc(&current,i+1);
7323 }
57871462 7324 }
7325 else
cf95b4f0 7326 if((dops[i].opcode&0x3E)==6) // BLEZ/BGTZ
57871462 7327 {
7328 alloc_cc(&current,i);
7329 dirty_reg(&current,CCREG);
cf95b4f0 7330 alloc_reg(&current,i,dops[i].rs1);
7331 if(dops[i].rs1&&(dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2)) {
57871462 7332 // The delay slot overwrites one of our conditions.
7333 // Allocate the branch condition registers instead.
57871462 7334 current.isconst=0;
7335 current.wasconst=0;
7336 regs[i].wasconst=0;
cf95b4f0 7337 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
57871462 7338 }
e1190b87 7339 else
7340 {
cf95b4f0 7341 dops[i].ooo=1;
e1190b87 7342 delayslot_alloc(&current,i+1);
7343 }
57871462 7344 }
7345 else
7346 // Don't alloc the delay slot yet because we might not execute it
cf95b4f0 7347 if((dops[i].opcode&0x3E)==0x14) // BEQL/BNEL
57871462 7348 {
7349 current.isconst=0;
7350 current.wasconst=0;
7351 regs[i].wasconst=0;
7352 alloc_cc(&current,i);
7353 dirty_reg(&current,CCREG);
cf95b4f0 7354 alloc_reg(&current,i,dops[i].rs1);
7355 alloc_reg(&current,i,dops[i].rs2);
57871462 7356 }
7357 else
cf95b4f0 7358 if((dops[i].opcode&0x3E)==0x16) // BLEZL/BGTZL
57871462 7359 {
7360 current.isconst=0;
7361 current.wasconst=0;
7362 regs[i].wasconst=0;
7363 alloc_cc(&current,i);
7364 dirty_reg(&current,CCREG);
cf95b4f0 7365 alloc_reg(&current,i,dops[i].rs1);
57871462 7366 }
7367 ds=1;
7368 //current.isconst=0;
7369 break;
7370 case SJUMP:
7371 //current.isconst=0;
7372 //current.wasconst=0;
7373 //regs[i].wasconst=0;
cf95b4f0 7374 clear_const(&current,dops[i].rs1);
7375 clear_const(&current,dops[i].rt1);
7376 //if((dops[i].opcode2&0x1E)==0x0) // BLTZ/BGEZ
7377 if((dops[i].opcode2&0x0E)==0x0) // BLTZ/BGEZ
57871462 7378 {
7379 alloc_cc(&current,i);
7380 dirty_reg(&current,CCREG);
cf95b4f0 7381 alloc_reg(&current,i,dops[i].rs1);
7382 if (dops[i].rt1==31) { // BLTZAL/BGEZAL
57871462 7383 alloc_reg(&current,i,31);
7384 dirty_reg(&current,31);
57871462 7385 //#ifdef REG_PREFETCH
7386 //alloc_reg(&current,i,PTEMP);
7387 //#endif
57871462 7388 }
cf95b4f0 7389 if((dops[i].rs1&&(dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2)) // The delay slot overwrites the branch condition.
7390 ||(dops[i].rt1==31&&(dops[i+1].rs1==31||dops[i+1].rs2==31||dops[i+1].rt1==31||dops[i+1].rt2==31))) { // DS touches $ra
57871462 7391 // Allocate the branch condition registers instead.
57871462 7392 current.isconst=0;
7393 current.wasconst=0;
7394 regs[i].wasconst=0;
cf95b4f0 7395 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
57871462 7396 }
e1190b87 7397 else
7398 {
cf95b4f0 7399 dops[i].ooo=1;
e1190b87 7400 delayslot_alloc(&current,i+1);
7401 }
57871462 7402 }
7403 else
7404 // Don't alloc the delay slot yet because we might not execute it
cf95b4f0 7405 if((dops[i].opcode2&0x1E)==0x2) // BLTZL/BGEZL
57871462 7406 {
7407 current.isconst=0;
7408 current.wasconst=0;
7409 regs[i].wasconst=0;
7410 alloc_cc(&current,i);
7411 dirty_reg(&current,CCREG);
cf95b4f0 7412 alloc_reg(&current,i,dops[i].rs1);
57871462 7413 }
7414 ds=1;
7415 //current.isconst=0;
7416 break;
57871462 7417 case IMM16:
7418 imm16_alloc(&current,i);
7419 break;
7420 case LOAD:
7421 case LOADLR:
7422 load_alloc(&current,i);
7423 break;
7424 case STORE:
7425 case STORELR:
7426 store_alloc(&current,i);
7427 break;
7428 case ALU:
7429 alu_alloc(&current,i);
7430 break;
7431 case SHIFT:
7432 shift_alloc(&current,i);
7433 break;
7434 case MULTDIV:
7435 multdiv_alloc(&current,i);
7436 break;
7437 case SHIFTIMM:
7438 shiftimm_alloc(&current,i);
7439 break;
7440 case MOV:
7441 mov_alloc(&current,i);
7442 break;
7443 case COP0:
7444 cop0_alloc(&current,i);
7445 break;
7446 case COP1:
81dbbf4c 7447 break;
b9b61529 7448 case COP2:
81dbbf4c 7449 cop2_alloc(&current,i);
57871462 7450 break;
7451 case C1LS:
7452 c1ls_alloc(&current,i);
7453 break;
b9b61529 7454 case C2LS:
7455 c2ls_alloc(&current,i);
7456 break;
7457 case C2OP:
7458 c2op_alloc(&current,i);
7459 break;
57871462 7460 case SYSCALL:
7139f3c8 7461 case HLECALL:
1e973cb0 7462 case INTCALL:
57871462 7463 syscall_alloc(&current,i);
7464 break;
57871462 7465 }
9f51b4b9 7466
57871462 7467 // Create entry (branch target) regmap
7468 for(hr=0;hr<HOST_REGS;hr++)
7469 {
581335b0 7470 int r,or;
57871462 7471 r=current.regmap[hr];
7472 if(r>=0) {
7473 if(r!=regmap_pre[i][hr]) {
7474 // TODO: delay slot (?)
7475 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9de8a0c3 7476 if(or<0||r>=TEMPREG){
57871462 7477 regs[i].regmap_entry[hr]=-1;
7478 }
7479 else
7480 {
7481 // Just move it to a different register
7482 regs[i].regmap_entry[hr]=r;
7483 // If it was dirty before, it's still dirty
9de8a0c3 7484 if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r);
57871462 7485 }
7486 }
7487 else
7488 {
7489 // Unneeded
7490 if(r==0){
7491 regs[i].regmap_entry[hr]=0;
7492 }
7493 else
7c3a5182 7494 {
7495 assert(r<64);
57871462 7496 if((current.u>>r)&1) {
7497 regs[i].regmap_entry[hr]=-1;
7498 //regs[i].regmap[hr]=-1;
7499 current.regmap[hr]=-1;
7500 }else
7501 regs[i].regmap_entry[hr]=r;
7502 }
57871462 7503 }
7504 } else {
7505 // Branches expect CCREG to be allocated at the target
9f51b4b9 7506 if(regmap_pre[i][hr]==CCREG)
57871462 7507 regs[i].regmap_entry[hr]=CCREG;
7508 else
7509 regs[i].regmap_entry[hr]=-1;
7510 }
7511 }
7512 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
7513 }
27727b63 7514
cf95b4f0 7515 if(i>0&&(dops[i-1].itype==STORE||dops[i-1].itype==STORELR||(dops[i-1].itype==C2LS&&dops[i-1].opcode==0x3a))&&(u_int)imm[i-1]<0x800)
7516 current.waswritten|=1<<dops[i-1].rs1;
7517 current.waswritten&=~(1<<dops[i].rt1);
7518 current.waswritten&=~(1<<dops[i].rt2);
7519 if((dops[i].itype==STORE||dops[i].itype==STORELR||(dops[i].itype==C2LS&&dops[i].opcode==0x3a))&&(u_int)imm[i]>=0x800)
7520 current.waswritten&=~(1<<dops[i].rs1);
27727b63 7521
57871462 7522 /* Branch post-alloc */
7523 if(i>0)
7524 {
57871462 7525 current.wasdirty=current.dirty;
cf95b4f0 7526 switch(dops[i-1].itype) {
57871462 7527 case UJUMP:
7528 memcpy(&branch_regs[i-1],&current,sizeof(current));
7529 branch_regs[i-1].isconst=0;
7530 branch_regs[i-1].wasconst=0;
cf95b4f0 7531 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
57871462 7532 alloc_cc(&branch_regs[i-1],i-1);
7533 dirty_reg(&branch_regs[i-1],CCREG);
cf95b4f0 7534 if(dops[i-1].rt1==31) { // JAL
57871462 7535 alloc_reg(&branch_regs[i-1],i-1,31);
7536 dirty_reg(&branch_regs[i-1],31);
57871462 7537 }
7538 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
40fca85b 7539 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
57871462 7540 break;
7541 case RJUMP:
7542 memcpy(&branch_regs[i-1],&current,sizeof(current));
7543 branch_regs[i-1].isconst=0;
7544 branch_regs[i-1].wasconst=0;
cf95b4f0 7545 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
57871462 7546 alloc_cc(&branch_regs[i-1],i-1);
7547 dirty_reg(&branch_regs[i-1],CCREG);
cf95b4f0 7548 alloc_reg(&branch_regs[i-1],i-1,dops[i-1].rs1);
7549 if(dops[i-1].rt1!=0) { // JALR
7550 alloc_reg(&branch_regs[i-1],i-1,dops[i-1].rt1);
7551 dirty_reg(&branch_regs[i-1],dops[i-1].rt1);
57871462 7552 }
7553 #ifdef USE_MINI_HT
cf95b4f0 7554 if(dops[i-1].rs1==31) { // JALR
57871462 7555 alloc_reg(&branch_regs[i-1],i-1,RHASH);
57871462 7556 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
57871462 7557 }
7558 #endif
7559 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
40fca85b 7560 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
57871462 7561 break;
7562 case CJUMP:
cf95b4f0 7563 if((dops[i-1].opcode&0x3E)==4) // BEQ/BNE
57871462 7564 {
7565 alloc_cc(&current,i-1);
7566 dirty_reg(&current,CCREG);
cf95b4f0 7567 if((dops[i-1].rs1&&(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2))||
7568 (dops[i-1].rs2&&(dops[i-1].rs2==dops[i].rt1||dops[i-1].rs2==dops[i].rt2))) {
57871462 7569 // The delay slot overwrote one of our conditions
7570 // Delay slot goes after the test (in order)
cf95b4f0 7571 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
57871462 7572 current.u|=1;
57871462 7573 delayslot_alloc(&current,i);
7574 current.isconst=0;
7575 }
7576 else
7577 {
cf95b4f0 7578 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
57871462 7579 // Alloc the branch condition registers
cf95b4f0 7580 if(dops[i-1].rs1) alloc_reg(&current,i-1,dops[i-1].rs1);
7581 if(dops[i-1].rs2) alloc_reg(&current,i-1,dops[i-1].rs2);
57871462 7582 }
7583 memcpy(&branch_regs[i-1],&current,sizeof(current));
7584 branch_regs[i-1].isconst=0;
7585 branch_regs[i-1].wasconst=0;
7586 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
40fca85b 7587 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
57871462 7588 }
7589 else
cf95b4f0 7590 if((dops[i-1].opcode&0x3E)==6) // BLEZ/BGTZ
57871462 7591 {
7592 alloc_cc(&current,i-1);
7593 dirty_reg(&current,CCREG);
cf95b4f0 7594 if(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2) {
57871462 7595 // The delay slot overwrote the branch condition
7596 // Delay slot goes after the test (in order)
cf95b4f0 7597 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
57871462 7598 current.u|=1;
57871462 7599 delayslot_alloc(&current,i);
7600 current.isconst=0;
7601 }
7602 else
7603 {
cf95b4f0 7604 current.u=branch_unneeded_reg[i-1]&~(1LL<<dops[i-1].rs1);
57871462 7605 // Alloc the branch condition register
cf95b4f0 7606 alloc_reg(&current,i-1,dops[i-1].rs1);
57871462 7607 }
7608 memcpy(&branch_regs[i-1],&current,sizeof(current));
7609 branch_regs[i-1].isconst=0;
7610 branch_regs[i-1].wasconst=0;
7611 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
40fca85b 7612 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
57871462 7613 }
7614 else
7615 // Alloc the delay slot in case the branch is taken
cf95b4f0 7616 if((dops[i-1].opcode&0x3E)==0x14) // BEQL/BNEL
57871462 7617 {
7618 memcpy(&branch_regs[i-1],&current,sizeof(current));
cf95b4f0 7619 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2)|(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2)))|1;
57871462 7620 alloc_cc(&branch_regs[i-1],i);
7621 dirty_reg(&branch_regs[i-1],CCREG);
7622 delayslot_alloc(&branch_regs[i-1],i);
7623 branch_regs[i-1].isconst=0;
7624 alloc_reg(&current,i,CCREG); // Not taken path
7625 dirty_reg(&current,CCREG);
7626 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7627 }
7628 else
cf95b4f0 7629 if((dops[i-1].opcode&0x3E)==0x16) // BLEZL/BGTZL
57871462 7630 {
7631 memcpy(&branch_regs[i-1],&current,sizeof(current));
cf95b4f0 7632 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2)|(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2)))|1;
57871462 7633 alloc_cc(&branch_regs[i-1],i);
7634 dirty_reg(&branch_regs[i-1],CCREG);
7635 delayslot_alloc(&branch_regs[i-1],i);
7636 branch_regs[i-1].isconst=0;
7637 alloc_reg(&current,i,CCREG); // Not taken path
7638 dirty_reg(&current,CCREG);
7639 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7640 }
7641 break;
7642 case SJUMP:
cf95b4f0 7643 //if((dops[i-1].opcode2&0x1E)==0) // BLTZ/BGEZ
7644 if((dops[i-1].opcode2&0x0E)==0) // BLTZ/BGEZ
57871462 7645 {
7646 alloc_cc(&current,i-1);
7647 dirty_reg(&current,CCREG);
cf95b4f0 7648 if(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2) {
57871462 7649 // The delay slot overwrote the branch condition
7650 // Delay slot goes after the test (in order)
cf95b4f0 7651 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
57871462 7652 current.u|=1;
57871462 7653 delayslot_alloc(&current,i);
7654 current.isconst=0;
7655 }
7656 else
7657 {
cf95b4f0 7658 current.u=branch_unneeded_reg[i-1]&~(1LL<<dops[i-1].rs1);
57871462 7659 // Alloc the branch condition register
cf95b4f0 7660 alloc_reg(&current,i-1,dops[i-1].rs1);
57871462 7661 }
7662 memcpy(&branch_regs[i-1],&current,sizeof(current));
7663 branch_regs[i-1].isconst=0;
7664 branch_regs[i-1].wasconst=0;
7665 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
40fca85b 7666 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
57871462 7667 }
7668 else
7669 // Alloc the delay slot in case the branch is taken
cf95b4f0 7670 if((dops[i-1].opcode2&0x1E)==2) // BLTZL/BGEZL
57871462 7671 {
7672 memcpy(&branch_regs[i-1],&current,sizeof(current));
cf95b4f0 7673 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2)|(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2)))|1;
57871462 7674 alloc_cc(&branch_regs[i-1],i);
7675 dirty_reg(&branch_regs[i-1],CCREG);
7676 delayslot_alloc(&branch_regs[i-1],i);
7677 branch_regs[i-1].isconst=0;
7678 alloc_reg(&current,i,CCREG); // Not taken path
7679 dirty_reg(&current,CCREG);
7680 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7681 }
7682 // FIXME: BLTZAL/BGEZAL
cf95b4f0 7683 if(dops[i-1].opcode2&0x10) { // BxxZAL
57871462 7684 alloc_reg(&branch_regs[i-1],i-1,31);
7685 dirty_reg(&branch_regs[i-1],31);
57871462 7686 }
7687 break;
57871462 7688 }
7689
fe807a8a 7690 if (dops[i-1].is_ujump)
57871462 7691 {
cf95b4f0 7692 if(dops[i-1].rt1==31) // JAL/JALR
57871462 7693 {
7694 // Subroutine call will return here, don't alloc any registers
57871462 7695 current.dirty=0;
7696 clear_all_regs(current.regmap);
7697 alloc_reg(&current,i,CCREG);
7698 dirty_reg(&current,CCREG);
7699 }
7700 else if(i+1<slen)
7701 {
7702 // Internal branch will jump here, match registers to caller
57871462 7703 current.dirty=0;
7704 clear_all_regs(current.regmap);
7705 alloc_reg(&current,i,CCREG);
7706 dirty_reg(&current,CCREG);
7707 for(j=i-1;j>=0;j--)
7708 {
7709 if(ba[j]==start+i*4+4) {
7710 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
57871462 7711 current.dirty=branch_regs[j].dirty;
7712 break;
7713 }
7714 }
7715 while(j>=0) {
7716 if(ba[j]==start+i*4+4) {
7717 for(hr=0;hr<HOST_REGS;hr++) {
7718 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
7719 current.regmap[hr]=-1;
7720 }
57871462 7721 current.dirty&=branch_regs[j].dirty;
7722 }
7723 }
7724 j--;
7725 }
7726 }
7727 }
7728 }
7729
7730 // Count cycles in between branches
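    // ccadj[i] = (scaled) cycles accumulated since the last branch/syscall
    // boundary; cc is reset after jumps and syscalls and bumped per
    // instruction, with extra weight for GTE ops, GTE loads/stores and runs
    // of stores below.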
2330734f 7731 ccadj[i] = CLOCK_ADJUST(cc);
fe807a8a 7732 if (i > 0 && (dops[i-1].is_jump || dops[i].itype == SYSCALL || dops[i].itype == HLECALL))
57871462 7733 {
7734 cc=0;
7735 }
71e490c5 7736#if !defined(DRC_DBG)
cf95b4f0 7737 else if(dops[i].itype==C2OP&&gte_cycletab[source[i]&0x3f]>2)
054175e9 7738 {
81dbbf4c 7739 // this should really be removed since the real stalls have been implemented,
7740 // but doing so causes sizeable perf regression against the older version
7741 u_int gtec = gte_cycletab[source[i] & 0x3f];
32631e6a 7742 cc += HACK_ENABLED(NDHACK_NO_STALLS) ? gtec/2 : 2;
fb407447 7743 }
cf95b4f0 7744 else if(i>1&&dops[i].itype==STORE&&dops[i-1].itype==STORE&&dops[i-2].itype==STORE&&!dops[i].bt)
5fdcbb5a 7745 {
7746 cc+=4;
7747 }
cf95b4f0 7748 else if(dops[i].itype==C2LS)
fb407447 7749 {
81dbbf4c 7750 // same as with C2OP
32631e6a 7751 cc += HACK_ENABLED(NDHACK_NO_STALLS) ? 4 : 2;
fb407447 7752 }
7753#endif
57871462 7754 else
7755 {
7756 cc++;
7757 }
7758
cf95b4f0 7759 if(!dops[i].is_ds) {
57871462 7760 regs[i].dirty=current.dirty;
7761 regs[i].isconst=current.isconst;
40fca85b 7762 memcpy(constmap[i],current_constmap,sizeof(constmap[i]));
57871462 7763 }
7764 for(hr=0;hr<HOST_REGS;hr++) {
7765 if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
7766 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
7767 regs[i].wasconst&=~(1<<hr);
7768 }
7769 }
7770 }
7771 if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
27727b63 7772 regs[i].waswritten=current.waswritten;
57871462 7773 }
4149788d 7774}
9f51b4b9 7775
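// Pass 4: walk the block backwards computing 'nr', a bitmask of host registers
// whose contents are still needed, and clear any allocation (regmap = -1) that
// nothing downstream reads so those registers never get written back.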
4149788d 7776static noinline void pass4_cull_unused_regs(void)
7777{
53358c1d 7778 u_int last_needed_regs[4] = {0,0,0,0};
4149788d 7779 u_int nr=0;
7780 int i;
9f51b4b9 7781
57871462 7782 for (i=slen-1;i>=0;i--)
7783 {
7784 int hr;
53358c1d 7785 __builtin_prefetch(regs[i-2].regmap);
fe807a8a 7786 if(dops[i].is_jump)
57871462 7787 {
7788 if(ba[i]<start || ba[i]>=(start+slen*4))
7789 {
7790 // Branch out of this block, don't need anything
7791 nr=0;
7792 }
7793 else
7794 {
7795 // Internal branch
7796 // Need whatever matches the target
7797 nr=0;
7798 int t=(ba[i]-start)>>2;
7799 for(hr=0;hr<HOST_REGS;hr++)
7800 {
7801 if(regs[i].regmap_entry[hr]>=0) {
7802 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
7803 }
7804 }
7805 }
7806 // Conditional branch may need registers for following instructions
fe807a8a 7807 if (!dops[i].is_ujump)
57871462 7808 {
7809 if(i<slen-2) {
53358c1d 7810 nr |= last_needed_regs[(i+2) & 3];
57871462 7811 for(hr=0;hr<HOST_REGS;hr++)
7812 {
7813 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
7814 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
7815 }
7816 }
7817 }
7818 // Don't need stuff which is overwritten
f5955059 7819 //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
7820 //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
57871462 7821 // Merge in delay slot
53358c1d 7822 if (dops[i+1].rt1) nr &= ~get_regm(regs[i].regmap, dops[i+1].rt1);
7823 if (dops[i+1].rt2) nr &= ~get_regm(regs[i].regmap, dops[i+1].rt2);
7824 nr |= get_regm(regmap_pre[i], dops[i+1].rs1);
7825 nr |= get_regm(regmap_pre[i], dops[i+1].rs2);
7826 nr |= get_regm(regs[i].regmap_entry, dops[i+1].rs1);
7827 nr |= get_regm(regs[i].regmap_entry, dops[i+1].rs2);
7828 if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store)) {
7829 nr |= get_regm(regmap_pre[i], ROREG);
7830 nr |= get_regm(regs[i].regmap_entry, ROREG);
7831 }
7832 if (dops[i+1].is_store) {
7833 nr |= get_regm(regmap_pre[i], INVCP);
7834 nr |= get_regm(regs[i].regmap_entry, INVCP);
57871462 7835 }
7836 }
cf95b4f0 7837 else if(dops[i].itype==SYSCALL||dops[i].itype==HLECALL||dops[i].itype==INTCALL)
57871462 7838 {
7839 // SYSCALL instruction (software interrupt)
7840 nr=0;
7841 }
cf95b4f0 7842 else if(dops[i].itype==COP0 && (source[i]&0x3f)==0x18)
57871462 7843 {
7844 // ERET instruction (return from interrupt)
7845 nr=0;
7846 }
7847 else // Non-branch
7848 {
7849 if(i<slen-1) {
7850 for(hr=0;hr<HOST_REGS;hr++) {
7851 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
7852 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
7853 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
7854 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
7855 }
7856 }
7857 }
53358c1d 7858 // Overwritten registers are not needed
7859 if (dops[i].rt1) nr &= ~get_regm(regs[i].regmap, dops[i].rt1);
7860 if (dops[i].rt2) nr &= ~get_regm(regs[i].regmap, dops[i].rt2);
7861 nr &= ~get_regm(regs[i].regmap, FTEMP);
7862 // Source registers are needed
7863 nr |= get_regm(regmap_pre[i], dops[i].rs1);
7864 nr |= get_regm(regmap_pre[i], dops[i].rs2);
7865 nr |= get_regm(regs[i].regmap_entry, dops[i].rs1);
7866 nr |= get_regm(regs[i].regmap_entry, dops[i].rs2);
7867 if (ram_offset && (dops[i].is_load || dops[i].is_store)) {
7868 nr |= get_regm(regmap_pre[i], ROREG);
7869 nr |= get_regm(regs[i].regmap_entry, ROREG);
7870 }
7871 if (dops[i].is_store) {
7872 nr |= get_regm(regmap_pre[i], INVCP);
7873 nr |= get_regm(regs[i].regmap_entry, INVCP);
7874 }
7875
7876 if (i > 0 && !dops[i].bt && regs[i].wasdirty)
57871462 7877 for(hr=0;hr<HOST_REGS;hr++)
7878 {
57871462 7879 // Don't store a register immediately after writing it,
7880 // may prevent dual-issue.
7881 // But do so if this is a branch target, otherwise we
7882 // might have to load the register before the branch.
53358c1d 7883 if((regs[i].wasdirty>>hr)&1) {
7c3a5182 7884 if((regmap_pre[i][hr]>0&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1))) {
9de8a0c3 7885 if(dops[i-1].rt1==regmap_pre[i][hr]) nr|=1<<hr;
7886 if(dops[i-1].rt2==regmap_pre[i][hr]) nr|=1<<hr;
57871462 7887 }
7c3a5182 7888 if((regs[i].regmap_entry[hr]>0&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1))) {
9de8a0c3 7889 if(dops[i-1].rt1==regs[i].regmap_entry[hr]) nr|=1<<hr;
7890 if(dops[i-1].rt2==regs[i].regmap_entry[hr]) nr|=1<<hr;
57871462 7891 }
7892 }
7893 }
7894 // Cycle count is needed at branches. Assume it is needed at the target too.
4bdc30ab 7895 if(i==0||dops[i].bt||dops[i].itype==CJUMP) {
57871462 7896 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
7897 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
7898 }
7899 // Save it
53358c1d 7900 last_needed_regs[i & 3] = nr;
9f51b4b9 7901
57871462 7902 // Deallocate unneeded registers
7903 for(hr=0;hr<HOST_REGS;hr++)
7904 {
7905 if(!((nr>>hr)&1)) {
7906 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
fe807a8a 7907 if(dops[i].is_jump)
57871462 7908 {
37387d8b 7909 int map1 = 0, map2 = 0, temp = 0; // or -1 ??
7910 if (dops[i+1].is_load || dops[i+1].is_store)
7911 map1 = ROREG;
7912 if (dops[i+1].is_store)
7913 map2 = INVCP;
7914 if(dops[i+1].itype==LOADLR || dops[i+1].itype==STORELR || dops[i+1].itype==C2LS)
7915 temp = FTEMP;
9de8a0c3 7916 if(regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
7917 regs[i].regmap[hr]!=dops[i].rt1 && regs[i].regmap[hr]!=dops[i].rt2 &&
7918 regs[i].regmap[hr]!=dops[i+1].rt1 && regs[i].regmap[hr]!=dops[i+1].rt2 &&
cf95b4f0 7919 regs[i].regmap[hr]!=dops[i+1].rs1 && regs[i].regmap[hr]!=dops[i+1].rs2 &&
9de8a0c3 7920 regs[i].regmap[hr]!=temp && regs[i].regmap[hr]!=PTEMP &&
57871462 7921 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
7922 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
37387d8b 7923 regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2)
57871462 7924 {
7925 regs[i].regmap[hr]=-1;
7926 regs[i].isconst&=~(1<<hr);
a550c61c 7927 regs[i].dirty&=~(1<<hr);
7928 regs[i+1].wasdirty&=~(1<<hr);
9de8a0c3 7929 if(branch_regs[i].regmap[hr]!=dops[i].rs1 && branch_regs[i].regmap[hr]!=dops[i].rs2 &&
7930 branch_regs[i].regmap[hr]!=dops[i].rt1 && branch_regs[i].regmap[hr]!=dops[i].rt2 &&
7931 branch_regs[i].regmap[hr]!=dops[i+1].rt1 && branch_regs[i].regmap[hr]!=dops[i+1].rt2 &&
cf95b4f0 7932 branch_regs[i].regmap[hr]!=dops[i+1].rs1 && branch_regs[i].regmap[hr]!=dops[i+1].rs2 &&
9de8a0c3 7933 branch_regs[i].regmap[hr]!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
57871462 7934 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
7935 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
37387d8b 7936 branch_regs[i].regmap[hr]!=map1 && branch_regs[i].regmap[hr]!=map2)
57871462 7937 {
7938 branch_regs[i].regmap[hr]=-1;
7939 branch_regs[i].regmap_entry[hr]=-1;
fe807a8a 7940 if (!dops[i].is_ujump)
57871462 7941 {
fe807a8a 7942 if (i < slen-2) {
57871462 7943 regmap_pre[i+2][hr]=-1;
79c75f1b 7944 regs[i+2].wasconst&=~(1<<hr);
57871462 7945 }
7946 }
7947 }
7948 }
7949 }
7950 else
7951 {
7952 // Non-branch
7953 if(i>0)
7954 {
37387d8b 7955 int map1 = -1, map2 = -1, temp=-1;
7956 if (dops[i].is_load || dops[i].is_store)
7957 map1 = ROREG;
7958 if (dops[i].is_store)
7959 map2 = INVCP;
7960 if (dops[i].itype==LOADLR || dops[i].itype==STORELR || dops[i].itype==C2LS)
7961 temp = FTEMP;
9de8a0c3 7962 if(regs[i].regmap[hr]!=dops[i].rt1 && regs[i].regmap[hr]!=dops[i].rt2 &&
cf95b4f0 7963 regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
9de8a0c3 7964 regs[i].regmap[hr]!=temp && regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2 &&
4b1c7cd1 7965 //(dops[i].itype!=SPAN||regs[i].regmap[hr]!=CCREG)
7966 regs[i].regmap[hr] != CCREG)
57871462 7967 {
cf95b4f0 7968 if(i<slen-1&&!dops[i].is_ds) {
ad49de89 7969 assert(regs[i].regmap[hr]<64);
afec9d44 7970 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]>0)
57871462 7971 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
57871462 7972 {
c43b5311 7973 SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
57871462 7974 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
7975 }
7976 regmap_pre[i+1][hr]=-1;
7977 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
79c75f1b 7978 regs[i+1].wasconst&=~(1<<hr);
57871462 7979 }
7980 regs[i].regmap[hr]=-1;
7981 regs[i].isconst&=~(1<<hr);
a550c61c 7982 regs[i].dirty&=~(1<<hr);
7983 regs[i+1].wasdirty&=~(1<<hr);
57871462 7984 }
7985 }
7986 }
3968e69e 7987 } // if needed
7988 } // for hr
57871462 7989 }
4149788d 7990}
9f51b4b9 7991
4149788d 7992// If a register is allocated during a loop, try to allocate it for the
7993// entire loop, if possible. This avoids loading/storing registers
7994// inside of the loop.
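// For each backward branch (i.e. a loop), this walks the host registers and
// tries to keep the same guest register resident in the same host register
// from the loop head down to the branch, patching regmap/regmap_entry of the
// instructions in between when no conflicting allocation is found.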
7995static noinline void pass5a_preallocate1(void)
7996{
7997 int i, j, hr;
57871462 7998 signed char f_regmap[HOST_REGS];
7999 clear_all_regs(f_regmap);
8000 for(i=0;i<slen-1;i++)
8001 {
cf95b4f0 8002 if(dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
57871462 8003 {
9f51b4b9 8004 if(ba[i]>=start && ba[i]<(start+i*4))
cf95b4f0 8005 if(dops[i+1].itype==NOP||dops[i+1].itype==MOV||dops[i+1].itype==ALU
8006 ||dops[i+1].itype==SHIFTIMM||dops[i+1].itype==IMM16||dops[i+1].itype==LOAD
8007 ||dops[i+1].itype==STORE||dops[i+1].itype==STORELR||dops[i+1].itype==C1LS
8008 ||dops[i+1].itype==SHIFT||dops[i+1].itype==COP1
8009 ||dops[i+1].itype==COP2||dops[i+1].itype==C2LS||dops[i+1].itype==C2OP)
57871462 8010 {
8011 int t=(ba[i]-start)>>2;
fe807a8a 8012 if(t > 0 && !dops[t-1].is_jump) // loop_preload can't handle jumps into delay slots
cf95b4f0 8013 if(t<2||(dops[t-2].itype!=UJUMP&&dops[t-2].itype!=RJUMP)||dops[t-2].rt1!=31) // call/ret assumes no registers allocated
57871462 8014 for(hr=0;hr<HOST_REGS;hr++)
8015 {
7c3a5182 8016 if(regs[i].regmap[hr]>=0) {
b372a952 8017 if(f_regmap[hr]!=regs[i].regmap[hr]) {
8018 // dealloc old register
8019 int n;
8020 for(n=0;n<HOST_REGS;n++)
8021 {
8022 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8023 }
8024 // and alloc new one
8025 f_regmap[hr]=regs[i].regmap[hr];
8026 }
8027 }
7c3a5182 8028 if(branch_regs[i].regmap[hr]>=0) {
b372a952 8029 if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
8030 // dealloc old register
8031 int n;
8032 for(n=0;n<HOST_REGS;n++)
8033 {
8034 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
8035 }
8036 // and alloc new one
8037 f_regmap[hr]=branch_regs[i].regmap[hr];
8038 }
8039 }
cf95b4f0 8040 if(dops[i].ooo) {
9f51b4b9 8041 if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
e1190b87 8042 f_regmap[hr]=branch_regs[i].regmap[hr];
8043 }else{
9f51b4b9 8044 if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
57871462 8045 f_regmap[hr]=branch_regs[i].regmap[hr];
8046 }
8047 // Avoid dirty->clean transition
e1190b87 8048 #ifdef DESTRUCTIVE_WRITEBACK
57871462 8049 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
e1190b87 8050 #endif
8051 // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
8052 // case above; however, it's always a good idea. We can't hoist the
8053 // load if the register was already allocated, so there's no point
8054 // wasting time analyzing most of these cases. It only "succeeds"
8055 // when the mapping was different and the load can be replaced with
8056 // a mov, which is of negligible benefit. So such cases are
8057 // skipped below.
57871462 8058 if(f_regmap[hr]>0) {
198df76f 8059 if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
57871462 8060 int r=f_regmap[hr];
8061 for(j=t;j<=i;j++)
8062 {
8063 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
8064 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
00fa9369 8065 assert(r < 64);
9de8a0c3 8066 if(regs[j].regmap[hr]==f_regmap[hr]&&f_regmap[hr]<TEMPREG) {
57871462 8067 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
8068 int k;
8069 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
670c0f22 8070 if(get_reg(regs[i].regmap,f_regmap[hr])>=0) break;
57871462 8071 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
57871462 8072 k=i;
8073 while(k>1&&regs[k-1].regmap[hr]==-1) {
e1190b87 8074 if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
8075 //printf("no free regs for store %x\n",start+(k-1)*4);
8076 break;
57871462 8077 }
57871462 8078 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
8079 //printf("no-match due to different register\n");
8080 break;
8081 }
fe807a8a 8082 if (dops[k-2].is_jump) {
57871462 8083 //printf("no-match due to branch\n");
8084 break;
8085 }
8086 // call/ret fast path assumes no registers allocated
cf95b4f0 8087 if(k>2&&(dops[k-3].itype==UJUMP||dops[k-3].itype==RJUMP)&&dops[k-3].rt1==31) {
57871462 8088 break;
8089 }
57871462 8090 k--;
8091 }
57871462 8092 if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
8093 //printf("Extend r%d, %x ->\n",hr,start+k*4);
8094 while(k<i) {
8095 regs[k].regmap_entry[hr]=f_regmap[hr];
8096 regs[k].regmap[hr]=f_regmap[hr];
8097 regmap_pre[k+1][hr]=f_regmap[hr];
8098 regs[k].wasdirty&=~(1<<hr);
8099 regs[k].dirty&=~(1<<hr);
8100 regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
8101 regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
8102 regs[k].wasconst&=~(1<<hr);
8103 regs[k].isconst&=~(1<<hr);
8104 k++;
8105 }
8106 }
8107 else {
8108 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
8109 break;
8110 }
8111 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
8112 if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
8113 //printf("OK fill %x (r%d)\n",start+i*4,hr);
8114 regs[i].regmap_entry[hr]=f_regmap[hr];
8115 regs[i].regmap[hr]=f_regmap[hr];
8116 regs[i].wasdirty&=~(1<<hr);
8117 regs[i].dirty&=~(1<<hr);
8118 regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
8119 regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
8120 regs[i].wasconst&=~(1<<hr);
8121 regs[i].isconst&=~(1<<hr);
8122 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
8123 branch_regs[i].wasdirty&=~(1<<hr);
8124 branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
8125 branch_regs[i].regmap[hr]=f_regmap[hr];
8126 branch_regs[i].dirty&=~(1<<hr);
8127 branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
8128 branch_regs[i].wasconst&=~(1<<hr);
8129 branch_regs[i].isconst&=~(1<<hr);
fe807a8a 8130 if (!dops[i].is_ujump) {
57871462 8131 regmap_pre[i+2][hr]=f_regmap[hr];
8132 regs[i+2].wasdirty&=~(1<<hr);
8133 regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
57871462 8134 }
8135 }
8136 }
8137 for(k=t;k<j;k++) {
e1190b87 8138 // Alloc register clean at beginning of loop,
8139 // but may dirty it in pass 6
57871462 8140 regs[k].regmap_entry[hr]=f_regmap[hr];
8141 regs[k].regmap[hr]=f_regmap[hr];
57871462 8142 regs[k].dirty&=~(1<<hr);
8143 regs[k].wasconst&=~(1<<hr);
8144 regs[k].isconst&=~(1<<hr);
fe807a8a 8145 if (dops[k].is_jump) {
e1190b87 8146 branch_regs[k].regmap_entry[hr]=f_regmap[hr];
8147 branch_regs[k].regmap[hr]=f_regmap[hr];
8148 branch_regs[k].dirty&=~(1<<hr);
8149 branch_regs[k].wasconst&=~(1<<hr);
8150 branch_regs[k].isconst&=~(1<<hr);
fe807a8a 8151 if (!dops[k].is_ujump) {
e1190b87 8152 regmap_pre[k+2][hr]=f_regmap[hr];
8153 regs[k+2].wasdirty&=~(1<<hr);
e1190b87 8154 }
8155 }
8156 else
8157 {
8158 regmap_pre[k+1][hr]=f_regmap[hr];
8159 regs[k+1].wasdirty&=~(1<<hr);
8160 }
57871462 8161 }
8162 if(regs[j].regmap[hr]==f_regmap[hr])
8163 regs[j].regmap_entry[hr]=f_regmap[hr];
8164 break;
8165 }
8166 if(j==i) break;
8167 if(regs[j].regmap[hr]>=0)
8168 break;
8169 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
8170 //printf("no-match due to different register\n");
8171 break;
8172 }
fe807a8a 8173 if (dops[j].is_ujump)
e1190b87 8174 {
8175 // Stop on unconditional branch
8176 break;
8177 }
cf95b4f0 8178 if(dops[j].itype==CJUMP||dops[j].itype==SJUMP)
e1190b87 8179 {
cf95b4f0 8180 if(dops[j].ooo) {
9f51b4b9 8181 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
e1190b87 8182 break;
8183 }else{
9f51b4b9 8184 if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
e1190b87 8185 break;
8186 }
8187 if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
8188 //printf("no-match due to different register (branch)\n");
57871462 8189 break;
8190 }
8191 }
e1190b87 8192 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
8193 //printf("No free regs for store %x\n",start+j*4);
8194 break;
8195 }
ad49de89 8196 assert(f_regmap[hr]<64);
57871462 8197 }
8198 }
8199 }
8200 }
8201 }
8202 }else{
198df76f 8203 // Non-branch or undetermined branch target
57871462 8204 for(hr=0;hr<HOST_REGS;hr++)
8205 {
8206 if(hr!=EXCLUDE_REG) {
7c3a5182 8207 if(regs[i].regmap[hr]>=0) {
b372a952 8208 if(f_regmap[hr]!=regs[i].regmap[hr]) {
8209 // dealloc old register
8210 int n;
8211 for(n=0;n<HOST_REGS;n++)
8212 {
8213 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8214 }
4149788d 8215 // and alloc new one
8216 f_regmap[hr]=regs[i].regmap[hr];
8217 }
8218 }
8219 }
8220 }
8221 // Try to restore cycle count at branch targets
8222 if(dops[i].bt) {
8223 for(j=i;j<slen-1;j++) {
8224 if(regs[j].regmap[HOST_CCREG]!=-1) break;
8225 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
8226 //printf("no free regs for store %x\n",start+j*4);
8227 break;
8228 }
8229 }
8230 if(regs[j].regmap[HOST_CCREG]==CCREG) {
8231 int k=i;
8232 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
8233 while(k<j) {
8234 regs[k].regmap_entry[HOST_CCREG]=CCREG;
8235 regs[k].regmap[HOST_CCREG]=CCREG;
8236 regmap_pre[k+1][HOST_CCREG]=CCREG;
8237 regs[k+1].wasdirty|=1<<HOST_CCREG;
8238 regs[k].dirty|=1<<HOST_CCREG;
8239 regs[k].wasconst&=~(1<<HOST_CCREG);
8240 regs[k].isconst&=~(1<<HOST_CCREG);
8241 k++;
8242 }
8243 regs[j].regmap_entry[HOST_CCREG]=CCREG;
8244 }
8245 // Work backwards from the branch target
8246 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
8247 {
8248 //printf("Extend backwards\n");
8249 int k;
8250 k=i;
8251 while(regs[k-1].regmap[HOST_CCREG]==-1) {
8252 if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
8253 //printf("no free regs for store %x\n",start+(k-1)*4);
8254 break;
8255 }
8256 k--;
8257 }
8258 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
8259 //printf("Extend CC, %x ->\n",start+k*4);
8260 while(k<=i) {
8261 regs[k].regmap_entry[HOST_CCREG]=CCREG;
8262 regs[k].regmap[HOST_CCREG]=CCREG;
8263 regmap_pre[k+1][HOST_CCREG]=CCREG;
8264 regs[k+1].wasdirty|=1<<HOST_CCREG;
8265 regs[k].dirty|=1<<HOST_CCREG;
8266 regs[k].wasconst&=~(1<<HOST_CCREG);
8267 regs[k].isconst&=~(1<<HOST_CCREG);
8268 k++;
8269 }
8270 }
8271 else {
8272 //printf("Fail Extend CC, %x ->\n",start+k*4);
8273 }
8274 }
8275 }
8276 if(dops[i].itype!=STORE&&dops[i].itype!=STORELR&&dops[i].itype!=C1LS&&dops[i].itype!=SHIFT&&
8277 dops[i].itype!=NOP&&dops[i].itype!=MOV&&dops[i].itype!=ALU&&dops[i].itype!=SHIFTIMM&&
8278 dops[i].itype!=IMM16&&dops[i].itype!=LOAD&&dops[i].itype!=COP1)
8279 {
8280 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
8281 }
8282 }
8283 }
8284}
8285
8286// This allocates registers (if possible) one instruction prior
8287// to use, which can avoid a load-use penalty on certain CPUs.
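// For each pair (i, i+1): if instruction i+1 reads a register that will live
// in a host register which is still free at instruction i, establish that
// mapping at i already, so the value is loaded one instruction before use.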
8288static noinline void pass5b_preallocate2(void)
8289{
8290 int i, hr;
8291 for(i=0;i<slen-1;i++)
8292 {
8293 if (!i || !dops[i-1].is_jump)
8294 {
8295 if(!dops[i+1].bt)
8296 {
8297 if(dops[i].itype==ALU||dops[i].itype==MOV||dops[i].itype==LOAD||dops[i].itype==SHIFTIMM||dops[i].itype==IMM16
8298 ||((dops[i].itype==COP1||dops[i].itype==COP2)&&dops[i].opcode2<3))
8299 {
8300 if(dops[i+1].rs1) {
8301 if((hr=get_reg(regs[i+1].regmap,dops[i+1].rs1))>=0)
8302 {
8303 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8304 {
8305 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8306 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8307 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8308 regs[i].isconst&=~(1<<hr);
8309 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8310 constmap[i][hr]=constmap[i+1][hr];
8311 regs[i+1].wasdirty&=~(1<<hr);
8312 regs[i].dirty&=~(1<<hr);
8313 }
8314 }
8315 }
8316 if(dops[i+1].rs2) {
8317 if((hr=get_reg(regs[i+1].regmap,dops[i+1].rs2))>=0)
8318 {
8319 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8320 {
8321 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8322 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8323 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8324 regs[i].isconst&=~(1<<hr);
8325 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8326 constmap[i][hr]=constmap[i+1][hr];
8327 regs[i+1].wasdirty&=~(1<<hr);
8328 regs[i].dirty&=~(1<<hr);
8329 }
8330 }
8331 }
8332 // Preload target address for load instruction (non-constant)
8333 if(dops[i+1].itype==LOAD&&dops[i+1].rs1&&get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8334 if((hr=get_reg(regs[i+1].regmap,dops[i+1].rt1))>=0)
8335 {
8336 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8337 {
8338 regs[i].regmap[hr]=dops[i+1].rs1;
8339 regmap_pre[i+1][hr]=dops[i+1].rs1;
8340 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8341 regs[i].isconst&=~(1<<hr);
8342 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8343 constmap[i][hr]=constmap[i+1][hr];
8344 regs[i+1].wasdirty&=~(1<<hr);
8345 regs[i].dirty&=~(1<<hr);
8346 }
8347 }
8348 }
8349 // Load source into target register
8350 if(dops[i+1].use_lt1&&get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8351 if((hr=get_reg(regs[i+1].regmap,dops[i+1].rt1))>=0)
8352 {
8353 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8354 {
8355 regs[i].regmap[hr]=dops[i+1].rs1;
8356 regmap_pre[i+1][hr]=dops[i+1].rs1;
8357 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8358 regs[i].isconst&=~(1<<hr);
8359 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8360 constmap[i][hr]=constmap[i+1][hr];
8361 regs[i+1].wasdirty&=~(1<<hr);
8362 regs[i].dirty&=~(1<<hr);
8363 }
8364 }
8365 }
8366 // Address for store instruction (non-constant)
8367 if(dops[i+1].itype==STORE||dops[i+1].itype==STORELR
8368 ||(dops[i+1].opcode&0x3b)==0x39||(dops[i+1].opcode&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
8369 if(get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8370 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
8371 if(hr<0) hr=get_reg_temp(regs[i+1].regmap);
8372 else {
8373 regs[i+1].regmap[hr]=AGEN1+((i+1)&1);
8374 regs[i+1].isconst&=~(1<<hr);
8375 }
8376 assert(hr>=0);
8377 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8378 {
8379 regs[i].regmap[hr]=dops[i+1].rs1;
8380 regmap_pre[i+1][hr]=dops[i+1].rs1;
8381 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8382 regs[i].isconst&=~(1<<hr);
8383 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8384 constmap[i][hr]=constmap[i+1][hr];
8385 regs[i+1].wasdirty&=~(1<<hr);
8386 regs[i].dirty&=~(1<<hr);
8387 }
8388 }
8389 }
8390 if(dops[i+1].itype==LOADLR||(dops[i+1].opcode&0x3b)==0x31||(dops[i+1].opcode&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
8391 if(get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8392 int nr;
8393 hr=get_reg(regs[i+1].regmap,FTEMP);
8394 assert(hr>=0);
8395 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8396 {
8397 regs[i].regmap[hr]=dops[i+1].rs1;
8398 regmap_pre[i+1][hr]=dops[i+1].rs1;
8399 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8400 regs[i].isconst&=~(1<<hr);
8401 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8402 constmap[i][hr]=constmap[i+1][hr];
8403 regs[i+1].wasdirty&=~(1<<hr);
8404 regs[i].dirty&=~(1<<hr);
8405 }
8406 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
8407 {
8408 // move it to another register
8409 regs[i+1].regmap[hr]=-1;
8410 regmap_pre[i+2][hr]=-1;
8411 regs[i+1].regmap[nr]=FTEMP;
8412 regmap_pre[i+2][nr]=FTEMP;
8413 regs[i].regmap[nr]=dops[i+1].rs1;
8414 regmap_pre[i+1][nr]=dops[i+1].rs1;
8415 regs[i+1].regmap_entry[nr]=dops[i+1].rs1;
8416 regs[i].isconst&=~(1<<nr);
8417 regs[i+1].isconst&=~(1<<nr);
8418 regs[i].dirty&=~(1<<nr);
8419 regs[i+1].wasdirty&=~(1<<nr);
8420 regs[i+1].dirty&=~(1<<nr);
8421 regs[i+2].wasdirty&=~(1<<nr);
8422 }
8423 }
8424 }
8425 if(dops[i+1].itype==LOAD||dops[i+1].itype==LOADLR||dops[i+1].itype==STORE||dops[i+1].itype==STORELR/*||dops[i+1].itype==C1LS||||dops[i+1].itype==C2LS*/) {
8426 hr = -1;
8427 if(dops[i+1].itype==LOAD)
8428 hr=get_reg(regs[i+1].regmap,dops[i+1].rt1);
8429 if(dops[i+1].itype==LOADLR||(dops[i+1].opcode&0x3b)==0x31||(dops[i+1].opcode&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
8430 hr=get_reg(regs[i+1].regmap,FTEMP);
8431 if(dops[i+1].itype==STORE||dops[i+1].itype==STORELR||(dops[i+1].opcode&0x3b)==0x39||(dops[i+1].opcode&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
8432 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
8433 if(hr<0) hr=get_reg_temp(regs[i+1].regmap);
8434 }
8435 if(hr>=0&&regs[i].regmap[hr]<0) {
8436 int rs=get_reg(regs[i+1].regmap,dops[i+1].rs1);
8437 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
8438 regs[i].regmap[hr]=AGEN1+((i+1)&1);
8439 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
8440 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
8441 regs[i].isconst&=~(1<<hr);
8442 regs[i+1].wasdirty&=~(1<<hr);
8443 regs[i].dirty&=~(1<<hr);
8444 }
b372a952 8445 }
8446 }
57871462 8447 }
8448 }
4149788d 8449 }
8450 }
8451}
8452
8453// Write back dirty registers as soon as we will no longer modify them,
8454// so that we don't end up with lots of writes at the branches.
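// Scans the range backwards, deriving per-instruction will_dirty/wont_dirty
// host register masks from the destination registers of the instructions that
// follow. With wr==0 this is a pure analysis pass that only fills the static
// will_dirty[]/wont_dirty[] arrays (used when a backward branch target is
// revisited); with wr!=0 the results are also applied to regs[].dirty and
// branch_regs[].dirty.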
8455static noinline void pass6_clean_registers(int istart, int iend, int wr)
8456{
53358c1d 8457 static u_int wont_dirty[MAXBLOCK];
8458 static u_int will_dirty[MAXBLOCK];
4149788d 8459 int i;
8460 int r;
8461 u_int will_dirty_i,will_dirty_next,temp_will_dirty;
8462 u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
8463 if(iend==slen-1) {
8464 will_dirty_i=will_dirty_next=0;
8465 wont_dirty_i=wont_dirty_next=0;
8466 }else{
8467 will_dirty_i=will_dirty_next=will_dirty[iend+1];
8468 wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
8469 }
8470 for (i=iend;i>=istart;i--)
8471 {
8472 signed char rregmap_i[RRMAP_SIZE];
8473 u_int hr_candirty = 0;
8474 assert(HOST_REGS < 32);
8475 make_rregs(regs[i].regmap, rregmap_i, &hr_candirty);
8476 __builtin_prefetch(regs[i-1].regmap);
8477 if(dops[i].is_jump)
8478 {
8479 signed char branch_rregmap_i[RRMAP_SIZE];
8480 u_int branch_hr_candirty = 0;
8481 make_rregs(branch_regs[i].regmap, branch_rregmap_i, &branch_hr_candirty);
8482 if(ba[i]<start || ba[i]>=(start+slen*4))
8483 {
8484 // Branch out of this block, flush all regs
8485 will_dirty_i = 0;
8486 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8487 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8488 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8489 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8490 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8491 will_dirty_i &= branch_hr_candirty;
8492 if (dops[i].is_ujump)
8493 {
8494 // Unconditional branch
8495 wont_dirty_i = 0;
8496 // Merge in delay slot (will dirty)
8497 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8498 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8499 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8500 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8501 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8502 will_dirty_i &= hr_candirty;
57871462 8503 }
4149788d 8504 else
8505 {
8506 // Conditional branch
8507 wont_dirty_i = wont_dirty_next;
8508 // Merge in delay slot (will dirty)
8509 // (the original code had no explanation why these 2 are commented out)
8510 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8511 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8512 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8513 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8514 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8515 will_dirty_i &= hr_candirty;
8516 }
8517 // Merge in delay slot (won't dirty)
8518 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8519 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8520 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8521 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8522 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8523 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8524 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8525 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8526 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8527 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8528 wont_dirty_i &= ~(1u << 31);
8529 if(wr) {
8530 #ifndef DESTRUCTIVE_WRITEBACK
8531 branch_regs[i].dirty&=wont_dirty_i;
8532 #endif
8533 branch_regs[i].dirty|=will_dirty_i;
8534 }
8535 }
8536 else
8537 {
8538 // Internal branch
8539 if(ba[i]<=start+i*4) {
8540 // Backward branch
8541 if (dops[i].is_ujump)
8542 {
8543 // Unconditional branch
8544 temp_will_dirty=0;
8545 temp_wont_dirty=0;
8546 // Merge in delay slot (will dirty)
8547 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8548 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8549 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8550 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8551 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8552 temp_will_dirty &= branch_hr_candirty;
8553 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8554 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8555 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8556 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8557 temp_will_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8558 temp_will_dirty &= hr_candirty;
8559 } else {
8560 // Conditional branch (not taken case)
8561 temp_will_dirty=will_dirty_next;
8562 temp_wont_dirty=wont_dirty_next;
8563 // Merge in delay slot (will dirty)
8564 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8565 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8566 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8567 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8568 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8569 temp_will_dirty &= branch_hr_candirty;
8570 //temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8571 //temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8572 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8573 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8574 temp_will_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8575 temp_will_dirty &= hr_candirty;
8576 }
8577 // Merge in delay slot (won't dirty)
8578 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8579 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8580 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8581 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8582 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8583 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8584 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8585 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8586 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8587 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8588 temp_wont_dirty &= ~(1u << 31);
8589 // Deal with changed mappings
8590 if(i<iend) {
8591 for(r=0;r<HOST_REGS;r++) {
8592 if(r!=EXCLUDE_REG) {
8593 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
8594 temp_will_dirty&=~(1<<r);
8595 temp_wont_dirty&=~(1<<r);
8596 if(regmap_pre[i][r]>0 && regmap_pre[i][r]<34) {
8597 temp_will_dirty|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8598 temp_wont_dirty|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8599 } else {
8600 temp_will_dirty|=1<<r;
8601 temp_wont_dirty|=1<<r;
8602 }
8603 }
8604 }
8605 }
8606 }
8607 if(wr) {
8608 will_dirty[i]=temp_will_dirty;
8609 wont_dirty[i]=temp_wont_dirty;
8610 pass6_clean_registers((ba[i]-start)>>2,i-1,0);
8611 }else{
8612 // Limit recursion. It can take an excessive amount
8613 // of time if there are a lot of nested loops.
8614 will_dirty[(ba[i]-start)>>2]=0;
8615 wont_dirty[(ba[i]-start)>>2]=-1;
57871462 8616 }
57871462 8617 }
4149788d 8618 /*else*/ if(1)
57871462 8619 {
4149788d 8620 if (dops[i].is_ujump)
8621 {
8622 // Unconditional branch
8623 will_dirty_i=0;
8624 wont_dirty_i=0;
8625 //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
8626 for(r=0;r<HOST_REGS;r++) {
8627 if(r!=EXCLUDE_REG) {
8628 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
8629 will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
8630 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
8631 }
8632 if(branch_regs[i].regmap[r]>=0) {
8633 will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>branch_regs[i].regmap[r])&1)<<r;
8634 wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>branch_regs[i].regmap[r])&1)<<r;
8635 }
8636 }
57871462 8637 }
4149788d 8638 //}
8639 // Merge in delay slot
8640 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8641 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8642 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8643 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8644 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8645 will_dirty_i &= branch_hr_candirty;
8646 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8647 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8648 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8649 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8650 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8651 will_dirty_i &= hr_candirty;
8652 } else {
8653 // Conditional branch
8654 will_dirty_i=will_dirty_next;
8655 wont_dirty_i=wont_dirty_next;
8656 //if(ba[i]>start+i*4) // Disable recursion (for debugging)
8657 for(r=0;r<HOST_REGS;r++) {
8658 if(r!=EXCLUDE_REG) {
8659 signed char target_reg=branch_regs[i].regmap[r];
8660 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
8661 will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
8662 wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
8663 }
8664 else if(target_reg>=0) {
8665 will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>target_reg)&1)<<r;
8666 wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>target_reg)&1)<<r;
8667 }
8668 }
57871462 8669 }
4149788d 8670 // Merge in delay slot
8671 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8672 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8673 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8674 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8675 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8676 will_dirty_i &= branch_hr_candirty;
8677 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8678 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8679 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8680 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8681 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8682 will_dirty_i &= hr_candirty;
57871462 8683 }
4149788d 8684 // Merge in delay slot (won't dirty)
8685 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8686 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8687 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8688 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8689 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8690 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8691 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8692 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8693 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8694 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8695 wont_dirty_i &= ~(1u << 31);
8696 if(wr) {
8697 #ifndef DESTRUCTIVE_WRITEBACK
8698 branch_regs[i].dirty&=wont_dirty_i;
8699 #endif
8700 branch_regs[i].dirty|=will_dirty_i;
57871462 8701 }
8702 }
8703 }
57871462 8704 }
4149788d 8705 else if(dops[i].itype==SYSCALL||dops[i].itype==HLECALL||dops[i].itype==INTCALL)
57871462 8706 {
4149788d 8707 // SYSCALL instruction (software interrupt)
8708 will_dirty_i=0;
8709 wont_dirty_i=0;
8710 }
8711 else if(dops[i].itype==COP0 && (source[i]&0x3f)==0x18)
8712 {
8713 // ERET instruction (return from interrupt)
8714 will_dirty_i=0;
8715 wont_dirty_i=0;
8716 }
8717 will_dirty_next=will_dirty_i;
8718 wont_dirty_next=wont_dirty_i;
8719 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8720 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8721 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8722 will_dirty_i &= hr_candirty;
8723 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8724 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8725 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8726 wont_dirty_i &= ~(1u << 31);
8727 if (i > istart && !dops[i].is_jump) {
8728 // Don't store a register immediately after writing it,
8729 // as that may prevent dual-issue.
8730 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i-1].rt1) & 31);
8731 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i-1].rt2) & 31);
8732 }
8733 // Save it
8734 will_dirty[i]=will_dirty_i;
8735 wont_dirty[i]=wont_dirty_i;
8736 // Mark registers that won't be dirtied as not dirty
8737 if(wr) {
8738 regs[i].dirty|=will_dirty_i;
8739 #ifndef DESTRUCTIVE_WRITEBACK
8740 regs[i].dirty&=wont_dirty_i;
8741 if(dops[i].is_jump)
57871462 8742 {
4149788d 8743 if (i < iend-1 && !dops[i].is_ujump) {
8744 for(r=0;r<HOST_REGS;r++) {
8745 if(r!=EXCLUDE_REG) {
8746 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
8747 regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
8748 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
57871462 8749 }
8750 }
8751 }
4149788d 8752 }
8753 else
8754 {
8755 if(i<iend) {
8756 for(r=0;r<HOST_REGS;r++) {
8757 if(r!=EXCLUDE_REG) {
8758 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
8759 regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
8760 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
57871462 8761 }
8762 }
8763 }
4149788d 8764 }
8765 #endif
8766 }
8767 // Deal with changed mappings
8768 temp_will_dirty=will_dirty_i;
8769 temp_wont_dirty=wont_dirty_i;
8770 for(r=0;r<HOST_REGS;r++) {
8771 if(r!=EXCLUDE_REG) {
8772 int nr;
8773 if(regs[i].regmap[r]==regmap_pre[i][r]) {
8774 if(wr) {
8775 #ifndef DESTRUCTIVE_WRITEBACK
8776 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
8777 #endif
8778 regs[i].wasdirty|=will_dirty_i&(1<<r);
57871462 8779 }
4149788d 8780 }
8781 else if(regmap_pre[i][r]>=0&&(nr=get_rreg(rregmap_i,regmap_pre[i][r]))>=0) {
8782 // Register moved to a different register
8783 will_dirty_i&=~(1<<r);
8784 wont_dirty_i&=~(1<<r);
8785 will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
8786 wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
8787 if(wr) {
8788 #ifndef DESTRUCTIVE_WRITEBACK
8789 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
8790 #endif
8791 regs[i].wasdirty|=will_dirty_i&(1<<r);
8792 }
8793 }
8794 else {
8795 will_dirty_i&=~(1<<r);
8796 wont_dirty_i&=~(1<<r);
8797 if(regmap_pre[i][r]>0 && regmap_pre[i][r]<34) {
8798 will_dirty_i|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8799 wont_dirty_i|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8800 } else {
8801 wont_dirty_i|=1<<r;
8802 /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
57871462 8803 }
8804 }
8805 }
8806 }
8807 }
4149788d 8808}
8809
8810static noinline void pass10_expire_blocks(void)
8811{
93c0345b 8812 u_int step = MAX_OUTPUT_BLOCK_SIZE / PAGE_COUNT / 2;
8813 // not sizeof(ndrc->translation_cache) due to vita hack
8814 u_int step_mask = ((1u << TARGET_SIZE_2) - 1u) & ~(step - 1u);
8815 u_int end = (out - ndrc->translation_cache + EXPIRITY_OFFSET) & step_mask;
8816 u_int base_shift = __builtin_ctz(MAX_OUTPUT_BLOCK_SIZE);
8817 int hit;
8818
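// Walk expirep forward to a point EXPIRITY_OFFSET ahead of the current output
// position, alternating between two phases: phase 0 removes blocks whose code
// was emitted in the MAX_OUTPUT_BLOCK_SIZE-aligned window around expirep
// (flushing caches, and the mini hash table if enabled, when anything was
// removed); phase 1 unlinks jumps that point into that window. The phase bit
// flips every MAX_OUTPUT_BLOCK_SIZE/2 bytes, and within each half-window every
// one of the PAGE_COUNT block/jump lists is visited once.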
8819 for (; expirep != end; expirep = ((expirep + step) & step_mask))
4149788d 8820 {
93c0345b 8821 u_int base_offs = expirep & ~(MAX_OUTPUT_BLOCK_SIZE - 1);
8822 u_int block_i = expirep / step & (PAGE_COUNT - 1);
8823 u_int phase = (expirep >> (base_shift - 1)) & 1u;
8824 if (!(expirep & (MAX_OUTPUT_BLOCK_SIZE / 2 - 1))) {
8825 inv_debug("EXP: base_offs %x/%x phase %u\n", base_offs,
55a695d9 8826 out - ndrc->translation_cache, phase);
93c0345b 8827 }
8828
8829 if (!phase) {
8830 hit = blocks_remove_matching_addrs(&blocks[block_i], base_offs, base_shift);
8831 if (hit) {
8832 do_clear_cache();
8833 #ifdef USE_MINI_HT
8834 memset(mini_ht, -1, sizeof(mini_ht));
8835 #endif
8836 }
4149788d 8837 }
93c0345b 8838 else
b7ad2f2c 8839 unlink_jumps_tc_range(jumps[block_i], base_offs, base_shift);
4149788d 8840 }
8841}
8842
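// Allocate a block_info with jump_in_count entry-point slots appended (they
// are filled in by the caller), record where its code begins in the
// translation cache, and link it into the per-page block list, which is kept
// sorted by start address.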
104df9d3 8843static struct block_info *new_block_info(u_int start, u_int len,
8844 const void *source, const void *copy, u_char *beginning, u_short jump_in_count)
8845{
8846 struct block_info **b_pptr;
8847 struct block_info *block;
8848 u_int page = get_page(start);
8849
8850 block = malloc(sizeof(*block) + jump_in_count * sizeof(block->jump_in[0]));
8851 assert(block);
8852 assert(jump_in_count > 0);
8853 block->source = source;
8854 block->copy = copy;
8855 block->start = start;
8856 block->len = len;
8857 block->reg_sv_flags = 0;
8858 block->tc_offs = beginning - ndrc->translation_cache;
8859 //block->tc_len = out - beginning;
8860 block->is_dirty = 0;
3280e616 8861 block->inv_near_misses = 0;
104df9d3 8862 block->jump_in_cnt = jump_in_count;
8863
93c0345b 8864 // insert sorted by start mirror-unmasked vaddr
104df9d3 8865 for (b_pptr = &blocks[page]; ; b_pptr = &((*b_pptr)->next)) {
8866 if (*b_pptr == NULL || (*b_pptr)->start >= start) {
8867 block->next = *b_pptr;
8868 *b_pptr = block;
8869 break;
8870 }
8871 }
8872 stat_inc(stat_blocks);
8873 return block;
8874}
8875
8876static int new_recompile_block(u_int addr)
4149788d 8877{
8878 u_int pagelimit = 0;
8879 u_int state_rflags = 0;
8880 int i;
8881
8882 assem_debug("NOTCOMPILED: addr = %x -> %p\n", addr, out);
8883
8884 // this is just for speculation
8885 for (i = 1; i < 32; i++) {
8886 if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
8887 state_rflags |= 1 << i;
8888 }
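// state_rflags now marks GPRs that currently look like scratchpad pointers
// (upper half 0x1f80); it is stored on the block as reg_sv_flags and feeds
// register value speculation.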
8889
4bdc30ab 8890 assert(!(addr & 3));
8891 start = addr & ~3;
4149788d 8892 new_dynarec_did_compile=1;
8893 if (Config.HLE && start == 0x80001000) // hlecall
8894 {
8895 // XXX: is this enough? Maybe check hleSoftCall?
104df9d3 8896 void *beginning = start_block();
4149788d 8897
4149788d 8898 emit_movimm(start,0);
8899 emit_writeword(0,&pcaddr);
8900 emit_far_jump(new_dyna_leave);
8901 literal_pool(0);
8902 end_block(beginning);
104df9d3 8903 struct block_info *block = new_block_info(start, 4, NULL, NULL, beginning, 1);
8904 block->jump_in[0].vaddr = start;
8905 block->jump_in[0].addr = beginning;
4149788d 8906 return 0;
8907 }
8908 else if (f1_hack && hack_addr == 0) {
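// Instead of compiling, emit a small stub: it records this entry point in
// hack_addr, performs the equivalent of 'lw k0, 0x18(sp)' and jumps to the
// compiled code for the loaded address via ndrc_get_addr_ht.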
8909 void *beginning = start_block();
4149788d 8910 emit_movimm(start, 0);
8911 emit_writeword(0, &hack_addr);
8912 emit_readword(&psxRegs.GPR.n.sp, 0);
8913 emit_readptr(&mem_rtab, 1);
8914 emit_shrimm(0, 12, 2);
8915 emit_readptr_dualindexedx_ptrlen(1, 2, 1);
8916 emit_addimm(0, 0x18, 0);
8917 emit_adds_ptr(1, 1, 1);
8918 emit_ldr_dualindexed(1, 0, 0);
8919 emit_writeword(0, &psxRegs.GPR.r[26]); // lw k0, 0x18(sp)
104df9d3 8920 emit_far_call(ndrc_get_addr_ht);
4149788d 8921 emit_jmpreg(0); // jr k0
8922 literal_pool(0);
8923 end_block(beginning);
8924
104df9d3 8925 struct block_info *block = new_block_info(start, 4, NULL, NULL, beginning, 1);
8926 block->jump_in[0].vaddr = start;
8927 block->jump_in[0].addr = beginning;
4149788d 8928 SysPrintf("F1 hack to %08x\n", start);
8929 return 0;
8930 }
8931
8932 cycle_multiplier_active = cycle_multiplier_override && cycle_multiplier == CYCLE_MULT_DEFAULT
8933 ? cycle_multiplier_override : cycle_multiplier;
8934
8935 source = get_source_start(start, &pagelimit);
8936 if (source == NULL) {
8937 if (addr != hack_addr) {
8938 SysPrintf("Compile at bogus memory address: %08x\n", addr);
8939 hack_addr = addr;
8940 }
8941 //abort();
8942 return -1;
8943 }
8944
8945 /* Pass 1: disassemble */
8946 /* Pass 2: register dependencies, branch targets */
8947 /* Pass 3: register allocation */
8948 /* Pass 4: cull unused host registers */
8949 /* Pass 5: pre-alloc */
8950 /* Pass 6: optimize clean/dirty state */
8951 /* Pass 7: flag 32-bit registers */
8952 /* Pass 8: assembly */
8953 /* Pass 9: linker */
8954 /* Pass 10: garbage collection / free memory */
8955
8956 /* Pass 1 disassembly */
8957
8958 pass1_disassemble(pagelimit);
8959
8960 int clear_hack_addr = apply_hacks();
8961
8962 /* Pass 2 - Register dependencies and branch targets */
8963
8964 pass2_unneeded_regs(0,slen-1,0);
8965
8966 /* Pass 3 - Register allocation */
8967
8968 pass3_register_alloc(addr);
8969
8970 /* Pass 4 - Cull unused host registers */
8971
8972 pass4_cull_unused_regs();
8973
8974 /* Pass 5 - Pre-allocate registers */
8975
8976 pass5a_preallocate1();
8977 pass5b_preallocate2();
9f51b4b9 8978
57871462 8979 /* Pass 6 - Optimize clean/dirty state */
4149788d 8980 pass6_clean_registers(0, slen-1, 1);
9f51b4b9 8981
57871462 8982 /* Pass 7 - Identify 32-bit registers */
04fd948a 8983 for (i=slen-1;i>=0;i--)
8984 {
cf95b4f0 8985 if(dops[i].itype==CJUMP||dops[i].itype==SJUMP)
04fd948a 8986 {
8987 // Conditional branch
8988 if((source[i]>>16)!=0x1000&&i<slen-2) {
8989 // Mark this address as a branch target since it may be called
8990 // upon return from interrupt
cf95b4f0 8991 dops[i+2].bt=1;
04fd948a 8992 }
8993 }
8994 }
57871462 8995
57871462 8996 /* Pass 8 - Assembly */
8997 linkcount=0;stubcount=0;
4149788d 8998 is_delayslot=0;
57871462 8999 u_int dirty_pre=0;
d148d265 9000 void *beginning=start_block();
df4dc2b1 9001 void *instr_addr0_override = NULL;
4bdc30ab 9002 int ds = 0;
9ad4d757 9003
9ad4d757 9004 if (start == 0x80030000) {
3968e69e 9005 // nasty hack for the fastbios thing
96186eba 9006 // override block entry to this code
df4dc2b1 9007 instr_addr0_override = out;
9ad4d757 9008 emit_movimm(start,0);
96186eba 9009 // abuse io address var as a flag that we
9010 // have already returned here once
643aeae3 9011 emit_readword(&address,1);
9012 emit_writeword(0,&pcaddr);
9013 emit_writeword(0,&address);
9ad4d757 9014 emit_cmp(0,1);
3968e69e 9015 #ifdef __aarch64__
9016 emit_jeq(out + 4*2);
2a014d73 9017 emit_far_jump(new_dyna_leave);
3968e69e 9018 #else
643aeae3 9019 emit_jne(new_dyna_leave);
3968e69e 9020 #endif
9ad4d757 9021 }
57871462 9022 for(i=0;i<slen;i++)
9023 {
9de8a0c3 9024 __builtin_prefetch(regs[i+1].regmap);
670c0f22 9025 check_regmap(regmap_pre[i]);
9026 check_regmap(regs[i].regmap_entry);
9027 check_regmap(regs[i].regmap);
57871462 9028 //if(ds) printf("ds: ");
4600ba03 9029 disassemble_inst(i);
57871462 9030 if(ds) {
9031 ds=0; // Skip delay slot
cf95b4f0 9032 if(dops[i].bt) assem_debug("OOPS - branch into delay slot\n");
df4dc2b1 9033 instr_addr[i] = NULL;
57871462 9034 } else {
ffb0b9e0 9035 speculate_register_values(i);
57871462 9036 #ifndef DESTRUCTIVE_WRITEBACK
fe807a8a 9037 if (i < 2 || !dops[i-2].is_ujump)
57871462 9038 {
ad49de89 9039 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
57871462 9040 }
fe807a8a 9041 if((dops[i].itype==CJUMP||dops[i].itype==SJUMP)) {
f776eb14 9042 dirty_pre=branch_regs[i].dirty;
9043 }else{
f776eb14 9044 dirty_pre=regs[i].dirty;
9045 }
57871462 9046 #endif
9047 // write back
fe807a8a 9048 if (i < 2 || !dops[i-2].is_ujump)
57871462 9049 {
ad49de89 9050 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
57871462 9051 loop_preload(regmap_pre[i],regs[i].regmap_entry);
9052 }
9053 // branch target entry point
df4dc2b1 9054 instr_addr[i] = out;
57871462 9055 assem_debug("<->\n");
2330734f 9056 drc_dbg_emit_do_cmp(i, ccadj[i]);
7f94b097 9057 if (clear_hack_addr) {
9058 emit_movimm(0, 0);
9059 emit_writeword(0, &hack_addr);
9060 clear_hack_addr = 0;
9061 }
dd114d7d 9062
57871462 9063 // load regs
9064 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
ad49de89 9065 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty);
cf95b4f0 9066 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i].rs1,dops[i].rs2);
57871462 9067 address_generation(i,&regs[i],regs[i].regmap_entry);
ad49de89 9068 load_consts(regmap_pre[i],regs[i].regmap,i);
fe807a8a 9069 if(dops[i].is_jump)
57871462 9070 {
9071 // Load the delay slot registers if necessary
cf95b4f0 9072 if(dops[i+1].rs1!=dops[i].rs1&&dops[i+1].rs1!=dops[i].rs2&&(dops[i+1].rs1!=dops[i].rt1||dops[i].rt1==0))
9073 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs1,dops[i+1].rs1);
9074 if(dops[i+1].rs2!=dops[i+1].rs1&&dops[i+1].rs2!=dops[i].rs1&&dops[i+1].rs2!=dops[i].rs2&&(dops[i+1].rs2!=dops[i].rt1||dops[i].rt1==0))
9075 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
37387d8b 9076 if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store))
53358c1d 9077 load_reg(regs[i].regmap_entry,regs[i].regmap,ROREG);
37387d8b 9078 if (dops[i+1].is_store)
53358c1d 9079 load_reg(regs[i].regmap_entry,regs[i].regmap,INVCP);
57871462 9080 }
9081 else if(i+1<slen)
9082 {
9083 // Preload registers for following instruction
cf95b4f0 9084 if(dops[i+1].rs1!=dops[i].rs1&&dops[i+1].rs1!=dops[i].rs2)
9085 if(dops[i+1].rs1!=dops[i].rt1&&dops[i+1].rs1!=dops[i].rt2)
9086 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs1,dops[i+1].rs1);
9087 if(dops[i+1].rs2!=dops[i+1].rs1&&dops[i+1].rs2!=dops[i].rs1&&dops[i+1].rs2!=dops[i].rs2)
9088 if(dops[i+1].rs2!=dops[i].rt1&&dops[i+1].rs2!=dops[i].rt2)
9089 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
57871462 9090 }
9091 // TODO: if(is_ooo(i)) address_generation(i+1);
9a3ccfeb 9092 if (!dops[i].is_jump || dops[i].itype == CJUMP)
53358c1d 9093 load_reg(regs[i].regmap_entry,regs[i].regmap,CCREG);
37387d8b 9094 if (ram_offset && (dops[i].is_load || dops[i].is_store))
53358c1d 9095 load_reg(regs[i].regmap_entry,regs[i].regmap,ROREG);
37387d8b 9096 if (dops[i].is_store)
53358c1d 9097 load_reg(regs[i].regmap_entry,regs[i].regmap,INVCP);
2330734f 9098
9099 ds = assemble(i, &regs[i], ccadj[i]);
9100
fe807a8a 9101 if (dops[i].is_ujump)
57871462 9102 literal_pool(1024);
9103 else
9104 literal_pool_jumpover(256);
9105 }
9106 }
3d680478 9107
9108 assert(slen > 0);
cf95b4f0 9109 if (slen > 0 && dops[slen-1].itype == INTCALL) {
3d680478 9110 // no ending needed for this block since INTCALL never returns
9111 }
57871462 9112 // If the block did not end with an unconditional branch,
9113 // add a jump to the next instruction.
3d680478 9114 else if (i > 1) {
4bdc30ab 9115 if (!dops[i-2].is_ujump) {
fe807a8a 9116 assert(!dops[i-1].is_jump);
57871462 9117 assert(i==slen);
cf95b4f0 9118 if(dops[i-2].itype!=CJUMP&&dops[i-2].itype!=SJUMP) {
ad49de89 9119 store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
57871462 9120 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
9121 emit_loadreg(CCREG,HOST_CCREG);
2330734f 9122 emit_addimm(HOST_CCREG, ccadj[i-1] + CLOCK_ADJUST(1), HOST_CCREG);
57871462 9123 }
fe807a8a 9124 else
57871462 9125 {
ad49de89 9126 store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].dirty,start+i*4);
57871462 9127 assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
9128 }
643aeae3 9129 add_to_linker(out,start+i*4,0);
57871462 9130 emit_jmp(0);
9131 }
9132 }
9133 else
9134 {
9135 assert(i>0);
fe807a8a 9136 assert(!dops[i-1].is_jump);
ad49de89 9137 store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
57871462 9138 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
9139 emit_loadreg(CCREG,HOST_CCREG);
2330734f 9140 emit_addimm(HOST_CCREG, ccadj[i-1] + CLOCK_ADJUST(1), HOST_CCREG);
643aeae3 9141 add_to_linker(out,start+i*4,0);
57871462 9142 emit_jmp(0);
9143 }
9144
9145 // TODO: delay slot stubs?
9146 // Stubs
9147 for(i=0;i<stubcount;i++)
9148 {
b14b6a8f 9149 switch(stubs[i].type)
57871462 9150 {
9151 case LOADB_STUB:
9152 case LOADH_STUB:
9153 case LOADW_STUB:
9154 case LOADD_STUB:
9155 case LOADBU_STUB:
9156 case LOADHU_STUB:
9157 do_readstub(i);break;
9158 case STOREB_STUB:
9159 case STOREH_STUB:
9160 case STOREW_STUB:
9161 case STORED_STUB:
9162 do_writestub(i);break;
9163 case CC_STUB:
9164 do_ccstub(i);break;
9165 case INVCODE_STUB:
9166 do_invstub(i);break;
9167 case FP_STUB:
9168 do_cop1stub(i);break;
9169 case STORELR_STUB:
9170 do_unalignedwritestub(i);break;
9171 }
9172 }
9173
9ad4d757 9174 if (instr_addr0_override)
9175 instr_addr[0] = instr_addr0_override;
9176
93c0345b 9177#if 0
9178 /* check for improper expiration */
9179 for (i = 0; i < ARRAY_SIZE(jumps); i++) {
9180 int j;
9181 if (!jumps[i])
9182 continue;
9183 for (j = 0; j < jumps[i]->count; j++)
9184 assert(jumps[i]->e[j].stub < beginning || (u_char *)jumps[i]->e[j].stub > out);
9185 }
9186#endif
9187
57871462 9188 /* Pass 9 - Linker */
9189 for(i=0;i<linkcount;i++)
9190 {
643aeae3 9191 assem_debug("%p -> %8x\n",link_addr[i].addr,link_addr[i].target);
57871462 9192 literal_pool(64);
104df9d3 9193 if (!link_addr[i].internal)
57871462 9194 {
643aeae3 9195 void *stub = out;
9196 void *addr = check_addr(link_addr[i].target);
9197 emit_extjump(link_addr[i].addr, link_addr[i].target);
9198 if (addr) {
9199 set_jump_target(link_addr[i].addr, addr);
104df9d3 9200 ndrc_add_jump_out(link_addr[i].target,stub);
57871462 9201 }
643aeae3 9202 else
9203 set_jump_target(link_addr[i].addr, stub);
57871462 9204 }
9205 else
9206 {
9207 // Internal branch
643aeae3 9208 int target=(link_addr[i].target-start)>>2;
57871462 9209 assert(target>=0&&target<slen);
9210 assert(instr_addr[target]);
9211 //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
643aeae3 9212 //set_jump_target_fillslot(link_addr[i].addr,instr_addr[target],link_addr[i].ext>>1);
57871462 9213 //#else
643aeae3 9214 set_jump_target(link_addr[i].addr, instr_addr[target]);
57871462 9215 //#endif
9216 }
9217 }
3d680478 9218
9219 u_int source_len = slen*4;
cf95b4f0 9220 if (dops[slen-1].itype == INTCALL && source_len > 4)
3d680478 9221 // no need to treat the last instruction as compiled
9222 // as interpreter fully handles it
9223 source_len -= 4;
9224
9225 if ((u_char *)copy + source_len > (u_char *)shadow + sizeof(shadow))
9226 copy = shadow;
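// The source words are copied to 'copy' further below and kept with the block
// (block->copy), so a block suspected dirty can later be compared against the
// current RAM contents to decide whether it is still valid.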
9227
57871462 9228 // External Branch Targets (jump_in)
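// Count the entry points first (the block start plus every branch target that
// produced code) so new_block_info() can size the jump_in[] array, then emit a
// register-loading entry stub for each of them below.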
104df9d3 9229 int jump_in_count = 1;
9230 assert(instr_addr[0]);
9231 for (i = 1; i < slen; i++)
9232 {
9233 if (dops[i].bt && instr_addr[i])
9234 jump_in_count++;
9235 }
9236
9237 struct block_info *block =
9238 new_block_info(start, slen * 4, source, copy, beginning, jump_in_count);
9239 block->reg_sv_flags = state_rflags;
9240
9241 int jump_in_i = 0;
9242 for (i = 0; i < slen; i++)
57871462 9243 {
104df9d3 9244 if ((i == 0 || dops[i].bt) && instr_addr[i])
57871462 9245 {
104df9d3 9246 assem_debug("%p (%d) <- %8x\n", instr_addr[i], i, start + i*4);
9247 u_int vaddr = start + i*4;
9248
9249 literal_pool(256);
9250 void *entry = out;
9251 load_regs_entry(i);
9252 if (entry == out)
9253 entry = instr_addr[i];
9254 else
9255 emit_jmp(instr_addr[i]);
9256
9257 block->jump_in[jump_in_i].vaddr = vaddr;
9258 block->jump_in[jump_in_i].addr = entry;
9259 jump_in_i++;
57871462 9260 }
9261 }
104df9d3 9262 assert(jump_in_i == jump_in_count);
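// Register the block's primary entry point in the hash table so the next
// lookup of this address hits immediately.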
9263 hash_table_add(block->jump_in[0].vaddr, block->jump_in[0].addr);
57871462 9264 // Write out the literal pool if necessary
9265 literal_pool(0);
9266 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
9267 // Align code
9268 if(((u_int)out)&7) emit_addnop(13);
9269 #endif
01d26796 9270 assert(out - (u_char *)beginning < MAX_OUTPUT_BLOCK_SIZE);
643aeae3 9271 //printf("shadow buffer: %p-%p\n",copy,(u_char *)copy+slen*4);
3d680478 9272 memcpy(copy, source, source_len);
9273 copy += source_len;
9f51b4b9 9274
d148d265 9275 end_block(beginning);
9f51b4b9 9276
57871462 9277 // If we're within 256K of the end of the buffer,
9278 // start over from the beginning. (Is 256K enough?)
2a014d73 9279 if (out > ndrc->translation_cache + sizeof(ndrc->translation_cache) - MAX_OUTPUT_BLOCK_SIZE)
9280 out = ndrc->translation_cache;
9f51b4b9 9281
57871462 9282 // Trap writes to any of the pages we compiled
104df9d3 9283 mark_invalid_code(start, slen*4, 0);
9f51b4b9 9284
57871462 9285 /* Pass 10 - Free memory by expiring oldest blocks */
9f51b4b9 9286
4149788d 9287 pass10_expire_blocks();
9288
37387d8b 9289#ifdef ASSEM_PRINT
9290 fflush(stdout);
9291#endif
ece032e6 9292 stat_inc(stat_bc_direct);
57871462 9293 return 0;
9294}
b9b61529 9295
9296// vim:shiftwidth=2:expandtab