1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3  * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
4  */
5
6 #include "blockcache.h"
7 #include "debug.h"
8 #include "disassembler.h"
9 #include "emitter.h"
10 #include "interpreter.h"
11 #include "lightrec-config.h"
12 #include "lightning-wrapper.h"
13 #include "lightrec.h"
14 #include "memmanager.h"
15 #include "reaper.h"
16 #include "recompiler.h"
17 #include "regcache.h"
18 #include "optimizer.h"
19
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <limits.h>
23 #if ENABLE_THREADED_COMPILER
24 #include <stdatomic.h>
25 #endif
26 #include <stdbool.h>
27 #include <stddef.h>
28 #include <string.h>
29 #if ENABLE_TINYMM
30 #include <tinymm.h>
31 #endif
32
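/* Build a mask with bits h..l (inclusive) set,
 * e.g. GENMASK(15, 8) == 0xff00. */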
33 #define GENMASK(h, l) \
34         (((uintptr_t)-1 << (l)) & ((uintptr_t)-1 >> (__WORDSIZE - 1 - (h))))
35
36 static struct block * lightrec_precompile_block(struct lightrec_state *state,
37                                                 u32 pc);
38 static bool lightrec_block_is_fully_tagged(const struct block *block);
39
40 static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
41 static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);
42
43 static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
44                                 void *host, u32 addr, u8 data)
45 {
46         *(u8 *)host = data;
47
48         if (!state->invalidate_from_dma_only)
49                 lightrec_invalidate(state, addr, 1);
50 }
51
52 static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
53                                 void *host, u32 addr, u16 data)
54 {
55         *(u16 *)host = HTOLE16(data);
56
57         if (!state->invalidate_from_dma_only)
58                 lightrec_invalidate(state, addr, 2);
59 }
60
61 static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
62                                 void *host, u32 addr, u32 data)
63 {
64         *(u32 *)host = HTOLE32(data);
65
66         if (!state->invalidate_from_dma_only)
67                 lightrec_invalidate(state, addr, 4);
68 }
69
70 static u8 lightrec_default_lb(struct lightrec_state *state,
71                               u32 opcode, void *host, u32 addr)
72 {
73         return *(u8 *)host;
74 }
75
76 static u16 lightrec_default_lh(struct lightrec_state *state,
77                                u32 opcode, void *host, u32 addr)
78 {
79         return LE16TOH(*(u16 *)host);
80 }
81
82 static u32 lightrec_default_lw(struct lightrec_state *state,
83                                u32 opcode, void *host, u32 addr)
84 {
85         return LE32TOH(*(u32 *)host);
86 }
87
88 static const struct lightrec_mem_map_ops lightrec_default_ops = {
89         .sb = lightrec_default_sb,
90         .sh = lightrec_default_sh,
91         .sw = lightrec_default_sw,
92         .lb = lightrec_default_lb,
93         .lh = lightrec_default_lh,
94         .lw = lightrec_default_lw,
95 };
96
97 static void __segfault_cb(struct lightrec_state *state, u32 addr,
98                           const struct block *block)
99 {
100         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
101         pr_err("Segmentation fault in recompiled code: invalid "
102                "load/store at address 0x%08x\n", addr);
103         if (block)
104                 pr_err("Was executing block PC 0x%08x\n", block->pc);
105 }
106
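/* The SWL/SWR and LWL/LWR helpers emulate the MIPS unaligned store/load
 * instructions with an aligned read-modify-write of the containing 32-bit
 * word, going through the map's handlers so that I/O accesses and code
 * invalidation are still performed. */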
107 static void lightrec_swl(struct lightrec_state *state,
108                          const struct lightrec_mem_map_ops *ops,
109                          u32 opcode, void *host, u32 addr, u32 data)
110 {
111         unsigned int shift = addr & 0x3;
112         unsigned int mask = GENMASK(31, (shift + 1) * 8);
113         u32 old_data;
114
115         /* Align to 32 bits */
116         addr &= ~3;
117         host = (void *)((uintptr_t)host & ~3);
118
119         old_data = ops->lw(state, opcode, host, addr);
120
121         data = (data >> ((3 - shift) * 8)) | (old_data & mask);
122
123         ops->sw(state, opcode, host, addr, data);
124 }
125
126 static void lightrec_swr(struct lightrec_state *state,
127                          const struct lightrec_mem_map_ops *ops,
128                          u32 opcode, void *host, u32 addr, u32 data)
129 {
130         unsigned int shift = addr & 0x3;
131         unsigned int mask = (1 << (shift * 8)) - 1;
132         u32 old_data;
133
134         /* Align to 32 bits */
135         addr &= ~3;
136         host = (void *)((uintptr_t)host & ~3);
137
138         old_data = ops->lw(state, opcode, host, addr);
139
140         data = (data << (shift * 8)) | (old_data & mask);
141
142         ops->sw(state, opcode, host, addr, data);
143 }
144
145 static void lightrec_swc2(struct lightrec_state *state, union code op,
146                           const struct lightrec_mem_map_ops *ops,
147                           void *host, u32 addr)
148 {
149         u32 data = lightrec_mfc2(state, op.i.rt);
150
151         ops->sw(state, op.opcode, host, addr, data);
152 }
153
154 static u32 lightrec_lwl(struct lightrec_state *state,
155                         const struct lightrec_mem_map_ops *ops,
156                         u32 opcode, void *host, u32 addr, u32 data)
157 {
158         unsigned int shift = addr & 0x3;
159         unsigned int mask = (1 << (24 - shift * 8)) - 1;
160         u32 old_data;
161
162         /* Align to 32 bits */
163         addr &= ~3;
164         host = (void *)((uintptr_t)host & ~3);
165
166         old_data = ops->lw(state, opcode, host, addr);
167
168         return (data & mask) | (old_data << (24 - shift * 8));
169 }
170
171 static u32 lightrec_lwr(struct lightrec_state *state,
172                         const struct lightrec_mem_map_ops *ops,
173                         u32 opcode, void *host, u32 addr, u32 data)
174 {
175         unsigned int shift = addr & 0x3;
176         unsigned int mask = GENMASK(31, 32 - shift * 8);
177         u32 old_data;
178
179         /* Align to 32 bits */
180         addr &= ~3;
181         host = (void *)((uintptr_t)host & ~3);
182
183         old_data = ops->lw(state, opcode, host, addr);
184
185         return (data & mask) | (old_data >> (shift * 8));
186 }
187
188 static void lightrec_lwc2(struct lightrec_state *state, union code op,
189                           const struct lightrec_mem_map_ops *ops,
190                           void *host, u32 addr)
191 {
192         u32 data = ops->lw(state, op.opcode, host, addr);
193
194         lightrec_mtc2(state, op.i.rt, data);
195 }
196
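/* Clear the code LUT entries covering [addr, addr + len) when the target is
 * kernel/user RAM, so that stale compiled code in that range can no longer
 * be reached from the LUT. */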
197 static void lightrec_invalidate_map(struct lightrec_state *state,
198                 const struct lightrec_mem_map *map, u32 addr, u32 len)
199 {
200         if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
201                 memset(&state->code_lut[lut_offset(addr)], 0,
202                        ((len + 3) / 4) * sizeof(void *));
203         }
204 }
205
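/* Find the memory map containing the KUNSEG address 'kaddr', resolve mirrors
 * to the underlying map, and optionally return the corresponding host
 * pointer. Returns NULL if the address is not mapped. */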
206 const struct lightrec_mem_map *
207 lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
208 {
209         const struct lightrec_mem_map *map;
210         unsigned int i;
211         u32 addr;
212
213         for (i = 0; i < state->nb_maps; i++) {
214                 const struct lightrec_mem_map *mapi = &state->maps[i];
215
216                 if (kaddr >= mapi->pc && kaddr < mapi->pc + mapi->length) {
217                         map = mapi;
218                         break;
219                 }
220         }
221
222         if (i == state->nb_maps)
223                 return NULL;
224
225         addr = kaddr - map->pc;
226
227         while (map->mirror_of)
228                 map = map->mirror_of;
229
230         if (host)
231                 *host = map->address + addr;
232
233         return map;
234 }
235
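/* Perform the load/store described by 'op': compute the effective address,
 * pick the map's handlers (or the direct-memory defaults), tag the opcode's
 * I/O mode in 'flags' if requested, then dispatch on the opcode.
 * Returns the loaded value for loads, 0 for stores. */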
236 u32 lightrec_rw(struct lightrec_state *state, union code op,
237                 u32 addr, u32 data, u16 *flags, struct block *block)
238 {
239         const struct lightrec_mem_map *map;
240         const struct lightrec_mem_map_ops *ops;
241         u32 opcode = op.opcode;
242         void *host;
243
244         addr += (s16) op.i.imm;
245
246         map = lightrec_get_map(state, &host, kunseg(addr));
247         if (!map) {
248                 __segfault_cb(state, addr, block);
249                 return 0;
250         }
251
252         if (unlikely(map->ops)) {
253                 if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
254                         *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
255
256                 ops = map->ops;
257         } else {
258                 if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
259                         *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
260
261                 ops = &lightrec_default_ops;
262         }
263
264         switch (op.i.op) {
265         case OP_SB:
266                 ops->sb(state, opcode, host, addr, (u8) data);
267                 return 0;
268         case OP_SH:
269                 ops->sh(state, opcode, host, addr, (u16) data);
270                 return 0;
271         case OP_SWL:
272                 lightrec_swl(state, ops, opcode, host, addr, data);
273                 return 0;
274         case OP_SWR:
275                 lightrec_swr(state, ops, opcode, host, addr, data);
276                 return 0;
277         case OP_SW:
278                 ops->sw(state, opcode, host, addr, data);
279                 return 0;
280         case OP_SWC2:
281                 lightrec_swc2(state, op, ops, host, addr);
282                 return 0;
283         case OP_LB:
284                 return (s32) (s8) ops->lb(state, opcode, host, addr);
285         case OP_LBU:
286                 return ops->lb(state, opcode, host, addr);
287         case OP_LH:
288                 return (s32) (s16) ops->lh(state, opcode, host, addr);
289         case OP_LHU:
290                 return ops->lh(state, opcode, host, addr);
291         case OP_LWC2:
292                 lightrec_lwc2(state, op, ops, host, addr);
293                 return 0;
294         case OP_LWL:
295                 return lightrec_lwl(state, ops, opcode, host, addr, data);
296         case OP_LWR:
297                 return lightrec_lwr(state, ops, opcode, host, addr, data);
298         case OP_LW:
299         default:
300                 return ops->lw(state, opcode, host, addr);
301         }
302 }
303
304 static void lightrec_rw_helper(struct lightrec_state *state,
305                                union code op, u16 *flags,
306                                struct block *block)
307 {
308         u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
309                               state->regs.gpr[op.i.rt], flags, block);
310
311         switch (op.i.op) {
312         case OP_LB:
313         case OP_LBU:
314         case OP_LH:
315         case OP_LHU:
316         case OP_LWL:
317         case OP_LWR:
318         case OP_LW:
319                 if (op.i.rt)
320                         state->regs.gpr[op.i.rt] = ret;
321         default: /* fall-through */
322                 break;
323         }
324 }
325
326 static void lightrec_rw_cb(struct lightrec_state *state)
327 {
328         lightrec_rw_helper(state, (union code)state->c_wrapper_arg, NULL, NULL);
329 }
330
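/* Slow path for memory accesses that have not been tagged yet: recover the
 * block and opcode offset from the data packed in c_wrapper_arg, perform the
 * access (which tags the opcode), and flag the block for recompilation once
 * it carries the new tag. */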
331 static void lightrec_rw_generic_cb(struct lightrec_state *state)
332 {
333         struct block *block;
334         struct opcode *op;
335         bool was_tagged;
336         u32 arg = state->c_wrapper_arg;
337         u16 offset = (u16)arg;
338
339         block = lightrec_find_block_from_lut(state->block_cache,
340                                              arg >> 16, state->next_pc);
341         if (unlikely(!block)) {
342                 pr_err("rw_generic: No block found in LUT for PC 0x%x offset 0x%x\n",
343                          state->next_pc, offset);
344                 return;
345         }
346
347         op = &block->opcode_list[offset];
348         was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(op->flags);
349
350         lightrec_rw_helper(state, op->c, &op->flags, block);
351
352         if (!was_tagged) {
353                 pr_debug("Opcode of block at PC 0x%08x has been tagged - flag "
354                          "for recompilation\n", block->pc);
355
356                 block->flags |= BLOCK_SHOULD_RECOMPILE;
357         }
358 }
359
360 static u32 clamp_s32(s32 val, s32 min, s32 max)
361 {
362         return val < min ? min : val > max ? max : val;
363 }
364
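/* Read GTE data register 'reg'. Most registers are returned as-is, but the
 * IR-style registers are sign- or zero-extended, and registers 28/29 are
 * rebuilt by saturating IR1-IR3 down to 5 bits per component. */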
365 static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
366 {
367         s16 gteir1, gteir2, gteir3;
368
369         switch (reg) {
370         case 1:
371         case 3:
372         case 5:
373         case 8:
374         case 9:
375         case 10:
376         case 11:
377                 return (s32)(s16) state->regs.cp2d[reg];
378         case 7:
379         case 16:
380         case 17:
381         case 18:
382         case 19:
383                 return (u16) state->regs.cp2d[reg];
384         case 28:
385         case 29:
386                 gteir1 = (s16) state->regs.cp2d[9];
387                 gteir2 = (s16) state->regs.cp2d[10];
388                 gteir3 = (s16) state->regs.cp2d[11];
389
390                 return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
391                         clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
392                         clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
393         case 15:
394                 reg = 14;
395         default: /* fall-through */
396                 return state->regs.cp2d[reg];
397         }
398 }
399
400 u32 lightrec_mfc(struct lightrec_state *state, union code op)
401 {
402         if (op.i.op == OP_CP0)
403                 return state->regs.cp0[op.r.rd];
404         else if (op.r.rs == OP_CP2_BASIC_MFC2)
405                 return lightrec_mfc2(state, op.r.rd);
406         else
407                 return state->regs.cp2c[op.r.rd];
408 }
409
410 static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
411 {
412         u32 status, oldstatus, cause;
413
414         switch (reg) {
415         case 1:
416         case 4:
417         case 8:
418         case 14:
419         case 15:
420                 /* Those registers are read-only */
421                 return;
422         default:
423                 break;
424         }
425
426         if (reg == 12) {
427                 status = state->regs.cp0[12];
428                 oldstatus = status;
429
430                 if (status & ~data & BIT(16)) {
431                         state->ops.enable_ram(state, true);
432                         lightrec_invalidate_all(state);
433                 } else if (~status & data & BIT(16)) {
434                         state->ops.enable_ram(state, false);
435                 }
436         }
437
438         if (reg == 13) {
439                 state->regs.cp0[13] &= ~0x300;
440                 state->regs.cp0[13] |= data & 0x300;
441         } else {
442                 state->regs.cp0[reg] = data;
443         }
444
445         if (reg == 12 || reg == 13) {
446                 cause = state->regs.cp0[13];
447                 status = state->regs.cp0[12];
448
449                 /* Handle software interrupts */
450                 if ((status & cause & 0x300) && (status & BIT(0)))
451                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
452
453                 /* Handle hardware interrupts */
454                 if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
455                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
456         }
457 }
458
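/* Count the leading bits that are equal to the sign bit: leading zeroes for
 * positive values, leading ones for negative values. Used to emulate the GTE
 * LZCS/LZCR pair in lightrec_mtc2(). */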
459 static u32 count_leading_bits(s32 data)
460 {
461         u32 cnt = 33;
462
463 #ifdef __has_builtin
464 #if __has_builtin(__builtin_clrsb)
465         return 1 + __builtin_clrsb(data);
466 #endif
467 #endif
468
469         data = (data ^ (data >> 31)) << 1;
470
471         do {
472                 cnt -= 1;
473                 data >>= 1;
474         } while (data);
475
476         return cnt;
477 }
478
479 static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
480 {
481         switch (reg) {
482         case 15:
483                 state->regs.cp2d[12] = state->regs.cp2d[13];
484                 state->regs.cp2d[13] = state->regs.cp2d[14];
485                 state->regs.cp2d[14] = data;
486                 break;
487         case 28:
488                 state->regs.cp2d[9] = (data << 7) & 0xf80;
489                 state->regs.cp2d[10] = (data << 2) & 0xf80;
490                 state->regs.cp2d[11] = (data >> 3) & 0xf80;
491                 break;
492         case 31:
493                 return;
494         case 30:
495                 state->regs.cp2d[31] = count_leading_bits((s32) data);
496         default: /* fall-through */
497                 state->regs.cp2d[reg] = data;
498                 break;
499         }
500 }
501
502 static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
503 {
504         switch (reg) {
505         case 4:
506         case 12:
507         case 20:
508         case 26:
509         case 27:
510         case 29:
511         case 30:
512                 data = (s32)(s16) data;
513                 break;
514         case 31:
515                 data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
516         default: /* fall-through */
517                 break;
518         }
519
520         state->regs.cp2c[reg] = data;
521 }
522
523 void lightrec_mtc(struct lightrec_state *state, union code op, u32 data)
524 {
525         if (op.i.op == OP_CP0)
526                 lightrec_mtc0(state, op.r.rd, data);
527         else if (op.r.rs == OP_CP2_BASIC_CTC2)
528                 lightrec_ctc2(state, op.r.rd, data);
529         else
530                 lightrec_mtc2(state, op.r.rd, data);
531 }
532
533 static void lightrec_mtc_cb(struct lightrec_state *state)
534 {
535         union code op = (union code) state->c_wrapper_arg;
536
537         lightrec_mtc(state, op, state->regs.gpr[op.r.rt]);
538 }
539
540 void lightrec_rfe(struct lightrec_state *state)
541 {
542         u32 status;
543
544         /* Read CP0 Status register (r12) */
545         status = state->regs.cp0[12];
546
547         /* Switch the bits */
548         status = ((status & 0x3c) >> 2) | (status & ~0xf);
549
550         /* Write it back */
551         lightrec_mtc0(state, 12, status);
552 }
553
554 void lightrec_cp(struct lightrec_state *state, union code op)
555 {
556         if (op.i.op == OP_CP0) {
557                 pr_err("Invalid CP opcode to coprocessor #0\n");
558                 return;
559         }
560
561         (*state->ops.cop2_op)(state, op.opcode);
562 }
563
564 static void lightrec_cp_cb(struct lightrec_state *state)
565 {
566         lightrec_cp(state, (union code) state->c_wrapper_arg);
567 }
568
569 static void lightrec_syscall_cb(struct lightrec_state *state)
570 {
571         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SYSCALL);
572 }
573
574 static void lightrec_break_cb(struct lightrec_state *state)
575 {
576         lightrec_set_exit_flags(state, LIGHTREC_EXIT_BREAK);
577 }
578
579 struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
580 {
581         struct block *block = lightrec_find_block(state->block_cache, pc);
582
583         if (block && lightrec_block_is_outdated(state, block)) {
584                 pr_debug("Block at PC 0x%08x is outdated!\n", block->pc);
585
586                 /* Make sure the recompiler isn't processing the block we'll
587                  * destroy */
588                 if (ENABLE_THREADED_COMPILER)
589                         lightrec_recompiler_remove(state->rec, block);
590
591                 lightrec_unregister_block(state->block_cache, block);
592                 remove_from_code_lut(state->block_cache, block);
593                 lightrec_free_block(state, block);
594                 block = NULL;
595         }
596
597         if (!block) {
598                 block = lightrec_precompile_block(state, pc);
599                 if (!block) {
600                         pr_err("Unable to recompile block at PC 0x%x\n", pc);
601                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
602                         return NULL;
603                 }
604
605                 lightrec_register_block(state->block_cache, block);
606         }
607
608         return block;
609 }
610
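/* C slow path of the dispatcher: get the block for 'pc', compiling, queueing
 * or interpreting it as needed, and loop until native code is available or an
 * exit condition is met. Stores the resulting PC in state->next_pc and
 * returns the code to jump to (or NULL). */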
611 static void * get_next_block_func(struct lightrec_state *state, u32 pc)
612 {
613         struct block *block;
614         bool should_recompile;
615         void *func;
616
617         for (;;) {
618                 func = state->code_lut[lut_offset(pc)];
619                 if (func && func != state->get_next_block)
620                         break;
621
622                 block = lightrec_get_block(state, pc);
623
624                 if (unlikely(!block))
625                         break;
626
627                 if (OPT_REPLACE_MEMSET && (block->flags & BLOCK_IS_MEMSET)) {
628                         func = state->memset_func;
629                         break;
630                 }
631
632                 should_recompile = block->flags & BLOCK_SHOULD_RECOMPILE &&
633                         !(block->flags & BLOCK_IS_DEAD);
634
635                 if (unlikely(should_recompile)) {
636                         pr_debug("Block at PC 0x%08x should recompile\n", pc);
637
638                         lightrec_unregister(MEM_FOR_CODE, block->code_size);
639
640                         if (ENABLE_THREADED_COMPILER)
641                                 lightrec_recompiler_add(state->rec, block);
642                         else
643                                 lightrec_compile_block(state->cstate, block);
644                 }
645
646                 if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
647                         func = lightrec_recompiler_run_first_pass(state, block, &pc);
648                 else
649                         func = block->function;
650
651                 if (likely(func))
652                         break;
653
654                 if (unlikely(block->flags & BLOCK_NEVER_COMPILE)) {
655                         pc = lightrec_emulate_block(state, block, pc);
656
657                 } else if (!ENABLE_THREADED_COMPILER) {
658                         /* Block wasn't compiled yet - run the interpreter */
659                         if (block->flags & BLOCK_FULLY_TAGGED)
660                                 pr_debug("Block fully tagged, skipping first pass\n");
661                         else if (ENABLE_FIRST_PASS && likely(!should_recompile))
662                                 pc = lightrec_emulate_block(state, block, pc);
663
664                         /* Then compile it using the profiled data */
665                         lightrec_compile_block(state->cstate, block);
666                 } else {
667                         lightrec_recompiler_add(state->rec, block);
668                 }
669
670                 if (state->exit_flags != LIGHTREC_EXIT_NORMAL ||
671                     state->current_cycle >= state->target_cycle)
672                         break;
673         }
674
675         state->next_pc = pc;
676         return func;
677 }
678
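/* Called by the generated wrapper: convert the remaining cycle count back to
 * an absolute current_cycle, invoke the C callback, then return the (possibly
 * updated) number of cycles left to run. */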
679 static s32 c_function_wrapper(struct lightrec_state *state, s32 cycles_delta,
680                               void (*f)(struct lightrec_state *))
681 {
682         state->current_cycle = state->target_cycle - cycles_delta;
683
684         (*f)(state);
685
686         return state->target_cycle - state->current_cycle;
687 }
688
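/* Generate the trampoline through which recompiled code calls the C wrappers
 * registered in state->c_wrappers: one entry point per wrapper, with the
 * temporaries saved on the stack and the cycle counter kept in sync via
 * c_function_wrapper(). */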
689 static struct block * generate_wrapper(struct lightrec_state *state)
690 {
691         struct block *block;
692         jit_state_t *_jit;
693         unsigned int i;
694         int stack_ptr;
695         jit_word_t code_size;
696         jit_node_t *to_tramp, *to_fn_epilog;
697         jit_node_t *addr[C_WRAPPERS_COUNT - 1];
698
699         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
700         if (!block)
701                 goto err_no_mem;
702
703         _jit = jit_new_state();
704         if (!_jit)
705                 goto err_free_block;
706
707         jit_name("RW wrapper");
708         jit_note(__FILE__, __LINE__);
709
710         /* Wrapper entry point */
711         jit_prolog();
712         jit_tramp(256);
713
714         /* Add entry points; separate them by opcodes that increment
715          * LIGHTREC_REG_STATE (since we cannot touch other registers).
716          * The difference will then tell us which C function to call. */
717         for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
718                 jit_addi(LIGHTREC_REG_STATE, LIGHTREC_REG_STATE, __WORDSIZE / 8);
719                 addr[i - 1] = jit_indirect();
720         }
721
722         jit_epilog();
723         jit_prolog();
724
725         stack_ptr = jit_allocai(sizeof(uintptr_t) * NUM_TEMPS);
726
727         /* Save all temporaries on stack */
728         for (i = 0; i < NUM_TEMPS; i++)
729                 jit_stxi(stack_ptr + i * sizeof(uintptr_t), JIT_FP, JIT_R(i));
730
731         /* Jump to the trampoline */
732         to_tramp = jit_jmpi();
733
734         /* The trampoline will jump back here */
735         to_fn_epilog = jit_label();
736
737         /* Restore temporaries from stack */
738         for (i = 0; i < NUM_TEMPS; i++)
739                 jit_ldxi(JIT_R(i), JIT_FP, stack_ptr + i * sizeof(uintptr_t));
740
741         jit_ret();
742         jit_epilog();
743
744         /* Trampoline entry point.
745          * The sole purpose of the trampoline is to trick Lightning into not
746          * saving/restoring the callee-saved register LIGHTREC_REG_CYCLE, since we
747          * do want to return to the caller with this register modified. */
748         jit_prolog();
749         jit_tramp(256);
750         jit_patch(to_tramp);
751
752         /* Retrieve the wrapper function */
753         jit_ldxi(JIT_R0, LIGHTREC_REG_STATE,
754                  offsetof(struct lightrec_state, c_wrappers));
755
756         /* Restore LIGHTREC_REG_STATE to its correct value */
757         jit_movi(LIGHTREC_REG_STATE, (uintptr_t) state);
758
759         jit_prepare();
760         jit_pushargr(LIGHTREC_REG_STATE);
761         jit_pushargr(LIGHTREC_REG_CYCLE);
762         jit_pushargr(JIT_R0);
763         jit_finishi(c_function_wrapper);
764         jit_retval_i(LIGHTREC_REG_CYCLE);
765
766         jit_patch_at(jit_jmpi(), to_fn_epilog);
767         jit_epilog();
768
769         block->_jit = _jit;
770         block->function = jit_emit();
771         block->opcode_list = NULL;
772         block->flags = 0;
773         block->nb_ops = 0;
774
775         state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;
776
777         for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
778                 state->wrappers_eps[i] = jit_address(addr[i]);
779
780         jit_get_code(&code_size);
781         lightrec_register(MEM_FOR_CODE, code_size);
782
783         block->code_size = code_size;
784
785         if (ENABLE_DISASSEMBLER) {
786                 pr_debug("Wrapper block:\n");
787                 jit_disassemble();
788         }
789
790         jit_clear_state();
791         return block;
792
793 err_free_block:
794         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
795 err_no_mem:
796         pr_err("Unable to compile wrapper: Out of memory\n");
797         return NULL;
798 }
799
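/* Host-side replacement for blocks flagged BLOCK_IS_MEMSET (see
 * OPT_REPLACE_MEMSET): zero gpr[5] (a1) 32-bit words starting at the address
 * held in gpr[4] (a0), invalidate the range, and return a rough cycle cost. */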
800 static u32 lightrec_memset(struct lightrec_state *state)
801 {
802         u32 kunseg_pc = kunseg(state->regs.gpr[4]);
803         void *host;
804         const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
805         u32 length = state->regs.gpr[5] * 4;
806
807         if (!map) {
808                 pr_err("Unable to find memory map for memset target address "
809                        "0x%x\n", kunseg_pc);
810                 return 0;
811         }
812
813         pr_debug("Calling host memset, PC 0x%x (host address 0x%" PRIxPTR ") for %u bytes\n",
814                  kunseg_pc, (uintptr_t)host, length);
815         memset(host, 0, length);
816
817         if (!state->invalidate_from_dma_only)
818                 lightrec_invalidate_map(state, map, kunseg_pc, length);
819
820         /* Rough estimation of the number of cycles consumed */
821         return 8 + 5 * ((length + 3) / 4);
822 }
823
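/* Generate the main dispatch loop: jump into the block's code, store the next
 * PC on return, check the remaining cycle budget, then find the next block
 * either through the code LUT fast path (code running from RAM) or by calling
 * get_next_block_func(). */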
824 static struct block * generate_dispatcher(struct lightrec_state *state)
825 {
826         struct block *block;
827         jit_state_t *_jit;
828         jit_node_t *to_end, *to_c, *loop, *addr, *addr2, *addr3;
829         unsigned int i;
830         u32 offset, ram_len;
831         jit_word_t code_size;
832
833         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
834         if (!block)
835                 goto err_no_mem;
836
837         _jit = jit_new_state();
838         if (!_jit)
839                 goto err_free_block;
840
841         jit_name("dispatcher");
842         jit_note(__FILE__, __LINE__);
843
844         jit_prolog();
845         jit_frame(256);
846
847         jit_getarg(JIT_R0, jit_arg());
848         jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());
849
850         /* Force all callee-saved registers to be pushed on the stack */
851         for (i = 0; i < NUM_REGS; i++)
852                 jit_movr(JIT_V(i), JIT_V(i));
853
854         /* Pass lightrec_state structure to blocks, using the last callee-saved
855          * register that Lightning provides */
856         jit_movi(LIGHTREC_REG_STATE, (intptr_t) state);
857
858         loop = jit_label();
859
860         /* Call the block's code */
861         jit_jmpr(JIT_R0);
862
863         if (OPT_REPLACE_MEMSET) {
864                 /* Blocks will jump here when they need to call
865                  * lightrec_memset() */
866                 addr3 = jit_indirect();
867
868                 jit_prepare();
869                 jit_pushargr(LIGHTREC_REG_STATE);
870                 jit_finishi(lightrec_memset);
871
872                 jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
873                             offsetof(struct lightrec_state, regs.gpr[31]));
874
875                 jit_retval(JIT_R0);
876                 jit_subr(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, JIT_R0);
877         }
878
879         /* The block will jump here, with the number of cycles remaining in
880          * LIGHTREC_REG_CYCLE */
881         addr2 = jit_indirect();
882
883         /* Store back the next_pc to the lightrec_state structure */
884         offset = offsetof(struct lightrec_state, next_pc);
885         jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);
886
887         /* Jump to end if state->target_cycle <= state->current_cycle */
888         to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);
889
890         /* Convert next PC to KUNSEG and avoid mirrors */
891         ram_len = state->maps[PSX_MAP_KERNEL_USER_RAM].length;
892         jit_andi(JIT_R0, JIT_V0, 0x10000000 | (ram_len - 1));
893         to_c = jit_bgei(JIT_R0, ram_len);
894
895         /* Fast path: code is running from RAM, use the code LUT */
896         if (__WORDSIZE == 64)
897                 jit_lshi(JIT_R0, JIT_R0, 1);
898         jit_addr(JIT_R0, JIT_R0, LIGHTREC_REG_STATE);
899         jit_ldxi(JIT_R0, JIT_R0, offsetof(struct lightrec_state, code_lut));
900
901         /* If we get non-NULL, loop */
902         jit_patch_at(jit_bnei(JIT_R0, 0), loop);
903
904         /* Slow path: call C function get_next_block_func() */
905         jit_patch(to_c);
906
907         if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
908                 /* We may call the interpreter - update state->current_cycle */
909                 jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
910                            offsetof(struct lightrec_state, target_cycle));
911                 jit_subr(JIT_R1, JIT_R2, LIGHTREC_REG_CYCLE);
912                 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
913                            LIGHTREC_REG_STATE, JIT_R1);
914         }
915
916         /* The code LUT will be set to this address when the block at the target
917          * PC has been preprocessed but not yet compiled by the threaded
918          * recompiler */
919         addr = jit_indirect();
920
921         /* Get the next block */
922         jit_prepare();
923         jit_pushargr(LIGHTREC_REG_STATE);
924         jit_pushargr(JIT_V0);
925         jit_finishi(&get_next_block_func);
926         jit_retval(JIT_R0);
927
928         if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
929                 /* The interpreter may have updated state->current_cycle and
930                  * state->target_cycle - recalc the delta */
931                 jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
932                            offsetof(struct lightrec_state, current_cycle));
933                 jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
934                            offsetof(struct lightrec_state, target_cycle));
935                 jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
936         }
937
938         /* If we get non-NULL, loop */
939         jit_patch_at(jit_bnei(JIT_R0, 0), loop);
940
941         /* When exiting, the recompiled code will jump to that address */
942         jit_note(__FILE__, __LINE__);
943         jit_patch(to_end);
944
945         jit_retr(LIGHTREC_REG_CYCLE);
946         jit_epilog();
947
948         block->_jit = _jit;
949         block->function = jit_emit();
950         block->opcode_list = NULL;
951         block->flags = 0;
952         block->nb_ops = 0;
953
954         jit_get_code(&code_size);
955         lightrec_register(MEM_FOR_CODE, code_size);
956
957         block->code_size = code_size;
958
959         state->eob_wrapper_func = jit_address(addr2);
960         if (OPT_REPLACE_MEMSET)
961                 state->memset_func = jit_address(addr3);
962         state->get_next_block = jit_address(addr);
963
964         if (ENABLE_DISASSEMBLER) {
965                 pr_debug("Dispatcher block:\n");
966                 jit_disassemble();
967         }
968
969         /* We're done! */
970         jit_clear_state();
971         return block;
972
973 err_free_block:
974         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
975 err_no_mem:
976         pr_err("Unable to compile dispatcher: Out of memory\n");
977         return NULL;
978 }
979
980 union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
981 {
982         void *host = NULL;
983
984         lightrec_get_map(state, &host, kunseg(pc));
985
986         const u32 *code = (u32 *)host;
987         return (union code) *code;
988 }
989
990 unsigned int lightrec_cycles_of_opcode(union code code)
991 {
992         return 2;
993 }
994
995 void lightrec_free_opcode_list(struct lightrec_state *state, struct block *block)
996 {
997         lightrec_free(state, MEM_FOR_IR,
998                       sizeof(*block->opcode_list) * block->nb_ops,
999                       block->opcode_list);
1000 }
1001
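/* Return the number of opcodes in the MIPS block starting at 'src': stop at a
 * syscall, or after an unconditional jump and its delay slot. */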
1002 static unsigned int lightrec_get_mips_block_len(const u32 *src)
1003 {
1004         unsigned int i;
1005         union code c;
1006
1007         for (i = 1; ; i++) {
1008                 c.opcode = LE32TOH(*src++);
1009
1010                 if (is_syscall(c))
1011                         return i;
1012
1013                 if (is_unconditional_jump(c))
1014                         return i + 1;
1015         }
1016 }
1017
1018 static struct opcode * lightrec_disassemble(struct lightrec_state *state,
1019                                             const u32 *src, unsigned int *len)
1020 {
1021         struct opcode *list;
1022         unsigned int i, length;
1023
1024         length = lightrec_get_mips_block_len(src);
1025
1026         list = lightrec_malloc(state, MEM_FOR_IR, sizeof(*list) * length);
1027         if (!list) {
1028                 pr_err("Unable to allocate memory\n");
1029                 return NULL;
1030         }
1031
1032         for (i = 0; i < length; i++) {
1033                 list[i].opcode = LE32TOH(src[i]);
1034                 list[i].flags = 0;
1035         }
1036
1037         *len = length * sizeof(u32);
1038
1039         return list;
1040 }
1041
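/* Disassemble and optimize the MIPS code at 'pc' into a new block. The native
 * code itself is only generated later, by lightrec_compile_block(). */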
1042 static struct block * lightrec_precompile_block(struct lightrec_state *state,
1043                                                 u32 pc)
1044 {
1045         struct opcode *list;
1046         struct block *block;
1047         void *host;
1048         const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
1049         const u32 *code = (u32 *) host;
1050         unsigned int length;
1051         bool fully_tagged;
1052
1053         if (!map)
1054                 return NULL;
1055
1056         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
1057         if (!block) {
1058                 pr_err("Unable to recompile block: Out of memory\n");
1059                 return NULL;
1060         }
1061
1062         list = lightrec_disassemble(state, code, &length);
1063         if (!list) {
1064                 lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1065                 return NULL;
1066         }
1067
1068         block->pc = pc;
1069         block->_jit = NULL;
1070         block->function = NULL;
1071         block->opcode_list = list;
1072         block->code = code;
1073         block->next = NULL;
1074         block->flags = 0;
1075         block->code_size = 0;
1076 #if ENABLE_THREADED_COMPILER
1077         block->op_list_freed = (atomic_flag)ATOMIC_FLAG_INIT;
1078 #endif
1079         block->nb_ops = length / sizeof(u32);
1080
1081         lightrec_optimize(state, block);
1082
1083         length = block->nb_ops * sizeof(u32);
1084
1085         lightrec_register(MEM_FOR_MIPS_CODE, length);
1086
1087         if (ENABLE_DISASSEMBLER) {
1088                 pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
1089                 lightrec_print_disassembly(block, code);
1090         }
1091
1092         pr_debug("Block size: %hu opcodes\n", block->nb_ops);
1093
1094         /* If the first opcode is an 'impossible' branch, never compile the
1095          * block */
1096         if (should_emulate(block->opcode_list))
1097                 block->flags |= BLOCK_NEVER_COMPILE;
1098
1099         fully_tagged = lightrec_block_is_fully_tagged(block);
1100         if (fully_tagged)
1101                 block->flags |= BLOCK_FULLY_TAGGED;
1102
1103         if (OPT_REPLACE_MEMSET && (block->flags & BLOCK_IS_MEMSET))
1104                 state->code_lut[lut_offset(pc)] = state->memset_func;
1105
1106         block->hash = lightrec_calculate_block_hash(block);
1107
1108         pr_debug("Recompile count: %u\n", state->nb_precompile++);
1109
1110         return block;
1111 }
1112
1113 static bool lightrec_block_is_fully_tagged(const struct block *block)
1114 {
1115         const struct opcode *op;
1116         unsigned int i;
1117
1118         for (i = 0; i < block->nb_ops; i++) {
1119                 op = &block->opcode_list[i];
1120
1121                 /* Check all loads/stores of the opcode list, and consider
1122                  * the block fully tagged only if every one of them has an
1123                  * I/O mode flag set. */
1124                 switch (op->c.i.op) {
1125                 case OP_LB:
1126                 case OP_LH:
1127                 case OP_LWL:
1128                 case OP_LW:
1129                 case OP_LBU:
1130                 case OP_LHU:
1131                 case OP_LWR:
1132                 case OP_SB:
1133                 case OP_SH:
1134                 case OP_SWL:
1135                 case OP_SW:
1136                 case OP_SWR:
1137                 case OP_LWC2:
1138                 case OP_SWC2:
1139                         if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
1140                                 return false;
1141                 default: /* fall-through */
1142                         continue;
1143                 }
1144         }
1145
1146         return true;
1147 }
1148
1149 static void lightrec_reap_block(struct lightrec_state *state, void *data)
1150 {
1151         struct block *block = data;
1152
1153         pr_debug("Reap dead block at PC 0x%08x\n", block->pc);
1154         lightrec_unregister_block(state->block_cache, block);
1155         lightrec_free_block(state, block);
1156 }
1157
1158 static void lightrec_reap_jit(struct lightrec_state *state, void *data)
1159 {
1160         _jit_destroy_state(data);
1161 }
1162
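/* Compile the block's opcode list to native code with GNU Lightning, patch
 * the local branches, publish the entry point in the code LUT, and reap any
 * older block that the new code covers. */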
1163 int lightrec_compile_block(struct lightrec_cstate *cstate,
1164                            struct block *block)
1165 {
1166         struct lightrec_state *state = cstate->state;
1167         struct lightrec_branch_target *target;
1168         bool op_list_freed = false, fully_tagged = false;
1169         struct block *block2;
1170         struct opcode *elm;
1171         jit_state_t *_jit, *oldjit;
1172         jit_node_t *start_of_block;
1173         bool skip_next = false;
1174         jit_word_t code_size;
1175         unsigned int i, j;
1176         u32 offset;
1177
1178         fully_tagged = lightrec_block_is_fully_tagged(block);
1179         if (fully_tagged)
1180                 block->flags |= BLOCK_FULLY_TAGGED;
1181
1182         _jit = jit_new_state();
1183         if (!_jit)
1184                 return -ENOMEM;
1185
1186         oldjit = block->_jit;
1187         block->_jit = _jit;
1188
1189         lightrec_regcache_reset(cstate->reg_cache);
1190         cstate->cycles = 0;
1191         cstate->nb_branches = 0;
1192         cstate->nb_local_branches = 0;
1193         cstate->nb_targets = 0;
1194
1195         jit_prolog();
1196         jit_tramp(256);
1197
1198         start_of_block = jit_label();
1199
1200         for (i = 0; i < block->nb_ops; i++) {
1201                 elm = &block->opcode_list[i];
1202
1203                 if (skip_next) {
1204                         skip_next = false;
1205                         continue;
1206                 }
1207
1208                 cstate->cycles += lightrec_cycles_of_opcode(elm->c);
1209
1210                 if (should_emulate(elm)) {
1211                         pr_debug("Branch at offset 0x%x will be emulated\n",
1212                                  i << 2);
1213
1214                         lightrec_emit_eob(cstate, block, i, false);
1215                         skip_next = !(elm->flags & LIGHTREC_NO_DS);
1216                 } else {
1217                         lightrec_rec_opcode(cstate, block, i);
1218                         skip_next = has_delay_slot(elm->c) &&
1219                                 !(elm->flags & LIGHTREC_NO_DS);
1220 #if _WIN32
1221                         /* FIXME: GNU Lightning on Windows seems to use our
1222                          * mapped registers as temporaries. Until the actual bug
1223                          * is found and fixed, unconditionally mark our
1224                          * registers as live here. */
1225                         lightrec_regcache_mark_live(cstate->reg_cache, _jit);
1226 #endif
1227                 }
1228         }
1229
1230         for (i = 0; i < cstate->nb_branches; i++)
1231                 jit_patch(cstate->branches[i]);
1232
1233         for (i = 0; i < cstate->nb_local_branches; i++) {
1234                 struct lightrec_branch *branch = &cstate->local_branches[i];
1235
1236                 pr_debug("Patch local branch to offset 0x%x\n",
1237                          branch->target << 2);
1238
1239                 if (branch->target == 0) {
1240                         jit_patch_at(branch->branch, start_of_block);
1241                         continue;
1242                 }
1243
1244                 for (j = 0; j < cstate->nb_targets; j++) {
1245                         if (cstate->targets[j].offset == branch->target) {
1246                                 jit_patch_at(branch->branch,
1247                                              cstate->targets[j].label);
1248                                 break;
1249                         }
1250                 }
1251
1252                 if (j == cstate->nb_targets)
1253                         pr_err("Unable to find branch target\n");
1254         }
1255
1256         jit_ldxi(JIT_R0, LIGHTREC_REG_STATE,
1257                  offsetof(struct lightrec_state, eob_wrapper_func));
1258
1259         jit_jmpr(JIT_R0);
1260
1261         jit_ret();
1262         jit_epilog();
1263
1264         block->function = jit_emit();
1265         block->flags &= ~BLOCK_SHOULD_RECOMPILE;
1266
1267         /* Add compiled function to the LUT */
1268         state->code_lut[lut_offset(block->pc)] = block->function;
1269
1270         if (ENABLE_THREADED_COMPILER) {
1271                 /* Since we might try to reap the same block multiple times,
1272                  * we need the reaper to wait until everything has been
1273                  * submitted, so that the duplicate entries can be dropped. */
1274                 lightrec_reaper_pause(state->reaper);
1275         }
1276
1277         /* Detect old blocks that have been covered by the new one */
1278         for (i = 0; i < cstate->nb_targets; i++) {
1279                 target = &cstate->targets[i];
1280
1281                 if (!target->offset)
1282                         continue;
1283
1284                 offset = block->pc + target->offset * sizeof(u32);
1285                 block2 = lightrec_find_block(state->block_cache, offset);
1286                 if (block2) {
1287                         /* No need to check if block2 is compilable - it must
1288                          * be, otherwise block wouldn't be compilable either */
1289
1290                         /* Set the "block dead" flag to prevent the dynarec from
1291                          * recompiling this block */
1292                         block2->flags |= BLOCK_IS_DEAD;
1293
1294                         /* If block2 was pending for compilation, cancel it.
1295                          * If it's being compiled right now, wait until it
1296                          * finishes. */
1297                         if (ENABLE_THREADED_COMPILER)
1298                                 lightrec_recompiler_remove(state->rec, block2);
1299                 }
1300
1301                 /* We know from now on that block2 (if present) isn't going to
1302                  * be compiled. We can override the LUT entry with our new
1303                  * block's entry point. */
1304                 offset = lut_offset(block->pc) + target->offset;
1305                 state->code_lut[offset] = jit_address(target->label);
1306
1307                 if (block2) {
1308                         pr_debug("Reap block 0x%08x as it's covered by block "
1309                                  "0x%08x\n", block2->pc, block->pc);
1310
1311                         /* Finally, reap the block. */
1312                         if (ENABLE_THREADED_COMPILER) {
1313                                 lightrec_reaper_add(state->reaper,
1314                                                     lightrec_reap_block,
1315                                                     block2);
1316                         } else {
1317                                 lightrec_unregister_block(state->block_cache, block2);
1318                                 lightrec_free_block(state, block2);
1319                         }
1320                 }
1321         }
1322
1323         if (ENABLE_THREADED_COMPILER)
1324                 lightrec_reaper_continue(state->reaper);
1325
1326         jit_get_code(&code_size);
1327         lightrec_register(MEM_FOR_CODE, code_size);
1328
1329         block->code_size = code_size;
1330
1331         if (ENABLE_DISASSEMBLER) {
1332                 pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
1333                 jit_disassemble();
1334         }
1335
1336         jit_clear_state();
1337
1338 #if ENABLE_THREADED_COMPILER
1339         if (fully_tagged)
1340                 op_list_freed = atomic_flag_test_and_set(&block->op_list_freed);
1341 #endif
1342         if (fully_tagged && !op_list_freed) {
1343                 pr_debug("Block PC 0x%08x is fully tagged"
1344                          " - free opcode list\n", block->pc);
1345                 lightrec_free_opcode_list(state, block);
1346                 block->opcode_list = NULL;
1347         }
1348
1349         if (oldjit) {
1350                 pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
1351                          block->pc);
1352
1353                 if (ENABLE_THREADED_COMPILER)
1354                         lightrec_reaper_add(state->reaper,
1355                                             lightrec_reap_jit, oldjit);
1356                 else
1357                         _jit_destroy_state(oldjit);
1358         }
1359
1360         return 0;
1361 }
1362
1363 static void lightrec_print_info(struct lightrec_state *state)
1364 {
1365         if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
1366                 pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
1367                         "MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
1368                         lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
1369                         lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
1370                         lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
1371                         lightrec_get_total_mem_usage() / 1024,
1372                        lightrec_get_average_ipi());
1373                 state->old_cycle_counter = state->current_cycle & ~0xfffffff;
1374         }
1375 }
1376
1377 u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
1378 {
1379         s32 (*func)(void *, s32) = (void *)state->dispatcher->function;
1380         void *block_trace;
1381         s32 cycles_delta;
1382
1383         state->exit_flags = LIGHTREC_EXIT_NORMAL;
1384
1385         /* Handle the cycle counter overflowing */
1386         if (unlikely(target_cycle < state->current_cycle))
1387                 target_cycle = UINT_MAX;
1388
1389         state->target_cycle = target_cycle;
1390         state->next_pc = pc;
1391
1392         block_trace = get_next_block_func(state, pc);
1393         if (block_trace) {
1394                 cycles_delta = state->target_cycle - state->current_cycle;
1395
1396                 cycles_delta = (*func)(block_trace, cycles_delta);
1397
1398                 state->current_cycle = state->target_cycle - cycles_delta;
1399         }
1400
1401         if (ENABLE_THREADED_COMPILER)
1402                 lightrec_reaper_reap(state->reaper);
1403
1404         if (LOG_LEVEL >= INFO_L)
1405                 lightrec_print_info(state);
1406
1407         return state->next_pc;
1408 }
1409
1410 u32 lightrec_execute_one(struct lightrec_state *state, u32 pc)
1411 {
1412         return lightrec_execute(state, pc, state->current_cycle);
1413 }
1414
1415 u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc)
1416 {
1417         struct block *block = lightrec_get_block(state, pc);
1418         if (!block)
1419                 return 0;
1420
1421         state->exit_flags = LIGHTREC_EXIT_NORMAL;
1422
1423         pc = lightrec_emulate_block(state, block, pc);
1424
1425         if (LOG_LEVEL >= INFO_L)
1426                 lightrec_print_info(state);
1427
1428         return pc;
1429 }
1430
1431 void lightrec_free_block(struct lightrec_state *state, struct block *block)
1432 {
1433         lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
1434         if (block->opcode_list)
1435                 lightrec_free_opcode_list(state, block);
1436         if (block->_jit)
1437                 _jit_destroy_state(block->_jit);
1438         lightrec_unregister(MEM_FOR_CODE, block->code_size);
1439         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1440 }
1441
1442 struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
1443 {
1444         struct lightrec_cstate *cstate;
1445
1446         cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
1447         if (!cstate)
1448                 return NULL;
1449
1450         cstate->reg_cache = lightrec_regcache_init(state);
1451         if (!cstate->reg_cache) {
1452                 lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
1453                 return NULL;
1454         }
1455
1456         cstate->state = state;
1457
1458         return cstate;
1459 }
1460
1461 void lightrec_free_cstate(struct lightrec_cstate *cstate)
1462 {
1463         lightrec_free_regcache(cstate->reg_cache);
1464         lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
1465 }
1466
1467 struct lightrec_state * lightrec_init(char *argv0,
1468                                       const struct lightrec_mem_map *map,
1469                                       size_t nb,
1470                                       const struct lightrec_ops *ops)
1471 {
1472         struct lightrec_state *state;
1473
1474         /* Sanity-check ops */
1475         if (!ops || !ops->cop2_op || !ops->enable_ram) {
1476                 pr_err("Missing callbacks in lightrec_ops structure\n");
1477                 return NULL;
1478         }
1479
1480         init_jit(argv0);
1481
1482         state = calloc(1, sizeof(*state) +
1483                        sizeof(*state->code_lut) * CODE_LUT_SIZE);
1484         if (!state)
1485                 goto err_finish_jit;
1486
1487         lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) +
1488                           sizeof(*state->code_lut) * CODE_LUT_SIZE);
1489
1490 #if ENABLE_TINYMM
1491         state->tinymm = tinymm_init(malloc, free, 4096);
1492         if (!state->tinymm)
1493                 goto err_free_state;
1494 #endif
1495
1496         state->block_cache = lightrec_blockcache_init(state);
1497         if (!state->block_cache)
1498                 goto err_free_tinymm;
1499
1500         if (ENABLE_THREADED_COMPILER) {
1501                 state->rec = lightrec_recompiler_init(state);
1502                 if (!state->rec)
1503                         goto err_free_block_cache;
1504
1505                 state->reaper = lightrec_reaper_init(state);
1506                 if (!state->reaper)
1507                         goto err_free_recompiler;
1508         } else {
1509                 state->cstate = lightrec_create_cstate(state);
1510                 if (!state->cstate)
1511                         goto err_free_block_cache;
1512         }
1513
1514         state->nb_maps = nb;
1515         state->maps = map;
1516
1517         memcpy(&state->ops, ops, sizeof(*ops));
1518
1519         state->dispatcher = generate_dispatcher(state);
1520         if (!state->dispatcher)
1521                 goto err_free_reaper;
1522
1523         state->c_wrapper_block = generate_wrapper(state);
1524         if (!state->c_wrapper_block)
1525                 goto err_free_dispatcher;
1526
1527         state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
1528         state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
1529         state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
1530         state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;
1531         state->c_wrappers[C_WRAPPER_SYSCALL] = lightrec_syscall_cb;
1532         state->c_wrappers[C_WRAPPER_BREAK] = lightrec_break_cb;
1533
1534         map = &state->maps[PSX_MAP_BIOS];
1535         state->offset_bios = (uintptr_t)map->address - map->pc;
1536
1537         map = &state->maps[PSX_MAP_SCRATCH_PAD];
1538         state->offset_scratch = (uintptr_t)map->address - map->pc;
1539
1540         map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
1541         state->offset_ram = (uintptr_t)map->address - map->pc;
1542
1543         if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
1544             state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
1545             state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
1546                 state->mirrors_mapped = true;
1547
1548         if (state->offset_bios == 0 &&
1549             state->offset_scratch == 0 &&
1550             state->offset_ram == 0 &&
1551             state->mirrors_mapped) {
1552                 pr_info("Memory map is perfect. Emitted code will be best.\n");
1553         } else {
1554                 pr_info("Memory map is sub-par. Emitted code will be slow.\n");
1555         }
1556
1557         return state;
1558
1559 err_free_dispatcher:
1560         lightrec_free_block(state, state->dispatcher);
1561 err_free_reaper:
1562         if (ENABLE_THREADED_COMPILER)
1563                 lightrec_reaper_destroy(state->reaper);
1564 err_free_recompiler:
1565         if (ENABLE_THREADED_COMPILER)
1566                 lightrec_free_recompiler(state->rec);
1567         else
1568                 lightrec_free_cstate(state->cstate);
1569 err_free_block_cache:
1570         lightrec_free_block_cache(state->block_cache);
1571 err_free_tinymm:
1572 #if ENABLE_TINYMM
1573         tinymm_shutdown(state->tinymm);
1574 err_free_state:
1575 #endif
1576         lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
1577                             sizeof(*state->code_lut) * CODE_LUT_SIZE);
1578         free(state);
1579 err_finish_jit:
1580         finish_jit();
1581         return NULL;
1582 }
1583
1584 void lightrec_destroy(struct lightrec_state *state)
1585 {
1586         /* Force an info print on destroy */
1587         state->current_cycle = ~state->current_cycle;
1588         lightrec_print_info(state);
1589
1590         if (ENABLE_THREADED_COMPILER) {
1591                 lightrec_free_recompiler(state->rec);
1592                 lightrec_reaper_destroy(state->reaper);
1593         } else {
1594                 lightrec_free_cstate(state->cstate);
1595         }
1596
1597         lightrec_free_block_cache(state->block_cache);
1598         lightrec_free_block(state, state->dispatcher);
1599         lightrec_free_block(state, state->c_wrapper_block);
1600         finish_jit();
1601
1602 #if ENABLE_TINYMM
1603         tinymm_shutdown(state->tinymm);
1604 #endif
1605         lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
1606                             sizeof(*state->code_lut) * CODE_LUT_SIZE);
1607         free(state);
1608 }
1609
1610 void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
1611 {
1612         u32 kaddr = kunseg(addr & ~0x3);
1613         const struct lightrec_mem_map *map = lightrec_get_map(state, NULL, kaddr);
1614
1615         if (map) {
1616                 if (map != &state->maps[PSX_MAP_KERNEL_USER_RAM])
1617                         return;
1618
1619                 /* Handle mirrors */
1620                 kaddr &= (state->maps[PSX_MAP_KERNEL_USER_RAM].length - 1);
1621
1622                 lightrec_invalidate_map(state, map, kaddr, len);
1623         }
1624 }
1625
1626 void lightrec_invalidate_all(struct lightrec_state *state)
1627 {
1628         memset(state->code_lut, 0, sizeof(*state->code_lut) * CODE_LUT_SIZE);
1629 }
1630
1631 void lightrec_set_invalidate_mode(struct lightrec_state *state, bool dma_only)
1632 {
1633         if (state->invalidate_from_dma_only != dma_only)
1634                 lightrec_invalidate_all(state);
1635
1636         state->invalidate_from_dma_only = dma_only;
1637 }
1638
1639 void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
1640 {
1641         if (flags != LIGHTREC_EXIT_NORMAL) {
1642                 state->exit_flags |= flags;
1643                 state->target_cycle = state->current_cycle;
1644         }
1645 }
1646
1647 u32 lightrec_exit_flags(struct lightrec_state *state)
1648 {
1649         return state->exit_flags;
1650 }
1651
1652 u32 lightrec_current_cycle_count(const struct lightrec_state *state)
1653 {
1654         return state->current_cycle;
1655 }
1656
1657 void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
1658 {
1659         state->current_cycle = cycles;
1660
1661         if (state->target_cycle < cycles)
1662                 state->target_cycle = cycles;
1663 }
1664
1665 void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
1666 {
1667         if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
1668                 if (cycles < state->current_cycle)
1669                         cycles = state->current_cycle;
1670
1671                 state->target_cycle = cycles;
1672         }
1673 }
1674
1675 struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
1676 {
1677         return &state->regs;
1678 }