pcsx_rearmed.git: deps/lightrec/lightrec.c (commit 43636b2ad6767a193c63928d3817ddf485231729)
1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3  * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
4  */
5
6 #include "blockcache.h"
7 #include "debug.h"
8 #include "disassembler.h"
9 #include "emitter.h"
10 #include "interpreter.h"
11 #include "lightrec-config.h"
12 #include "lightning-wrapper.h"
13 #include "lightrec.h"
14 #include "memmanager.h"
15 #include "reaper.h"
16 #include "recompiler.h"
17 #include "regcache.h"
18 #include "optimizer.h"
19 #include "tlsf/tlsf.h"
20
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <limits.h>
24 #if ENABLE_THREADED_COMPILER
25 #include <stdatomic.h>
26 #endif
27 #include <stdbool.h>
28 #include <stddef.h>
29 #include <string.h>
30
31 #define GENMASK(h, l) \
32         (((uintptr_t)-1 << (l)) & ((uintptr_t)-1 >> (__WORDSIZE - 1 - (h))))
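/*
 * Example: GENMASK(15, 8) == 0xff00 - the left shift of ~0 clears the bits
 * below 'l', the right shift clears the bits above 'h', and the AND keeps
 * only the bits in between.
 */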
33
34 static struct block * lightrec_precompile_block(struct lightrec_state *state,
35                                                 u32 pc);
36 static bool lightrec_block_is_fully_tagged(const struct block *block);
37
38 static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
39 static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);
40
41 static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
42                                 void *host, u32 addr, u8 data)
43 {
44         *(u8 *)host = data;
45
46         if (!state->invalidate_from_dma_only)
47                 lightrec_invalidate(state, addr, 1);
48 }
49
50 static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
51                                 void *host, u32 addr, u16 data)
52 {
53         *(u16 *)host = HTOLE16(data);
54
55         if (!state->invalidate_from_dma_only)
56                 lightrec_invalidate(state, addr, 2);
57 }
58
59 static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
60                                 void *host, u32 addr, u32 data)
61 {
62         *(u32 *)host = HTOLE32(data);
63
64         if (!state->invalidate_from_dma_only)
65                 lightrec_invalidate(state, addr, 4);
66 }
67
68 static u8 lightrec_default_lb(struct lightrec_state *state,
69                               u32 opcode, void *host, u32 addr)
70 {
71         return *(u8 *)host;
72 }
73
74 static u16 lightrec_default_lh(struct lightrec_state *state,
75                                u32 opcode, void *host, u32 addr)
76 {
77         return LE16TOH(*(u16 *)host);
78 }
79
80 static u32 lightrec_default_lw(struct lightrec_state *state,
81                                u32 opcode, void *host, u32 addr)
82 {
83         return LE32TOH(*(u32 *)host);
84 }
85
86 static const struct lightrec_mem_map_ops lightrec_default_ops = {
87         .sb = lightrec_default_sb,
88         .sh = lightrec_default_sh,
89         .sw = lightrec_default_sw,
90         .lb = lightrec_default_lb,
91         .lh = lightrec_default_lh,
92         .lw = lightrec_default_lw,
93 };
94
95 static void __segfault_cb(struct lightrec_state *state, u32 addr,
96                           const struct block *block)
97 {
98         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
99         pr_err("Segmentation fault in recompiled code: invalid "
100                "load/store at address 0x%08x\n", addr);
101         if (block)
102                 pr_err("Was executing block PC 0x%08x\n", block->pc);
103 }
104
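/*
 * SWL/SWR store the "left"/"right" part of a register to a possibly
 * unaligned address; they are emulated with a read-modify-write of the
 * aligned word. Illustrative case for SWL on the little-endian PSX with
 * (addr & 3) == 0: mask == 0xffffff00, so only the register's top byte
 * lands in the word's lowest byte and the other bytes are preserved.
 */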
105 static void lightrec_swl(struct lightrec_state *state,
106                          const struct lightrec_mem_map_ops *ops,
107                          u32 opcode, void *host, u32 addr, u32 data)
108 {
109         unsigned int shift = addr & 0x3;
110         unsigned int mask = GENMASK(31, (shift + 1) * 8);
111         u32 old_data;
112
113         /* Align to 32 bits */
114         addr &= ~3;
115         host = (void *)((uintptr_t)host & ~3);
116
117         old_data = ops->lw(state, opcode, host, addr);
118
119         data = (data >> ((3 - shift) * 8)) | (old_data & mask);
120
121         ops->sw(state, opcode, host, addr, data);
122 }
123
124 static void lightrec_swr(struct lightrec_state *state,
125                          const struct lightrec_mem_map_ops *ops,
126                          u32 opcode, void *host, u32 addr, u32 data)
127 {
128         unsigned int shift = addr & 0x3;
129         unsigned int mask = (1 << (shift * 8)) - 1;
130         u32 old_data;
131
132         /* Align to 32 bits */
133         addr &= ~3;
134         host = (void *)((uintptr_t)host & ~3);
135
136         old_data = ops->lw(state, opcode, host, addr);
137
138         data = (data << (shift * 8)) | (old_data & mask);
139
140         ops->sw(state, opcode, host, addr, data);
141 }
142
143 static void lightrec_swc2(struct lightrec_state *state, union code op,
144                           const struct lightrec_mem_map_ops *ops,
145                           void *host, u32 addr)
146 {
147         u32 data = lightrec_mfc2(state, op.i.rt);
148
149         ops->sw(state, op.opcode, host, addr, data);
150 }
151
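/*
 * LWL/LWR merge part of a possibly unaligned word into the target
 * register, mirroring SWL/SWR above. Illustrative case for LWL with
 * (addr & 3) == 0: mask == 0x00ffffff, so only the register's top byte is
 * replaced, by the lowest byte of the aligned word.
 */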
152 static u32 lightrec_lwl(struct lightrec_state *state,
153                         const struct lightrec_mem_map_ops *ops,
154                         u32 opcode, void *host, u32 addr, u32 data)
155 {
156         unsigned int shift = addr & 0x3;
157         unsigned int mask = (1 << (24 - shift * 8)) - 1;
158         u32 old_data;
159
160         /* Align to 32 bits */
161         addr &= ~3;
162         host = (void *)((uintptr_t)host & ~3);
163
164         old_data = ops->lw(state, opcode, host, addr);
165
166         return (data & mask) | (old_data << (24 - shift * 8));
167 }
168
169 static u32 lightrec_lwr(struct lightrec_state *state,
170                         const struct lightrec_mem_map_ops *ops,
171                         u32 opcode, void *host, u32 addr, u32 data)
172 {
173         unsigned int shift = addr & 0x3;
174         unsigned int mask = GENMASK(31, 32 - shift * 8);
175         u32 old_data;
176
177         /* Align to 32 bits */
178         addr &= ~3;
179         host = (void *)((uintptr_t)host & ~3);
180
181         old_data = ops->lw(state, opcode, host, addr);
182
183         return (data & mask) | (old_data >> (shift * 8));
184 }
185
186 static void lightrec_lwc2(struct lightrec_state *state, union code op,
187                           const struct lightrec_mem_map_ops *ops,
188                           void *host, u32 addr)
189 {
190         u32 data = ops->lw(state, op.opcode, host, addr);
191
192         lightrec_mtc2(state, op.i.rt, data);
193 }
194
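/*
 * The code LUT holds one entry per 32-bit word of RAM. Clearing the entries
 * covering [addr, addr + len) makes the dispatcher fall back to
 * get_next_block_func() for any block starting in that range, so it can be
 * re-checked (and recompiled if its code changed).
 */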
195 static void lightrec_invalidate_map(struct lightrec_state *state,
196                 const struct lightrec_mem_map *map, u32 addr, u32 len)
197 {
198         if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
199                 memset(lut_address(state, lut_offset(addr)), 0,
200                        ((len + 3) / 4) * lut_elm_size(state));
201         }
202 }
203
204 enum psx_map
205 lightrec_get_map_idx(struct lightrec_state *state, u32 kaddr)
206 {
207         const struct lightrec_mem_map *map;
208         unsigned int i;
209
210         for (i = 0; i < state->nb_maps; i++) {
211                 map = &state->maps[i];
212
213                 if (kaddr >= map->pc && kaddr < map->pc + map->length)
214                         return (enum psx_map) i;
215         }
216
217         return PSX_MAP_UNKNOWN;
218 }
219
220 const struct lightrec_mem_map *
221 lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
222 {
223         const struct lightrec_mem_map *map;
224         enum psx_map idx;
225         u32 addr;
226
227         idx = lightrec_get_map_idx(state, kaddr);
228         if (idx == PSX_MAP_UNKNOWN)
229                 return NULL;
230
231         map = &state->maps[idx];
232         addr = kaddr - map->pc;
233
234         while (map->mirror_of)
235                 map = map->mirror_of;
236
237         if (host)
238                 *host = map->address + addr;
239
240         return map;
241 }
242
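/*
 * C fallback for the load/store opcodes. The access is routed either to the
 * direct-access default handlers or to the memory map's own ops, and on the
 * first pass the opcode is tagged with the kind of access it performed
 * (LIGHTREC_IO_DIRECT vs. LIGHTREC_IO_HW) so that a later recompilation can
 * emit the fast path directly.
 */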
243 u32 lightrec_rw(struct lightrec_state *state, union code op,
244                 u32 addr, u32 data, u32 *flags, struct block *block)
245 {
246         const struct lightrec_mem_map *map;
247         const struct lightrec_mem_map_ops *ops;
248         u32 opcode = op.opcode;
249         void *host;
250
251         addr += (s16) op.i.imm;
252
253         map = lightrec_get_map(state, &host, kunseg(addr));
254         if (!map) {
255                 __segfault_cb(state, addr, block);
256                 return 0;
257         }
258
259
260         if (likely(!map->ops)) {
261                 if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
262                         *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
263
264                 ops = &lightrec_default_ops;
265         } else if (flags &&
266                    LIGHTREC_FLAGS_GET_IO_MODE(*flags) == LIGHTREC_IO_DIRECT_HW) {
267                 ops = &lightrec_default_ops;
268         } else {
269                 if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
270                         *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
271
272                 ops = map->ops;
273         }
274
275         switch (op.i.op) {
276         case OP_SB:
277                 ops->sb(state, opcode, host, addr, (u8) data);
278                 return 0;
279         case OP_SH:
280                 ops->sh(state, opcode, host, addr, (u16) data);
281                 return 0;
282         case OP_SWL:
283                 lightrec_swl(state, ops, opcode, host, addr, data);
284                 return 0;
285         case OP_SWR:
286                 lightrec_swr(state, ops, opcode, host, addr, data);
287                 return 0;
288         case OP_SW:
289                 ops->sw(state, opcode, host, addr, data);
290                 return 0;
291         case OP_SWC2:
292                 lightrec_swc2(state, op, ops, host, addr);
293                 return 0;
294         case OP_LB:
295                 return (s32) (s8) ops->lb(state, opcode, host, addr);
296         case OP_LBU:
297                 return ops->lb(state, opcode, host, addr);
298         case OP_LH:
299                 return (s32) (s16) ops->lh(state, opcode, host, addr);
300         case OP_LHU:
301                 return ops->lh(state, opcode, host, addr);
302         case OP_LWC2:
303                 lightrec_lwc2(state, op, ops, host, addr);
304                 return 0;
305         case OP_LWL:
306                 return lightrec_lwl(state, ops, opcode, host, addr, data);
307         case OP_LWR:
308                 return lightrec_lwr(state, ops, opcode, host, addr, data);
309         case OP_LW:
310         default:
311                 return ops->lw(state, opcode, host, addr);
312         }
313 }
314
315 static void lightrec_rw_helper(struct lightrec_state *state,
316                                union code op, u32 *flags,
317                                struct block *block)
318 {
319         u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
320                               state->regs.gpr[op.i.rt], flags, block);
321
322         switch (op.i.op) {
323         case OP_LB:
324         case OP_LBU:
325         case OP_LH:
326         case OP_LHU:
327         case OP_LWL:
328         case OP_LWR:
329         case OP_LW:
330                 if (op.i.rt)
331                         state->regs.gpr[op.i.rt] = ret;
332                 fallthrough;
333         default:
334                 break;
335         }
336 }
337
338 static void lightrec_rw_cb(struct lightrec_state *state, u32 arg)
339 {
340         lightrec_rw_helper(state, (union code) arg, NULL, NULL);
341 }
342
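/*
 * Callback used by not-yet-tagged load/store opcodes. The lower 16 bits of
 * 'arg' are the opcode's offset within its block; the upper 16 bits,
 * together with state->next_pc, identify the block in the LUT so its flags
 * can be updated and the block marked for recompilation.
 */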
343 static void lightrec_rw_generic_cb(struct lightrec_state *state, u32 arg)
344 {
345         struct block *block;
346         struct opcode *op;
347         bool was_tagged;
348         u16 offset = (u16)arg;
349         u16 old_flags;
350
351         block = lightrec_find_block_from_lut(state->block_cache,
352                                              arg >> 16, state->next_pc);
353         if (unlikely(!block)) {
354                 pr_err("rw_generic: No block found in LUT for PC 0x%x offset 0x%x\n",
355                          state->next_pc, offset);
356                 lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
357                 return;
358         }
359
360         op = &block->opcode_list[offset];
361         was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(op->flags);
362
363         lightrec_rw_helper(state, op->c, &op->flags, block);
364
365         if (!was_tagged) {
366                 old_flags = block_set_flags(block, BLOCK_SHOULD_RECOMPILE);
367
368                 if (!(old_flags & BLOCK_SHOULD_RECOMPILE)) {
369                         pr_debug("Opcode of block at PC 0x%08x has been tagged"
370                                  " - flag for recompilation\n", block->pc);
371
372                         lut_write(state, lut_offset(block->pc), NULL);
373                 }
374         }
375 }
376
377 static u32 clamp_s32(s32 val, s32 min, s32 max)
378 {
379         return val < min ? min : val > max ? max : val;
380 }
381
382 static u16 load_u16(u32 *ptr)
383 {
384         return ((struct u16x2 *) ptr)->l;
385 }
386
387 static void store_u16(u32 *ptr, u16 value)
388 {
389         ((struct u16x2 *) ptr)->l = value;
390 }
391
392 static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
393 {
394         s16 gteir1, gteir2, gteir3;
395
396         switch (reg) {
397         case 1:
398         case 3:
399         case 5:
400         case 8:
401         case 9:
402         case 10:
403         case 11:
404                 return (s32)(s16) load_u16(&state->regs.cp2d[reg]);
405         case 7:
406         case 16:
407         case 17:
408         case 18:
409         case 19:
410                 return load_u16(&state->regs.cp2d[reg]);
411         case 28:
412         case 29:
413                 gteir1 = (s16) load_u16(&state->regs.cp2d[9]);
414                 gteir2 = (s16) load_u16(&state->regs.cp2d[10]);
415                 gteir3 = (s16) load_u16(&state->regs.cp2d[11]);
416
417                 return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
418                         clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
419                         clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
420         case 15:
421                 reg = 14;
422                 fallthrough;
423         default:
424                 return state->regs.cp2d[reg];
425         }
426 }
427
428 u32 lightrec_mfc(struct lightrec_state *state, union code op)
429 {
430         u32 val;
431
432         if (op.i.op == OP_CP0)
433                 return state->regs.cp0[op.r.rd];
434         else if (op.r.rs == OP_CP2_BASIC_MFC2)
435                 val = lightrec_mfc2(state, op.r.rd);
436         else {
437                 val = state->regs.cp2c[op.r.rd];
438
439                 switch (op.r.rd) {
440                 case 4:
441                 case 12:
442                 case 20:
443                 case 26:
444                 case 27:
445                 case 29:
446                 case 30:
447                         val = (u32)(s16)val;
448                         fallthrough;
449                 default:
450                         break;
451                 }
452         }
453
454         if (state->ops.cop2_notify)
455                 (*state->ops.cop2_notify)(state, op.opcode, val);
456
457         return val;
458 }
459
460 static void lightrec_mfc_cb(struct lightrec_state *state, union code op)
461 {
462         u32 rt = lightrec_mfc(state, op);
463
464         if (op.r.rt)
465                 state->regs.gpr[op.r.rt] = rt;
466 }
467
468 static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
469 {
470         u32 status, oldstatus, cause;
471
472         switch (reg) {
473         case 1:
474         case 4:
475         case 8:
476         case 14:
477         case 15:
478                 /* Those registers are read-only */
479                 return;
480         default:
481                 break;
482         }
483
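        /*
         * Bit 16 of the Status register is the "Isolate Cache" bit. While it
         * is set, stores must not reach RAM, so RAM is disabled; when it is
         * cleared again, RAM is re-enabled and all compiled blocks are
         * invalidated to be safe.
         */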
484         if (reg == 12) {
485                 status = state->regs.cp0[12];
486                 oldstatus = status;
487
488                 if (status & ~data & BIT(16)) {
489                         state->ops.enable_ram(state, true);
490                         lightrec_invalidate_all(state);
491                 } else if (~status & data & BIT(16)) {
492                         state->ops.enable_ram(state, false);
493                 }
494         }
495
496         if (reg == 13) {
497                 state->regs.cp0[13] &= ~0x300;
498                 state->regs.cp0[13] |= data & 0x300;
499         } else {
500                 state->regs.cp0[reg] = data;
501         }
502
503         if (reg == 12 || reg == 13) {
504                 cause = state->regs.cp0[13];
505                 status = state->regs.cp0[12];
506
507                 /* Handle software interrupts */
508                 if (!!(status & cause & 0x300) & status)
509                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
510
511                 /* Handle hardware interrupts */
512                 if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
513                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
514         }
515 }
516
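/*
 * Count the leading bits equal to the sign bit, as expected by the GTE LZCR
 * register. Quick check: 0 and -1 both give 32, 1 gives 31.
 * __builtin_clrsb() returns the number of *redundant* sign bits, hence the
 * "1 +" when it is available.
 */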
517 static u32 count_leading_bits(s32 data)
518 {
519         u32 cnt = 33;
520
521 #ifdef __has_builtin
522 #if __has_builtin(__builtin_clrsb)
523         return 1 + __builtin_clrsb(data);
524 #endif
525 #endif
526
527         data = (data ^ (data >> 31)) << 1;
528
529         do {
530                 cnt -= 1;
531                 data >>= 1;
532         } while (data);
533
534         return cnt;
535 }
536
537 static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
538 {
539         switch (reg) {
540         case 15:
541                 state->regs.cp2d[12] = state->regs.cp2d[13];
542                 state->regs.cp2d[13] = state->regs.cp2d[14];
543                 state->regs.cp2d[14] = data;
544                 break;
545         case 28:
546                 state->regs.cp2d[9] = (data << 7) & 0xf80;
547                 state->regs.cp2d[10] = (data << 2) & 0xf80;
548                 state->regs.cp2d[11] = (data >> 3) & 0xf80;
549                 break;
550         case 31:
551                 return;
552         case 30:
553                 state->regs.cp2d[31] = count_leading_bits((s32) data);
554                 fallthrough;
555         default:
556                 state->regs.cp2d[reg] = data;
557                 break;
558         }
559 }
560
561 static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
562 {
563         switch (reg) {
564         case 4:
565         case 12:
566         case 20:
567         case 26:
568         case 27:
569         case 29:
570         case 30:
571                 store_u16(&state->regs.cp2c[reg], data);
572                 break;
573         case 31:
574                 data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
575                 fallthrough;
576         default:
577                 state->regs.cp2c[reg] = data;
578                 break;
579         }
580 }
581
582 void lightrec_mtc(struct lightrec_state *state, union code op, u32 data)
583 {
584         if (op.i.op == OP_CP0) {
585                 lightrec_mtc0(state, op.r.rd, data);
586         } else {
587                 if (op.r.rs == OP_CP2_BASIC_CTC2)
588                         lightrec_ctc2(state, op.r.rd, data);
589                 else
590                         lightrec_mtc2(state, op.r.rd, data);
591
592                 if (state->ops.cop2_notify)
593                         (*state->ops.cop2_notify)(state, op.opcode, data);
594         }
595 }
596
597 static void lightrec_mtc_cb(struct lightrec_state *state, u32 arg)
598 {
599         union code op = (union code) arg;
600
601         lightrec_mtc(state, op, state->regs.gpr[op.r.rt]);
602 }
603
604 void lightrec_rfe(struct lightrec_state *state)
605 {
606         u32 status;
607
608         /* Read CP0 Status register (r12) */
609         status = state->regs.cp0[12];
610
611         /* Switch the bits */
612         status = ((status & 0x3c) >> 2) | (status & ~0xf);
613
614         /* Write it back */
615         lightrec_mtc0(state, 12, status);
616 }
617
618 void lightrec_cp(struct lightrec_state *state, union code op)
619 {
620         if (op.i.op == OP_CP0) {
621                 pr_err("Invalid CP opcode to coprocessor #0\n");
622                 return;
623         }
624
625         (*state->ops.cop2_op)(state, op.opcode);
626 }
627
628 static void lightrec_cp_cb(struct lightrec_state *state, u32 arg)
629 {
630         lightrec_cp(state, (union code) arg);
631 }
632
633 static struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
634 {
635         struct block *block = lightrec_find_block(state->block_cache, pc);
636         u8 old_flags;
637
638         if (block && lightrec_block_is_outdated(state, block)) {
639                 pr_debug("Block at PC 0x%08x is outdated!\n", block->pc);
640
641                 old_flags = block_set_flags(block, BLOCK_IS_DEAD);
642                 if (!(old_flags & BLOCK_IS_DEAD)) {
643                         /* Make sure the recompiler isn't processing the block
644                          * we'll destroy */
645                         if (ENABLE_THREADED_COMPILER)
646                                 lightrec_recompiler_remove(state->rec, block);
647
648                         lightrec_unregister_block(state->block_cache, block);
649                         remove_from_code_lut(state->block_cache, block);
650                         lightrec_free_block(state, block);
651                 }
652
653                 block = NULL;
654         }
655
656         if (!block) {
657                 block = lightrec_precompile_block(state, pc);
658                 if (!block) {
659                         pr_err("Unable to recompile block at PC 0x%x\n", pc);
660                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
661                         return NULL;
662                 }
663
664                 lightrec_register_block(state->block_cache, block);
665         }
666
667         return block;
668 }
669
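/*
 * Called (through the dispatcher) whenever the code LUT has no native code
 * for 'pc'. Roughly: fetch or precompile the block, schedule or perform its
 * compilation, optionally interpret it for a first profiling pass, and loop
 * until a native entry point is available or the cycle budget runs out.
 */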
670 static void * get_next_block_func(struct lightrec_state *state, u32 pc)
671 {
672         struct block *block;
673         bool should_recompile;
674         void *func;
675         int err;
676
677         for (;;) {
678                 func = lut_read(state, lut_offset(pc));
679                 if (func && func != state->get_next_block)
680                         break;
681
682                 block = lightrec_get_block(state, pc);
683
684                 if (unlikely(!block))
685                         break;
686
687                 if (OPT_REPLACE_MEMSET &&
688                     block_has_flag(block, BLOCK_IS_MEMSET)) {
689                         func = state->memset_func;
690                         break;
691                 }
692
693                 should_recompile = block_has_flag(block, BLOCK_SHOULD_RECOMPILE) &&
694                         !block_has_flag(block, BLOCK_IS_DEAD);
695
696                 if (unlikely(should_recompile)) {
697                         pr_debug("Block at PC 0x%08x should recompile\n", pc);
698
699                         if (ENABLE_THREADED_COMPILER) {
700                                 lightrec_recompiler_add(state->rec, block);
701                         } else {
702                                 err = lightrec_compile_block(state->cstate, block);
703                                 if (err) {
704                                         state->exit_flags = LIGHTREC_EXIT_NOMEM;
705                                         return NULL;
706                                 }
707                         }
708                 }
709
710                 if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
711                         func = lightrec_recompiler_run_first_pass(state, block, &pc);
712                 else
713                         func = block->function;
714
715                 if (likely(func))
716                         break;
717
718                 if (unlikely(block_has_flag(block, BLOCK_NEVER_COMPILE))) {
719                         pc = lightrec_emulate_block(state, block, pc);
720
721                 } else if (!ENABLE_THREADED_COMPILER) {
722                         /* Block wasn't compiled yet - run the interpreter */
723                         if (block_has_flag(block, BLOCK_FULLY_TAGGED))
724                                 pr_debug("Block fully tagged, skipping first pass\n");
725                         else if (ENABLE_FIRST_PASS && likely(!should_recompile))
726                                 pc = lightrec_emulate_block(state, block, pc);
727
728                         /* Then compile it using the profiled data */
729                         err = lightrec_compile_block(state->cstate, block);
730                         if (err) {
731                                 state->exit_flags = LIGHTREC_EXIT_NOMEM;
732                                 return NULL;
733                         }
734                 } else if (unlikely(block_has_flag(block, BLOCK_IS_DEAD))) {
735                         /*
736                          * If the block is dead but has never been compiled,
737                          * then its function pointer is NULL and we cannot
738                          * execute the block. In that case, reap all the dead
739                          * blocks now, and in the next loop we will create a
740                          * new block.
741                          */
742                         lightrec_reaper_reap(state->reaper);
743                 } else {
744                         lightrec_recompiler_add(state->rec, block);
745                 }
746
747                 if (state->exit_flags != LIGHTREC_EXIT_NORMAL ||
748                     state->current_cycle >= state->target_cycle)
749                         break;
750         }
751
752         state->next_pc = pc;
753         return func;
754 }
755
756 static void * lightrec_alloc_code(struct lightrec_state *state, size_t size)
757 {
758         void *code;
759
760         if (ENABLE_THREADED_COMPILER)
761                 lightrec_code_alloc_lock(state);
762
763         code = tlsf_malloc(state->tlsf, size);
764
765         if (ENABLE_THREADED_COMPILER)
766                 lightrec_code_alloc_unlock(state);
767
768         return code;
769 }
770
771 static void lightrec_realloc_code(struct lightrec_state *state,
772                                   void *ptr, size_t size)
773 {
774         /* NOTE: 'size' MUST be smaller than the size specified during
775          * the allocation. */
776
777         if (ENABLE_THREADED_COMPILER)
778                 lightrec_code_alloc_lock(state);
779
780         tlsf_realloc(state->tlsf, ptr, size);
781
782         if (ENABLE_THREADED_COMPILER)
783                 lightrec_code_alloc_unlock(state);
784 }
785
786 static void lightrec_free_code(struct lightrec_state *state, void *ptr)
787 {
788         if (ENABLE_THREADED_COMPILER)
789                 lightrec_code_alloc_lock(state);
790
791         tlsf_free(state->tlsf, ptr);
792
793         if (ENABLE_THREADED_COMPILER)
794                 lightrec_code_alloc_unlock(state);
795 }
796
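/*
 * Emit the native code of a block. With a dedicated code buffer, space is
 * first reserved using the worst-case size reported by jit_get_code(); once
 * the code has been emitted the allocation is shrunk to its final size with
 * lightrec_realloc_code().
 */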
797 static void * lightrec_emit_code(struct lightrec_state *state,
798                                  const struct block *block,
799                                  jit_state_t *_jit, unsigned int *size)
800 {
801         bool has_code_buffer = ENABLE_CODE_BUFFER && state->tlsf;
802         jit_word_t code_size, new_code_size;
803         void *code;
804
805         jit_realize();
806
807         if (!ENABLE_DISASSEMBLER)
808                 jit_set_data(NULL, 0, JIT_DISABLE_DATA | JIT_DISABLE_NOTE);
809
810         if (has_code_buffer) {
811                 jit_get_code(&code_size);
812                 code = lightrec_alloc_code(state, (size_t) code_size);
813
814                 if (!code) {
815                         if (ENABLE_THREADED_COMPILER) {
816                                 /* If we're using the threaded compiler, return
817                                  * an allocation error here. The threaded
818                                  * compiler will then empty its job queue and
819                                  * request a code flush using the reaper. */
820                                 return NULL;
821                         }
822
823                         /* Remove outdated blocks, and try again */
824                         lightrec_remove_outdated_blocks(state->block_cache, block);
825
826                         pr_debug("Re-try to alloc %zu bytes...\n", code_size);
827
828                         code = lightrec_alloc_code(state, code_size);
829                         if (!code) {
830                                 pr_err("Could not alloc even after removing old blocks!\n");
831                                 return NULL;
832                         }
833                 }
834
835                 jit_set_code(code, code_size);
836         }
837
838         code = jit_emit();
839
840         jit_get_code(&new_code_size);
841         lightrec_register(MEM_FOR_CODE, new_code_size);
842
843         if (has_code_buffer) {
844                 lightrec_realloc_code(state, code, (size_t) new_code_size);
845
846                 pr_debug("Creating code block at address 0x%" PRIxPTR ", "
847                          "code size: %" PRIuPTR " new: %" PRIuPTR "\n",
848                          (uintptr_t) code, code_size, new_code_size);
849         }
850
851         *size = (unsigned int) new_code_size;
852
853         return code;
854 }
855
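/*
 * Build the trampoline used to call the C wrappers (memory accesses, MTC,
 * CP2 operations, ...) from recompiled code. It exposes one entry point per
 * wrapper, spills the temporaries, converts the down-counting cycle register
 * into state->current_cycle around the call, then restores everything.
 */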
856 static struct block * generate_wrapper(struct lightrec_state *state)
857 {
858         struct block *block;
859         jit_state_t *_jit;
860         unsigned int i;
861         jit_node_t *addr[C_WRAPPERS_COUNT - 1];
862         jit_node_t *to_end[C_WRAPPERS_COUNT - 1];
863
864         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
865         if (!block)
866                 goto err_no_mem;
867
868         _jit = jit_new_state();
869         if (!_jit)
870                 goto err_free_block;
871
872         jit_name("RW wrapper");
873         jit_note(__FILE__, __LINE__);
874
875         /* Wrapper entry point */
876         jit_prolog();
877         jit_tramp(256);
878
879         /* Add entry points */
880         for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
881                 jit_ldxi(JIT_R1, LIGHTREC_REG_STATE,
882                          offsetof(struct lightrec_state, c_wrappers[i]));
883                 to_end[i - 1] = jit_b();
884                 addr[i - 1] = jit_indirect();
885         }
886
887         jit_ldxi(JIT_R1, LIGHTREC_REG_STATE,
888                  offsetof(struct lightrec_state, c_wrappers[0]));
889
890         for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
891                 jit_patch(to_end[i]);
892
893         jit_epilog();
894         jit_prolog();
895
896         /* Save all temporaries on stack */
897         for (i = 0; i < NUM_TEMPS; i++) {
898                 if (i + FIRST_TEMP != 1) {
899                         jit_stxi(offsetof(struct lightrec_state, wrapper_regs[i]),
900                                  LIGHTREC_REG_STATE, JIT_R(i + FIRST_TEMP));
901                 }
902         }
903
904         jit_getarg(JIT_R2, jit_arg());
905
906         jit_prepare();
907         jit_pushargr(LIGHTREC_REG_STATE);
908         jit_pushargr(JIT_R2);
909
910         jit_ldxi_ui(JIT_R2, LIGHTREC_REG_STATE,
911                     offsetof(struct lightrec_state, target_cycle));
912
913         /* state->current_cycle = state->target_cycle - delta; */
914         jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, LIGHTREC_REG_CYCLE);
915         jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
916                    LIGHTREC_REG_STATE, LIGHTREC_REG_CYCLE);
917
918         /* Call the wrapper function */
919         jit_finishr(JIT_R1);
920
921         /* delta = state->target_cycle - state->current_cycle */
922         jit_ldxi_ui(LIGHTREC_REG_CYCLE, LIGHTREC_REG_STATE,
923                     offsetof(struct lightrec_state, current_cycle));
924         jit_ldxi_ui(JIT_R1, LIGHTREC_REG_STATE,
925                     offsetof(struct lightrec_state, target_cycle));
926         jit_subr(LIGHTREC_REG_CYCLE, JIT_R1, LIGHTREC_REG_CYCLE);
927
928         /* Restore temporaries from stack */
929         for (i = 0; i < NUM_TEMPS; i++) {
930                 if (i + FIRST_TEMP != 1) {
931                         jit_ldxi(JIT_R(i + FIRST_TEMP), LIGHTREC_REG_STATE,
932                                  offsetof(struct lightrec_state, wrapper_regs[i]));
933                 }
934         }
935
936         jit_ret();
937         jit_epilog();
938
939         block->_jit = _jit;
940         block->opcode_list = NULL;
941         block->flags = BLOCK_NO_OPCODE_LIST;
942         block->nb_ops = 0;
943
944         block->function = lightrec_emit_code(state, block, _jit,
945                                              &block->code_size);
946         if (!block->function)
947                 goto err_free_block;
948
949         state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;
950
951         for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
952                 state->wrappers_eps[i] = jit_address(addr[i]);
953
954         if (ENABLE_DISASSEMBLER) {
955                 pr_debug("Wrapper block:\n");
956                 jit_disassemble();
957         }
958
959         jit_clear_state();
960         return block;
961
962 err_free_block:
963         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
964 err_no_mem:
965         pr_err("Unable to compile wrapper: Out of memory\n");
966         return NULL;
967 }
968
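/*
 * Host-side replacement for the memset-style loops detected when
 * OPT_REPLACE_MEMSET is enabled: $a0 (gpr[4]) holds the destination and
 * $a1 (gpr[5]) the number of 32-bit words to clear. The return value is a
 * rough cycle cost, which the dispatcher subtracts from the budget.
 */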
969 static u32 lightrec_memset(struct lightrec_state *state)
970 {
971         u32 kunseg_pc = kunseg(state->regs.gpr[4]);
972         void *host;
973         const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
974         u32 length = state->regs.gpr[5] * 4;
975
976         if (!map) {
977                 pr_err("Unable to find memory map for memset target address "
978                        "0x%x\n", kunseg_pc);
979                 return 0;
980         }
981
982         pr_debug("Calling host memset, PC 0x%x (host address 0x%" PRIxPTR ") for %u bytes\n",
983                  kunseg_pc, (uintptr_t)host, length);
984         memset(host, 0, length);
985
986         if (!state->invalidate_from_dma_only)
987                 lightrec_invalidate_map(state, map, kunseg_pc, length);
988
989         /* Rough estimation of the number of cycles consumed */
990         return 8 + 5 * ((length + 3) / 4);
991 }
992
993 static struct block * generate_dispatcher(struct lightrec_state *state)
994 {
995         struct block *block;
996         jit_state_t *_jit;
997         jit_node_t *to_end, *loop, *addr, *addr2, *addr3;
998         unsigned int i;
999         u32 offset;
1000
1001         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
1002         if (!block)
1003                 goto err_no_mem;
1004
1005         _jit = jit_new_state();
1006         if (!_jit)
1007                 goto err_free_block;
1008
1009         jit_name("dispatcher");
1010         jit_note(__FILE__, __LINE__);
1011
1012         jit_prolog();
1013         jit_frame(256);
1014
1015         jit_getarg(JIT_V1, jit_arg());
1016         jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());
1017
1018         /* Force all callee-saved registers to be pushed on the stack */
1019         for (i = 0; i < NUM_REGS; i++)
1020                 jit_movr(JIT_V(i + FIRST_REG), JIT_V(i + FIRST_REG));
1021
1022         /* Pass lightrec_state structure to blocks, using the last callee-saved
1023          * register that Lightning provides */
1024         jit_movi(LIGHTREC_REG_STATE, (intptr_t) state);
1025
1026         loop = jit_label();
1027
1028         /* Call the block's code */
1029         jit_jmpr(JIT_V1);
1030
1031         if (OPT_REPLACE_MEMSET) {
1032                 /* Blocks will jump here when they need to call
1033                  * lightrec_memset() */
1034                 addr3 = jit_indirect();
1035
1036                 jit_movr(JIT_V1, LIGHTREC_REG_CYCLE);
1037
1038                 jit_prepare();
1039                 jit_pushargr(LIGHTREC_REG_STATE);
1040                 jit_finishi(lightrec_memset);
1041
1042                 jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
1043                             offsetof(struct lightrec_state, regs.gpr[31]));
1044
1045                 jit_retval(LIGHTREC_REG_CYCLE);
1046                 jit_subr(LIGHTREC_REG_CYCLE, JIT_V1, LIGHTREC_REG_CYCLE);
1047         }
1048
1049         /* The block will jump here, with the number of cycles remaining in
1050          * LIGHTREC_REG_CYCLE */
1051         addr2 = jit_indirect();
1052
1053         /* Store back the next_pc to the lightrec_state structure */
1054         offset = offsetof(struct lightrec_state, next_pc);
1055         jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);
1056
1057         /* Jump to end if state->target_cycle < state->current_cycle */
1058         to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);
1059
1060         /* Convert next PC to KUNSEG and avoid mirrors */
1061         jit_andi(JIT_V1, JIT_V0, 0x10000000 | (RAM_SIZE - 1));
1062         jit_rshi_u(JIT_R1, JIT_V1, 28);
1063         jit_andi(JIT_R2, JIT_V0, BIOS_SIZE - 1);
1064         jit_addi(JIT_R2, JIT_R2, RAM_SIZE);
1065         jit_movnr(JIT_V1, JIT_R2, JIT_R1);
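        /* At this point JIT_V1 holds the LUT index: a PC in RAM (or one of
         * its mirrors) maps to (pc & (RAM_SIZE - 1)), while a BIOS PC (bit 28
         * set) maps to RAM_SIZE + (pc & (BIOS_SIZE - 1)). */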
1066
1067         /* If possible, use the code LUT */
1068         if (!lut_is_32bit(state))
1069                 jit_lshi(JIT_V1, JIT_V1, 1);
1070         jit_addr(JIT_V1, JIT_V1, LIGHTREC_REG_STATE);
1071
1072         offset = offsetof(struct lightrec_state, code_lut);
1073         if (lut_is_32bit(state))
1074                 jit_ldxi_ui(JIT_V1, JIT_V1, offset);
1075         else
1076                 jit_ldxi(JIT_V1, JIT_V1, offset);
1077
1078         /* If we get non-NULL, loop */
1079         jit_patch_at(jit_bnei(JIT_V1, 0), loop);
1080
1081         /* The code LUT will be set to this address when the block at the target
1082          * PC has been preprocessed but not yet compiled by the threaded
1083          * recompiler */
1084         addr = jit_indirect();
1085
1086         /* Slow path: call C function get_next_block_func() */
1087
1088         if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
1089                 /* We may call the interpreter - update state->current_cycle */
1090                 jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
1091                            offsetof(struct lightrec_state, target_cycle));
1092                 jit_subr(JIT_V1, JIT_R2, LIGHTREC_REG_CYCLE);
1093                 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
1094                            LIGHTREC_REG_STATE, JIT_V1);
1095         }
1096
1097         jit_prepare();
1098         jit_pushargr(LIGHTREC_REG_STATE);
1099         jit_pushargr(JIT_V0);
1100
1101         /* Save the cycles register if needed */
1102         if (!(ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES))
1103                 jit_movr(JIT_V0, LIGHTREC_REG_CYCLE);
1104
1105         /* Get the next block */
1106         jit_finishi(&get_next_block_func);
1107         jit_retval(JIT_V1);
1108
1109         if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
1110                 /* The interpreter may have updated state->current_cycle and
1111                  * state->target_cycle - recalc the delta */
1112                 jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
1113                            offsetof(struct lightrec_state, current_cycle));
1114                 jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
1115                            offsetof(struct lightrec_state, target_cycle));
1116                 jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
1117         } else {
1118                 jit_movr(LIGHTREC_REG_CYCLE, JIT_V0);
1119         }
1120
1121         /* If we get non-NULL, loop */
1122         jit_patch_at(jit_bnei(JIT_V1, 0), loop);
1123
1124         /* When exiting, the recompiled code will jump to that address */
1125         jit_note(__FILE__, __LINE__);
1126         jit_patch(to_end);
1127
1128         jit_retr(LIGHTREC_REG_CYCLE);
1129         jit_epilog();
1130
1131         block->_jit = _jit;
1132         block->opcode_list = NULL;
1133         block->flags = BLOCK_NO_OPCODE_LIST;
1134         block->nb_ops = 0;
1135
1136         block->function = lightrec_emit_code(state, block, _jit,
1137                                              &block->code_size);
1138         if (!block->function)
1139                 goto err_free_block;
1140
1141         state->eob_wrapper_func = jit_address(addr2);
1142         if (OPT_REPLACE_MEMSET)
1143                 state->memset_func = jit_address(addr3);
1144         state->get_next_block = jit_address(addr);
1145
1146         if (ENABLE_DISASSEMBLER) {
1147                 pr_debug("Dispatcher block:\n");
1148                 jit_disassemble();
1149         }
1150
1151         /* We're done! */
1152         jit_clear_state();
1153         return block;
1154
1155 err_free_block:
1156         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1157 err_no_mem:
1158         pr_err("Unable to compile dispatcher: Out of memory\n");
1159         return NULL;
1160 }
1161
1162 union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
1163 {
1164         void *host = NULL;
1165
1166         lightrec_get_map(state, &host, kunseg(pc));
1167
1168         const u32 *code = (u32 *)host;
1169         return (union code) LE32TOH(*code);
1170 }
1171
1172 unsigned int lightrec_cycles_of_opcode(union code code)
1173 {
1174         return 2;
1175 }
1176
1177 void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
1178 {
1179         struct opcode_list *list = container_of(ops, struct opcode_list, ops);
1180
1181         lightrec_free(state, MEM_FOR_IR,
1182                       sizeof(*list) + list->nb_ops * sizeof(struct opcode),
1183                       list);
1184 }
1185
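/*
 * Compute the length, in instructions, of the MIPS block starting at 'src':
 * scan until a syscall-type instruction or an unconditional jump; the jump's
 * delay slot is included in the block, hence the "+ 1".
 */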
1186 static unsigned int lightrec_get_mips_block_len(const u32 *src)
1187 {
1188         unsigned int i;
1189         union code c;
1190
1191         for (i = 1; ; i++) {
1192                 c.opcode = LE32TOH(*src++);
1193
1194                 if (is_syscall(c))
1195                         return i;
1196
1197                 if (is_unconditional_jump(c))
1198                         return i + 1;
1199         }
1200 }
1201
1202 static struct opcode * lightrec_disassemble(struct lightrec_state *state,
1203                                             const u32 *src, unsigned int *len)
1204 {
1205         struct opcode_list *list;
1206         unsigned int i, length;
1207
1208         length = lightrec_get_mips_block_len(src);
1209
1210         list = lightrec_malloc(state, MEM_FOR_IR,
1211                                sizeof(*list) + sizeof(struct opcode) * length);
1212         if (!list) {
1213                 pr_err("Unable to allocate memory\n");
1214                 return NULL;
1215         }
1216
1217         list->nb_ops = (u16) length;
1218
1219         for (i = 0; i < length; i++) {
1220                 list->ops[i].opcode = LE32TOH(src[i]);
1221                 list->ops[i].flags = 0;
1222         }
1223
1224         *len = length * sizeof(u32);
1225
1226         return list->ops;
1227 }
1228
1229 static struct block * lightrec_precompile_block(struct lightrec_state *state,
1230                                                 u32 pc)
1231 {
1232         struct opcode *list;
1233         struct block *block;
1234         void *host, *addr;
1235         const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
1236         const u32 *code = (u32 *) host;
1237         unsigned int length;
1238         bool fully_tagged;
1239         u8 block_flags = 0;
1240
1241         if (!map)
1242                 return NULL;
1243
1244         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
1245         if (!block) {
1246                 pr_err("Unable to recompile block: Out of memory\n");
1247                 return NULL;
1248         }
1249
1250         list = lightrec_disassemble(state, code, &length);
1251         if (!list) {
1252                 lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1253                 return NULL;
1254         }
1255
1256         block->pc = pc;
1257         block->_jit = NULL;
1258         block->function = NULL;
1259         block->opcode_list = list;
1260         block->code = code;
1261         block->next = NULL;
1262         block->flags = 0;
1263         block->code_size = 0;
1264         block->precompile_date = state->current_cycle;
1265         block->nb_ops = length / sizeof(u32);
1266
1267         lightrec_optimize(state, block);
1268
1269         length = block->nb_ops * sizeof(u32);
1270
1271         lightrec_register(MEM_FOR_MIPS_CODE, length);
1272
1273         if (ENABLE_DISASSEMBLER) {
1274                 pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
1275                 lightrec_print_disassembly(block, code);
1276         }
1277
1278         pr_debug("Block size: %hu opcodes\n", block->nb_ops);
1279
1280         /* If the first opcode is an 'impossible' branch, never compile the
1281          * block */
1282         if (should_emulate(block->opcode_list))
1283                 block_flags |= BLOCK_NEVER_COMPILE;
1284
1285         fully_tagged = lightrec_block_is_fully_tagged(block);
1286         if (fully_tagged)
1287                 block_flags |= BLOCK_FULLY_TAGGED;
1288
1289         if (block_flags)
1290                 block_set_flags(block, block_flags);
1291
1292         block->hash = lightrec_calculate_block_hash(block);
1293
1294         if (OPT_REPLACE_MEMSET && block_has_flag(block, BLOCK_IS_MEMSET))
1295                 addr = state->memset_func;
1296         else
1297                 addr = state->get_next_block;
1298         lut_write(state, lut_offset(pc), addr);
1299
1300         pr_debug("Recompile count: %u\n", state->nb_precompile++);
1301
1302         return block;
1303 }
1304
1305 static bool lightrec_block_is_fully_tagged(const struct block *block)
1306 {
1307         const struct opcode *op;
1308         unsigned int i;
1309
1310         for (i = 0; i < block->nb_ops; i++) {
1311                 op = &block->opcode_list[i];
1312
1313                 /* Check all loads/stores of the opcode list and mark the
1314                  * block as fully compiled if they all have been
1315                  * tagged. */
1316                 switch (op->c.i.op) {
1317                 case OP_LB:
1318                 case OP_LH:
1319                 case OP_LWL:
1320                 case OP_LW:
1321                 case OP_LBU:
1322                 case OP_LHU:
1323                 case OP_LWR:
1324                 case OP_SB:
1325                 case OP_SH:
1326                 case OP_SWL:
1327                 case OP_SW:
1328                 case OP_SWR:
1329                 case OP_LWC2:
1330                 case OP_SWC2:
1331                         if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
1332                                 return false;
1333                         fallthrough;
1334                 default:
1335                         continue;
1336                 }
1337         }
1338
1339         return true;
1340 }
1341
1342 static void lightrec_reap_block(struct lightrec_state *state, void *data)
1343 {
1344         struct block *block = data;
1345
1346         pr_debug("Reap dead block at PC 0x%08x\n", block->pc);
1347         lightrec_unregister_block(state->block_cache, block);
1348         lightrec_free_block(state, block);
1349 }
1350
1351 static void lightrec_reap_jit(struct lightrec_state *state, void *data)
1352 {
1353         _jit_destroy_state(data);
1354 }
1355
1356 static void lightrec_free_function(struct lightrec_state *state, void *fn)
1357 {
1358         if (ENABLE_CODE_BUFFER && state->tlsf) {
1359                 pr_debug("Freeing code block at 0x%" PRIxPTR "\n", (uintptr_t) fn);
1360                 lightrec_free_code(state, fn);
1361         }
1362 }
1363
1364 static void lightrec_reap_function(struct lightrec_state *state, void *data)
1365 {
1366         lightrec_free_function(state, data);
1367 }
1368
1369 static void lightrec_reap_opcode_list(struct lightrec_state *state, void *data)
1370 {
1371         lightrec_free_opcode_list(state, data);
1372 }
1373
1374 int lightrec_compile_block(struct lightrec_cstate *cstate,
1375                            struct block *block)
1376 {
1377         struct lightrec_state *state = cstate->state;
1378         struct lightrec_branch_target *target;
1379         bool fully_tagged = false;
1380         struct block *block2;
1381         struct opcode *elm;
1382         jit_state_t *_jit, *oldjit;
1383         jit_node_t *start_of_block;
1384         bool skip_next = false;
1385         void *old_fn, *new_fn;
1386         size_t old_code_size;
1387         unsigned int i, j;
1388         u8 old_flags;
1389         u32 offset;
1390
1391         fully_tagged = lightrec_block_is_fully_tagged(block);
1392         if (fully_tagged)
1393                 block_set_flags(block, BLOCK_FULLY_TAGGED);
1394
1395         _jit = jit_new_state();
1396         if (!_jit)
1397                 return -ENOMEM;
1398
1399         oldjit = block->_jit;
1400         old_fn = block->function;
1401         old_code_size = block->code_size;
1402         block->_jit = _jit;
1403
1404         lightrec_regcache_reset(cstate->reg_cache);
1405         cstate->cycles = 0;
1406         cstate->nb_local_branches = 0;
1407         cstate->nb_targets = 0;
1408
1409         jit_prolog();
1410         jit_tramp(256);
1411
1412         start_of_block = jit_label();
1413
1414         for (i = 0; i < block->nb_ops; i++) {
1415                 elm = &block->opcode_list[i];
1416
1417                 if (skip_next) {
1418                         skip_next = false;
1419                         continue;
1420                 }
1421
1422                 if (should_emulate(elm)) {
1423                         pr_debug("Branch at offset 0x%x will be emulated\n",
1424                                  i << 2);
1425
1426                         lightrec_emit_eob(cstate, block, i, false);
1427                         skip_next = !op_flag_no_ds(elm->flags);
1428                 } else {
1429                         lightrec_rec_opcode(cstate, block, i);
1430                         skip_next = !op_flag_no_ds(elm->flags) && has_delay_slot(elm->c);
1431 #ifdef _WIN32
1432                         /* FIXME: GNU Lightning on Windows seems to use our
1433                          * mapped registers as temporaries. Until the actual bug
1434                          * is found and fixed, unconditionally mark our
1435                          * registers as live here. */
1436                         lightrec_regcache_mark_live(cstate->reg_cache, _jit);
1437 #endif
1438                 }
1439
1440                 cstate->cycles += lightrec_cycles_of_opcode(elm->c);
1441         }
1442
1443         for (i = 0; i < cstate->nb_local_branches; i++) {
1444                 struct lightrec_branch *branch = &cstate->local_branches[i];
1445
1446                 pr_debug("Patch local branch to offset 0x%x\n",
1447                          branch->target << 2);
1448
1449                 if (branch->target == 0) {
1450                         jit_patch_at(branch->branch, start_of_block);
1451                         continue;
1452                 }
1453
1454                 for (j = 0; j < cstate->nb_targets; j++) {
1455                         if (cstate->targets[j].offset == branch->target) {
1456                                 jit_patch_at(branch->branch,
1457                                              cstate->targets[j].label);
1458                                 break;
1459                         }
1460                 }
1461
1462                 if (j == cstate->nb_targets)
1463                         pr_err("Unable to find branch target\n");
1464         }
1465
1466         jit_ret();
1467         jit_epilog();
1468
1469         new_fn = lightrec_emit_code(state, block, _jit, &block->code_size);
1470         if (!new_fn) {
1471                 if (!ENABLE_THREADED_COMPILER)
1472                         pr_err("Unable to compile block!\n");
1473                 block->_jit = oldjit;
1474                 jit_clear_state();
1475                 _jit_destroy_state(_jit);
1476                 return -ENOMEM;
1477         }
1478
1479         /* Pause the reaper, because lightrec_reset_lut_offset() may try to set
1480          * the old block->function pointer to the code LUT. */
1481         if (ENABLE_THREADED_COMPILER)
1482                 lightrec_reaper_pause(state->reaper);
1483
1484         block->function = new_fn;
1485         block_clear_flags(block, BLOCK_SHOULD_RECOMPILE);
1486
1487         /* Add compiled function to the LUT */
1488         lut_write(state, lut_offset(block->pc), block->function);
1489
1490         if (ENABLE_THREADED_COMPILER)
1491                 lightrec_reaper_continue(state->reaper);
1492
1493         /* Detect old blocks that have been covered by the new one */
1494         for (i = 0; i < cstate->nb_targets; i++) {
1495                 target = &cstate->targets[i];
1496
1497                 if (!target->offset)
1498                         continue;
1499
1500                 offset = block->pc + target->offset * sizeof(u32);
1501
1502                 /* Pause the reaper while we search for the block until we set
1503                  * the BLOCK_IS_DEAD flag, otherwise the block may be removed
1504                  * under our feet. */
1505                 if (ENABLE_THREADED_COMPILER)
1506                         lightrec_reaper_pause(state->reaper);
1507
1508                 block2 = lightrec_find_block(state->block_cache, offset);
1509                 if (block2) {
1510                         /* No need to check if block2 is compilable - it must
1511                          * be, otherwise block wouldn't be compilable either */
1512
1513                         /* Set the "block dead" flag to prevent the dynarec from
1514                          * recompiling this block */
1515                         old_flags = block_set_flags(block2, BLOCK_IS_DEAD);
1516                 }
1517
1518                 if (ENABLE_THREADED_COMPILER) {
1519                         lightrec_reaper_continue(state->reaper);
1520
1521                         /* If block2 was pending for compilation, cancel it.
1522                          * If it's being compiled right now, wait until it
1523                          * finishes. */
1524                         if (block2)
1525                                 lightrec_recompiler_remove(state->rec, block2);
1526                 }
1527
1528                 /* We know from now on that block2 (if present) isn't going to
1529                  * be compiled. We can override the LUT entry with our new
1530                  * block's entry point. */
1531                 offset = lut_offset(block->pc) + target->offset;
1532                 lut_write(state, offset, jit_address(target->label));
1533
1534                 if (block2) {
1535                         pr_debug("Reap block 0x%08x as it's covered by block "
1536                                  "0x%08x\n", block2->pc, block->pc);
1537
1538                         /* Finally, reap the block. */
1539                         if (!ENABLE_THREADED_COMPILER) {
1540                                 lightrec_unregister_block(state->block_cache, block2);
1541                                 lightrec_free_block(state, block2);
1542                         } else if (!(old_flags & BLOCK_IS_DEAD)) {
1543                                 lightrec_reaper_add(state->reaper,
1544                                                     lightrec_reap_block,
1545                                                     block2);
1546                         }
1547                 }
1548         }
1549
1550         if (ENABLE_DISASSEMBLER) {
1551                 pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
1552                 jit_disassemble();
1553         }
1554
1555         jit_clear_state();
1556
1557         if (fully_tagged)
1558                 old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
1559
1560         if (fully_tagged && !(old_flags & BLOCK_NO_OPCODE_LIST)) {
1561                 pr_debug("Block PC 0x%08x is fully tagged"
1562                          " - free opcode list\n", block->pc);
1563
1564                 if (ENABLE_THREADED_COMPILER) {
1565                         lightrec_reaper_add(state->reaper,
1566                                             lightrec_reap_opcode_list,
1567                                             block->opcode_list);
1568                 } else {
1569                         lightrec_free_opcode_list(state, block->opcode_list);
1570                 }
1571         }
1572
1573         if (oldjit) {
1574                 pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
1575                          block->pc);
1576
1577                 if (ENABLE_THREADED_COMPILER) {
1578                         lightrec_reaper_add(state->reaper,
1579                                             lightrec_reap_jit, oldjit);
1580                         lightrec_reaper_add(state->reaper,
1581                                             lightrec_reap_function, old_fn);
1582                 } else {
1583                         _jit_destroy_state(oldjit);
1584                         lightrec_free_function(state, old_fn);
1585                 }
1586
1587                 lightrec_unregister(MEM_FOR_CODE, old_code_size);
1588         }
1589
1590         return 0;
1591 }
1592
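/* Print Lightrec memory usage statistics, at most once per 0x10000000-cycle
 * window: the report is only emitted when the cycle counter crosses a new
 * 256M-cycle boundary since the previous report. */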
1593 static void lightrec_print_info(struct lightrec_state *state)
1594 {
1595         if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
1596                 pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
1597                         "MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
1598                         lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
1599                         lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
1600                         lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
1601                         lightrec_get_total_mem_usage() / 1024,
1602                         lightrec_get_average_ipi());
1603                 state->old_cycle_counter = state->current_cycle & ~0xfffffff;
1604         }
1605 }
1606
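/* Run recompiled code starting at 'pc' until 'target_cycle' is reached or an
 * exit flag is raised, and return the PC to resume from. A minimal caller
 * sketch (illustrative only; 'running', 'next_event_cycle' and the
 * 'handle_events' helper are assumptions of this example, not part of the
 * API):
 *
 *	u32 pc = 0xbfc00000; // PSX reset vector
 *
 *	while (running) {
 *		pc = lightrec_execute(state, pc, next_event_cycle);
 *
 *		if (lightrec_exit_flags(state) & LIGHTREC_EXIT_SEGFAULT)
 *			break;
 *
 *		handle_events(state); // hypothetical frontend helper
 *	}
 */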
1607 u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
1608 {
1609         s32 (*func)(void *, s32) = (void *)state->dispatcher->function;
1610         void *block_trace;
1611         s32 cycles_delta;
1612
1613         state->exit_flags = LIGHTREC_EXIT_NORMAL;
1614
1615         /* Handle the cycle counter overflowing */
1616         if (unlikely(target_cycle < state->current_cycle))
1617                 target_cycle = UINT_MAX;
1618
1619         state->target_cycle = target_cycle;
1620         state->next_pc = pc;
1621
1622         block_trace = get_next_block_func(state, pc);
1623         if (block_trace) {
1624                 cycles_delta = state->target_cycle - state->current_cycle;
1625
1626                 cycles_delta = (*func)(block_trace, cycles_delta);
1627
1628                 state->current_cycle = state->target_cycle - cycles_delta;
1629         }
1630
1631         if (ENABLE_THREADED_COMPILER)
1632                 lightrec_reaper_reap(state->reaper);
1633
1634         if (LOG_LEVEL >= INFO_L)
1635                 lightrec_print_info(state);
1636
1637         return state->next_pc;
1638 }
1639
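/* Interpreter fallback: emulate one block at a time with the built-in
 * interpreter (the blocks are interpreted rather than executed as native
 * code) until the target cycle count is reached or no block can be created
 * for the current PC. */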
1640 u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc,
1641                              u32 target_cycle)
1642 {
1643         struct block *block;
1644
1645         state->exit_flags = LIGHTREC_EXIT_NORMAL;
1646         state->target_cycle = target_cycle;
1647
1648         do {
1649                 block = lightrec_get_block(state, pc);
1650                 if (!block)
1651                         break;
1652
1653                 pc = lightrec_emulate_block(state, block, pc);
1654
1655                 if (ENABLE_THREADED_COMPILER)
1656                         lightrec_reaper_reap(state->reaper);
1657         } while (state->current_cycle < state->target_cycle);
1658
1659         if (LOG_LEVEL >= INFO_L)
1660                 lightrec_print_info(state);
1661
1662         return pc;
1663 }
1664
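/* Release every resource owned by a block: its opcode list (unless it was
 * already freed when the block became fully tagged), its JIT context, its
 * generated code, and finally the block structure itself. The memory
 * accounting counters are updated accordingly. */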
1665 void lightrec_free_block(struct lightrec_state *state, struct block *block)
1666 {
1667         u8 old_flags;
1668
1669         lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
1670         old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
1671
1672         if (!(old_flags & BLOCK_NO_OPCODE_LIST))
1673                 lightrec_free_opcode_list(state, block->opcode_list);
1674         if (block->_jit)
1675                 _jit_destroy_state(block->_jit);
1676         if (block->function) {
1677                 lightrec_free_function(state, block->function);
1678                 lightrec_unregister(MEM_FOR_CODE, block->code_size);
1679         }
1680         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1681 }
1682
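/* Allocate a compilation state (register cache plus back-pointer to the main
 * state). A single cstate is used when compiling synchronously; with the
 * threaded compiler the recompiler presumably allocates its own. */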
1683 struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
1684 {
1685         struct lightrec_cstate *cstate;
1686
1687         cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
1688         if (!cstate)
1689                 return NULL;
1690
1691         cstate->reg_cache = lightrec_regcache_init(state);
1692         if (!cstate->reg_cache) {
1693                 lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
1694                 return NULL;
1695         }
1696
1697         cstate->state = state;
1698
1699         return cstate;
1700 }
1701
1702 void lightrec_free_cstate(struct lightrec_cstate *cstate)
1703 {
1704         lightrec_free_regcache(cstate->reg_cache);
1705         lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
1706 }
1707
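/* Create and initialize the emulator state. 'map' describes the guest memory
 * regions, indexed by the PSX_MAP_* enum values, and 'ops' must provide at
 * least the cop2_op and enable_ram callbacks (cop2_notify is optional).
 *
 * A minimal caller sketch (the callback bodies and the 'psx_maps' array are
 * assumptions of this example):
 *
 *	static const struct lightrec_ops my_ops = {
 *		.cop2_op = my_cop2_op,
 *		.enable_ram = my_enable_ram,
 *	};
 *
 *	state = lightrec_init(argv[0], psx_maps,
 *			      sizeof(psx_maps) / sizeof(psx_maps[0]), &my_ops);
 */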
1708 struct lightrec_state * lightrec_init(char *argv0,
1709                                       const struct lightrec_mem_map *map,
1710                                       size_t nb,
1711                                       const struct lightrec_ops *ops)
1712 {
1713         const struct lightrec_mem_map *codebuf_map = &map[PSX_MAP_CODE_BUFFER];
1714         struct lightrec_state *state;
1715         uintptr_t addr;
1716         void *tlsf = NULL;
1717         bool with_32bit_lut = false;
1718         size_t lut_size;
1719
1720         /* Sanity-check ops */
1721         if (!ops || !ops->cop2_op || !ops->enable_ram) {
1722                 pr_err("Missing callbacks in lightrec_ops structure\n");
1723                 return NULL;
1724         }
1725
1726         if (ops->cop2_notify)
1727                 pr_debug("Optional cop2_notify callback provided in lightrec_ops\n");
1728         else
1729                 pr_debug("Optional cop2_notify callback not provided in lightrec_ops\n");
1730
1731         if (ENABLE_CODE_BUFFER && nb > PSX_MAP_CODE_BUFFER
1732             && codebuf_map->address) {
1733                 tlsf = tlsf_create_with_pool(codebuf_map->address,
1734                                              codebuf_map->length);
1735                 if (!tlsf) {
1736                         pr_err("Unable to initialize code buffer\n");
1737                         return NULL;
1738                 }
1739
1740                 if (__WORDSIZE == 64) {
1741                         addr = (uintptr_t) codebuf_map->address + codebuf_map->length - 1;
1742                         with_32bit_lut = addr == (u32) addr;
1743                 }
1744         }
1745
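        /* If the whole code buffer lies below the 4 GiB boundary, generated
         * code pointers fit in 32 bits, so each LUT entry can be stored as a
         * u32 instead of a full native pointer, halving the lookup table
         * size on 64-bit hosts. */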
1746         if (with_32bit_lut)
1747                 lut_size = CODE_LUT_SIZE * 4;
1748         else
1749                 lut_size = CODE_LUT_SIZE * sizeof(void *);
1750
1751         init_jit(argv0);
1752
1753         state = calloc(1, sizeof(*state) + lut_size);
1754         if (!state)
1755                 goto err_finish_jit;
1756
1757         lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) + lut_size);
1758
1759         state->tlsf = tlsf;
1760         state->with_32bit_lut = with_32bit_lut;
1761
1762         state->block_cache = lightrec_blockcache_init(state);
1763         if (!state->block_cache)
1764                 goto err_free_state;
1765
1766         if (ENABLE_THREADED_COMPILER) {
1767                 state->rec = lightrec_recompiler_init(state);
1768                 if (!state->rec)
1769                         goto err_free_block_cache;
1770
1771                 state->reaper = lightrec_reaper_init(state);
1772                 if (!state->reaper)
1773                         goto err_free_recompiler;
1774         } else {
1775                 state->cstate = lightrec_create_cstate(state);
1776                 if (!state->cstate)
1777                         goto err_free_block_cache;
1778         }
1779
1780         state->nb_maps = nb;
1781         state->maps = map;
1782
1783         memcpy(&state->ops, ops, sizeof(*ops));
1784
1785         state->dispatcher = generate_dispatcher(state);
1786         if (!state->dispatcher)
1787                 goto err_free_reaper;
1788
1789         state->c_wrapper_block = generate_wrapper(state);
1790         if (!state->c_wrapper_block)
1791                 goto err_free_dispatcher;
1792
1793         state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
1794         state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
1795         state->c_wrappers[C_WRAPPER_MFC] = lightrec_mfc_cb;
1796         state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
1797         state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;
1798
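        /* Pre-compute, for the most frequently used maps, the offset between
         * the host address and the guest base address, so that emitted code
         * can turn a guest address into a host pointer with a single
         * addition. */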
1799         map = &state->maps[PSX_MAP_BIOS];
1800         state->offset_bios = (uintptr_t)map->address - map->pc;
1801
1802         map = &state->maps[PSX_MAP_SCRATCH_PAD];
1803         state->offset_scratch = (uintptr_t)map->address - map->pc;
1804
1805         map = &state->maps[PSX_MAP_HW_REGISTERS];
1806         state->offset_io = (uintptr_t)map->address - map->pc;
1807
1808         map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
1809         state->offset_ram = (uintptr_t)map->address - map->pc;
1810
1811         if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
1812             state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
1813             state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
1814                 state->mirrors_mapped = true;
1815
1816         if (state->offset_bios == 0 &&
1817             state->offset_scratch == 0 &&
1818             state->offset_ram == 0 &&
1819             state->offset_io == 0 &&
1820             state->mirrors_mapped) {
1821                 pr_info("Memory map is perfect. Emitted code will be optimal.\n");
1822         } else {
1823                 pr_info("Memory map is sub-par. Emitted code will be slow.\n");
1824         }
1825
1826         if (state->with_32bit_lut)
1827                 pr_info("Using 32-bit LUT\n");
1828
1829         return state;
1830
1831 err_free_dispatcher:
1832         lightrec_free_block(state, state->dispatcher);
1833 err_free_reaper:
1834         if (ENABLE_THREADED_COMPILER)
1835                 lightrec_reaper_destroy(state->reaper);
1836 err_free_recompiler:
1837         if (ENABLE_THREADED_COMPILER)
1838                 lightrec_free_recompiler(state->rec);
1839         else
1840                 lightrec_free_cstate(state->cstate);
1841 err_free_block_cache:
1842         lightrec_free_block_cache(state->block_cache);
1843 err_free_state:
1844         lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
1845                             lut_elm_size(state) * CODE_LUT_SIZE);
1846         free(state);
1847 err_finish_jit:
1848         finish_jit();
1849         if (ENABLE_CODE_BUFFER && tlsf)
1850                 tlsf_destroy(tlsf);
1851         return NULL;
1852 }
1853
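/* Tear down the emulator state: force a final statistics print, then free
 * the block cache, the dispatcher and C-wrapper blocks, the compiler or
 * recompiler state, the JIT context and, if used, the code buffer pool. */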
1854 void lightrec_destroy(struct lightrec_state *state)
1855 {
1856         /* Force an info print on destroy */
1857         state->current_cycle = ~state->current_cycle;
1858         lightrec_print_info(state);
1859
1860         lightrec_free_block_cache(state->block_cache);
1861         lightrec_free_block(state, state->dispatcher);
1862         lightrec_free_block(state, state->c_wrapper_block);
1863
1864         if (ENABLE_THREADED_COMPILER) {
1865                 lightrec_free_recompiler(state->rec);
1866                 lightrec_reaper_destroy(state->reaper);
1867         } else {
1868                 lightrec_free_cstate(state->cstate);
1869         }
1870
1871         finish_jit();
1872         if (ENABLE_CODE_BUFFER && state->tlsf)
1873                 tlsf_destroy(state->tlsf);
1874
1875         lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
1876                             lut_elm_size(state) * CODE_LUT_SIZE);
1877         free(state);
1878 }
1879
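/* Invalidate any recompiled code covering [addr, addr + len). The LUT
 * entries for the affected words are cleared, so the next jump to one of
 * these addresses falls back to the lookup/compile path. One entry covers
 * one 32-bit word, hence ((len + 3) / 4) entries are cleared. Only the
 * kernel/user RAM map and its mirrors are handled; other maps are ignored. */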
1880 void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
1881 {
1882         u32 kaddr = kunseg(addr & ~0x3);
1883         enum psx_map idx = lightrec_get_map_idx(state, kaddr);
1884
1885         switch (idx) {
1886         case PSX_MAP_MIRROR1:
1887         case PSX_MAP_MIRROR2:
1888         case PSX_MAP_MIRROR3:
1889                 /* Handle mirrors */
1890                 kaddr &= RAM_SIZE - 1;
1891                 fallthrough;
1892         case PSX_MAP_KERNEL_USER_RAM:
1893                 break;
1894         default:
1895                 return;
1896         }
1897
1898         memset(lut_address(state, lut_offset(kaddr)), 0,
1899                ((len + 3) / 4) * lut_elm_size(state));
1900 }
1901
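/* Clear the whole lookup table at once. The compiled blocks themselves are
 * not freed; they remain in the block cache and can be picked up again by a
 * later lookup. */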
1902 void lightrec_invalidate_all(struct lightrec_state *state)
1903 {
1904         memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
1905 }
1906
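/* Select how stores invalidate code. When 'dma_only' is true, the default
 * store handlers skip the per-store invalidation and the frontend is
 * expected to call lightrec_invalidate() itself (typically after DMA
 * transfers). Toggling the mode flushes the whole LUT so that no stale code
 * survives the transition. */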
1907 void lightrec_set_invalidate_mode(struct lightrec_state *state, bool dma_only)
1908 {
1909         if (state->invalidate_from_dma_only != dma_only)
1910                 lightrec_invalidate_all(state);
1911
1912         state->invalidate_from_dma_only = dma_only;
1913 }
1914
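/* Record an exit condition. Raising a non-normal flag also clamps
 * target_cycle down to the current cycle count, which makes the dispatcher
 * return to the caller as soon as possible. */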
1915 void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
1916 {
1917         if (flags != LIGHTREC_EXIT_NORMAL) {
1918                 state->exit_flags |= flags;
1919                 state->target_cycle = state->current_cycle;
1920         }
1921 }
1922
1923 u32 lightrec_exit_flags(struct lightrec_state *state)
1924 {
1925         return state->exit_flags;
1926 }
1927
1928 u32 lightrec_current_cycle_count(const struct lightrec_state *state)
1929 {
1930         return state->current_cycle;
1931 }
1932
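/* Reset the cycle counter (e.g. when the frontend rebases its time). The
 * target cycle is bumped if needed so that it never ends up in the past. */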
1933 void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
1934 {
1935         state->current_cycle = cycles;
1936
1937         if (state->target_cycle < cycles)
1938                 state->target_cycle = cycles;
1939 }
1940
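/* Update the target cycle count. The request is ignored while an exit flag
 * is pending, and the target is clamped so that it never lies before the
 * current cycle count. */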
1941 void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
1942 {
1943         if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
1944                 if (cycles < state->current_cycle)
1945                         cycles = state->current_cycle;
1946
1947                 state->target_cycle = cycles;
1948         }
1949 }
1950
1951 struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
1952 {
1953         return &state->regs;
1954 }