Update lightrec 20220910 (#686)
[pcsx_rearmed.git] / deps / lightrec / lightrec.c
1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3  * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
4  */
5
6 #include "blockcache.h"
7 #include "debug.h"
8 #include "disassembler.h"
9 #include "emitter.h"
10 #include "interpreter.h"
11 #include "lightrec-config.h"
12 #include "lightning-wrapper.h"
13 #include "lightrec.h"
14 #include "memmanager.h"
15 #include "reaper.h"
16 #include "recompiler.h"
17 #include "regcache.h"
18 #include "optimizer.h"
19 #include "tlsf/tlsf.h"
20
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <limits.h>
24 #if ENABLE_THREADED_COMPILER
25 #include <stdatomic.h>
26 #endif
27 #include <stdbool.h>
28 #include <stddef.h>
29 #include <string.h>
30
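/* Helper macro: build a mask with bits 'h' down to 'l' (inclusive) set,
 * e.g. GENMASK(15, 8) == 0xff00. */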
31 #define GENMASK(h, l) \
32         (((uintptr_t)-1 << (l)) & ((uintptr_t)-1 >> (__WORDSIZE - 1 - (h))))
33
34 static struct block * lightrec_precompile_block(struct lightrec_state *state,
35                                                 u32 pc);
36 static bool lightrec_block_is_fully_tagged(const struct block *block);
37
38 static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
39 static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);
40
41 static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
42                                 void *host, u32 addr, u8 data)
43 {
44         *(u8 *)host = data;
45
46         if (!state->invalidate_from_dma_only)
47                 lightrec_invalidate(state, addr, 1);
48 }
49
50 static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
51                                 void *host, u32 addr, u16 data)
52 {
53         *(u16 *)host = HTOLE16(data);
54
55         if (!state->invalidate_from_dma_only)
56                 lightrec_invalidate(state, addr, 2);
57 }
58
59 static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
60                                 void *host, u32 addr, u32 data)
61 {
62         *(u32 *)host = HTOLE32(data);
63
64         if (!state->invalidate_from_dma_only)
65                 lightrec_invalidate(state, addr, 4);
66 }
67
68 static u8 lightrec_default_lb(struct lightrec_state *state,
69                               u32 opcode, void *host, u32 addr)
70 {
71         return *(u8 *)host;
72 }
73
74 static u16 lightrec_default_lh(struct lightrec_state *state,
75                                u32 opcode, void *host, u32 addr)
76 {
77         return LE16TOH(*(u16 *)host);
78 }
79
80 static u32 lightrec_default_lw(struct lightrec_state *state,
81                                u32 opcode, void *host, u32 addr)
82 {
83         return LE32TOH(*(u32 *)host);
84 }
85
86 static const struct lightrec_mem_map_ops lightrec_default_ops = {
87         .sb = lightrec_default_sb,
88         .sh = lightrec_default_sh,
89         .sw = lightrec_default_sw,
90         .lb = lightrec_default_lb,
91         .lh = lightrec_default_lh,
92         .lw = lightrec_default_lw,
93 };
94
95 static void __segfault_cb(struct lightrec_state *state, u32 addr,
96                           const struct block *block)
97 {
98         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
99         pr_err("Segmentation fault in recompiled code: invalid "
100                "load/store at address 0x%08x\n", addr);
101         if (block)
102                 pr_err("Was executing block PC 0x%08x\n", block->pc);
103 }
104
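/* Software fallback for the unaligned store opcodes. SWL/SWR read the
 * aligned 32-bit word containing 'addr' through the map's accessors, merge
 * in the relevant bytes of the register value, and write the word back. */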
105 static void lightrec_swl(struct lightrec_state *state,
106                          const struct lightrec_mem_map_ops *ops,
107                          u32 opcode, void *host, u32 addr, u32 data)
108 {
109         unsigned int shift = addr & 0x3;
110         unsigned int mask = GENMASK(31, (shift + 1) * 8);
111         u32 old_data;
112
113         /* Align to 32 bits */
114         addr &= ~3;
115         host = (void *)((uintptr_t)host & ~3);
116
117         old_data = ops->lw(state, opcode, host, addr);
118
119         data = (data >> ((3 - shift) * 8)) | (old_data & mask);
120
121         ops->sw(state, opcode, host, addr, data);
122 }
123
124 static void lightrec_swr(struct lightrec_state *state,
125                          const struct lightrec_mem_map_ops *ops,
126                          u32 opcode, void *host, u32 addr, u32 data)
127 {
128         unsigned int shift = addr & 0x3;
129         unsigned int mask = (1 << (shift * 8)) - 1;
130         u32 old_data;
131
132         /* Align to 32 bits */
133         addr &= ~3;
134         host = (void *)((uintptr_t)host & ~3);
135
136         old_data = ops->lw(state, opcode, host, addr);
137
138         data = (data << (shift * 8)) | (old_data & mask);
139
140         ops->sw(state, opcode, host, addr, data);
141 }
142
143 static void lightrec_swc2(struct lightrec_state *state, union code op,
144                           const struct lightrec_mem_map_ops *ops,
145                           void *host, u32 addr)
146 {
147         u32 data = lightrec_mfc2(state, op.i.rt);
148
149         ops->sw(state, op.opcode, host, addr, data);
150 }
151
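/* Software fallback for the unaligned load opcodes. LWL/LWR read the
 * aligned 32-bit word containing 'addr' and merge the relevant bytes into
 * the current register value 'data', which is returned to the caller. */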
152 static u32 lightrec_lwl(struct lightrec_state *state,
153                         const struct lightrec_mem_map_ops *ops,
154                         u32 opcode, void *host, u32 addr, u32 data)
155 {
156         unsigned int shift = addr & 0x3;
157         unsigned int mask = (1 << (24 - shift * 8)) - 1;
158         u32 old_data;
159
160         /* Align to 32 bits */
161         addr &= ~3;
162         host = (void *)((uintptr_t)host & ~3);
163
164         old_data = ops->lw(state, opcode, host, addr);
165
166         return (data & mask) | (old_data << (24 - shift * 8));
167 }
168
169 static u32 lightrec_lwr(struct lightrec_state *state,
170                         const struct lightrec_mem_map_ops *ops,
171                         u32 opcode, void *host, u32 addr, u32 data)
172 {
173         unsigned int shift = addr & 0x3;
174         unsigned int mask = GENMASK(31, 32 - shift * 8);
175         u32 old_data;
176
177         /* Align to 32 bits */
178         addr &= ~3;
179         host = (void *)((uintptr_t)host & ~3);
180
181         old_data = ops->lw(state, opcode, host, addr);
182
183         return (data & mask) | (old_data >> (shift * 8));
184 }
185
186 static void lightrec_lwc2(struct lightrec_state *state, union code op,
187                           const struct lightrec_mem_map_ops *ops,
188                           void *host, u32 addr)
189 {
190         u32 data = ops->lw(state, op.opcode, host, addr);
191
192         lightrec_mtc2(state, op.i.rt, data);
193 }
194
195 static void lightrec_invalidate_map(struct lightrec_state *state,
196                 const struct lightrec_mem_map *map, u32 addr, u32 len)
197 {
198         if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
199                 memset(lut_address(state, lut_offset(addr)), 0,
200                        ((len + 3) / 4) * lut_elm_size(state));
201         }
202 }
203
204 enum psx_map
205 lightrec_get_map_idx(struct lightrec_state *state, u32 kaddr)
206 {
207         const struct lightrec_mem_map *map;
208         unsigned int i;
209
210         for (i = 0; i < state->nb_maps; i++) {
211                 map = &state->maps[i];
212
213                 if (kaddr >= map->pc && kaddr < map->pc + map->length)
214                         return (enum psx_map) i;
215         }
216
217         return PSX_MAP_UNKNOWN;
218 }
219
220 const struct lightrec_mem_map *
221 lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
222 {
223         const struct lightrec_mem_map *map;
224         enum psx_map idx;
225         u32 addr;
226
227         idx = lightrec_get_map_idx(state, kaddr);
228         if (idx == PSX_MAP_UNKNOWN)
229                 return NULL;
230
231         map = &state->maps[idx];
232         addr = kaddr - map->pc;
233
234         while (map->mirror_of)
235                 map = map->mirror_of;
236
237         if (host)
238                 *host = map->address + addr;
239
240         return map;
241 }
242
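/* Common slow path for all load/store opcodes: add the immediate offset to
 * the base address, resolve the memory map, tag the opcode's I/O mode in
 * '*flags' (hardware I/O vs. direct memory), then dispatch to the map's
 * accessors or to the default little-endian accessors above. Load opcodes
 * return the value read, store opcodes return 0. */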
243 u32 lightrec_rw(struct lightrec_state *state, union code op,
244                 u32 addr, u32 data, u32 *flags, struct block *block)
245 {
246         const struct lightrec_mem_map *map;
247         const struct lightrec_mem_map_ops *ops;
248         u32 opcode = op.opcode;
249         void *host;
250
251         addr += (s16) op.i.imm;
252
253         map = lightrec_get_map(state, &host, kunseg(addr));
254         if (!map) {
255                 __segfault_cb(state, addr, block);
256                 return 0;
257         }
258
259         if (unlikely(map->ops)) {
260                 if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
261                         *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
262
263                 ops = map->ops;
264         } else {
265                 if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
266                         *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
267
268                 ops = &lightrec_default_ops;
269         }
270
271         switch (op.i.op) {
272         case OP_SB:
273                 ops->sb(state, opcode, host, addr, (u8) data);
274                 return 0;
275         case OP_SH:
276                 ops->sh(state, opcode, host, addr, (u16) data);
277                 return 0;
278         case OP_SWL:
279                 lightrec_swl(state, ops, opcode, host, addr, data);
280                 return 0;
281         case OP_SWR:
282                 lightrec_swr(state, ops, opcode, host, addr, data);
283                 return 0;
284         case OP_SW:
285                 ops->sw(state, opcode, host, addr, data);
286                 return 0;
287         case OP_SWC2:
288                 lightrec_swc2(state, op, ops, host, addr);
289                 return 0;
290         case OP_LB:
291                 return (s32) (s8) ops->lb(state, opcode, host, addr);
292         case OP_LBU:
293                 return ops->lb(state, opcode, host, addr);
294         case OP_LH:
295                 return (s32) (s16) ops->lh(state, opcode, host, addr);
296         case OP_LHU:
297                 return ops->lh(state, opcode, host, addr);
298         case OP_LWC2:
299                 lightrec_lwc2(state, op, ops, host, addr);
300                 return 0;
301         case OP_LWL:
302                 return lightrec_lwl(state, ops, opcode, host, addr, data);
303         case OP_LWR:
304                 return lightrec_lwr(state, ops, opcode, host, addr, data);
305         case OP_LW:
306         default:
307                 return ops->lw(state, opcode, host, addr);
308         }
309 }
310
311 static void lightrec_rw_helper(struct lightrec_state *state,
312                                union code op, u32 *flags,
313                                struct block *block)
314 {
315         u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
316                               state->regs.gpr[op.i.rt], flags, block);
317
318         switch (op.i.op) {
319         case OP_LB:
320         case OP_LBU:
321         case OP_LH:
322         case OP_LHU:
323         case OP_LWL:
324         case OP_LWR:
325         case OP_LW:
326                 if (op.i.rt)
327                         state->regs.gpr[op.i.rt] = ret;
328                 fallthrough;
329         default:
330                 break;
331         }
332 }
333
334 static void lightrec_rw_cb(struct lightrec_state *state, u32 arg)
335 {
336         lightrec_rw_helper(state, (union code) arg, NULL, NULL);
337 }
338
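/* C wrapper for load/store opcodes that have not been tagged yet. 'arg'
 * carries the opcode's offset in its lower 16 bits and a block-cache lookup
 * key in its upper 16 bits; the access is performed and tagged through
 * lightrec_rw_helper(), then the block is flagged for recompilation so that
 * the next compilation can take the new tag into account. */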
339 static void lightrec_rw_generic_cb(struct lightrec_state *state, u32 arg)
340 {
341         struct block *block;
342         struct opcode *op;
343         bool was_tagged;
344         u16 offset = (u16)arg;
345         u16 old_flags;
346
347         block = lightrec_find_block_from_lut(state->block_cache,
348                                              arg >> 16, state->next_pc);
349         if (unlikely(!block)) {
350                 pr_err("rw_generic: No block found in LUT for PC 0x%x offset 0x%x\n",
351                          state->next_pc, offset);
352                 lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
353                 return;
354         }
355
356         op = &block->opcode_list[offset];
357         was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(op->flags);
358
359         lightrec_rw_helper(state, op->c, &op->flags, block);
360
361         if (!was_tagged) {
362                 old_flags = block_set_flags(block, BLOCK_SHOULD_RECOMPILE);
363
364                 if (!(old_flags & BLOCK_SHOULD_RECOMPILE)) {
365                         pr_debug("Opcode of block at PC 0x%08x has been tagged"
366                                  " - flag for recompilation\n", block->pc);
367
368                         lut_write(state, lut_offset(block->pc), NULL);
369                 }
370         }
371 }
372
373 static u32 clamp_s32(s32 val, s32 min, s32 max)
374 {
375         return val < min ? min : val > max ? max : val;
376 }
377
378 static u16 load_u16(u32 *ptr)
379 {
380         return ((struct u16x2 *) ptr)->l;
381 }
382
383 static void store_u16(u32 *ptr, u16 value)
384 {
385         ((struct u16x2 *) ptr)->l = value;
386 }
387
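/* Read a GTE (COP2) data register, applying the register-specific quirks:
 * 16-bit registers are sign- or zero-extended, IRGB/ORGB (28/29) are
 * recomputed from IR1-IR3, and register 15 (SXYP) reads as register 14. */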
388 static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
389 {
390         s16 gteir1, gteir2, gteir3;
391
392         switch (reg) {
393         case 1:
394         case 3:
395         case 5:
396         case 8:
397         case 9:
398         case 10:
399         case 11:
400                 return (s32)(s16) load_u16(&state->regs.cp2d[reg]);
401         case 7:
402         case 16:
403         case 17:
404         case 18:
405         case 19:
406                 return load_u16(&state->regs.cp2d[reg]);
407         case 28:
408         case 29:
409                 gteir1 = (s16) load_u16(&state->regs.cp2d[9]);
410                 gteir2 = (s16) load_u16(&state->regs.cp2d[10]);
411                 gteir3 = (s16) load_u16(&state->regs.cp2d[11]);
412
413                 return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
414                         clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
415                         clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
416         case 15:
417                 reg = 14;
418                 fallthrough;
419         default:
420                 return state->regs.cp2d[reg];
421         }
422 }
423
424 u32 lightrec_mfc(struct lightrec_state *state, union code op)
425 {
426         u32 val;
427
428         if (op.i.op == OP_CP0)
429                 return state->regs.cp0[op.r.rd];
430         else if (op.r.rs == OP_CP2_BASIC_MFC2)
431                 return lightrec_mfc2(state, op.r.rd);
432
433         val = state->regs.cp2c[op.r.rd];
434
435         switch (op.r.rd) {
436         case 4:
437         case 12:
438         case 20:
439         case 26:
440         case 27:
441         case 29:
442         case 30:
443                 return (u32)(s16)val;
444         default:
445                 return val;
446         }
447 }
448
449 static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
450 {
451         u32 status, oldstatus, cause;
452
453         switch (reg) {
454         case 1:
455         case 4:
456         case 8:
457         case 14:
458         case 15:
459                 /* Those registers are read-only */
460                 return;
461         default:
462                 break;
463         }
464
465         if (reg == 12) {
466                 status = state->regs.cp0[12];
467                 oldstatus = status;
468
469                 if (status & ~data & BIT(16)) {
470                         state->ops.enable_ram(state, true);
471                         lightrec_invalidate_all(state);
472                 } else if (~status & data & BIT(16)) {
473                         state->ops.enable_ram(state, false);
474                 }
475         }
476
477         if (reg == 13) {
478                 state->regs.cp0[13] &= ~0x300;
479                 state->regs.cp0[13] |= data & 0x300;
480         } else {
481                 state->regs.cp0[reg] = data;
482         }
483
484         if (reg == 12 || reg == 13) {
485                 cause = state->regs.cp0[13];
486                 status = state->regs.cp0[12];
487
488                 /* Handle software interrupts */
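                /* (the trailing "& status" tests bit 0 of the Status
                 * register, i.e. IEc, the current interrupt enable flag) */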
489                 if (!!(status & cause & 0x300) & status)
490                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
491
492                 /* Handle hardware interrupts */
493                 if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
494                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
495         }
496 }
497
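/* Count the leading bits that are equal to the sign bit, sign bit included
 * (the semantics of the GTE's LZCR register): 32 for 0 and -1, 1 for
 * 0x40000000. Uses __builtin_clrsb() when the compiler provides it. */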
498 static u32 count_leading_bits(s32 data)
499 {
500         u32 cnt = 33;
501
502 #ifdef __has_builtin
503 #if __has_builtin(__builtin_clrsb)
504         return 1 + __builtin_clrsb(data);
505 #endif
506 #endif
507
508         data = (data ^ (data >> 31)) << 1;
509
510         do {
511                 cnt -= 1;
512                 data = (s32)((u32)data >> 1); /* logical shift so the loop terminates */
513         } while (data);
514
515         return cnt;
516 }
517
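/* Write a GTE (COP2) data register. Register 15 (SXYP) pushes the SXY FIFO,
 * register 28 (IRGB) expands its 5:5:5 colour into IR1-IR3, register 30
 * (LZCS) also updates LZCR (31), and register 31 itself is read-only. */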
518 static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
519 {
520         switch (reg) {
521         case 15:
522                 state->regs.cp2d[12] = state->regs.cp2d[13];
523                 state->regs.cp2d[13] = state->regs.cp2d[14];
524                 state->regs.cp2d[14] = data;
525                 break;
526         case 28:
527                 state->regs.cp2d[9] = (data << 7) & 0xf80;
528                 state->regs.cp2d[10] = (data << 2) & 0xf80;
529                 state->regs.cp2d[11] = (data >> 3) & 0xf80;
530                 break;
531         case 31:
532                 return;
533         case 30:
534                 state->regs.cp2d[31] = count_leading_bits((s32) data);
535                 fallthrough;
536         default:
537                 state->regs.cp2d[reg] = data;
538                 break;
539         }
540 }
541
542 static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
543 {
544         switch (reg) {
545         case 4:
546         case 12:
547         case 20:
548         case 26:
549         case 27:
550         case 29:
551         case 30:
552                 store_u16(&state->regs.cp2c[reg], data);
553                 break;
554         case 31:
555                 data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
556                 fallthrough;
557         default:
558                 state->regs.cp2c[reg] = data;
559                 break;
560         }
561 }
562
563 void lightrec_mtc(struct lightrec_state *state, union code op, u32 data)
564 {
565         if (op.i.op == OP_CP0)
566                 lightrec_mtc0(state, op.r.rd, data);
567         else if (op.r.rs == OP_CP2_BASIC_CTC2)
568                 lightrec_ctc2(state, op.r.rd, data);
569         else
570                 lightrec_mtc2(state, op.r.rd, data);
571 }
572
573 static void lightrec_mtc_cb(struct lightrec_state *state, u32 arg)
574 {
575         union code op = (union code) arg;
576
577         lightrec_mtc(state, op, state->regs.gpr[op.r.rt]);
578 }
579
580 void lightrec_rfe(struct lightrec_state *state)
581 {
582         u32 status;
583
584         /* Read CP0 Status register (r12) */
585         status = state->regs.cp0[12];
586
587         /* Switch the bits */
588         status = ((status & 0x3c) >> 2) | (status & ~0xf);
589
590         /* Write it back */
591         lightrec_mtc0(state, 12, status);
592 }
593
594 void lightrec_cp(struct lightrec_state *state, union code op)
595 {
596         if (op.i.op == OP_CP0) {
597                 pr_err("Invalid CP opcode to coprocessor #0\n");
598                 return;
599         }
600
601         (*state->ops.cop2_op)(state, op.opcode);
602 }
603
604 static void lightrec_cp_cb(struct lightrec_state *state, u32 arg)
605 {
606         lightrec_cp(state, (union code) arg);
607 }
608
609 static struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
610 {
611         struct block *block = lightrec_find_block(state->block_cache, pc);
612         u8 old_flags;
613
614         if (block && lightrec_block_is_outdated(state, block)) {
615                 pr_debug("Block at PC 0x%08x is outdated!\n", block->pc);
616
617                 old_flags = block_set_flags(block, BLOCK_IS_DEAD);
618                 if (!(old_flags & BLOCK_IS_DEAD)) {
619                         /* Make sure the recompiler isn't processing the block
620                          * we'll destroy */
621                         if (ENABLE_THREADED_COMPILER)
622                                 lightrec_recompiler_remove(state->rec, block);
623
624                         lightrec_unregister_block(state->block_cache, block);
625                         remove_from_code_lut(state->block_cache, block);
626                         lightrec_free_block(state, block);
627                 }
628
629                 block = NULL;
630         }
631
632         if (!block) {
633                 block = lightrec_precompile_block(state, pc);
634                 if (!block) {
635                         pr_err("Unable to recompile block at PC 0x%x\n", pc);
636                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
637                         return NULL;
638                 }
639
640                 lightrec_register_block(state->block_cache, block);
641         }
642
643         return block;
644 }
645
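/* Resolve a PSX PC into a pointer to executable native code. The fast path
 * is the code LUT; otherwise the block is fetched or precompiled, possibly
 * interpreted for a first profiling pass, and compiled (inline or through
 * the recompiler thread). Loops until a function is available, an exit flag
 * is raised, or the cycle target is reached. */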
646 static void * get_next_block_func(struct lightrec_state *state, u32 pc)
647 {
648         struct block *block;
649         bool should_recompile;
650         void *func;
651         int err;
652
653         for (;;) {
654                 func = lut_read(state, lut_offset(pc));
655                 if (func && func != state->get_next_block)
656                         break;
657
658                 block = lightrec_get_block(state, pc);
659
660                 if (unlikely(!block))
661                         break;
662
663                 if (OPT_REPLACE_MEMSET &&
664                     block_has_flag(block, BLOCK_IS_MEMSET)) {
665                         func = state->memset_func;
666                         break;
667                 }
668
669                 should_recompile = block_has_flag(block, BLOCK_SHOULD_RECOMPILE) &&
670                         !block_has_flag(block, BLOCK_IS_DEAD);
671
672                 if (unlikely(should_recompile)) {
673                         pr_debug("Block at PC 0x%08x should recompile\n", pc);
674
675                         if (ENABLE_THREADED_COMPILER) {
676                                 lightrec_recompiler_add(state->rec, block);
677                         } else {
678                                 err = lightrec_compile_block(state->cstate, block);
679                                 if (err) {
680                                         state->exit_flags = LIGHTREC_EXIT_NOMEM;
681                                         return NULL;
682                                 }
683                         }
684                 }
685
686                 if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
687                         func = lightrec_recompiler_run_first_pass(state, block, &pc);
688                 else
689                         func = block->function;
690
691                 if (likely(func))
692                         break;
693
694                 if (unlikely(block_has_flag(block, BLOCK_NEVER_COMPILE))) {
695                         pc = lightrec_emulate_block(state, block, pc);
696
697                 } else if (!ENABLE_THREADED_COMPILER) {
698                         /* Block wasn't compiled yet - run the interpreter */
699                         if (block_has_flag(block, BLOCK_FULLY_TAGGED))
700                                 pr_debug("Block fully tagged, skipping first pass\n");
701                         else if (ENABLE_FIRST_PASS && likely(!should_recompile))
702                                 pc = lightrec_emulate_block(state, block, pc);
703
704                         /* Then compile it using the profiled data */
705                         err = lightrec_compile_block(state->cstate, block);
706                         if (err) {
707                                 state->exit_flags = LIGHTREC_EXIT_NOMEM;
708                                 return NULL;
709                         }
710                 } else if (unlikely(block_has_flag(block, BLOCK_IS_DEAD))) {
711                         /*
712                          * If the block is dead but has never been compiled,
713                          * then its function pointer is NULL and we cannot
714                          * execute the block. In that case, reap all the dead
715                          * blocks now, and in the next loop we will create a
716                          * new block.
717                          */
718                         lightrec_reaper_reap(state->reaper);
719                 } else {
720                         lightrec_recompiler_add(state->rec, block);
721                 }
722
723                 if (state->exit_flags != LIGHTREC_EXIT_NORMAL ||
724                     state->current_cycle >= state->target_cycle)
725                         break;
726         }
727
728         state->next_pc = pc;
729         return func;
730 }
731
732 static void * lightrec_alloc_code(struct lightrec_state *state, size_t size)
733 {
734         void *code;
735
736         if (ENABLE_THREADED_COMPILER)
737                 lightrec_code_alloc_lock(state);
738
739         code = tlsf_malloc(state->tlsf, size);
740
741         if (ENABLE_THREADED_COMPILER)
742                 lightrec_code_alloc_unlock(state);
743
744         return code;
745 }
746
747 static void lightrec_realloc_code(struct lightrec_state *state,
748                                   void *ptr, size_t size)
749 {
750         /* NOTE: 'size' MUST NOT be larger than the size specified during
751          * the allocation. */
752
753         if (ENABLE_THREADED_COMPILER)
754                 lightrec_code_alloc_lock(state);
755
756         tlsf_realloc(state->tlsf, ptr, size);
757
758         if (ENABLE_THREADED_COMPILER)
759                 lightrec_code_alloc_unlock(state);
760 }
761
762 static void lightrec_free_code(struct lightrec_state *state, void *ptr)
763 {
764         if (ENABLE_THREADED_COMPILER)
765                 lightrec_code_alloc_lock(state);
766
767         tlsf_free(state->tlsf, ptr);
768
769         if (ENABLE_THREADED_COMPILER)
770                 lightrec_code_alloc_unlock(state);
771 }
772
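/* Finalize a Lightning JIT state into executable code. When the external
 * code buffer is enabled, the code is placed in the TLSF-managed buffer and
 * the allocation is shrunk to the actual emitted size afterwards. Returns
 * NULL on allocation failure. */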
773 static void * lightrec_emit_code(struct lightrec_state *state,
774                                  const struct block *block,
775                                  jit_state_t *_jit, unsigned int *size)
776 {
777         bool has_code_buffer = ENABLE_CODE_BUFFER && state->tlsf;
778         jit_word_t code_size, new_code_size;
779         void *code;
780
781         jit_realize();
782
783         if (!ENABLE_DISASSEMBLER)
784                 jit_set_data(NULL, 0, JIT_DISABLE_DATA | JIT_DISABLE_NOTE);
785
786         if (has_code_buffer) {
787                 jit_get_code(&code_size);
788                 code = lightrec_alloc_code(state, (size_t) code_size);
789
790                 if (!code) {
791                         if (ENABLE_THREADED_COMPILER) {
792                                 /* If we're using the threaded compiler, return
793                                  * an allocation error here. The threaded
794                                  * compiler will then empty its job queue and
795                                  * request a code flush using the reaper. */
796                                 return NULL;
797                         }
798
799                         /* Remove outdated blocks, and try again */
800                         lightrec_remove_outdated_blocks(state->block_cache, block);
801
802                         pr_debug("Retrying to allocate %zu bytes...\n", (size_t) code_size);
803
804                         code = lightrec_alloc_code(state, code_size);
805                         if (!code) {
806                                 pr_err("Could not alloc even after removing old blocks!\n");
807                                 return NULL;
808                         }
809                 }
810
811                 jit_set_code(code, code_size);
812         }
813
814         code = jit_emit();
815
816         jit_get_code(&new_code_size);
817         lightrec_register(MEM_FOR_CODE, new_code_size);
818
819         if (has_code_buffer) {
820                 lightrec_realloc_code(state, code, (size_t) new_code_size);
821
822                 pr_debug("Creating code block at address 0x%" PRIxPTR ", "
823                          "code size: %" PRIuPTR " new: %" PRIuPTR "\n",
824                          (uintptr_t) code, code_size, new_code_size);
825         }
826
827         *size = (unsigned int) new_code_size;
828
829         return code;
830 }
831
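/* Generate the trampoline used to call the C wrappers from recompiled code.
 * It has one entry point per entry of state->c_wrappers[]; each loads the
 * corresponding C function pointer, then common code spills the temporary
 * registers, updates state->current_cycle from the cycle register, calls the
 * C function, recomputes the remaining cycles and restores the temporaries
 * before returning. */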
832 static struct block * generate_wrapper(struct lightrec_state *state)
833 {
834         struct block *block;
835         jit_state_t *_jit;
836         unsigned int i;
837         jit_node_t *addr[C_WRAPPERS_COUNT - 1];
838         jit_node_t *to_end[C_WRAPPERS_COUNT - 1];
839
840         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
841         if (!block)
842                 goto err_no_mem;
843
844         _jit = jit_new_state();
845         if (!_jit)
846                 goto err_free_block;
847
848         jit_name("RW wrapper");
849         jit_note(__FILE__, __LINE__);
850
851         /* Wrapper entry point */
852         jit_prolog();
853         jit_tramp(256);
854
855         /* Add entry points */
856         for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
857                 jit_ldxi(JIT_R1, LIGHTREC_REG_STATE,
858                          offsetof(struct lightrec_state, c_wrappers[i]));
859                 to_end[i - 1] = jit_b();
860                 addr[i - 1] = jit_indirect();
861         }
862
863         jit_ldxi(JIT_R1, LIGHTREC_REG_STATE,
864                  offsetof(struct lightrec_state, c_wrappers[0]));
865
866         for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
867                 jit_patch(to_end[i]);
868
869         jit_epilog();
870         jit_prolog();
871
872         /* Save all temporaries on stack */
873         for (i = 0; i < NUM_TEMPS; i++) {
874                 if (i + FIRST_TEMP != 1) {
875                         jit_stxi(offsetof(struct lightrec_state, wrapper_regs[i]),
876                                  LIGHTREC_REG_STATE, JIT_R(i + FIRST_TEMP));
877                 }
878         }
879
880         jit_getarg(JIT_R2, jit_arg());
881
882         jit_prepare();
883         jit_pushargr(LIGHTREC_REG_STATE);
884         jit_pushargr(JIT_R2);
885
886         jit_ldxi_ui(JIT_R2, LIGHTREC_REG_STATE,
887                     offsetof(struct lightrec_state, target_cycle));
888
889         /* state->current_cycle = state->target_cycle - delta; */
890         jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, LIGHTREC_REG_CYCLE);
891         jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
892                    LIGHTREC_REG_STATE, LIGHTREC_REG_CYCLE);
893
894         /* Call the wrapper function */
895         jit_finishr(JIT_R1);
896
897         /* delta = state->target_cycle - state->current_cycle */
898         jit_ldxi_ui(LIGHTREC_REG_CYCLE, LIGHTREC_REG_STATE,
899                     offsetof(struct lightrec_state, current_cycle));
900         jit_ldxi_ui(JIT_R1, LIGHTREC_REG_STATE,
901                     offsetof(struct lightrec_state, target_cycle));
902         jit_subr(LIGHTREC_REG_CYCLE, JIT_R1, LIGHTREC_REG_CYCLE);
903
904         /* Restore temporaries from stack */
905         for (i = 0; i < NUM_TEMPS; i++) {
906                 if (i + FIRST_TEMP != 1) {
907                         jit_ldxi(JIT_R(i + FIRST_TEMP), LIGHTREC_REG_STATE,
908                                  offsetof(struct lightrec_state, wrapper_regs[i]));
909                 }
910         }
911
912         jit_ret();
913         jit_epilog();
914
915         block->_jit = _jit;
916         block->opcode_list = NULL;
917         block->flags = BLOCK_NO_OPCODE_LIST;
918         block->nb_ops = 0;
919
920         block->function = lightrec_emit_code(state, block, _jit,
921                                              &block->code_size);
922         if (!block->function)
923                 goto err_free_block;
924
925         state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;
926
927         for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
928                 state->wrappers_eps[i] = jit_address(addr[i]);
929
930         if (ENABLE_DISASSEMBLER) {
931                 pr_debug("Wrapper block:\n");
932                 jit_disassemble();
933         }
934
935         jit_clear_state();
936         return block;
937
938 err_free_block:
939         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
940 err_no_mem:
941         pr_err("Unable to compile wrapper: Out of memory\n");
942         return NULL;
943 }
944
945 static u32 lightrec_memset(struct lightrec_state *state)
946 {
947         u32 kunseg_pc = kunseg(state->regs.gpr[4]);
948         void *host;
949         const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
950         u32 length = state->regs.gpr[5] * 4;
951
952         if (!map) {
953                 pr_err("Unable to find memory map for memset target address "
954                        "0x%x\n", kunseg_pc);
955                 return 0;
956         }
957
958         pr_debug("Calling host memset, PC 0x%x (host address 0x%" PRIxPTR ") for %u bytes\n",
959                  kunseg_pc, (uintptr_t)host, length);
960         memset(host, 0, length);
961
962         if (!state->invalidate_from_dma_only)
963                 lightrec_invalidate_map(state, map, kunseg_pc, length);
964
965         /* Rough estimation of the number of cycles consumed */
966         return 8 + 5 * ((length + 3) / 4);
967 }
968
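/* Generate the dispatcher, i.e. the outer loop of the dynarec. It receives
 * the first block's function and the cycle budget, jumps into recompiled
 * blocks, and on each end-of-block stores next_pc, checks the cycle counter,
 * looks the next block up in the code LUT, and falls back to the C function
 * get_next_block_func() when the LUT misses. It returns the number of
 * remaining cycles once the budget is exhausted. */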
969 static struct block * generate_dispatcher(struct lightrec_state *state)
970 {
971         struct block *block;
972         jit_state_t *_jit;
973         jit_node_t *to_end, *loop, *addr, *addr2, *addr3;
974         unsigned int i;
975         u32 offset;
976
977         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
978         if (!block)
979                 goto err_no_mem;
980
981         _jit = jit_new_state();
982         if (!_jit)
983                 goto err_free_block;
984
985         jit_name("dispatcher");
986         jit_note(__FILE__, __LINE__);
987
988         jit_prolog();
989         jit_frame(256);
990
991         jit_getarg(JIT_V1, jit_arg());
992         jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());
993
994         /* Force all callee-saved registers to be pushed on the stack */
995         for (i = 0; i < NUM_REGS; i++)
996                 jit_movr(JIT_V(i + FIRST_REG), JIT_V(i + FIRST_REG));
997
998         /* Pass lightrec_state structure to blocks, using the last callee-saved
999          * register that Lightning provides */
1000         jit_movi(LIGHTREC_REG_STATE, (intptr_t) state);
1001
1002         loop = jit_label();
1003
1004         /* Call the block's code */
1005         jit_jmpr(JIT_V1);
1006
1007         if (OPT_REPLACE_MEMSET) {
1008                 /* Blocks will jump here when they need to call
1009                  * lightrec_memset() */
1010                 addr3 = jit_indirect();
1011
1012                 jit_movr(JIT_V1, LIGHTREC_REG_CYCLE);
1013
1014                 jit_prepare();
1015                 jit_pushargr(LIGHTREC_REG_STATE);
1016                 jit_finishi(lightrec_memset);
1017
1018                 jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
1019                             offsetof(struct lightrec_state, regs.gpr[31]));
1020
1021                 jit_retval(LIGHTREC_REG_CYCLE);
1022                 jit_subr(LIGHTREC_REG_CYCLE, JIT_V1, LIGHTREC_REG_CYCLE);
1023         }
1024
1025         /* The block will jump here, with the number of cycles remaining in
1026          * LIGHTREC_REG_CYCLE */
1027         addr2 = jit_indirect();
1028
1029         /* Store back the next_pc to the lightrec_state structure */
1030         offset = offsetof(struct lightrec_state, next_pc);
1031         jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);
1032
1033         /* Jump to end if state->target_cycle < state->current_cycle */
1034         to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);
1035
1036         /* Convert next PC to KUNSEG and avoid mirrors */
1037         jit_andi(JIT_V1, JIT_V0, 0x10000000 | (RAM_SIZE - 1));
1038         jit_rshi_u(JIT_R1, JIT_V1, 28);
1039         jit_andi(JIT_R2, JIT_V0, BIOS_SIZE - 1);
1040         jit_addi(JIT_R2, JIT_R2, RAM_SIZE);
1041         jit_movnr(JIT_V1, JIT_R2, JIT_R1);
1042
1043         /* If possible, use the code LUT */
1044         if (!lut_is_32bit(state))
1045                 jit_lshi(JIT_V1, JIT_V1, 1);
1046         jit_addr(JIT_V1, JIT_V1, LIGHTREC_REG_STATE);
1047
1048         offset = offsetof(struct lightrec_state, code_lut);
1049         if (lut_is_32bit(state))
1050                 jit_ldxi_ui(JIT_V1, JIT_V1, offset);
1051         else
1052                 jit_ldxi(JIT_V1, JIT_V1, offset);
1053
1054         /* If we get non-NULL, loop */
1055         jit_patch_at(jit_bnei(JIT_V1, 0), loop);
1056
1057         /* The code LUT will be set to this address when the block at the target
1058          * PC has been preprocessed but not yet compiled by the threaded
1059          * recompiler */
1060         addr = jit_indirect();
1061
1062         /* Slow path: call C function get_next_block_func() */
1063
1064         if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
1065                 /* We may call the interpreter - update state->current_cycle */
1066                 jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
1067                            offsetof(struct lightrec_state, target_cycle));
1068                 jit_subr(JIT_V1, JIT_R2, LIGHTREC_REG_CYCLE);
1069                 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
1070                            LIGHTREC_REG_STATE, JIT_V1);
1071         }
1072
1073         jit_prepare();
1074         jit_pushargr(LIGHTREC_REG_STATE);
1075         jit_pushargr(JIT_V0);
1076
1077         /* Save the cycles register if needed */
1078         if (!(ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES))
1079                 jit_movr(JIT_V0, LIGHTREC_REG_CYCLE);
1080
1081         /* Get the next block */
1082         jit_finishi(&get_next_block_func);
1083         jit_retval(JIT_V1);
1084
1085         if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
1086                 /* The interpreter may have updated state->current_cycle and
1087                  * state->target_cycle - recalc the delta */
1088                 jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
1089                            offsetof(struct lightrec_state, current_cycle));
1090                 jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
1091                            offsetof(struct lightrec_state, target_cycle));
1092                 jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
1093         } else {
1094                 jit_movr(LIGHTREC_REG_CYCLE, JIT_V0);
1095         }
1096
1097         /* If we get non-NULL, loop */
1098         jit_patch_at(jit_bnei(JIT_V1, 0), loop);
1099
1100         /* When exiting, the recompiled code will jump to that address */
1101         jit_note(__FILE__, __LINE__);
1102         jit_patch(to_end);
1103
1104         jit_retr(LIGHTREC_REG_CYCLE);
1105         jit_epilog();
1106
1107         block->_jit = _jit;
1108         block->opcode_list = NULL;
1109         block->flags = BLOCK_NO_OPCODE_LIST;
1110         block->nb_ops = 0;
1111
1112         block->function = lightrec_emit_code(state, block, _jit,
1113                                              &block->code_size);
1114         if (!block->function)
1115                 goto err_free_block;
1116
1117         state->eob_wrapper_func = jit_address(addr2);
1118         if (OPT_REPLACE_MEMSET)
1119                 state->memset_func = jit_address(addr3);
1120         state->get_next_block = jit_address(addr);
1121
1122         if (ENABLE_DISASSEMBLER) {
1123                 pr_debug("Dispatcher block:\n");
1124                 jit_disassemble();
1125         }
1126
1127         /* We're done! */
1128         jit_clear_state();
1129         return block;
1130
1131 err_free_block:
1132         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1133 err_no_mem:
1134         pr_err("Unable to compile dispatcher: Out of memory\n");
1135         return NULL;
1136 }
1137
1138 union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
1139 {
1140         void *host = NULL;
1141
1142         lightrec_get_map(state, &host, kunseg(pc));
1143
1144         const u32 *code = (u32 *)host;
1145         return (union code) LE32TOH(*code);
1146 }
1147
1148 unsigned int lightrec_cycles_of_opcode(union code code)
1149 {
1150         return 2;
1151 }
1152
1153 void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
1154 {
1155         struct opcode_list *list = container_of(ops, struct opcode_list, ops);
1156
1157         lightrec_free(state, MEM_FOR_IR,
1158                       sizeof(*list) + list->nb_ops * sizeof(struct opcode),
1159                       list);
1160 }
1161
1162 static unsigned int lightrec_get_mips_block_len(const u32 *src)
1163 {
1164         unsigned int i;
1165         union code c;
1166
1167         for (i = 1; ; i++) {
1168                 c.opcode = LE32TOH(*src++);
1169
1170                 if (is_syscall(c))
1171                         return i;
1172
1173                 if (is_unconditional_jump(c))
1174                         return i + 1;
1175         }
1176 }
1177
1178 static struct opcode * lightrec_disassemble(struct lightrec_state *state,
1179                                             const u32 *src, unsigned int *len)
1180 {
1181         struct opcode_list *list;
1182         unsigned int i, length;
1183
1184         length = lightrec_get_mips_block_len(src);
1185
1186         list = lightrec_malloc(state, MEM_FOR_IR,
1187                                sizeof(*list) + sizeof(struct opcode) * length);
1188         if (!list) {
1189                 pr_err("Unable to allocate memory\n");
1190                 return NULL;
1191         }
1192
1193         list->nb_ops = (u16) length;
1194
1195         for (i = 0; i < length; i++) {
1196                 list->ops[i].opcode = LE32TOH(src[i]);
1197                 list->ops[i].flags = 0;
1198         }
1199
1200         *len = length * sizeof(u32);
1201
1202         return list->ops;
1203 }
1204
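/* First-stage compilation: disassemble the MIPS code at 'pc' into an opcode
 * list, run the IR optimizer on it, compute the block flags and hash, and
 * point the code LUT at the dispatcher's slow path (or at the memset
 * handler) so that native code only gets emitted when the block is actually
 * executed. */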
1205 static struct block * lightrec_precompile_block(struct lightrec_state *state,
1206                                                 u32 pc)
1207 {
1208         struct opcode *list;
1209         struct block *block;
1210         void *host, *addr;
1211         const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
1212         const u32 *code = (u32 *) host;
1213         unsigned int length;
1214         bool fully_tagged;
1215         u8 block_flags = 0;
1216
1217         if (!map)
1218                 return NULL;
1219
1220         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
1221         if (!block) {
1222                 pr_err("Unable to recompile block: Out of memory\n");
1223                 return NULL;
1224         }
1225
1226         list = lightrec_disassemble(state, code, &length);
1227         if (!list) {
1228                 lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1229                 return NULL;
1230         }
1231
1232         block->pc = pc;
1233         block->_jit = NULL;
1234         block->function = NULL;
1235         block->opcode_list = list;
1236         block->code = code;
1237         block->next = NULL;
1238         block->flags = 0;
1239         block->code_size = 0;
1240         block->precompile_date = state->current_cycle;
1241         block->nb_ops = length / sizeof(u32);
1242
1243         lightrec_optimize(state, block);
1244
1245         length = block->nb_ops * sizeof(u32);
1246
1247         lightrec_register(MEM_FOR_MIPS_CODE, length);
1248
1249         if (ENABLE_DISASSEMBLER) {
1250                 pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
1251                 lightrec_print_disassembly(block, code);
1252         }
1253
1254         pr_debug("Block size: %hu opcodes\n", block->nb_ops);
1255
1256         /* If the first opcode is an 'impossible' branch, never compile the
1257          * block */
1258         if (should_emulate(block->opcode_list))
1259                 block_flags |= BLOCK_NEVER_COMPILE;
1260
1261         fully_tagged = lightrec_block_is_fully_tagged(block);
1262         if (fully_tagged)
1263                 block_flags |= BLOCK_FULLY_TAGGED;
1264
1265         if (block_flags)
1266                 block_set_flags(block, block_flags);
1267
1268         block->hash = lightrec_calculate_block_hash(block);
1269
1270         if (OPT_REPLACE_MEMSET && block_has_flag(block, BLOCK_IS_MEMSET))
1271                 addr = state->memset_func;
1272         else
1273                 addr = state->get_next_block;
1274         lut_write(state, lut_offset(pc), addr);
1275
1276         pr_debug("Recompile count: %u\n", state->nb_precompile++);
1277
1278         return block;
1279 }
1280
1281 static bool lightrec_block_is_fully_tagged(const struct block *block)
1282 {
1283         const struct opcode *op;
1284         unsigned int i;
1285
1286         for (i = 0; i < block->nb_ops; i++) {
1287                 op = &block->opcode_list[i];
1288
1289                 /* Check all loads/stores of the opcode list; the block
1290                  * can only be reported as fully tagged if every one of
1291                  * them has been tagged with an I/O mode. */
1292                 switch (op->c.i.op) {
1293                 case OP_LB:
1294                 case OP_LH:
1295                 case OP_LWL:
1296                 case OP_LW:
1297                 case OP_LBU:
1298                 case OP_LHU:
1299                 case OP_LWR:
1300                 case OP_SB:
1301                 case OP_SH:
1302                 case OP_SWL:
1303                 case OP_SW:
1304                 case OP_SWR:
1305                 case OP_LWC2:
1306                 case OP_SWC2:
1307                         if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
1308                                 return false;
1309                         fallthrough;
1310                 default:
1311                         continue;
1312                 }
1313         }
1314
1315         return true;
1316 }
1317
1318 static void lightrec_reap_block(struct lightrec_state *state, void *data)
1319 {
1320         struct block *block = data;
1321
1322         pr_debug("Reap dead block at PC 0x%08x\n", block->pc);
1323         lightrec_unregister_block(state->block_cache, block);
1324         lightrec_free_block(state, block);
1325 }
1326
1327 static void lightrec_reap_jit(struct lightrec_state *state, void *data)
1328 {
1329         _jit_destroy_state(data);
1330 }
1331
1332 static void lightrec_free_function(struct lightrec_state *state, void *fn)
1333 {
1334         if (ENABLE_CODE_BUFFER && state->tlsf) {
1335                 pr_debug("Freeing code block at 0x%" PRIxPTR "\n", (uintptr_t) fn);
1336                 lightrec_free_code(state, fn);
1337         }
1338 }
1339
1340 static void lightrec_reap_function(struct lightrec_state *state, void *data)
1341 {
1342         lightrec_free_function(state, data);
1343 }
1344
1345 static void lightrec_reap_opcode_list(struct lightrec_state *state, void *data)
1346 {
1347         lightrec_free_opcode_list(state, data);
1348 }
1349
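/* Second-stage compilation: emit native code for the block with GNU
 * Lightning, patch the block-local branches, publish the new function in the
 * code LUT, reap any older blocks that the new one covers, and free the
 * previous JIT state / code buffer if the block was recompiled. */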
1350 int lightrec_compile_block(struct lightrec_cstate *cstate,
1351                            struct block *block)
1352 {
1353         struct lightrec_state *state = cstate->state;
1354         struct lightrec_branch_target *target;
1355         bool fully_tagged = false;
1356         struct block *block2;
1357         struct opcode *elm;
1358         jit_state_t *_jit, *oldjit;
1359         jit_node_t *start_of_block;
1360         bool skip_next = false;
1361         void *old_fn, *new_fn;
1362         size_t old_code_size;
1363         unsigned int i, j;
1364         u8 old_flags;
1365         u32 offset;
1366
1367         fully_tagged = lightrec_block_is_fully_tagged(block);
1368         if (fully_tagged)
1369                 block_set_flags(block, BLOCK_FULLY_TAGGED);
1370
1371         _jit = jit_new_state();
1372         if (!_jit)
1373                 return -ENOMEM;
1374
1375         oldjit = block->_jit;
1376         old_fn = block->function;
1377         old_code_size = block->code_size;
1378         block->_jit = _jit;
1379
1380         lightrec_regcache_reset(cstate->reg_cache);
1381         cstate->cycles = 0;
1382         cstate->nb_local_branches = 0;
1383         cstate->nb_targets = 0;
1384
1385         jit_prolog();
1386         jit_tramp(256);
1387
1388         start_of_block = jit_label();
1389
1390         for (i = 0; i < block->nb_ops; i++) {
1391                 elm = &block->opcode_list[i];
1392
1393                 if (skip_next) {
1394                         skip_next = false;
1395                         continue;
1396                 }
1397
1398                 if (should_emulate(elm)) {
1399                         pr_debug("Branch at offset 0x%x will be emulated\n",
1400                                  i << 2);
1401
1402                         lightrec_emit_eob(cstate, block, i, false);
1403                         skip_next = !op_flag_no_ds(elm->flags);
1404                 } else {
1405                         lightrec_rec_opcode(cstate, block, i);
1406                         skip_next = !op_flag_no_ds(elm->flags) && has_delay_slot(elm->c);
1407 #if _WIN32
1408                         /* FIXME: GNU Lightning on Windows seems to use our
1409                          * mapped registers as temporaries. Until the actual bug
1410                          * is found and fixed, unconditionally mark our
1411                          * registers as live here. */
1412                         lightrec_regcache_mark_live(cstate->reg_cache, _jit);
1413 #endif
1414                 }
1415
1416                 cstate->cycles += lightrec_cycles_of_opcode(elm->c);
1417         }
1418
1419         for (i = 0; i < cstate->nb_local_branches; i++) {
1420                 struct lightrec_branch *branch = &cstate->local_branches[i];
1421
1422                 pr_debug("Patch local branch to offset 0x%x\n",
1423                          branch->target << 2);
1424
1425                 if (branch->target == 0) {
1426                         jit_patch_at(branch->branch, start_of_block);
1427                         continue;
1428                 }
1429
1430                 for (j = 0; j < cstate->nb_targets; j++) {
1431                         if (cstate->targets[j].offset == branch->target) {
1432                                 jit_patch_at(branch->branch,
1433                                              cstate->targets[j].label);
1434                                 break;
1435                         }
1436                 }
1437
1438                 if (j == cstate->nb_targets)
1439                         pr_err("Unable to find branch target\n");
1440         }
1441
1442         jit_ret();
1443         jit_epilog();
1444
1445         new_fn = lightrec_emit_code(state, block, _jit, &block->code_size);
1446         if (!new_fn) {
1447                 if (!ENABLE_THREADED_COMPILER)
1448                         pr_err("Unable to compile block!\n");
1449                 block->_jit = oldjit;
1450                 jit_clear_state();
1451                 _jit_destroy_state(_jit);
1452                 return -ENOMEM;
1453         }
1454
1455         /* Pause the reaper, because lightrec_reset_lut_offset() may try to set
1456          * the old block->function pointer to the code LUT. */
1457         if (ENABLE_THREADED_COMPILER)
1458                 lightrec_reaper_pause(state->reaper);
1459
1460         block->function = new_fn;
1461         block_clear_flags(block, BLOCK_SHOULD_RECOMPILE);
1462
1463         /* Add compiled function to the LUT */
1464         lut_write(state, lut_offset(block->pc), block->function);
1465
1466         if (ENABLE_THREADED_COMPILER)
1467                 lightrec_reaper_continue(state->reaper);
1468
1469         /* Detect old blocks that have been covered by the new one */
1470         for (i = 0; i < cstate->nb_targets; i++) {
1471                 target = &cstate->targets[i];
1472
1473                 if (!target->offset)
1474                         continue;
1475
1476                 offset = block->pc + target->offset * sizeof(u32);
1477
1478                 /* Pause the reaper while we search for the block until we set
1479                  * the BLOCK_IS_DEAD flag, otherwise the block may be removed
1480                  * under our feet. */
1481                 if (ENABLE_THREADED_COMPILER)
1482                         lightrec_reaper_pause(state->reaper);
1483
1484                 block2 = lightrec_find_block(state->block_cache, offset);
1485                 if (block2) {
1486                         /* No need to check if block2 is compilable - it must
1487                          * be, otherwise block wouldn't be compilable either */
1488
1489                         /* Set the "block dead" flag to prevent the dynarec from
1490                          * recompiling this block */
1491                         old_flags = block_set_flags(block2, BLOCK_IS_DEAD);
1492                 }
1493
1494                 if (ENABLE_THREADED_COMPILER) {
1495                         lightrec_reaper_continue(state->reaper);
1496
1497                         /* If block2 was pending for compilation, cancel it.
1498                          * If it's being compiled right now, wait until it
1499                          * finishes. */
1500                         if (block2)
1501                                 lightrec_recompiler_remove(state->rec, block2);
1502                 }
1503
1504                 /* We know from now on that block2 (if present) isn't going to
1505                  * be compiled. We can override the LUT entry with our new
1506                  * block's entry point. */
1507                 offset = lut_offset(block->pc) + target->offset;
1508                 lut_write(state, offset, jit_address(target->label));
1509
1510                 if (block2) {
1511                         pr_debug("Reap block 0x%08x as it's covered by block "
1512                                  "0x%08x\n", block2->pc, block->pc);
1513
1514                         /* Finally, reap the block. */
1515                         if (!ENABLE_THREADED_COMPILER) {
1516                                 lightrec_unregister_block(state->block_cache, block2);
1517                                 lightrec_free_block(state, block2);
1518                         } else if (!(old_flags & BLOCK_IS_DEAD)) {
1519                                 lightrec_reaper_add(state->reaper,
1520                                                     lightrec_reap_block,
1521                                                     block2);
1522                         }
1523                 }
1524         }
1525
1526         if (ENABLE_DISASSEMBLER) {
1527                 pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
1528                 jit_disassemble();
1529         }
1530
1531         jit_clear_state();
1532
1533         if (fully_tagged)
1534                 old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
1535
1536         if (fully_tagged && !(old_flags & BLOCK_NO_OPCODE_LIST)) {
1537                 pr_debug("Block PC 0x%08x is fully tagged"
1538                          " - free opcode list\n", block->pc);
1539
1540                 if (ENABLE_THREADED_COMPILER) {
1541                         lightrec_reaper_add(state->reaper,
1542                                             lightrec_reap_opcode_list,
1543                                             block->opcode_list);
1544                 } else {
1545                         lightrec_free_opcode_list(state, block->opcode_list);
1546                 }
1547         }
1548
        if (oldjit) {
                pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
                         block->pc);

                if (ENABLE_THREADED_COMPILER) {
                        lightrec_reaper_add(state->reaper,
                                            lightrec_reap_jit, oldjit);
                        lightrec_reaper_add(state->reaper,
                                            lightrec_reap_function, old_fn);
                } else {
                        _jit_destroy_state(oldjit);
                        lightrec_free_function(state, old_fn);
                }

                lightrec_unregister(MEM_FOR_CODE, old_code_size);
        }

        return 0;
}

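/* Print memory usage and average IPI statistics, at most once every
 * 0x10000000 cycles. */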
static void lightrec_print_info(struct lightrec_state *state)
{
        if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
                pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
                        "MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
                        lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
                        lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
                        lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
                        lightrec_get_total_mem_usage() / 1024,
                        lightrec_get_average_ipi());
                state->old_cycle_counter = state->current_cycle & ~0xfffffff;
        }
}

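/* Run the emulated CPU from 'pc' using the code generated by the dynarec,
 * until 'target_cycle' is reached or an exit flag is raised. The number of
 * cycles actually spent is reflected in state->current_cycle, and the next
 * PC to execute from is returned. */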
u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
{
        s32 (*func)(void *, s32) = (void *)state->dispatcher->function;
        void *block_trace;
        s32 cycles_delta;

        state->exit_flags = LIGHTREC_EXIT_NORMAL;

        /* Handle the cycle counter overflowing */
        if (unlikely(target_cycle < state->current_cycle))
                target_cycle = UINT_MAX;

        state->target_cycle = target_cycle;
        state->next_pc = pc;

        block_trace = get_next_block_func(state, pc);
        if (block_trace) {
                cycles_delta = state->target_cycle - state->current_cycle;

                cycles_delta = (*func)(block_trace, cycles_delta);

                state->current_cycle = state->target_cycle - cycles_delta;
        }

        if (ENABLE_THREADED_COMPILER)
                lightrec_reaper_reap(state->reaper);

        if (LOG_LEVEL >= INFO_L)
                lightrec_print_info(state);

        return state->next_pc;
}

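/* Interpreter counterpart of lightrec_execute(): blocks are fetched from the
 * same cache, but executed with lightrec_emulate_block() instead of the
 * dynarec-generated code (e.g. for debugging or comparison). */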
u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc,
                             u32 target_cycle)
{
        struct block *block;

        state->exit_flags = LIGHTREC_EXIT_NORMAL;
        state->target_cycle = target_cycle;

        do {
                block = lightrec_get_block(state, pc);
                if (!block)
                        break;

                pc = lightrec_emulate_block(state, block, pc);

                if (ENABLE_THREADED_COMPILER)
                        lightrec_reaper_reap(state->reaper);
        } while (state->current_cycle < state->target_cycle);

        if (LOG_LEVEL >= INFO_L)
                lightrec_print_info(state);

        return pc;
}

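/* Release all resources owned by a block: its opcode list (unless it was
 * already freed), its JIT state and generated code, then the block itself. */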
void lightrec_free_block(struct lightrec_state *state, struct block *block)
{
        u8 old_flags;

        lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
        old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

        if (!(old_flags & BLOCK_NO_OPCODE_LIST))
                lightrec_free_opcode_list(state, block->opcode_list);
        if (block->_jit)
                _jit_destroy_state(block->_jit);
        if (block->function) {
                lightrec_free_function(state, block->function);
                lightrec_unregister(MEM_FOR_CODE, block->code_size);
        }
        lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
}

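/* Allocate a compilation state, which mainly holds the register cache used
 * while emitting code. A single one is used without the threaded compiler;
 * the recompiler allocates its own otherwise. */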
struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
{
        struct lightrec_cstate *cstate;

        cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
        if (!cstate)
                return NULL;

        cstate->reg_cache = lightrec_regcache_init(state);
        if (!cstate->reg_cache) {
                lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
                return NULL;
        }

        cstate->state = state;

        return cstate;
}

void lightrec_free_cstate(struct lightrec_cstate *cstate)
{
        lightrec_free_regcache(cstate->reg_cache);
        lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
}

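/* Create and initialize a lightrec_state. 'map' describes the emulated
 * memory map (RAM, BIOS, scratchpad, hardware registers, mirrors, and
 * optionally a code buffer), and 'ops' must provide at least the cop2_op
 * and enable_ram callbacks. Returns NULL on failure. */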
struct lightrec_state * lightrec_init(char *argv0,
                                      const struct lightrec_mem_map *map,
                                      size_t nb,
                                      const struct lightrec_ops *ops)
{
        const struct lightrec_mem_map *codebuf_map = &map[PSX_MAP_CODE_BUFFER];
        struct lightrec_state *state;
        uintptr_t addr;
        void *tlsf = NULL;
        bool with_32bit_lut = false;
        size_t lut_size;

        /* Sanity-check ops */
        if (!ops || !ops->cop2_op || !ops->enable_ram) {
                pr_err("Missing callbacks in lightrec_ops structure\n");
                return NULL;
        }

        if (ENABLE_CODE_BUFFER && nb > PSX_MAP_CODE_BUFFER
            && codebuf_map->address) {
                tlsf = tlsf_create_with_pool(codebuf_map->address,
                                             codebuf_map->length);
                if (!tlsf) {
                        pr_err("Unable to initialize code buffer\n");
                        return NULL;
                }

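                /* On 64-bit hosts, if the whole code buffer sits below the
                 * 4 GiB boundary, code pointers fit in 32 bits and the code
                 * lookup table can use 32-bit entries, halving its size. */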
                if (__WORDSIZE == 64) {
                        addr = (uintptr_t) codebuf_map->address + codebuf_map->length - 1;
                        with_32bit_lut = addr == (u32) addr;
                }
        }

        if (with_32bit_lut)
                lut_size = CODE_LUT_SIZE * 4;
        else
                lut_size = CODE_LUT_SIZE * sizeof(void *);

        init_jit(argv0);

        state = calloc(1, sizeof(*state) + lut_size);
        if (!state)
                goto err_finish_jit;

        lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) + lut_size);

        state->tlsf = tlsf;
        state->with_32bit_lut = with_32bit_lut;

        state->block_cache = lightrec_blockcache_init(state);
        if (!state->block_cache)
                goto err_free_state;

        if (ENABLE_THREADED_COMPILER) {
                state->rec = lightrec_recompiler_init(state);
                if (!state->rec)
                        goto err_free_block_cache;

                state->reaper = lightrec_reaper_init(state);
                if (!state->reaper)
                        goto err_free_recompiler;
        } else {
                state->cstate = lightrec_create_cstate(state);
                if (!state->cstate)
                        goto err_free_block_cache;
        }

        state->nb_maps = nb;
        state->maps = map;

        memcpy(&state->ops, ops, sizeof(*ops));

        state->dispatcher = generate_dispatcher(state);
        if (!state->dispatcher)
                goto err_free_reaper;

        state->c_wrapper_block = generate_wrapper(state);
        if (!state->c_wrapper_block)
                goto err_free_dispatcher;

        state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
        state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
        state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
        state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;

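        /* Pre-compute, for each memory region, the offset between the host
         * address and the emulated PC base, so that emitted code can
         * translate guest addresses with a simple constant offset. */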
        map = &state->maps[PSX_MAP_BIOS];
        state->offset_bios = (uintptr_t)map->address - map->pc;

        map = &state->maps[PSX_MAP_SCRATCH_PAD];
        state->offset_scratch = (uintptr_t)map->address - map->pc;

        map = &state->maps[PSX_MAP_HW_REGISTERS];
        state->offset_io = (uintptr_t)map->address - map->pc;

        map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
        state->offset_ram = (uintptr_t)map->address - map->pc;

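        /* If the three RAM mirrors are mapped contiguously right after the
         * RAM region, accesses to mirrored addresses can reuse the RAM
         * mapping without being remapped. */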
        if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
            state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
            state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
                state->mirrors_mapped = true;

        if (state->offset_bios == 0 &&
            state->offset_scratch == 0 &&
            state->offset_ram == 0 &&
            state->offset_io == 0 &&
            state->mirrors_mapped) {
                pr_info("Memory map is perfect. Emitted code will be best.\n");
        } else {
                pr_info("Memory map is sub-par. Emitted code will be slow.\n");
        }

        if (state->with_32bit_lut)
                pr_info("Using 32-bit LUT\n");

        return state;

err_free_dispatcher:
        lightrec_free_block(state, state->dispatcher);
err_free_reaper:
        if (ENABLE_THREADED_COMPILER)
                lightrec_reaper_destroy(state->reaper);
err_free_recompiler:
        if (ENABLE_THREADED_COMPILER)
                lightrec_free_recompiler(state->rec);
        else
                lightrec_free_cstate(state->cstate);
err_free_block_cache:
        lightrec_free_block_cache(state->block_cache);
err_free_state:
        lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
                            lut_elm_size(state) * CODE_LUT_SIZE);
        free(state);
err_finish_jit:
        finish_jit();
        if (ENABLE_CODE_BUFFER && tlsf)
                tlsf_destroy(tlsf);
        return NULL;
}

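/* Tear down a lightrec_state: print the final statistics, free all cached
 * blocks and helper blocks, free the recompiler and reaper (or the single
 * compilation state), and release the JIT and the optional code buffer. */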
void lightrec_destroy(struct lightrec_state *state)
{
        /* Force the stats to be printed on destroy: flipping the cycle
         * counter makes lightrec_print_info() pass its boundary check. */
        state->current_cycle = ~state->current_cycle;
        lightrec_print_info(state);

        lightrec_free_block_cache(state->block_cache);
        lightrec_free_block(state, state->dispatcher);
        lightrec_free_block(state, state->c_wrapper_block);

        if (ENABLE_THREADED_COMPILER) {
                lightrec_free_recompiler(state->rec);
                lightrec_reaper_destroy(state->reaper);
        } else {
                lightrec_free_cstate(state->cstate);
        }

        finish_jit();
        if (ENABLE_CODE_BUFFER && state->tlsf)
                tlsf_destroy(state->tlsf);

        lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
                            lut_elm_size(state) * CODE_LUT_SIZE);
        free(state);
}

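/* Clear the code LUT entries covering [addr, addr + len), so that any code
 * cached for those addresses is not reused directly. Only RAM is affected;
 * addresses in the RAM mirrors are folded back onto RAM, and all other
 * regions are ignored. */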
void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
{
        u32 kaddr = kunseg(addr & ~0x3);
        enum psx_map idx = lightrec_get_map_idx(state, kaddr);

        switch (idx) {
        case PSX_MAP_MIRROR1:
        case PSX_MAP_MIRROR2:
        case PSX_MAP_MIRROR3:
                /* Handle mirrors */
                kaddr &= RAM_SIZE - 1;
                fallthrough;
        case PSX_MAP_KERNEL_USER_RAM:
                break;
        default:
                return;
        }

        memset(lut_address(state, lut_offset(kaddr)), 0,
               ((len + 3) / 4) * lut_elm_size(state));
}

void lightrec_invalidate_all(struct lightrec_state *state)
{
        memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
}

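/* Select whether regular stores invalidate code, or whether only DMA
 * transfers do. Switching modes flushes the whole LUT, since entries
 * validated under the previous policy can no longer be trusted. */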
void lightrec_set_invalidate_mode(struct lightrec_state *state, bool dma_only)
{
        if (state->invalidate_from_dma_only != dma_only)
                lightrec_invalidate_all(state);

        state->invalidate_from_dma_only = dma_only;
}

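/* Raise exit flags. Setting any flag other than LIGHTREC_EXIT_NORMAL also
 * clamps the target cycle to the current cycle, so that execution returns
 * to the caller as soon as possible. */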
void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
        if (flags != LIGHTREC_EXIT_NORMAL) {
                state->exit_flags |= flags;
                state->target_cycle = state->current_cycle;
        }
}

u32 lightrec_exit_flags(struct lightrec_state *state)
{
        return state->exit_flags;
}

u32 lightrec_current_cycle_count(const struct lightrec_state *state)
{
        return state->current_cycle;
}

void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
{
        state->current_cycle = cycles;

        if (state->target_cycle < cycles)
                state->target_cycle = cycles;
}

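/* Move the target cycle count (e.g. when a new event is scheduled). The
 * target is never moved below the current cycle count, and is left untouched
 * while an exit flag is pending. */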
void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
{
        if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
                if (cycles < state->current_cycle)
                        cycles = state->current_cycle;

                state->target_cycle = cycles;
        }
}

struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
{
        return &state->regs;
}
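
/*
 * Rough usage sketch of this API from a frontend's point of view (not part
 * of this file; the frontend-side helpers run_dma() and schedule_events()
 * are hypothetical):
 *
 *      struct lightrec_state *state =
 *              lightrec_init(argv[0], maps, nb_maps, &ops);
 *
 *      while (running) {
 *              pc = lightrec_execute(state, pc, next_event_cycle);
 *
 *              if (lightrec_exit_flags(state) & LIGHTREC_EXIT_SEGFAULT)
 *                      break;
 *
 *              // Code overwritten by DMA must be invalidated explicitly,
 *              // e.g. with lightrec_invalidate(state, dst, len).
 *              run_dma(state);
 *
 *              next_event_cycle =
 *                      schedule_events(lightrec_current_cycle_count(state));
 *      }
 *
 *      lightrec_destroy(state);
 */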