[pcsx_rearmed.git] / deps / lightrec / lightrec.c
1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3  * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
4  */
5
6 #include "blockcache.h"
7 #include "debug.h"
8 #include "disassembler.h"
9 #include "emitter.h"
10 #include "interpreter.h"
11 #include "lightrec-config.h"
12 #include "lightning-wrapper.h"
13 #include "lightrec.h"
14 #include "memmanager.h"
15 #include "reaper.h"
16 #include "recompiler.h"
17 #include "regcache.h"
18 #include "optimizer.h"
19 #include "tlsf/tlsf.h"
20
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <limits.h>
24 #if ENABLE_THREADED_COMPILER
25 #include <stdatomic.h>
26 #endif
27 #include <stdbool.h>
28 #include <stddef.h>
29 #include <string.h>
30
31 static struct block * lightrec_precompile_block(struct lightrec_state *state,
32                                                 u32 pc);
33 static bool lightrec_block_is_fully_tagged(const struct block *block);
34
35 static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data);
36 static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg);
37
38 static void lightrec_default_sb(struct lightrec_state *state, u32 opcode,
39                                 void *host, u32 addr, u8 data)
40 {
41         *(u8 *)host = data;
42
43         if (!state->invalidate_from_dma_only)
44                 lightrec_invalidate(state, addr, 1);
45 }
46
47 static void lightrec_default_sh(struct lightrec_state *state, u32 opcode,
48                                 void *host, u32 addr, u16 data)
49 {
50         *(u16 *)host = HTOLE16(data);
51
52         if (!state->invalidate_from_dma_only)
53                 lightrec_invalidate(state, addr, 2);
54 }
55
56 static void lightrec_default_sw(struct lightrec_state *state, u32 opcode,
57                                 void *host, u32 addr, u32 data)
58 {
59         *(u32 *)host = HTOLE32(data);
60
61         if (!state->invalidate_from_dma_only)
62                 lightrec_invalidate(state, addr, 4);
63 }
64
65 static u8 lightrec_default_lb(struct lightrec_state *state,
66                               u32 opcode, void *host, u32 addr)
67 {
68         return *(u8 *)host;
69 }
70
71 static u16 lightrec_default_lh(struct lightrec_state *state,
72                                u32 opcode, void *host, u32 addr)
73 {
74         return LE16TOH(*(u16 *)host);
75 }
76
77 static u32 lightrec_default_lw(struct lightrec_state *state,
78                                u32 opcode, void *host, u32 addr)
79 {
80         return LE32TOH(*(u32 *)host);
81 }
82
83 static const struct lightrec_mem_map_ops lightrec_default_ops = {
84         .sb = lightrec_default_sb,
85         .sh = lightrec_default_sh,
86         .sw = lightrec_default_sw,
87         .lb = lightrec_default_lb,
88         .lh = lightrec_default_lh,
89         .lw = lightrec_default_lw,
90 };
91
92 static void __segfault_cb(struct lightrec_state *state, u32 addr,
93                           const struct block *block)
94 {
95         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
96         pr_err("Segmentation fault in recompiled code: invalid "
97                "load/store at address 0x%08x\n", addr);
98         if (block)
99                 pr_err("Was executing block PC 0x%08x\n", block->pc);
100 }
101
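/* Software emulation of the unaligned MIPS stores (SWL/SWR): the aligned
 * word is read through the map's load handler, merged with the relevant
 * bytes of the source register, then written back through the store
 * handler. E.g. SWL with (addr & 3) == 0 only stores the top byte of the
 * register into the lowest byte of the word (little-endian). */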
102 static void lightrec_swl(struct lightrec_state *state,
103                          const struct lightrec_mem_map_ops *ops,
104                          u32 opcode, void *host, u32 addr, u32 data)
105 {
106         unsigned int shift = addr & 0x3;
107         unsigned int mask = shift < 3 ? GENMASK(31, (shift + 1) * 8) : 0;
108         u32 old_data;
109
110         /* Align to 32 bits */
111         addr &= ~3;
112         host = (void *)((uintptr_t)host & ~3);
113
114         old_data = ops->lw(state, opcode, host, addr);
115
116         data = (data >> ((3 - shift) * 8)) | (old_data & mask);
117
118         ops->sw(state, opcode, host, addr, data);
119 }
120
121 static void lightrec_swr(struct lightrec_state *state,
122                          const struct lightrec_mem_map_ops *ops,
123                          u32 opcode, void *host, u32 addr, u32 data)
124 {
125         unsigned int shift = addr & 0x3;
126         unsigned int mask = (1 << (shift * 8)) - 1;
127         u32 old_data;
128
129         /* Align to 32 bits */
130         addr &= ~3;
131         host = (void *)((uintptr_t)host & ~3);
132
133         old_data = ops->lw(state, opcode, host, addr);
134
135         data = (data << (shift * 8)) | (old_data & mask);
136
137         ops->sw(state, opcode, host, addr, data);
138 }
139
140 static void lightrec_swc2(struct lightrec_state *state, union code op,
141                           const struct lightrec_mem_map_ops *ops,
142                           void *host, u32 addr)
143 {
144         u32 data = lightrec_mfc2(state, op.i.rt);
145
146         ops->sw(state, op.opcode, host, addr, data);
147 }
148
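/* LWL/LWR counterpart of the above: read the aligned word and merge it with
 * the bytes of the destination register that the instruction leaves
 * untouched. */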
149 static u32 lightrec_lwl(struct lightrec_state *state,
150                         const struct lightrec_mem_map_ops *ops,
151                         u32 opcode, void *host, u32 addr, u32 data)
152 {
153         unsigned int shift = addr & 0x3;
154         unsigned int mask = (1 << (24 - shift * 8)) - 1;
155         u32 old_data;
156
157         /* Align to 32 bits */
158         addr &= ~3;
159         host = (void *)((uintptr_t)host & ~3);
160
161         old_data = ops->lw(state, opcode, host, addr);
162
163         return (data & mask) | (old_data << (24 - shift * 8));
164 }
165
166 static u32 lightrec_lwr(struct lightrec_state *state,
167                         const struct lightrec_mem_map_ops *ops,
168                         u32 opcode, void *host, u32 addr, u32 data)
169 {
170         unsigned int shift = addr & 0x3;
171         unsigned int mask = shift ? GENMASK(31, 32 - shift * 8) : 0;
172         u32 old_data;
173
174         /* Align to 32 bits */
175         addr &= ~3;
176         host = (void *)((uintptr_t)host & ~3);
177
178         old_data = ops->lw(state, opcode, host, addr);
179
180         return (data & mask) | (old_data >> (shift * 8));
181 }
182
183 static void lightrec_lwc2(struct lightrec_state *state, union code op,
184                           const struct lightrec_mem_map_ops *ops,
185                           void *host, u32 addr)
186 {
187         u32 data = ops->lw(state, op.opcode, host, addr);
188
189         lightrec_mtc2(state, op.i.rt, data);
190 }
191
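/* Clear the code LUT entries covering the written range, so that any block
 * compiled from that memory gets looked up (and recompiled) again. Only
 * writes to kernel/user RAM need to invalidate anything. */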
192 static void lightrec_invalidate_map(struct lightrec_state *state,
193                 const struct lightrec_mem_map *map, u32 addr, u32 len)
194 {
195         if (map == &state->maps[PSX_MAP_KERNEL_USER_RAM]) {
196                 memset(lut_address(state, lut_offset(addr)), 0,
197                        ((len + 3) / 4) * lut_elm_size(state));
198         }
199 }
200
201 static enum psx_map
202 lightrec_get_map_idx(struct lightrec_state *state, u32 kaddr)
203 {
204         const struct lightrec_mem_map *map;
205         unsigned int i;
206
207         for (i = 0; i < state->nb_maps; i++) {
208                 map = &state->maps[i];
209
210                 if (kaddr >= map->pc && kaddr < map->pc + map->length)
211                         return (enum psx_map) i;
212         }
213
214         return PSX_MAP_UNKNOWN;
215 }
216
217 const struct lightrec_mem_map *
218 lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr)
219 {
220         const struct lightrec_mem_map *map;
221         enum psx_map idx;
222         u32 addr;
223
224         idx = lightrec_get_map_idx(state, kaddr);
225         if (idx == PSX_MAP_UNKNOWN)
226                 return NULL;
227
228         map = &state->maps[idx];
229         addr = kaddr - map->pc;
230
231         while (map->mirror_of)
232                 map = map->mirror_of;
233
234         if (host)
235                 *host = map->address + addr;
236
237         return map;
238 }
239
240 u32 lightrec_rw(struct lightrec_state *state, union code op,
241                 u32 addr, u32 data, u32 *flags, struct block *block)
242 {
243         const struct lightrec_mem_map *map;
244         const struct lightrec_mem_map_ops *ops;
245         u32 opcode = op.opcode;
246         void *host;
247
248         addr += (s16) op.i.imm;
249
250         map = lightrec_get_map(state, &host, kunseg(addr));
251         if (!map) {
252                 __segfault_cb(state, addr, block);
253                 return 0;
254         }
255
256
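        /* Tag the opcode with the access type (direct memory vs. hardware
         * I/O) the first time it goes through this C path, so that the block
         * can later be recompiled with faster inlined accesses. */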
257         if (likely(!map->ops)) {
258                 if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
259                         *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_DIRECT);
260
261                 ops = &lightrec_default_ops;
262         } else if (flags &&
263                    LIGHTREC_FLAGS_GET_IO_MODE(*flags) == LIGHTREC_IO_DIRECT_HW) {
264                 ops = &lightrec_default_ops;
265         } else {
266                 if (flags && !LIGHTREC_FLAGS_GET_IO_MODE(*flags))
267                         *flags |= LIGHTREC_IO_MODE(LIGHTREC_IO_HW);
268
269                 ops = map->ops;
270         }
271
272         switch (op.i.op) {
273         case OP_SB:
274                 ops->sb(state, opcode, host, addr, (u8) data);
275                 return 0;
276         case OP_SH:
277                 ops->sh(state, opcode, host, addr, (u16) data);
278                 return 0;
279         case OP_SWL:
280                 lightrec_swl(state, ops, opcode, host, addr, data);
281                 return 0;
282         case OP_SWR:
283                 lightrec_swr(state, ops, opcode, host, addr, data);
284                 return 0;
285         case OP_SW:
286                 ops->sw(state, opcode, host, addr, data);
287                 return 0;
288         case OP_SWC2:
289                 lightrec_swc2(state, op, ops, host, addr);
290                 return 0;
291         case OP_LB:
292                 return (s32) (s8) ops->lb(state, opcode, host, addr);
293         case OP_LBU:
294                 return ops->lb(state, opcode, host, addr);
295         case OP_LH:
296                 return (s32) (s16) ops->lh(state, opcode, host, addr);
297         case OP_LHU:
298                 return ops->lh(state, opcode, host, addr);
299         case OP_LWC2:
300                 lightrec_lwc2(state, op, ops, host, addr);
301                 return 0;
302         case OP_LWL:
303                 return lightrec_lwl(state, ops, opcode, host, addr, data);
304         case OP_LWR:
305                 return lightrec_lwr(state, ops, opcode, host, addr, data);
306         case OP_LW:
307         default:
308                 return ops->lw(state, opcode, host, addr);
309         }
310 }
311
312 static void lightrec_rw_helper(struct lightrec_state *state,
313                                union code op, u32 *flags,
314                                struct block *block)
315 {
316         u32 ret = lightrec_rw(state, op, state->regs.gpr[op.i.rs],
317                               state->regs.gpr[op.i.rt], flags, block);
318
319         switch (op.i.op) {
320         case OP_LB:
321         case OP_LBU:
322         case OP_LH:
323         case OP_LHU:
324         case OP_LWL:
325         case OP_LWR:
326         case OP_LW:
327                 if (op.i.rt)
328                         state->regs.gpr[op.i.rt] = ret;
329                 fallthrough;
330         default:
331                 break;
332         }
333 }
334
335 static void lightrec_rw_cb(struct lightrec_state *state, u32 arg)
336 {
337         lightrec_rw_helper(state, (union code) arg, NULL, NULL);
338 }
339
340 static void lightrec_rw_generic_cb(struct lightrec_state *state, u32 arg)
341 {
342         struct block *block;
343         struct opcode *op;
344         bool was_tagged;
345         u16 offset = (u16)arg;
346         u16 old_flags;
347
348         block = lightrec_find_block_from_lut(state->block_cache,
349                                              arg >> 16, state->next_pc);
350         if (unlikely(!block)) {
351                 pr_err("rw_generic: No block found in LUT for PC 0x%x offset 0x%x\n",
352                          state->next_pc, offset);
353                 lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
354                 return;
355         }
356
357         op = &block->opcode_list[offset];
358         was_tagged = LIGHTREC_FLAGS_GET_IO_MODE(op->flags);
359
360         lightrec_rw_helper(state, op->c, &op->flags, block);
361
362         if (!was_tagged) {
363                 old_flags = block_set_flags(block, BLOCK_SHOULD_RECOMPILE);
364
365                 if (!(old_flags & BLOCK_SHOULD_RECOMPILE)) {
366                         pr_debug("Opcode of block at PC 0x%08x has been tagged"
367                                  " - flag for recompilation\n", block->pc);
368
369                         lut_write(state, lut_offset(block->pc), NULL);
370                 }
371         }
372 }
373
374 static u32 clamp_s32(s32 val, s32 min, s32 max)
375 {
376         return val < min ? min : val > max ? max : val;
377 }
378
379 static u16 load_u16(u32 *ptr)
380 {
381         return ((struct u16x2 *) ptr)->l;
382 }
383
384 static void store_u16(u32 *ptr, u16 value)
385 {
386         ((struct u16x2 *) ptr)->l = value;
387 }
388
389 static u32 lightrec_mfc2(struct lightrec_state *state, u8 reg)
390 {
391         s16 gteir1, gteir2, gteir3;
392
393         switch (reg) {
394         case 1:
395         case 3:
396         case 5:
397         case 8:
398         case 9:
399         case 10:
400         case 11:
401                 return (s32)(s16) load_u16(&state->regs.cp2d[reg]);
402         case 7:
403         case 16:
404         case 17:
405         case 18:
406         case 19:
407                 return load_u16(&state->regs.cp2d[reg]);
408         case 28:
409         case 29:
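                /* IRGB/ORGB (regs 28/29) read back IR1-IR3 converted to a
                 * 5:5:5 colour value, each component clamped to 0..31. */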
410                 gteir1 = (s16) load_u16(&state->regs.cp2d[9]);
411                 gteir2 = (s16) load_u16(&state->regs.cp2d[10]);
412                 gteir3 = (s16) load_u16(&state->regs.cp2d[11]);
413
414                 return clamp_s32(gteir1 >> 7, 0, 0x1f) << 0 |
415                         clamp_s32(gteir2 >> 7, 0, 0x1f) << 5 |
416                         clamp_s32(gteir3 >> 7, 0, 0x1f) << 10;
417         case 15:
418                 reg = 14;
419                 fallthrough;
420         default:
421                 return state->regs.cp2d[reg];
422         }
423 }
424
425 u32 lightrec_mfc(struct lightrec_state *state, union code op)
426 {
427         u32 val;
428
429         if (op.i.op == OP_CP0)
430                 return state->regs.cp0[op.r.rd];
431
432         if (op.i.op == OP_SWC2) {
433                 val = lightrec_mfc2(state, op.i.rt);
434         } else if (op.r.rs == OP_CP2_BASIC_MFC2)
435                 val = lightrec_mfc2(state, op.r.rd);
436         else {
437                 val = state->regs.cp2c[op.r.rd];
438
439                 switch (op.r.rd) {
440                 case 4:
441                 case 12:
442                 case 20:
443                 case 26:
444                 case 27:
445                 case 29:
446                 case 30:
447                         val = (u32)(s16)val;
448                         fallthrough;
449                 default:
450                         break;
451                 }
452         }
453
454         if (state->ops.cop2_notify)
455                 (*state->ops.cop2_notify)(state, op.opcode, val);
456
457         return val;
458 }
459
460 static void lightrec_mfc_cb(struct lightrec_state *state, union code op)
461 {
462         u32 rt = lightrec_mfc(state, op);
463
464         if (op.i.op == OP_SWC2)
465                 state->cp2_temp_reg = rt;
466         else if (op.r.rt)
467                 state->regs.gpr[op.r.rt] = rt;
468 }
469
470 static void lightrec_mtc0(struct lightrec_state *state, u8 reg, u32 data)
471 {
472         u32 status, oldstatus, cause;
473
474         switch (reg) {
475         case 1:
476         case 4:
477         case 8:
478         case 14:
479         case 15:
480                 /* Those registers are read-only */
481                 return;
482         default:
483                 break;
484         }
485
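        /* Status register (SR): bit 16 is the "Isolate Cache" bit. Clearing
         * it re-enables main RAM (and conservatively invalidates all blocks),
         * while setting it redirects memory accesses to the cache, so RAM
         * gets disabled. */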
486         if (reg == 12) {
487                 status = state->regs.cp0[12];
488                 oldstatus = status;
489
490                 if (status & ~data & BIT(16)) {
491                         state->ops.enable_ram(state, true);
492                         lightrec_invalidate_all(state);
493                 } else if (~status & data & BIT(16)) {
494                         state->ops.enable_ram(state, false);
495                 }
496         }
497
498         if (reg == 13) {
499                 state->regs.cp0[13] &= ~0x300;
500                 state->regs.cp0[13] |= data & 0x300;
501         } else {
502                 state->regs.cp0[reg] = data;
503         }
504
505         if (reg == 12 || reg == 13) {
506                 cause = state->regs.cp0[13];
507                 status = state->regs.cp0[12];
508
509                 /* Handle software interrupts */
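                /* Bits 8-9 of CAUSE are the software interrupt pending bits;
                 * they must be unmasked in SR (bits 8-9), and the trailing
                 * "& status" tests the current interrupt enable bit (IEc,
                 * bit 0). */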
510                 if (!!(status & cause & 0x300) & status)
511                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
512
513                 /* Handle hardware interrupts */
514                 if (reg == 12 && !(~status & 0x401) && (~oldstatus & 0x401))
515                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_CHECK_INTERRUPT);
516         }
517 }
518
519 static u32 count_leading_bits(s32 data)
520 {
521         u32 cnt = 33;
522
523 #ifdef __has_builtin
524 #if __has_builtin(__builtin_clrsb)
525         return 1 + __builtin_clrsb(data);
526 #endif
527 #endif
528
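        /* Portable fallback: fold the value with its sign bit, then count
         * the iterations of the shift loop below; this yields the number of
         * leading bits equal to the sign bit, matching __builtin_clrsb() + 1. */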
529         data = (data ^ (data >> 31)) << 1;
530
531         do {
532                 cnt -= 1;
533                 data >>= 1;
534         } while (data);
535
536         return cnt;
537 }
538
539 static void lightrec_mtc2(struct lightrec_state *state, u8 reg, u32 data)
540 {
541         switch (reg) {
542         case 15:
543                 state->regs.cp2d[12] = state->regs.cp2d[13];
544                 state->regs.cp2d[13] = state->regs.cp2d[14];
545                 state->regs.cp2d[14] = data;
546                 break;
547         case 28:
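                /* IRGB: expand the three 5-bit colour fields into IR1-IR3,
                 * each scaled to the 0..0xf80 range (value << 7). */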
548                 state->regs.cp2d[9] = (data << 7) & 0xf80;
549                 state->regs.cp2d[10] = (data << 2) & 0xf80;
550                 state->regs.cp2d[11] = (data >> 3) & 0xf80;
551                 break;
552         case 31:
553                 return;
554         case 30:
555                 state->regs.cp2d[31] = count_leading_bits((s32) data);
556                 fallthrough;
557         default:
558                 state->regs.cp2d[reg] = data;
559                 break;
560         }
561 }
562
563 static void lightrec_ctc2(struct lightrec_state *state, u8 reg, u32 data)
564 {
565         switch (reg) {
566         case 4:
567         case 12:
568         case 20:
569         case 26:
570         case 27:
571         case 29:
572         case 30:
573                 store_u16(&state->regs.cp2c[reg], data);
574                 break;
575         case 31:
576                 data = (data & 0x7ffff000) | !!(data & 0x7f87e000) << 31;
577                 fallthrough;
578         default:
579                 state->regs.cp2c[reg] = data;
580                 break;
581         }
582 }
583
584 void lightrec_mtc(struct lightrec_state *state, union code op, u8 reg, u32 data)
585 {
586         if (op.i.op == OP_CP0) {
587                 lightrec_mtc0(state, reg, data);
588         } else {
589                 if (op.i.op == OP_LWC2 || op.r.rs != OP_CP2_BASIC_CTC2)
590                         lightrec_mtc2(state, reg, data);
591                 else
592                         lightrec_ctc2(state, reg, data);
593
594                 if (state->ops.cop2_notify)
595                         (*state->ops.cop2_notify)(state, op.opcode, data);
596         }
597 }
598
599 static void lightrec_mtc_cb(struct lightrec_state *state, u32 arg)
600 {
601         union code op = (union code) arg;
602         u32 data;
603         u8 reg;
604
605         if (op.i.op == OP_LWC2) {
606                 data = state->cp2_temp_reg;
607                 reg = op.i.rt;
608         } else {
609                 data = state->regs.gpr[op.r.rt];
610                 reg = op.r.rd;
611         }
612
613         lightrec_mtc(state, op, reg, data);
614 }
615
616 void lightrec_rfe(struct lightrec_state *state)
617 {
618         u32 status;
619
620         /* Read CP0 Status register (r12) */
621         status = state->regs.cp0[12];
622
623         /* Switch the bits */
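        /* Pop the interrupt/mode stack: the "previous" and "old" IEc/KUc
         * pairs shift down into the "current" and "previous" slots. */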
624         status = ((status & 0x3c) >> 2) | (status & ~0xf);
625
626         /* Write it back */
627         lightrec_mtc0(state, 12, status);
628 }
629
630 void lightrec_cp(struct lightrec_state *state, union code op)
631 {
632         if (op.i.op == OP_CP0) {
633                 pr_err("Invalid CP opcode to coprocessor #0\n");
634                 return;
635         }
636
637         (*state->ops.cop2_op)(state, op.opcode);
638 }
639
640 static void lightrec_cp_cb(struct lightrec_state *state, u32 arg)
641 {
642         lightrec_cp(state, (union code) arg);
643 }
644
645 static struct block * lightrec_get_block(struct lightrec_state *state, u32 pc)
646 {
647         struct block *block = lightrec_find_block(state->block_cache, pc);
648         u8 old_flags;
649
650         if (block && lightrec_block_is_outdated(state, block)) {
651                 pr_debug("Block at PC 0x%08x is outdated!\n", block->pc);
652
653                 old_flags = block_set_flags(block, BLOCK_IS_DEAD);
654                 if (!(old_flags & BLOCK_IS_DEAD)) {
655                         /* Make sure the recompiler isn't processing the block
656                          * we'll destroy */
657                         if (ENABLE_THREADED_COMPILER)
658                                 lightrec_recompiler_remove(state->rec, block);
659
660                         lightrec_unregister_block(state->block_cache, block);
661                         remove_from_code_lut(state->block_cache, block);
662                         lightrec_free_block(state, block);
663                 }
664
665                 block = NULL;
666         }
667
668         if (!block) {
669                 block = lightrec_precompile_block(state, pc);
670                 if (!block) {
671                         pr_err("Unable to recompile block at PC 0x%x\n", pc);
672                         lightrec_set_exit_flags(state, LIGHTREC_EXIT_SEGFAULT);
673                         return NULL;
674                 }
675
676                 lightrec_register_block(state->block_cache, block);
677         }
678
679         return block;
680 }
681
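/* C-side slow path of the dispatcher: find (or create) the block for the
 * given PC, optionally run it through the interpreter for a first profiling
 * pass, and return a native entry point the dispatcher can jump to. May
 * return NULL when execution has to stop (exit flag set, out of memory). */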
682 static void * get_next_block_func(struct lightrec_state *state, u32 pc)
683 {
684         struct block *block;
685         bool should_recompile;
686         void *func;
687         int err;
688
689         do {
690                 func = lut_read(state, lut_offset(pc));
691                 if (func && func != state->get_next_block)
692                         break;
693
694                 block = lightrec_get_block(state, pc);
695
696                 if (unlikely(!block))
697                         break;
698
699                 if (OPT_REPLACE_MEMSET &&
700                     block_has_flag(block, BLOCK_IS_MEMSET)) {
701                         func = state->memset_func;
702                         break;
703                 }
704
705                 should_recompile = block_has_flag(block, BLOCK_SHOULD_RECOMPILE) &&
706                         !block_has_flag(block, BLOCK_IS_DEAD);
707
708                 if (unlikely(should_recompile)) {
709                         pr_debug("Block at PC 0x%08x should recompile\n", pc);
710
711                         if (ENABLE_THREADED_COMPILER) {
712                                 lightrec_recompiler_add(state->rec, block);
713                         } else {
714                                 err = lightrec_compile_block(state->cstate, block);
715                                 if (err) {
716                                         state->exit_flags = LIGHTREC_EXIT_NOMEM;
717                                         return NULL;
718                                 }
719                         }
720                 }
721
722                 if (ENABLE_THREADED_COMPILER && likely(!should_recompile))
723                         func = lightrec_recompiler_run_first_pass(state, block, &pc);
724                 else
725                         func = block->function;
726
727                 if (likely(func))
728                         break;
729
730                 if (unlikely(block_has_flag(block, BLOCK_NEVER_COMPILE))) {
731                         pc = lightrec_emulate_block(state, block, pc);
732
733                 } else if (!ENABLE_THREADED_COMPILER) {
734                         /* Block wasn't compiled yet - run the interpreter */
735                         if (block_has_flag(block, BLOCK_FULLY_TAGGED))
736                                 pr_debug("Block fully tagged, skipping first pass\n");
737                         else if (ENABLE_FIRST_PASS && likely(!should_recompile))
738                                 pc = lightrec_emulate_block(state, block, pc);
739
740                         /* Then compile it using the profiled data */
741                         err = lightrec_compile_block(state->cstate, block);
742                         if (err) {
743                                 state->exit_flags = LIGHTREC_EXIT_NOMEM;
744                                 return NULL;
745                         }
746                 } else if (unlikely(block_has_flag(block, BLOCK_IS_DEAD))) {
747                         /*
748                          * If the block is dead but has never been compiled,
749                          * then its function pointer is NULL and we cannot
750                          * execute the block. In that case, reap all the dead
751                          * blocks now, and in the next loop we will create a
752                          * new block.
753                          */
754                         lightrec_reaper_reap(state->reaper);
755                 } else {
756                         lightrec_recompiler_add(state->rec, block);
757                 }
758         } while (state->exit_flags == LIGHTREC_EXIT_NORMAL
759                  && state->current_cycle < state->target_cycle);
760
761         state->next_pc = pc;
762         return func;
763 }
764
765 static void * lightrec_alloc_code(struct lightrec_state *state, size_t size)
766 {
767         void *code;
768
769         if (ENABLE_THREADED_COMPILER)
770                 lightrec_code_alloc_lock(state);
771
772         code = tlsf_malloc(state->tlsf, size);
773
774         if (ENABLE_THREADED_COMPILER)
775                 lightrec_code_alloc_unlock(state);
776
777         return code;
778 }
779
780 static void lightrec_realloc_code(struct lightrec_state *state,
781                                   void *ptr, size_t size)
782 {
783         /* NOTE: 'size' MUST be smaller than the size specified during
784          * the allocation. */
785
786         if (ENABLE_THREADED_COMPILER)
787                 lightrec_code_alloc_lock(state);
788
789         tlsf_realloc(state->tlsf, ptr, size);
790
791         if (ENABLE_THREADED_COMPILER)
792                 lightrec_code_alloc_unlock(state);
793 }
794
795 static void lightrec_free_code(struct lightrec_state *state, void *ptr)
796 {
797         if (ENABLE_THREADED_COMPILER)
798                 lightrec_code_alloc_lock(state);
799
800         tlsf_free(state->tlsf, ptr);
801
802         if (ENABLE_THREADED_COMPILER)
803                 lightrec_code_alloc_unlock(state);
804 }
805
806 static void * lightrec_emit_code(struct lightrec_state *state,
807                                  const struct block *block,
808                                  jit_state_t *_jit, unsigned int *size)
809 {
810         bool has_code_buffer = ENABLE_CODE_BUFFER && state->tlsf;
811         jit_word_t code_size, new_code_size;
812         void *code;
813
814         jit_realize();
815
816         if (!ENABLE_DISASSEMBLER)
817                 jit_set_data(NULL, 0, JIT_DISABLE_DATA | JIT_DISABLE_NOTE);
818
819         if (has_code_buffer) {
820                 jit_get_code(&code_size);
821                 code = lightrec_alloc_code(state, (size_t) code_size);
822
823                 if (!code) {
824                         if (ENABLE_THREADED_COMPILER) {
825                                 /* If we're using the threaded compiler, return
826                                  * an allocation error here. The threaded
827                                  * compiler will then empty its job queue and
828                                  * request a code flush using the reaper. */
829                                 return NULL;
830                         }
831
832                         /* Remove outdated blocks, and try again */
833                         lightrec_remove_outdated_blocks(state->block_cache, block);
834
835                         pr_debug("Retrying to allocate %zu bytes...\n", (size_t) code_size);
836
837                         code = lightrec_alloc_code(state, code_size);
838                         if (!code) {
839                                 pr_err("Could not alloc even after removing old blocks!\n");
840                                 return NULL;
841                         }
842                 }
843
844                 jit_set_code(code, code_size);
845         }
846
847         code = jit_emit();
848
849         jit_get_code(&new_code_size);
850         lightrec_register(MEM_FOR_CODE, new_code_size);
851
852         if (has_code_buffer) {
853                 lightrec_realloc_code(state, code, (size_t) new_code_size);
854
855                 pr_debug("Creating code block at address 0x%" PRIxPTR ", "
856                          "code size: %" PRIuPTR " new: %" PRIuPTR "\n",
857                          (uintptr_t) code, code_size, new_code_size);
858         }
859
860         *size = (unsigned int) new_code_size;
861
862         if (state->ops.code_inv)
863                 state->ops.code_inv(code, new_code_size);
864
865         return code;
866 }
867
868 static struct block * generate_wrapper(struct lightrec_state *state)
869 {
870         struct block *block;
871         jit_state_t *_jit;
872         unsigned int i;
873         jit_node_t *addr[C_WRAPPERS_COUNT - 1];
874         jit_node_t *to_end[C_WRAPPERS_COUNT - 1];
875
876         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
877         if (!block)
878                 goto err_no_mem;
879
880         _jit = jit_new_state();
881         if (!_jit)
882                 goto err_free_block;
883
884         jit_name("RW wrapper");
885         jit_note(__FILE__, __LINE__);
886
887         /* Wrapper entry point */
888         jit_prolog();
889         jit_tramp(256);
890
891         /* Add entry points */
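        /* Each C wrapper gets its own entry point, which loads the address
         * of the matching callback (state->c_wrappers[i]) into JIT_R1 and
         * branches to the common trampoline below. The entry addresses are
         * later exported through state->wrappers_eps[]. */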
892         for (i = C_WRAPPERS_COUNT - 1; i > 0; i--) {
893                 jit_ldxi(JIT_R1, LIGHTREC_REG_STATE,
894                          offsetof(struct lightrec_state, c_wrappers[i]));
895                 to_end[i - 1] = jit_b();
896                 addr[i - 1] = jit_indirect();
897         }
898
899         jit_ldxi(JIT_R1, LIGHTREC_REG_STATE,
900                  offsetof(struct lightrec_state, c_wrappers[0]));
901
902         for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
903                 jit_patch(to_end[i]);
904
905         jit_epilog();
906         jit_prolog();
907
908         /* Save all temporaries on stack */
909         for (i = 0; i < NUM_TEMPS; i++) {
910                 if (i + FIRST_TEMP != 1) {
911                         jit_stxi(offsetof(struct lightrec_state, wrapper_regs[i]),
912                                  LIGHTREC_REG_STATE, JIT_R(i + FIRST_TEMP));
913                 }
914         }
915
916         jit_getarg(JIT_R2, jit_arg());
917
918         jit_prepare();
919         jit_pushargr(LIGHTREC_REG_STATE);
920         jit_pushargr(JIT_R2);
921
922         jit_ldxi_ui(JIT_R2, LIGHTREC_REG_STATE,
923                     offsetof(struct lightrec_state, target_cycle));
924
925         /* state->current_cycle = state->target_cycle - delta; */
926         jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, LIGHTREC_REG_CYCLE);
927         jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
928                    LIGHTREC_REG_STATE, LIGHTREC_REG_CYCLE);
929
930         /* Call the wrapper function */
931         jit_finishr(JIT_R1);
932
933         /* delta = state->target_cycle - state->current_cycle */
934         jit_ldxi_ui(LIGHTREC_REG_CYCLE, LIGHTREC_REG_STATE,
935                     offsetof(struct lightrec_state, current_cycle));
936         jit_ldxi_ui(JIT_R1, LIGHTREC_REG_STATE,
937                     offsetof(struct lightrec_state, target_cycle));
938         jit_subr(LIGHTREC_REG_CYCLE, JIT_R1, LIGHTREC_REG_CYCLE);
939
940         /* Restore temporaries from stack */
941         for (i = 0; i < NUM_TEMPS; i++) {
942                 if (i + FIRST_TEMP != 1) {
943                         jit_ldxi(JIT_R(i + FIRST_TEMP), LIGHTREC_REG_STATE,
944                                  offsetof(struct lightrec_state, wrapper_regs[i]));
945                 }
946         }
947
948         jit_ret();
949         jit_epilog();
950
951         block->_jit = _jit;
952         block->opcode_list = NULL;
953         block->flags = BLOCK_NO_OPCODE_LIST;
954         block->nb_ops = 0;
955
956         block->function = lightrec_emit_code(state, block, _jit,
957                                              &block->code_size);
958         if (!block->function)
959                 goto err_free_block;
960
961         state->wrappers_eps[C_WRAPPERS_COUNT - 1] = block->function;
962
963         for (i = 0; i < C_WRAPPERS_COUNT - 1; i++)
964                 state->wrappers_eps[i] = jit_address(addr[i]);
965
966         if (ENABLE_DISASSEMBLER) {
967                 pr_debug("Wrapper block:\n");
968                 jit_disassemble();
969         }
970
971         jit_clear_state();
972         return block;
973
974 err_free_block:
975         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
976 err_no_mem:
977         pr_err("Unable to compile wrapper: Out of memory\n");
978         return NULL;
979 }
980
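/* Native replacement for memset-like loops detected by the optimizer
 * (OPT_REPLACE_MEMSET): the target address is read from $a0 (gpr[4]) and the
 * length, in 32-bit words, from $a1 (gpr[5]). Returns a rough estimate of
 * the cycles the original loop would have taken. */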
981 static u32 lightrec_memset(struct lightrec_state *state)
982 {
983         u32 kunseg_pc = kunseg(state->regs.gpr[4]);
984         void *host;
985         const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg_pc);
986         u32 length = state->regs.gpr[5] * 4;
987
988         if (!map) {
989                 pr_err("Unable to find memory map for memset target address "
990                        "0x%x\n", kunseg_pc);
991                 return 0;
992         }
993
994         pr_debug("Calling host memset, PC 0x%x (host address 0x%" PRIxPTR ") for %u bytes\n",
995                  kunseg_pc, (uintptr_t)host, length);
996         memset(host, 0, length);
997
998         if (!state->invalidate_from_dma_only)
999                 lightrec_invalidate_map(state, map, kunseg_pc, length);
1000
1001         /* Rough estimation of the number of cycles consumed */
1002         return 8 + 5 * ((length + 3) / 4);
1003 }
1004
1005 static struct block * generate_dispatcher(struct lightrec_state *state)
1006 {
1007         struct block *block;
1008         jit_state_t *_jit;
1009         jit_node_t *to_end, *loop, *addr, *addr2, *addr3;
1010         unsigned int i;
1011         u32 offset;
1012
1013         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
1014         if (!block)
1015                 goto err_no_mem;
1016
1017         _jit = jit_new_state();
1018         if (!_jit)
1019                 goto err_free_block;
1020
1021         jit_name("dispatcher");
1022         jit_note(__FILE__, __LINE__);
1023
1024         jit_prolog();
1025         jit_frame(256);
1026
1027         jit_getarg(LIGHTREC_REG_STATE, jit_arg());
1028         jit_getarg(JIT_V0, jit_arg());
1029         jit_getarg(JIT_V1, jit_arg());
1030         jit_getarg_i(LIGHTREC_REG_CYCLE, jit_arg());
1031
1032         /* Force all callee-saved registers to be pushed on the stack */
1033         for (i = 0; i < NUM_REGS; i++)
1034                 jit_movr(JIT_V(i + FIRST_REG), JIT_V(i + FIRST_REG));
1035
1036         loop = jit_label();
1037
1038         /* Call the block's code */
1039         jit_jmpr(JIT_V1);
1040
1041         if (OPT_REPLACE_MEMSET) {
1042                 /* Blocks will jump here when they need to call
1043                  * lightrec_memset() */
1044                 addr3 = jit_indirect();
1045
1046                 jit_movr(JIT_V1, LIGHTREC_REG_CYCLE);
1047
1048                 jit_prepare();
1049                 jit_pushargr(LIGHTREC_REG_STATE);
1050                 jit_finishi(lightrec_memset);
1051
1052                 jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
1053                             offsetof(struct lightrec_state, regs.gpr[31]));
1054
1055                 jit_retval(LIGHTREC_REG_CYCLE);
1056                 jit_subr(LIGHTREC_REG_CYCLE, JIT_V1, LIGHTREC_REG_CYCLE);
1057         }
1058
1059         /* The block will jump here, with the number of cycles remaining in
1060          * LIGHTREC_REG_CYCLE */
1061         addr2 = jit_indirect();
1062
1063         /* Store back the next_pc to the lightrec_state structure */
1064         offset = offsetof(struct lightrec_state, next_pc);
1065         jit_stxi_i(offset, LIGHTREC_REG_STATE, JIT_V0);
1066
1067         /* Jump to end if state->target_cycle < state->current_cycle */
1068         to_end = jit_blei(LIGHTREC_REG_CYCLE, 0);
1069
1070         /* Convert next PC to KUNSEG and avoid mirrors */
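        /* RAM mirrors are stripped by masking with (RAM_SIZE - 1); if bit 28
         * is still set afterwards, the PC is in the BIOS region, and the LUT
         * index becomes RAM_SIZE + (PC & (BIOS_SIZE - 1)), as the code LUT
         * covers RAM followed by the BIOS. */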
1071         jit_andi(JIT_V1, JIT_V0, 0x10000000 | (RAM_SIZE - 1));
1072         jit_rshi_u(JIT_R1, JIT_V1, 28);
1073         jit_andi(JIT_R2, JIT_V0, BIOS_SIZE - 1);
1074         jit_addi(JIT_R2, JIT_R2, RAM_SIZE);
1075         jit_movnr(JIT_V1, JIT_R2, JIT_R1);
1076
1077         /* If possible, use the code LUT */
1078         if (!lut_is_32bit(state))
1079                 jit_lshi(JIT_V1, JIT_V1, 1);
1080         jit_addr(JIT_V1, JIT_V1, LIGHTREC_REG_STATE);
1081
1082         offset = offsetof(struct lightrec_state, code_lut);
1083         if (lut_is_32bit(state))
1084                 jit_ldxi_ui(JIT_V1, JIT_V1, offset);
1085         else
1086                 jit_ldxi(JIT_V1, JIT_V1, offset);
1087
1088         /* If we get non-NULL, loop */
1089         jit_patch_at(jit_bnei(JIT_V1, 0), loop);
1090
1091         /* The code LUT will be set to this address when the block at the target
1092          * PC has been preprocessed but not yet compiled by the threaded
1093          * recompiler */
1094         addr = jit_indirect();
1095
1096         /* Slow path: call C function get_next_block_func() */
1097
1098         if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
1099                 /* We may call the interpreter - update state->current_cycle */
1100                 jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
1101                            offsetof(struct lightrec_state, target_cycle));
1102                 jit_subr(JIT_V1, JIT_R2, LIGHTREC_REG_CYCLE);
1103                 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
1104                            LIGHTREC_REG_STATE, JIT_V1);
1105         }
1106
1107         jit_prepare();
1108         jit_pushargr(LIGHTREC_REG_STATE);
1109         jit_pushargr(JIT_V0);
1110
1111         /* Save the cycles register if needed */
1112         if (!(ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES))
1113                 jit_movr(JIT_V0, LIGHTREC_REG_CYCLE);
1114
1115         /* Get the next block */
1116         jit_finishi(&get_next_block_func);
1117         jit_retval(JIT_V1);
1118
1119         if (ENABLE_FIRST_PASS || OPT_DETECT_IMPOSSIBLE_BRANCHES) {
1120                 /* The interpreter may have updated state->current_cycle and
1121                  * state->target_cycle - recalc the delta */
1122                 jit_ldxi_i(JIT_R1, LIGHTREC_REG_STATE,
1123                            offsetof(struct lightrec_state, current_cycle));
1124                 jit_ldxi_i(JIT_R2, LIGHTREC_REG_STATE,
1125                            offsetof(struct lightrec_state, target_cycle));
1126                 jit_subr(LIGHTREC_REG_CYCLE, JIT_R2, JIT_R1);
1127         } else {
1128                 jit_movr(LIGHTREC_REG_CYCLE, JIT_V0);
1129         }
1130
1131         /* Reset JIT_V0 to the next PC */
1132         jit_ldxi_ui(JIT_V0, LIGHTREC_REG_STATE,
1133                     offsetof(struct lightrec_state, next_pc));
1134
1135         /* If we get non-NULL, loop */
1136         jit_patch_at(jit_bnei(JIT_V1, 0), loop);
1137
1138         /* When exiting, the recompiled code will jump to that address */
1139         jit_note(__FILE__, __LINE__);
1140         jit_patch(to_end);
1141
1142         jit_retr(LIGHTREC_REG_CYCLE);
1143         jit_epilog();
1144
1145         block->_jit = _jit;
1146         block->opcode_list = NULL;
1147         block->flags = BLOCK_NO_OPCODE_LIST;
1148         block->nb_ops = 0;
1149
1150         block->function = lightrec_emit_code(state, block, _jit,
1151                                              &block->code_size);
1152         if (!block->function)
1153                 goto err_free_block;
1154
1155         state->eob_wrapper_func = jit_address(addr2);
1156         if (OPT_REPLACE_MEMSET)
1157                 state->memset_func = jit_address(addr3);
1158         state->get_next_block = jit_address(addr);
1159
1160         if (ENABLE_DISASSEMBLER) {
1161                 pr_debug("Dispatcher block:\n");
1162                 jit_disassemble();
1163         }
1164
1165         /* We're done! */
1166         jit_clear_state();
1167         return block;
1168
1169 err_free_block:
1170         lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1171 err_no_mem:
1172         pr_err("Unable to compile dispatcher: Out of memory\n");
1173         return NULL;
1174 }
1175
1176 union code lightrec_read_opcode(struct lightrec_state *state, u32 pc)
1177 {
1178         void *host = NULL;
1179
1180         lightrec_get_map(state, &host, kunseg(pc));
1181
1182         const u32 *code = (u32 *)host;
1183         return (union code) LE32TOH(*code);
1184 }
1185
1186 unsigned int lightrec_cycles_of_opcode(union code code)
1187 {
1188         return 2;
1189 }
1190
1191 void lightrec_free_opcode_list(struct lightrec_state *state, struct opcode *ops)
1192 {
1193         struct opcode_list *list = container_of(ops, struct opcode_list, ops);
1194
1195         lightrec_free(state, MEM_FOR_IR,
1196                       sizeof(*list) + list->nb_ops * sizeof(struct opcode),
1197                       list);
1198 }
1199
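/* Compute the number of opcodes in the MIPS block: stop at the first
 * instruction treated as a syscall (see is_syscall()), or right after the
 * delay slot of the first unconditional jump. */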
1200 static unsigned int lightrec_get_mips_block_len(const u32 *src)
1201 {
1202         unsigned int i;
1203         union code c;
1204
1205         for (i = 1; ; i++) {
1206                 c.opcode = LE32TOH(*src++);
1207
1208                 if (is_syscall(c))
1209                         return i;
1210
1211                 if (is_unconditional_jump(c))
1212                         return i + 1;
1213         }
1214 }
1215
1216 static struct opcode * lightrec_disassemble(struct lightrec_state *state,
1217                                             const u32 *src, unsigned int *len)
1218 {
1219         struct opcode_list *list;
1220         unsigned int i, length;
1221
1222         length = lightrec_get_mips_block_len(src);
1223
1224         list = lightrec_malloc(state, MEM_FOR_IR,
1225                                sizeof(*list) + sizeof(struct opcode) * length);
1226         if (!list) {
1227                 pr_err("Unable to allocate memory\n");
1228                 return NULL;
1229         }
1230
1231         list->nb_ops = (u16) length;
1232
1233         for (i = 0; i < length; i++) {
1234                 list->ops[i].opcode = LE32TOH(src[i]);
1235                 list->ops[i].flags = 0;
1236         }
1237
1238         *len = length * sizeof(u32);
1239
1240         return list->ops;
1241 }
1242
1243 static struct block * lightrec_precompile_block(struct lightrec_state *state,
1244                                                 u32 pc)
1245 {
1246         struct opcode *list;
1247         struct block *block;
1248         void *host, *addr;
1249         const struct lightrec_mem_map *map = lightrec_get_map(state, &host, kunseg(pc));
1250         const u32 *code = (u32 *) host;
1251         unsigned int length;
1252         bool fully_tagged;
1253         u8 block_flags = 0;
1254
1255         if (!map)
1256                 return NULL;
1257
1258         block = lightrec_malloc(state, MEM_FOR_IR, sizeof(*block));
1259         if (!block) {
1260                 pr_err("Unable to recompile block: Out of memory\n");
1261                 return NULL;
1262         }
1263
1264         list = lightrec_disassemble(state, code, &length);
1265         if (!list) {
1266                 lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
1267                 return NULL;
1268         }
1269
1270         block->pc = pc;
1271         block->_jit = NULL;
1272         block->function = NULL;
1273         block->opcode_list = list;
1274         block->code = code;
1275         block->next = NULL;
1276         block->flags = 0;
1277         block->code_size = 0;
1278         block->precompile_date = state->current_cycle;
1279         block->nb_ops = length / sizeof(u32);
1280
1281         lightrec_optimize(state, block);
1282
1283         length = block->nb_ops * sizeof(u32);
1284
1285         lightrec_register(MEM_FOR_MIPS_CODE, length);
1286
1287         if (ENABLE_DISASSEMBLER) {
1288                 pr_debug("Disassembled block at PC: 0x%08x\n", block->pc);
1289                 lightrec_print_disassembly(block, code);
1290         }
1291
1292         pr_debug("Block size: %hu opcodes\n", block->nb_ops);
1293
1294         /* If the first opcode is an 'impossible' branch, never compile the
1295          * block */
1296         if (should_emulate(block->opcode_list))
1297                 block_flags |= BLOCK_NEVER_COMPILE;
1298
1299         fully_tagged = lightrec_block_is_fully_tagged(block);
1300         if (fully_tagged)
1301                 block_flags |= BLOCK_FULLY_TAGGED;
1302
1303         if (block_flags)
1304                 block_set_flags(block, block_flags);
1305
1306         block->hash = lightrec_calculate_block_hash(block);
1307
1308         if (OPT_REPLACE_MEMSET && block_has_flag(block, BLOCK_IS_MEMSET))
1309                 addr = state->memset_func;
1310         else
1311                 addr = state->get_next_block;
1312         lut_write(state, lut_offset(pc), addr);
1313
1314         pr_debug("Recompile count: %u\n", state->nb_precompile++);
1315
1316         return block;
1317 }
1318
1319 static bool lightrec_block_is_fully_tagged(const struct block *block)
1320 {
1321         const struct opcode *op;
1322         unsigned int i;
1323
1324         for (i = 0; i < block->nb_ops; i++) {
1325                 op = &block->opcode_list[i];
1326
1327                 /* Check all loads/stores of the opcode list and mark
1328                  * the block as fully tagged if every one of them has
1329                  * been tagged with an I/O mode. */
1330                 switch (op->c.i.op) {
1331                 case OP_LB:
1332                 case OP_LH:
1333                 case OP_LWL:
1334                 case OP_LW:
1335                 case OP_LBU:
1336                 case OP_LHU:
1337                 case OP_LWR:
1338                 case OP_SB:
1339                 case OP_SH:
1340                 case OP_SWL:
1341                 case OP_SW:
1342                 case OP_SWR:
1343                 case OP_LWC2:
1344                 case OP_SWC2:
1345                         if (!LIGHTREC_FLAGS_GET_IO_MODE(op->flags))
1346                                 return false;
1347                         fallthrough;
1348                 default:
1349                         continue;
1350                 }
1351         }
1352
1353         return true;
1354 }
1355
1356 static void lightrec_reap_block(struct lightrec_state *state, void *data)
1357 {
1358         struct block *block = data;
1359
1360         pr_debug("Reap dead block at PC 0x%08x\n", block->pc);
1361         lightrec_unregister_block(state->block_cache, block);
1362         lightrec_free_block(state, block);
1363 }
1364
1365 static void lightrec_reap_jit(struct lightrec_state *state, void *data)
1366 {
1367         _jit_destroy_state(data);
1368 }
1369
1370 static void lightrec_free_function(struct lightrec_state *state, void *fn)
1371 {
1372         if (ENABLE_CODE_BUFFER && state->tlsf) {
1373                 pr_debug("Freeing code block at 0x%" PRIxPTR "\n", (uintptr_t) fn);
1374                 lightrec_free_code(state, fn);
1375         }
1376 }
1377
1378 static void lightrec_reap_function(struct lightrec_state *state, void *data)
1379 {
1380         lightrec_free_function(state, data);
1381 }
1382
1383 static void lightrec_reap_opcode_list(struct lightrec_state *state, void *data)
1384 {
1385         lightrec_free_opcode_list(state, data);
1386 }
1387
1388 int lightrec_compile_block(struct lightrec_cstate *cstate,
1389                            struct block *block)
1390 {
1391         struct lightrec_state *state = cstate->state;
1392         struct lightrec_branch_target *target;
1393         bool fully_tagged = false;
1394         struct block *block2;
1395         struct opcode *elm;
1396         jit_state_t *_jit, *oldjit;
1397         jit_node_t *start_of_block;
1398         bool skip_next = false;
1399         void *old_fn, *new_fn;
1400         size_t old_code_size;
1401         unsigned int i, j;
1402         u8 old_flags;
1403         u32 offset;
1404
1405         fully_tagged = lightrec_block_is_fully_tagged(block);
1406         if (fully_tagged)
1407                 block_set_flags(block, BLOCK_FULLY_TAGGED);
1408
1409         _jit = jit_new_state();
1410         if (!_jit)
1411                 return -ENOMEM;
1412
1413         oldjit = block->_jit;
1414         old_fn = block->function;
1415         old_code_size = block->code_size;
1416         block->_jit = _jit;
1417
1418         lightrec_regcache_reset(cstate->reg_cache);
1419         lightrec_preload_pc(cstate->reg_cache);
1420
1421         cstate->cycles = 0;
1422         cstate->nb_local_branches = 0;
1423         cstate->nb_targets = 0;
1424
1425         jit_prolog();
1426         jit_tramp(256);
1427
1428         start_of_block = jit_label();
1429
1430         for (i = 0; i < block->nb_ops; i++) {
1431                 elm = &block->opcode_list[i];
1432
1433                 if (skip_next) {
1434                         skip_next = false;
1435                         continue;
1436                 }
1437
1438                 if (should_emulate(elm)) {
1439                         pr_debug("Branch at offset 0x%x will be emulated\n",
1440                                  i << 2);
1441
1442                         lightrec_emit_eob(cstate, block, i);
1443                         skip_next = !op_flag_no_ds(elm->flags);
1444                 } else {
1445                         lightrec_rec_opcode(cstate, block, i);
1446                         skip_next = !op_flag_no_ds(elm->flags) && has_delay_slot(elm->c);
1447 #ifdef _WIN32
1448                         /* FIXME: GNU Lightning on Windows seems to use our
1449                          * mapped registers as temporaries. Until the actual bug
1450                          * is found and fixed, unconditionally mark our
1451                          * registers as live here. */
1452                         lightrec_regcache_mark_live(cstate->reg_cache, _jit);
1453 #endif
1454                 }
1455
1456                 cstate->cycles += lightrec_cycles_of_opcode(elm->c);
1457         }
1458
1459         for (i = 0; i < cstate->nb_local_branches; i++) {
1460                 struct lightrec_branch *branch = &cstate->local_branches[i];
1461
1462                 pr_debug("Patch local branch to offset 0x%x\n",
1463                          branch->target << 2);
1464
1465                 if (branch->target == 0) {
1466                         jit_patch_at(branch->branch, start_of_block);
1467                         continue;
1468                 }
1469
1470                 for (j = 0; j < cstate->nb_targets; j++) {
1471                         if (cstate->targets[j].offset == branch->target) {
1472                                 jit_patch_at(branch->branch,
1473                                              cstate->targets[j].label);
1474                                 break;
1475                         }
1476                 }
1477
1478                 if (j == cstate->nb_targets)
1479                         pr_err("Unable to find branch target\n");
1480         }
1481
1482         jit_ret();
1483         jit_epilog();
1484
1485         new_fn = lightrec_emit_code(state, block, _jit, &block->code_size);
1486         if (!new_fn) {
1487                 if (!ENABLE_THREADED_COMPILER)
1488                         pr_err("Unable to compile block!\n");
1489                 block->_jit = oldjit;
1490                 jit_clear_state();
1491                 _jit_destroy_state(_jit);
1492                 return -ENOMEM;
1493         }
1494
1495         /* Pause the reaper, because lightrec_reset_lut_offset() may try to set
1496          * the old block->function pointer to the code LUT. */
1497         if (ENABLE_THREADED_COMPILER)
1498                 lightrec_reaper_pause(state->reaper);
1499
1500         block->function = new_fn;
1501         block_clear_flags(block, BLOCK_SHOULD_RECOMPILE);
1502
1503         /* Add compiled function to the LUT */
1504         lut_write(state, lut_offset(block->pc), block->function);
1505
1506         if (ENABLE_THREADED_COMPILER)
1507                 lightrec_reaper_continue(state->reaper);
1508
1509         /* Detect old blocks that have been covered by the new one */
1510         for (i = 0; i < cstate->nb_targets; i++) {
1511                 target = &cstate->targets[i];
1512
1513                 if (!target->offset)
1514                         continue;
1515
1516                 offset = block->pc + target->offset * sizeof(u32);
1517
1518                 /* Pause the reaper while we search for the block until we set
1519                  * the BLOCK_IS_DEAD flag, otherwise the block may be removed
1520                  * under our feet. */
                if (ENABLE_THREADED_COMPILER)
                        lightrec_reaper_pause(state->reaper);

                block2 = lightrec_find_block(state->block_cache, offset);
                if (block2) {
                        /* No need to check if block2 is compilable - it must
                         * be, otherwise block wouldn't be compilable either */

                        /* Set the "block dead" flag to prevent the dynarec from
                         * recompiling this block */
                        old_flags = block_set_flags(block2, BLOCK_IS_DEAD);
                }

                if (ENABLE_THREADED_COMPILER) {
                        lightrec_reaper_continue(state->reaper);

                        /* If block2 was pending for compilation, cancel it.
                         * If it's being compiled right now, wait until it
                         * finishes. */
                        if (block2)
                                lightrec_recompiler_remove(state->rec, block2);
                }

                /* We know from now on that block2 (if present) isn't going to
                 * be compiled. We can override the LUT entry with our new
                 * block's entry point. */
                offset = lut_offset(block->pc) + target->offset;
                lut_write(state, offset, jit_address(target->label));

                if (block2) {
                        pr_debug("Reap block 0x%08x as it's covered by block "
                                 "0x%08x\n", block2->pc, block->pc);

                        /* Finally, reap the block. */
                        if (!ENABLE_THREADED_COMPILER) {
                                lightrec_unregister_block(state->block_cache, block2);
                                lightrec_free_block(state, block2);
                        } else if (!(old_flags & BLOCK_IS_DEAD)) {
                                lightrec_reaper_add(state->reaper,
                                                    lightrec_reap_block,
                                                    block2);
                        }
                }
        }

        if (ENABLE_DISASSEMBLER) {
                pr_debug("Compiling block at PC: 0x%08x\n", block->pc);
                jit_disassemble();
        }

        jit_clear_state();

        if (fully_tagged)
                old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

        if (fully_tagged && !(old_flags & BLOCK_NO_OPCODE_LIST)) {
                pr_debug("Block PC 0x%08x is fully tagged"
                         " - free opcode list\n", block->pc);

                if (ENABLE_THREADED_COMPILER) {
                        lightrec_reaper_add(state->reaper,
                                            lightrec_reap_opcode_list,
                                            block->opcode_list);
                } else {
                        lightrec_free_opcode_list(state, block->opcode_list);
                }
        }

        if (oldjit) {
                pr_debug("Block 0x%08x recompiled, reaping old jit context.\n",
                         block->pc);

                if (ENABLE_THREADED_COMPILER) {
                        lightrec_reaper_add(state->reaper,
                                            lightrec_reap_jit, oldjit);
                        lightrec_reaper_add(state->reaper,
                                            lightrec_reap_function, old_fn);
                } else {
                        _jit_destroy_state(oldjit);
                        lightrec_free_function(state, old_fn);
                }

                lightrec_unregister(MEM_FOR_CODE, old_code_size);
        }

        return 0;
}

static void lightrec_print_info(struct lightrec_state *state)
{
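        /* Only print the statistics once per 0x10000000-cycle window, i.e.
         * when the upper bits of the cycle counter have changed since the
         * last report. */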
        if ((state->current_cycle & ~0xfffffff) != state->old_cycle_counter) {
                pr_info("Lightrec RAM usage: IR %u KiB, CODE %u KiB, "
                        "MIPS %u KiB, TOTAL %u KiB, avg. IPI %f\n",
                        lightrec_get_mem_usage(MEM_FOR_IR) / 1024,
                        lightrec_get_mem_usage(MEM_FOR_CODE) / 1024,
                        lightrec_get_mem_usage(MEM_FOR_MIPS_CODE) / 1024,
                        lightrec_get_total_mem_usage() / 1024,
                        lightrec_get_average_ipi());
                state->old_cycle_counter = state->current_cycle & ~0xfffffff;
        }
}

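/* Rough usage sketch from a frontend's point of view (illustrative only;
 * the frontend-side names used here are hypothetical):
 *
 *     state = lightrec_init(argv[0], memory_map, nb_maps, &ops);
 *     while (running) {
 *             pc = lightrec_execute(state, pc, next_event_cycle);
 *             handle_exit(lightrec_exit_flags(state),
 *                         lightrec_current_cycle_count(state));
 *     }
 *     lightrec_destroy(state);
 */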
u32 lightrec_execute(struct lightrec_state *state, u32 pc, u32 target_cycle)
{
        s32 (*func)(struct lightrec_state *, u32, void *, s32) = (void *)state->dispatcher->function;
        void *block_trace;
        s32 cycles_delta;

        state->exit_flags = LIGHTREC_EXIT_NORMAL;

        /* Handle the cycle counter overflowing */
        if (unlikely(target_cycle < state->current_cycle))
                target_cycle = UINT_MAX;

        state->target_cycle = target_cycle;
        state->next_pc = pc;

        block_trace = get_next_block_func(state, pc);
        if (block_trace) {
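                /* The dispatcher is called with the remaining cycle budget and
                 * returns the number of cycles left when it stopped executing;
                 * the current cycle count is recomputed from that value. */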
                cycles_delta = state->target_cycle - state->current_cycle;

                cycles_delta = (*func)(state, state->next_pc,
                                       block_trace, cycles_delta);

                state->current_cycle = state->target_cycle - cycles_delta;
        }

        if (ENABLE_THREADED_COMPILER)
                lightrec_reaper_reap(state->reaper);

        if (LOG_LEVEL >= INFO_L)
                lightrec_print_info(state);

        return state->next_pc;
}

u32 lightrec_run_interpreter(struct lightrec_state *state, u32 pc,
                             u32 target_cycle)
{
        struct block *block;

        state->exit_flags = LIGHTREC_EXIT_NORMAL;
        state->target_cycle = target_cycle;

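        /* Interpret one block at a time until the target cycle count is
         * reached or no block can be found for the current PC. */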
        do {
                block = lightrec_get_block(state, pc);
                if (!block)
                        break;

                pc = lightrec_emulate_block(state, block, pc);

                if (ENABLE_THREADED_COMPILER)
                        lightrec_reaper_reap(state->reaper);
        } while (state->current_cycle < state->target_cycle);

        if (LOG_LEVEL >= INFO_L)
                lightrec_print_info(state);

        return pc;
}

void lightrec_free_block(struct lightrec_state *state, struct block *block)
{
        u8 old_flags;

        lightrec_unregister(MEM_FOR_MIPS_CODE, block->nb_ops * sizeof(u32));
        old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);

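        /* block_set_flags() returns the previous flags; only free the opcode
         * list here if it has not been freed already. */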
        if (!(old_flags & BLOCK_NO_OPCODE_LIST))
                lightrec_free_opcode_list(state, block->opcode_list);
        if (block->_jit)
                _jit_destroy_state(block->_jit);
        if (block->function) {
                lightrec_free_function(state, block->function);
                lightrec_unregister(MEM_FOR_CODE, block->code_size);
        }
        lightrec_free(state, MEM_FOR_IR, sizeof(*block), block);
}

struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state)
{
        struct lightrec_cstate *cstate;

        cstate = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*cstate));
        if (!cstate)
                return NULL;

        cstate->reg_cache = lightrec_regcache_init(state);
        if (!cstate->reg_cache) {
                lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
                return NULL;
        }

        cstate->state = state;

        return cstate;
}

void lightrec_free_cstate(struct lightrec_cstate *cstate)
{
        lightrec_free_regcache(cstate->reg_cache);
        lightrec_free(cstate->state, MEM_FOR_LIGHTREC, sizeof(*cstate), cstate);
}

struct lightrec_state * lightrec_init(char *argv0,
                                      const struct lightrec_mem_map *map,
                                      size_t nb,
                                      const struct lightrec_ops *ops)
{
        const struct lightrec_mem_map *codebuf_map = &map[PSX_MAP_CODE_BUFFER];
        struct lightrec_state *state;
        uintptr_t addr;
        void *tlsf = NULL;
        bool with_32bit_lut = false;
        size_t lut_size;

        /* Sanity-check ops */
        if (!ops || !ops->cop2_op || !ops->enable_ram) {
                pr_err("Missing callbacks in lightrec_ops structure\n");
                return NULL;
        }

        if (ops->cop2_notify)
                pr_debug("Optional cop2_notify callback in lightrec_ops\n");
        else
                pr_debug("No optional cop2_notify callback in lightrec_ops\n");

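        /* If a code buffer map was provided, manage it with a TLSF allocator
         * so that emitted code can be allocated from that pool. */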
        if (ENABLE_CODE_BUFFER && nb > PSX_MAP_CODE_BUFFER
            && codebuf_map->address) {
                tlsf = tlsf_create_with_pool(codebuf_map->address,
                                             codebuf_map->length);
                if (!tlsf) {
                        pr_err("Unable to initialize code buffer\n");
                        return NULL;
                }

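                /* On 64-bit hosts the code LUT can store 32-bit values instead
                 * of full pointers, as long as the whole code buffer lies
                 * below the 4 GiB boundary. */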
                if (__WORDSIZE == 64) {
                        addr = (uintptr_t) codebuf_map->address + codebuf_map->length - 1;
                        with_32bit_lut = addr == (u32) addr;
                }
        }

        if (with_32bit_lut)
                lut_size = CODE_LUT_SIZE * 4;
        else
                lut_size = CODE_LUT_SIZE * sizeof(void *);

        init_jit(argv0);

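        /* The code LUT is allocated in one block together with the state
         * structure, right after it in memory. */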
        state = calloc(1, sizeof(*state) + lut_size);
        if (!state)
                goto err_finish_jit;

        lightrec_register(MEM_FOR_LIGHTREC, sizeof(*state) + lut_size);

        state->tlsf = tlsf;
        state->with_32bit_lut = with_32bit_lut;

        state->block_cache = lightrec_blockcache_init(state);
        if (!state->block_cache)
                goto err_free_state;

        if (ENABLE_THREADED_COMPILER) {
                state->rec = lightrec_recompiler_init(state);
                if (!state->rec)
                        goto err_free_block_cache;

                state->reaper = lightrec_reaper_init(state);
                if (!state->reaper)
                        goto err_free_recompiler;
        } else {
                state->cstate = lightrec_create_cstate(state);
                if (!state->cstate)
                        goto err_free_block_cache;
        }

        state->nb_maps = nb;
        state->maps = map;

        memcpy(&state->ops, ops, sizeof(*ops));

        state->dispatcher = generate_dispatcher(state);
        if (!state->dispatcher)
                goto err_free_reaper;

        state->c_wrapper_block = generate_wrapper(state);
        if (!state->c_wrapper_block)
                goto err_free_dispatcher;

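        /* Table of C callbacks that the generated wrapper code dispatches to. */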
        state->c_wrappers[C_WRAPPER_RW] = lightrec_rw_cb;
        state->c_wrappers[C_WRAPPER_RW_GENERIC] = lightrec_rw_generic_cb;
        state->c_wrappers[C_WRAPPER_MFC] = lightrec_mfc_cb;
        state->c_wrappers[C_WRAPPER_MTC] = lightrec_mtc_cb;
        state->c_wrappers[C_WRAPPER_CP] = lightrec_cp_cb;

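        /* Cache, for each memory map, the offset between its host address and
         * its PSX bus address, so that guest addresses can be translated with
         * a simple addition. */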
        map = &state->maps[PSX_MAP_BIOS];
        state->offset_bios = (uintptr_t)map->address - map->pc;

        map = &state->maps[PSX_MAP_SCRATCH_PAD];
        state->offset_scratch = (uintptr_t)map->address - map->pc;

        map = &state->maps[PSX_MAP_HW_REGISTERS];
        state->offset_io = (uintptr_t)map->address - map->pc;

        map = &state->maps[PSX_MAP_KERNEL_USER_RAM];
        state->offset_ram = (uintptr_t)map->address - map->pc;

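        /* The RAM mirrors are only considered mapped when the three mirror
         * maps directly follow the base RAM in host memory, 2 MiB apart. */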
        if (state->maps[PSX_MAP_MIRROR1].address == map->address + 0x200000 &&
            state->maps[PSX_MAP_MIRROR2].address == map->address + 0x400000 &&
            state->maps[PSX_MAP_MIRROR3].address == map->address + 0x600000)
                state->mirrors_mapped = true;

        if (state->offset_bios == 0 &&
            state->offset_scratch == 0 &&
            state->offset_ram == 0 &&
            state->offset_io == 0 &&
            state->mirrors_mapped) {
                pr_info("Memory map is perfect. Emitted code will be best.\n");
        } else {
                pr_info("Memory map is sub-par. Emitted code will be slow.\n");
        }

        if (state->with_32bit_lut)
                pr_info("Using 32-bit LUT\n");

        return state;

err_free_dispatcher:
        lightrec_free_block(state, state->dispatcher);
err_free_reaper:
        if (ENABLE_THREADED_COMPILER)
                lightrec_reaper_destroy(state->reaper);
err_free_recompiler:
        if (ENABLE_THREADED_COMPILER)
                lightrec_free_recompiler(state->rec);
        else
                lightrec_free_cstate(state->cstate);
err_free_block_cache:
        lightrec_free_block_cache(state->block_cache);
err_free_state:
        lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
                            lut_elm_size(state) * CODE_LUT_SIZE);
        free(state);
err_finish_jit:
        finish_jit();
        if (ENABLE_CODE_BUFFER && tlsf)
                tlsf_destroy(tlsf);
        return NULL;
}

void lightrec_destroy(struct lightrec_state *state)
{
        /* Force the info to be printed on destroy */
        state->current_cycle = ~state->current_cycle;
        lightrec_print_info(state);

        lightrec_free_block_cache(state->block_cache);
        lightrec_free_block(state, state->dispatcher);
        lightrec_free_block(state, state->c_wrapper_block);

        if (ENABLE_THREADED_COMPILER) {
                lightrec_free_recompiler(state->rec);
                lightrec_reaper_destroy(state->reaper);
        } else {
                lightrec_free_cstate(state->cstate);
        }

        finish_jit();
        if (ENABLE_CODE_BUFFER && state->tlsf)
                tlsf_destroy(state->tlsf);

        lightrec_unregister(MEM_FOR_LIGHTREC, sizeof(*state) +
                            lut_elm_size(state) * CODE_LUT_SIZE);
        free(state);
}

void lightrec_invalidate(struct lightrec_state *state, u32 addr, u32 len)
{
        u32 kaddr = kunseg(addr & ~0x3);
        enum psx_map idx = lightrec_get_map_idx(state, kaddr);

        switch (idx) {
        case PSX_MAP_MIRROR1:
        case PSX_MAP_MIRROR2:
        case PSX_MAP_MIRROR3:
                /* Handle mirrors */
                kaddr &= RAM_SIZE - 1;
                fallthrough;
        case PSX_MAP_KERNEL_USER_RAM:
                break;
        default:
                return;
        }

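        /* Clear one LUT entry per 32-bit word covered by the invalidated
         * range. */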
        memset(lut_address(state, lut_offset(kaddr)), 0,
               ((len + 3) / 4) * lut_elm_size(state));
}

void lightrec_invalidate_all(struct lightrec_state *state)
{
        memset(state->code_lut, 0, lut_elm_size(state) * CODE_LUT_SIZE);
}

void lightrec_set_invalidate_mode(struct lightrec_state *state, bool dma_only)
{
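        /* Changing the invalidation policy flushes the whole code LUT, as
         * entries may have become stale under the previous mode. */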
        if (state->invalidate_from_dma_only != dma_only)
                lightrec_invalidate_all(state);

        state->invalidate_from_dma_only = dma_only;
}

void lightrec_set_exit_flags(struct lightrec_state *state, u32 flags)
{
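        /* A non-normal exit also pulls the target cycle down to the current
         * cycle count, so that execution stops as soon as possible. */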
        if (flags != LIGHTREC_EXIT_NORMAL) {
                state->exit_flags |= flags;
                state->target_cycle = state->current_cycle;
        }
}

u32 lightrec_exit_flags(struct lightrec_state *state)
{
        return state->exit_flags;
}

u32 lightrec_current_cycle_count(const struct lightrec_state *state)
{
        return state->current_cycle;
}

void lightrec_reset_cycle_count(struct lightrec_state *state, u32 cycles)
{
        state->current_cycle = cycles;

        if (state->target_cycle < cycles)
                state->target_cycle = cycles;
}

void lightrec_set_target_cycle_count(struct lightrec_state *state, u32 cycles)
{
        if (state->exit_flags == LIGHTREC_EXIT_NORMAL) {
                if (cycles < state->current_cycle)
                        cycles = state->current_cycle;

                state->target_cycle = cycles;
        }
}

struct lightrec_registers * lightrec_get_registers(struct lightrec_state *state)
{
        return &state->regs;
}