[pcsx_rearmed.git] / deps / lightrec / emitter.c
1// SPDX-License-Identifier: LGPL-2.1-or-later
2/*
3 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
4 */
5
6#include "blockcache.h"
7#include "debug.h"
8#include "disassembler.h"
9#include "emitter.h"
10#include "lightning-wrapper.h"
11#include "optimizer.h"
12#include "regcache.h"
13
14#include <stdbool.h>
15#include <stddef.h>
16
17typedef void (*lightrec_rec_func_t)(struct lightrec_cstate *, const struct block *, u16);
18
19/* Forward declarations */
20static void rec_SPECIAL(struct lightrec_cstate *state, const struct block *block, u16 offset);
21static void rec_REGIMM(struct lightrec_cstate *state, const struct block *block, u16 offset);
22static void rec_CP0(struct lightrec_cstate *state, const struct block *block, u16 offset);
23static void rec_CP2(struct lightrec_cstate *state, const struct block *block, u16 offset);
24static void rec_META(struct lightrec_cstate *state, const struct block *block, u16 offset);
25static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
26 const struct block *block, u16 offset, u8 reg, u8 in_reg);
27static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
28 const struct block *block, u16 offset,
29 u8 reg, u8 out_reg);
30
31static void
32lightrec_jump_to_fn(jit_state_t *_jit, void (*fn)(void))
33{
34 /* Prevent jit_jmpi() from using our cycles register as a temporary */
35 jit_live(LIGHTREC_REG_CYCLE);
36
37 jit_patch_abs(jit_jmpi(), fn);
38}
39
40static void
41lightrec_jump_to_eob(struct lightrec_cstate *state, jit_state_t *_jit)
42{
43 lightrec_jump_to_fn(_jit, state->state->eob_wrapper_func);
44}
45
46static void
47lightrec_jump_to_ds_check(struct lightrec_cstate *state, jit_state_t *_jit)
48{
49 lightrec_jump_to_fn(_jit, state->state->ds_check_func);
50}
51
52static void update_ra_register(struct regcache *reg_cache, jit_state_t *_jit,
53 u8 ra_reg, u32 pc, u32 link)
54{
55 u8 link_reg;
56
57 link_reg = lightrec_alloc_reg_out(reg_cache, _jit, ra_reg, 0);
58 lightrec_load_imm(reg_cache, _jit, link_reg, pc, link);
59 lightrec_free_reg(reg_cache, link_reg);
60}
61
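/* Emit the end-of-block sequence: write the return address when linking,
 * load the new PC (from a register if reg_new_pc >= 0, from the "imm"
 * immediate otherwise), recompile the delay slot when needed, flush the
 * dirty registers, subtract the consumed cycles, then jump to the
 * dispatcher (or to its load-delay variant if the delay slot is a load). */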
62static void lightrec_emit_end_of_block(struct lightrec_cstate *state,
63 const struct block *block, u16 offset,
64 s8 reg_new_pc, u32 imm, u8 ra_reg,
65 u32 link, bool update_cycles)
66{
67 struct regcache *reg_cache = state->reg_cache;
68 jit_state_t *_jit = block->_jit;
69 const struct opcode *op = &block->opcode_list[offset],
70 *ds = get_delay_slot(block->opcode_list, offset);
71 u32 cycles = state->cycles + lightrec_cycles_of_opcode(state->state, op->c);
72
73 jit_note(__FILE__, __LINE__);
74
75 if (link && ra_reg != reg_new_pc)
76 update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
77
78 if (reg_new_pc < 0)
79 lightrec_load_next_pc_imm(reg_cache, _jit, block->pc, imm);
80 else
81 lightrec_load_next_pc(reg_cache, _jit, reg_new_pc);
82
83 if (link && ra_reg == reg_new_pc) {
84 /* Handle the special case: JALR $r0, $r0
85 * In that case the target PC should be the old value of the
86 * register. */
87 update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
88 }
89
90 if (has_delay_slot(op->c) &&
91 !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) {
92 cycles += lightrec_cycles_of_opcode(state->state, ds->c);
93
94 /* Recompile the delay slot */
95 if (ds->c.opcode)
96 lightrec_rec_opcode(state, block, offset + 1);
97 }
98
99 /* Clean the remaining registers */
100 lightrec_clean_regs(reg_cache, _jit);
101
102 if (cycles && update_cycles) {
103 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
104 pr_debug("EOB: %u cycles\n", cycles);
105 }
106
107 if (op_flag_load_delay(ds->flags)
108 && opcode_is_load(ds->c) && !state->no_load_delay) {
109 /* If the delay slot is a load opcode, its target register
110 * will be written after the first opcode of the target is
111 * executed. Handle this by jumping to a special section of
112 * the dispatcher. It expects the loaded value to be in
113 * REG_TEMP, and the target register number to be in JIT_V1.*/
114 jit_movi(JIT_V1, ds->c.i.rt);
115
116 lightrec_jump_to_ds_check(state, _jit);
117 } else {
118 lightrec_jump_to_eob(state, _jit);
119 }
120
121 lightrec_regcache_reset(reg_cache);
122}
123
124void lightrec_emit_jump_to_interpreter(struct lightrec_cstate *state,
125 const struct block *block, u16 offset)
126{
127 struct regcache *reg_cache = state->reg_cache;
128 jit_state_t *_jit = block->_jit;
129
130 lightrec_clean_regs(reg_cache, _jit);
131
132 /* Call the interpreter with the block's address in JIT_V1 and the
133 * PC (which might have an offset) in JIT_V0. */
134 lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
135 block->pc + (offset << 2));
136 if (lightrec_store_next_pc()) {
137 jit_stxi_i(offsetof(struct lightrec_state, next_pc),
138 LIGHTREC_REG_STATE, JIT_V0);
139 }
140
141 jit_movi(JIT_V1, (uintptr_t)block);
142
143 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
144 lightrec_jump_to_fn(_jit, state->state->interpreter_func);
145}
146
147static void lightrec_emit_eob(struct lightrec_cstate *state,
148 const struct block *block, u16 offset)
149{
150 struct regcache *reg_cache = state->reg_cache;
151 jit_state_t *_jit = block->_jit;
152
153 lightrec_clean_regs(reg_cache, _jit);
154
155 lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
156 block->pc + (offset << 2));
157 if (lightrec_store_next_pc()) {
158 jit_stxi_i(offsetof(struct lightrec_state, next_pc),
159 LIGHTREC_REG_STATE, JIT_V0);
160 }
161
162 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
163
164 lightrec_jump_to_eob(state, _jit);
165}
166
167static void rec_special_JR(struct lightrec_cstate *state, const struct block *block, u16 offset)
168{
169 union code c = block->opcode_list[offset].c;
170
171 _jit_name(block->_jit, __func__);
172 lightrec_emit_end_of_block(state, block, offset, c.r.rs, 0, 31, 0, true);
173}
174
175static void rec_special_JALR(struct lightrec_cstate *state, const struct block *block, u16 offset)
176{
177 union code c = block->opcode_list[offset].c;
178
179 _jit_name(block->_jit, __func__);
180 lightrec_emit_end_of_block(state, block, offset, c.r.rs, 0, c.r.rd,
181 get_branch_pc(block, offset, 2), true);
182}
183
184static void rec_J(struct lightrec_cstate *state, const struct block *block, u16 offset)
185{
186 union code c = block->opcode_list[offset].c;
187
188 _jit_name(block->_jit, __func__);
189 lightrec_emit_end_of_block(state, block, offset, -1,
190 (block->pc & 0xf0000000) | (c.j.imm << 2),
191 31, 0, true);
192}
193
194static void rec_JAL(struct lightrec_cstate *state, const struct block *block, u16 offset)
195{
196 union code c = block->opcode_list[offset].c;
197
198 _jit_name(block->_jit, __func__);
199 lightrec_emit_end_of_block(state, block, offset, -1,
200 (block->pc & 0xf0000000) | (c.j.imm << 2),
201 31, get_branch_pc(block, offset, 2), true);
202}
203
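/* Apply the per-opcode register hints computed by the optimizer: depending
 * on the flags, the rd/rt/rs registers are unloaded (written back and
 * forgotten), discarded (forgotten without write-back) or cleaned (written
 * back but kept loaded). */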
204static void lightrec_do_early_unload(struct lightrec_cstate *state,
205 const struct block *block, u16 offset)
206{
207 struct regcache *reg_cache = state->reg_cache;
208 const struct opcode *op = &block->opcode_list[offset];
209 jit_state_t *_jit = block->_jit;
210 unsigned int i;
211 u8 reg;
212 struct {
213 u8 reg, op;
214 } reg_ops[3] = {
215 { op->r.rd, LIGHTREC_FLAGS_GET_RD(op->flags), },
216 { op->i.rt, LIGHTREC_FLAGS_GET_RT(op->flags), },
217 { op->i.rs, LIGHTREC_FLAGS_GET_RS(op->flags), },
218 };
219
220 for (i = 0; i < ARRAY_SIZE(reg_ops); i++) {
221 reg = reg_ops[i].reg;
222
223 switch (reg_ops[i].op) {
224 case LIGHTREC_REG_UNLOAD:
225 lightrec_clean_reg_if_loaded(reg_cache, _jit, reg, true);
226 break;
227
228 case LIGHTREC_REG_DISCARD:
229 lightrec_discard_reg_if_loaded(reg_cache, reg);
230 break;
231
232 case LIGHTREC_REG_CLEAN:
233 lightrec_clean_reg_if_loaded(reg_cache, _jit, reg, false);
234 break;
235 default:
236 break;
237 };
238 }
239}
240
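/* Generic branch emitter. "code" is the inverted condition, used to skip
 * over the branch body when the branch is not taken; "code2" is the
 * original condition, used when a local forward branch can be emitted
 * directly without that indirection. "link" is the return address to write
 * for the and-link variants (0 otherwise), "unconditional" marks branches
 * that are statically known to be taken, and "bz" marks the
 * compare-against-zero forms, for which rt is not a real operand. */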
241static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 offset,
242 jit_code_t code, jit_code_t code2, u32 link, bool unconditional, bool bz)
243{
244 struct regcache *reg_cache = state->reg_cache;
245 struct native_register *regs_backup;
246 jit_state_t *_jit = block->_jit;
247 struct lightrec_branch *branch;
248 const struct opcode *op = &block->opcode_list[offset],
249 *ds = get_delay_slot(block->opcode_list, offset);
250 jit_node_t *addr;
251 bool is_forward = (s16)op->i.imm >= 0;
252 int op_cycles = lightrec_cycles_of_opcode(state->state, op->c);
253 u32 target_offset, cycles = state->cycles + op_cycles;
254 bool no_indirection = false;
255 u32 next_pc;
256 u8 rs, rt;
257
258 jit_note(__FILE__, __LINE__);
259
260 if (!op_flag_no_ds(op->flags))
261 cycles += lightrec_cycles_of_opcode(state->state, ds->c);
262
263 state->cycles = -op_cycles;
264
265 if (!unconditional) {
266 rs = lightrec_alloc_reg_in(reg_cache, _jit, op->i.rs, REG_EXT);
267 rt = bz ? 0 : lightrec_alloc_reg_in(reg_cache,
268 _jit, op->i.rt, REG_EXT);
269
270 /* Unload dead registers before evaluating the branch */
271 if (OPT_EARLY_UNLOAD)
272 lightrec_do_early_unload(state, block, offset);
273
274 if (op_flag_local_branch(op->flags) &&
275 (op_flag_no_ds(op->flags) || !ds->opcode) &&
276 is_forward && !lightrec_has_dirty_regs(reg_cache))
277 no_indirection = true;
278
279 if (no_indirection)
280 pr_debug("Using no indirection for branch at offset 0x%hx\n", offset << 2);
281 }
282
283 if (cycles)
284 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
285
286 if (!unconditional) {
287 /* Generate the branch opcode */
288 if (!no_indirection)
289 addr = jit_new_node_pww(code, NULL, rs, rt);
290
291 lightrec_free_regs(reg_cache);
292 regs_backup = lightrec_regcache_enter_branch(reg_cache);
293 }
294
295 if (op_flag_local_branch(op->flags)) {
296 /* Recompile the delay slot */
297 if (!op_flag_no_ds(op->flags) && ds->opcode) {
298 /* Never handle load delays with local branches. */
299 state->no_load_delay = true;
300 lightrec_rec_opcode(state, block, offset + 1);
301 }
302
303 if (link)
304 update_ra_register(reg_cache, _jit, 31, block->pc, link);
305
306 /* Clean remaining registers */
307 lightrec_clean_regs(reg_cache, _jit);
308
309 target_offset = offset + 1 + (s16)op->i.imm
310 - !!op_flag_no_ds(op->flags);
311 pr_debug("Adding local branch to offset 0x%x\n",
312 target_offset << 2);
313 branch = &state->local_branches[
314 state->nb_local_branches++];
315
316 branch->target = target_offset;
317
318 if (no_indirection)
319 branch->branch = jit_new_node_pww(code2, NULL, rs, rt);
320 else if (is_forward)
321 branch->branch = jit_b();
322 else
323 branch->branch = jit_bgti(LIGHTREC_REG_CYCLE, 0);
324 }
325
326 if (!op_flag_local_branch(op->flags) || !is_forward) {
327 next_pc = get_branch_pc(block, offset, 1 + (s16)op->i.imm);
328 state->no_load_delay = op_flag_local_branch(op->flags);
329 lightrec_emit_end_of_block(state, block, offset, -1, next_pc,
330 31, link, false);
331 }
332
333 if (!unconditional) {
334 if (!no_indirection)
335 jit_patch(addr);
336
337 lightrec_regcache_leave_branch(reg_cache, regs_backup);
338
339 if (bz && link)
340 update_ra_register(reg_cache, _jit, 31, block->pc, link);
341
342 if (!op_flag_no_ds(op->flags) && ds->opcode) {
343 state->no_load_delay = true;
344 lightrec_rec_opcode(state, block, offset + 1);
345 }
346 }
347}
348
349static void rec_BNE(struct lightrec_cstate *state,
350 const struct block *block, u16 offset)
351{
352 union code c = block->opcode_list[offset].c;
353
354 _jit_name(block->_jit, __func__);
355
356 if (c.i.rt == 0)
357 rec_b(state, block, offset, jit_code_beqi, jit_code_bnei, 0, false, true);
358 else
359 rec_b(state, block, offset, jit_code_beqr, jit_code_bner, 0, false, false);
360}
361
362static void rec_BEQ(struct lightrec_cstate *state,
363 const struct block *block, u16 offset)
364{
365 union code c = block->opcode_list[offset].c;
366
367 _jit_name(block->_jit, __func__);
368
369 if (c.i.rt == 0)
370 rec_b(state, block, offset, jit_code_bnei, jit_code_beqi, 0, c.i.rs == 0, true);
371 else
372 rec_b(state, block, offset, jit_code_bner, jit_code_beqr, 0, c.i.rs == c.i.rt, false);
373}
374
375static void rec_BLEZ(struct lightrec_cstate *state,
376 const struct block *block, u16 offset)
377{
378 union code c = block->opcode_list[offset].c;
379
380 _jit_name(block->_jit, __func__);
381 rec_b(state, block, offset, jit_code_bgti, jit_code_blei, 0, c.i.rs == 0, true);
382}
383
384static void rec_BGTZ(struct lightrec_cstate *state,
385 const struct block *block, u16 offset)
386{
387 _jit_name(block->_jit, __func__);
388 rec_b(state, block, offset, jit_code_blei, jit_code_bgti, 0, false, true);
389}
390
391static void rec_regimm_BLTZ(struct lightrec_cstate *state,
392 const struct block *block, u16 offset)
393{
394 _jit_name(block->_jit, __func__);
395 rec_b(state, block, offset, jit_code_bgei, jit_code_blti, 0, false, true);
396}
397
398static void rec_regimm_BLTZAL(struct lightrec_cstate *state,
399 const struct block *block, u16 offset)
400{
401 _jit_name(block->_jit, __func__);
402 rec_b(state, block, offset, jit_code_bgei, jit_code_blti,
403 get_branch_pc(block, offset, 2), false, true);
404}
405
406static void rec_regimm_BGEZ(struct lightrec_cstate *state,
407 const struct block *block, u16 offset)
408{
409 union code c = block->opcode_list[offset].c;
410
411 _jit_name(block->_jit, __func__);
412 rec_b(state, block, offset, jit_code_blti, jit_code_bgei, 0, !c.i.rs, true);
413}
414
415static void rec_regimm_BGEZAL(struct lightrec_cstate *state,
416 const struct block *block, u16 offset)
417{
418 const struct opcode *op = &block->opcode_list[offset];
419 _jit_name(block->_jit, __func__);
420 rec_b(state, block, offset, jit_code_blti, jit_code_bgei,
421 get_branch_pc(block, offset, 2),
422 !op->i.rs, true);
423}
424
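/* Allocate the input register "rs" and the output register "rd" for an
 * opcode. When the early-unload optimization is enabled and rs is dead
 * after this opcode (flagged for unload or discard), its host register is
 * simply remapped to rd instead of allocating a second one. */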
425static void rec_alloc_rs_rd(struct regcache *reg_cache,
426 jit_state_t *_jit,
427 const struct opcode *op,
428 u8 rs, u8 rd,
429 u8 in_flags, u8 out_flags,
430 u8 *rs_out, u8 *rd_out)
431{
432 bool unload, discard;
433 u32 unload_flags;
434
435 if (OPT_EARLY_UNLOAD) {
436 unload_flags = LIGHTREC_FLAGS_GET_RS(op->flags);
437 unload = unload_flags == LIGHTREC_REG_UNLOAD;
438 discard = unload_flags == LIGHTREC_REG_DISCARD;
439 }
440
441 if (OPT_EARLY_UNLOAD && rs && rd != rs && (unload || discard)) {
442 rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags);
443 lightrec_remap_reg(reg_cache, _jit, rs, rd, discard);
444 lightrec_set_reg_out_flags(reg_cache, rs, out_flags);
445 rd = rs;
446 } else {
447 rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags);
448 rd = lightrec_alloc_reg_out(reg_cache, _jit, rd, out_flags);
449 }
450
451 *rs_out = rs;
452 *rd_out = rd;
453}
454
455static void rec_alu_imm(struct lightrec_cstate *state, const struct block *block,
456 u16 offset, jit_code_t code, bool slti)
457{
458 struct regcache *reg_cache = state->reg_cache;
459 union code c = block->opcode_list[offset].c;
460 jit_state_t *_jit = block->_jit;
461 u8 rs, rt, out_flags = REG_EXT;
462
463 if (slti)
464 out_flags |= REG_ZEXT;
465
466 jit_note(__FILE__, __LINE__);
467
468 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
469 c.i.rs, c.i.rt, REG_EXT, out_flags, &rs, &rt);
470
471 jit_new_node_www(code, rt, rs, (s32)(s16) c.i.imm);
472
473 lightrec_free_reg(reg_cache, rs);
474 lightrec_free_reg(reg_cache, rt);
475}
476
477static void rec_alu_special(struct lightrec_cstate *state, const struct block *block,
478 u16 offset, jit_code_t code, bool out_ext)
479{
480 struct regcache *reg_cache = state->reg_cache;
481 union code c = block->opcode_list[offset].c;
482 jit_state_t *_jit = block->_jit;
483 u8 rd, rt, rs;
484
485 jit_note(__FILE__, __LINE__);
486
487 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, REG_EXT);
488 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
489 c.r.rs, c.r.rd, REG_EXT,
490 out_ext ? REG_EXT | REG_ZEXT : 0, &rs, &rd);
491
492 jit_new_node_www(code, rd, rs, rt);
493
494 lightrec_free_reg(reg_cache, rs);
495 lightrec_free_reg(reg_cache, rt);
496 lightrec_free_reg(reg_cache, rd);
497}
498
499static void rec_alu_shiftv(struct lightrec_cstate *state, const struct block *block,
500 u16 offset, jit_code_t code)
501{
502 struct regcache *reg_cache = state->reg_cache;
503 union code c = block->opcode_list[offset].c;
504 jit_state_t *_jit = block->_jit;
505 u8 rd, rt, rs, temp, flags = 0;
506
507 jit_note(__FILE__, __LINE__);
508
509 if (code == jit_code_rshr)
510 flags = REG_EXT;
511 else if (code == jit_code_rshr_u)
512 flags = REG_ZEXT;
513
514 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0);
515 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
516 c.r.rt, c.r.rd, flags, flags, &rt, &rd);
517
518 if (rt != rd) {
519 jit_andi(rd, rs, 0x1f);
520 jit_new_node_www(code, rd, rt, rd);
521 } else {
522 temp = lightrec_alloc_reg_temp(reg_cache, _jit);
523 jit_andi(temp, rs, 0x1f);
524 jit_new_node_www(code, rd, rt, temp);
525 lightrec_free_reg(reg_cache, temp);
526 }
527
528 lightrec_free_reg(reg_cache, rs);
529 lightrec_free_reg(reg_cache, rt);
530 lightrec_free_reg(reg_cache, rd);
531}
532
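/* Emit a constant move for ADDIU opcodes that have $zero as their source
 * or that were flagged LIGHTREC_MOVI; in the latter case the upper 16 bits
 * recorded by the matching LUI (in movi_temp) are folded into the value. */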
533static void rec_movi(struct lightrec_cstate *state,
534 const struct block *block, u16 offset)
535{
536 struct regcache *reg_cache = state->reg_cache;
537 union code c = block->opcode_list[offset].c;
538 jit_state_t *_jit = block->_jit;
539 u16 flags = REG_EXT;
540 s32 value = (s32)(s16) c.i.imm;
541 u8 rt;
542
543 if (block->opcode_list[offset].flags & LIGHTREC_MOVI)
544 value += (s32)((u32)state->movi_temp[c.i.rt] << 16);
545
546 if (value >= 0)
547 flags |= REG_ZEXT;
548
549 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
550
551 jit_movi(rt, value);
552
553 lightrec_free_reg(reg_cache, rt);
554}
555
556static void rec_ADDIU(struct lightrec_cstate *state,
557 const struct block *block, u16 offset)
558{
559 const struct opcode *op = &block->opcode_list[offset];
560
561 _jit_name(block->_jit, __func__);
562
563 if (op->i.rs && !(op->flags & LIGHTREC_MOVI))
564 rec_alu_imm(state, block, offset, jit_code_addi, false);
565 else
566 rec_movi(state, block, offset);
567}
568
569static void rec_ADDI(struct lightrec_cstate *state,
570 const struct block *block, u16 offset)
571{
572 /* TODO: Handle the exception? */
573 _jit_name(block->_jit, __func__);
574 rec_ADDIU(state, block, offset);
575}
576
577static void rec_SLTIU(struct lightrec_cstate *state,
578 const struct block *block, u16 offset)
579{
580 _jit_name(block->_jit, __func__);
581 rec_alu_imm(state, block, offset, jit_code_lti_u, true);
582}
583
584static void rec_SLTI(struct lightrec_cstate *state,
585 const struct block *block, u16 offset)
586{
587 _jit_name(block->_jit, __func__);
588 rec_alu_imm(state, block, offset, jit_code_lti, true);
589}
590
591static void rec_ANDI(struct lightrec_cstate *state,
592 const struct block *block, u16 offset)
593{
594 struct regcache *reg_cache = state->reg_cache;
595 union code c = block->opcode_list[offset].c;
596 jit_state_t *_jit = block->_jit;
597 u8 rs, rt;
598
599 _jit_name(block->_jit, __func__);
600 jit_note(__FILE__, __LINE__);
601
602 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
603 c.i.rs, c.i.rt, 0, REG_EXT | REG_ZEXT, &rs, &rt);
604
605 /* PSX code uses ANDI 0xff / ANDI 0xffff a lot, which are basically
606 * casts to uint8_t / uint16_t. */
607 if (c.i.imm == 0xff)
608 jit_extr_uc(rt, rs);
609 else if (c.i.imm == 0xffff)
610 jit_extr_us(rt, rs);
611 else
612 jit_andi(rt, rs, (u32)(u16) c.i.imm);
613
614 lightrec_free_reg(reg_cache, rs);
615 lightrec_free_reg(reg_cache, rt);
616}
617
618static void rec_alu_or_xor(struct lightrec_cstate *state, const struct block *block,
619 u16 offset, jit_code_t code)
620{
621 struct regcache *reg_cache = state->reg_cache;
622 union code c = block->opcode_list[offset].c;
623 jit_state_t *_jit = block->_jit;
624 u8 rs, rt, flags;
625
626 jit_note(__FILE__, __LINE__);
627
628 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
629 c.i.rs, c.i.rt, 0, 0, &rs, &rt);
630
631 flags = lightrec_get_reg_in_flags(reg_cache, rs);
632 lightrec_set_reg_out_flags(reg_cache, rt, flags);
633
634 jit_new_node_www(code, rt, rs, (u32)(u16) c.i.imm);
635
636 lightrec_free_reg(reg_cache, rs);
637 lightrec_free_reg(reg_cache, rt);
638}
639
640
641static void rec_ORI(struct lightrec_cstate *state,
642 const struct block *block, u16 offset)
643{
644 const struct opcode *op = &block->opcode_list[offset];
645 struct regcache *reg_cache = state->reg_cache;
646 jit_state_t *_jit = block->_jit;
647 s32 val;
648 u8 rt;
649
650 _jit_name(_jit, __func__);
651
652 if (op->flags & LIGHTREC_MOVI) {
653 rt = lightrec_alloc_reg_out(reg_cache, _jit, op->i.rt, REG_EXT);
654
655 val = ((u32)state->movi_temp[op->i.rt] << 16) | op->i.imm;
656 jit_movi(rt, val);
657
658 lightrec_free_reg(reg_cache, rt);
659 } else {
660 rec_alu_or_xor(state, block, offset, jit_code_ori);
661 }
662}
663
664static void rec_XORI(struct lightrec_cstate *state,
665 const struct block *block, u16 offset)
666{
667 _jit_name(block->_jit, __func__);
668 rec_alu_or_xor(state, block, offset, jit_code_xori);
669}
670
671static void rec_LUI(struct lightrec_cstate *state,
672 const struct block *block, u16 offset)
673{
674 struct regcache *reg_cache = state->reg_cache;
675 union code c = block->opcode_list[offset].c;
676 jit_state_t *_jit = block->_jit;
677 u8 rt, flags = REG_EXT;
678
679 if (block->opcode_list[offset].flags & LIGHTREC_MOVI) {
680 state->movi_temp[c.i.rt] = c.i.imm;
681 return;
682 }
683
684 jit_name(__func__);
685 jit_note(__FILE__, __LINE__);
686
687 if (!(c.i.imm & BIT(15)))
688 flags |= REG_ZEXT;
689
690 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
691
692 jit_movi(rt, (s32)(c.i.imm << 16));
693
694 lightrec_free_reg(reg_cache, rt);
695}
696
697static void rec_special_ADDU(struct lightrec_cstate *state,
698 const struct block *block, u16 offset)
699{
700 _jit_name(block->_jit, __func__);
701 rec_alu_special(state, block, offset, jit_code_addr, false);
702}
703
704static void rec_special_ADD(struct lightrec_cstate *state,
705 const struct block *block, u16 offset)
706{
707 /* TODO: Handle the exception? */
708 _jit_name(block->_jit, __func__);
709 rec_alu_special(state, block, offset, jit_code_addr, false);
710}
711
712static void rec_special_SUBU(struct lightrec_cstate *state,
713 const struct block *block, u16 offset)
714{
715 _jit_name(block->_jit, __func__);
716 rec_alu_special(state, block, offset, jit_code_subr, false);
717}
718
719static void rec_special_SUB(struct lightrec_cstate *state,
720 const struct block *block, u16 offset)
721{
722 /* TODO: Handle the exception? */
723 _jit_name(block->_jit, __func__);
724 rec_alu_special(state, block, offset, jit_code_subr, false);
725}
726
727static void rec_special_AND(struct lightrec_cstate *state,
728 const struct block *block, u16 offset)
729{
730 struct regcache *reg_cache = state->reg_cache;
731 union code c = block->opcode_list[offset].c;
732 jit_state_t *_jit = block->_jit;
733 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd;
734
735 _jit_name(block->_jit, __func__);
736 jit_note(__FILE__, __LINE__);
737
738 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
739 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
740 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
741
742 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
743 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
744
745 /* Z(rd) = Z(rs) | Z(rt) */
746 flags_rd = REG_ZEXT & (flags_rs | flags_rt);
747
748 /* E(rd) = (E(rt) & Z(rt)) | (E(rs) & Z(rs)) | (E(rs) & E(rt)) */
749 if (((flags_rs & REG_EXT) && (flags_rt & REG_ZEXT)) ||
750 ((flags_rt & REG_EXT) && (flags_rs & REG_ZEXT)) ||
751 (REG_EXT & flags_rs & flags_rt))
752 flags_rd |= REG_EXT;
753
754 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
755
756 jit_andr(rd, rs, rt);
757
758 lightrec_free_reg(reg_cache, rs);
759 lightrec_free_reg(reg_cache, rt);
760 lightrec_free_reg(reg_cache, rd);
761}
762
763static void rec_special_or_nor(struct lightrec_cstate *state,
764 const struct block *block, u16 offset, bool nor)
765{
766 struct regcache *reg_cache = state->reg_cache;
767 union code c = block->opcode_list[offset].c;
768 jit_state_t *_jit = block->_jit;
769 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd = 0;
770
771 jit_note(__FILE__, __LINE__);
772
773 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
774 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
775 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
776
777 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
778 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
779
780 /* or: Z(rd) = Z(rs) & Z(rt)
781 * nor: Z(rd) = 0 */
782 if (!nor)
783 flags_rd = REG_ZEXT & flags_rs & flags_rt;
784
785 /* E(rd) = E(rs) & E(rt) */
786 if (REG_EXT & flags_rs & flags_rt)
787 flags_rd |= REG_EXT;
788
789 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
790
791 jit_orr(rd, rs, rt);
792
793 if (nor)
794 jit_comr(rd, rd);
795
796 lightrec_free_reg(reg_cache, rs);
797 lightrec_free_reg(reg_cache, rt);
798 lightrec_free_reg(reg_cache, rd);
799}
800
801static void rec_special_OR(struct lightrec_cstate *state,
802 const struct block *block, u16 offset)
803{
804 _jit_name(block->_jit, __func__);
805 rec_special_or_nor(state, block, offset, false);
806}
807
808static void rec_special_NOR(struct lightrec_cstate *state,
809 const struct block *block, u16 offset)
810{
811 _jit_name(block->_jit, __func__);
812 rec_special_or_nor(state, block, offset, true);
813}
814
815static void rec_special_XOR(struct lightrec_cstate *state,
816 const struct block *block, u16 offset)
817{
818 struct regcache *reg_cache = state->reg_cache;
819 union code c = block->opcode_list[offset].c;
820 jit_state_t *_jit = block->_jit;
821 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd;
822
823 _jit_name(block->_jit, __func__);
824
825 jit_note(__FILE__, __LINE__);
826
827 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
828 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
829 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
830
831 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
832 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
833
834 /* Z(rd) = Z(rs) & Z(rt) */
835 flags_rd = REG_ZEXT & flags_rs & flags_rt;
836
837 /* E(rd) = E(rs) & E(rt) */
838 flags_rd |= REG_EXT & flags_rs & flags_rt;
839
840 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
841
842 jit_xorr(rd, rs, rt);
843
844 lightrec_free_reg(reg_cache, rs);
845 lightrec_free_reg(reg_cache, rt);
846 lightrec_free_reg(reg_cache, rd);
847}
848
849static void rec_special_SLTU(struct lightrec_cstate *state,
850 const struct block *block, u16 offset)
851{
852 _jit_name(block->_jit, __func__);
853 rec_alu_special(state, block, offset, jit_code_ltr_u, true);
854}
855
856static void rec_special_SLT(struct lightrec_cstate *state,
857 const struct block *block, u16 offset)
858{
859 _jit_name(block->_jit, __func__);
860 rec_alu_special(state, block, offset, jit_code_ltr, true);
861}
862
863static void rec_special_SLLV(struct lightrec_cstate *state,
864 const struct block *block, u16 offset)
865{
866 _jit_name(block->_jit, __func__);
867 rec_alu_shiftv(state, block, offset, jit_code_lshr);
868}
869
870static void rec_special_SRLV(struct lightrec_cstate *state,
871 const struct block *block, u16 offset)
872{
873 _jit_name(block->_jit, __func__);
874 rec_alu_shiftv(state, block, offset, jit_code_rshr_u);
875}
876
877static void rec_special_SRAV(struct lightrec_cstate *state,
878 const struct block *block, u16 offset)
879{
880 _jit_name(block->_jit, __func__);
881 rec_alu_shiftv(state, block, offset, jit_code_rshr);
882}
883
884static void rec_alu_shift(struct lightrec_cstate *state, const struct block *block,
885 u16 offset, jit_code_t code)
886{
887 struct regcache *reg_cache = state->reg_cache;
888 union code c = block->opcode_list[offset].c;
889 jit_state_t *_jit = block->_jit;
890 u8 rd, rt, flags = 0, out_flags = 0;
891
892 jit_note(__FILE__, __LINE__);
893
894 if (code == jit_code_rshi)
895 flags = REG_EXT;
896 else if (code == jit_code_rshi_u)
897 flags = REG_ZEXT;
898
 899 /* The input reg is zero-extended; if we SRL by at least one bit, we know
 900 * the output reg will be both zero-extended and sign-extended. */
901 out_flags = flags;
902 if (code == jit_code_rshi_u && c.r.imm)
903 out_flags |= REG_EXT;
904
905 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
906 c.r.rt, c.r.rd, flags, out_flags, &rt, &rd);
907
908 jit_new_node_www(code, rd, rt, c.r.imm);
909
910 lightrec_free_reg(reg_cache, rt);
911 lightrec_free_reg(reg_cache, rd);
912}
913
914static void rec_special_SLL(struct lightrec_cstate *state,
915 const struct block *block, u16 offset)
916{
917 _jit_name(block->_jit, __func__);
918 rec_alu_shift(state, block, offset, jit_code_lshi);
919}
920
921static void rec_special_SRL(struct lightrec_cstate *state,
922 const struct block *block, u16 offset)
923{
924 _jit_name(block->_jit, __func__);
925 rec_alu_shift(state, block, offset, jit_code_rshi_u);
926}
927
928static void rec_special_SRA(struct lightrec_cstate *state,
929 const struct block *block, u16 offset)
930{
931 _jit_name(block->_jit, __func__);
932 rec_alu_shift(state, block, offset, jit_code_rshi);
933}
934
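/* MULT/MULTU. The optimizer may flag the opcode with no_lo/no_hi when the
 * LO or HI result is never read, in which case computing or storing that
 * half of the result can be skipped. */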
935static void rec_alu_mult(struct lightrec_cstate *state,
936 const struct block *block, u16 offset, bool is_signed)
937{
938 struct regcache *reg_cache = state->reg_cache;
939 union code c = block->opcode_list[offset].c;
940 u32 flags = block->opcode_list[offset].flags;
941 u8 reg_lo = get_mult_div_lo(c);
942 u8 reg_hi = get_mult_div_hi(c);
943 jit_state_t *_jit = block->_jit;
944 u8 lo, hi, rs, rt, rflags = 0;
945
946 jit_note(__FILE__, __LINE__);
947
948 if (is_signed)
949 rflags = REG_EXT;
950 else
951 rflags = REG_ZEXT;
952
953 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
954 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
955
956 if (!op_flag_no_lo(flags))
957 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
958 else if (__WORDSIZE == 32)
959 lo = lightrec_alloc_reg_temp(reg_cache, _jit);
960
961 if (!op_flag_no_hi(flags))
962 hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, REG_EXT);
963
964 if (__WORDSIZE == 32) {
965 /* On 32-bit systems, do a 32*32->64 bit operation, or a 32*32->32 bit
 966 * operation if the MULT was detected as 32-bit only. */
967 if (!op_flag_no_hi(flags)) {
968 if (is_signed)
969 jit_qmulr(lo, hi, rs, rt);
970 else
971 jit_qmulr_u(lo, hi, rs, rt);
972 } else {
973 jit_mulr(lo, rs, rt);
974 }
975 } else {
976 /* On 64-bit systems, do a 64*64->64 bit operation. */
977 if (op_flag_no_lo(flags)) {
978 jit_mulr(hi, rs, rt);
979 jit_rshi(hi, hi, 32);
980 } else {
981 jit_mulr(lo, rs, rt);
982
 983 /* The 64-bit output value is in $lo; store the upper 32 bits in $hi */
984 if (!op_flag_no_hi(flags))
985 jit_rshi(hi, lo, 32);
986 }
987 }
988
989 lightrec_free_reg(reg_cache, rs);
990 lightrec_free_reg(reg_cache, rt);
991 if (!op_flag_no_lo(flags) || __WORDSIZE == 32)
992 lightrec_free_reg(reg_cache, lo);
993 if (!op_flag_no_hi(flags))
994 lightrec_free_reg(reg_cache, hi);
995}
996
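/* DIV/DIVU. Unless the no_div_check flag was set by the optimizer, a
 * divisor-is-zero check is emitted which reproduces the CPU's behaviour:
 * LO is set to -1 or +1 depending on the sign of the dividend (0xffffffff
 * for DIVU) and HI receives the dividend. */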
997static void rec_alu_div(struct lightrec_cstate *state,
998 const struct block *block, u16 offset, bool is_signed)
999{
1000 struct regcache *reg_cache = state->reg_cache;
1001 union code c = block->opcode_list[offset].c;
1002 u32 flags = block->opcode_list[offset].flags;
1003 bool no_check = op_flag_no_div_check(flags);
1004 u8 reg_lo = get_mult_div_lo(c);
1005 u8 reg_hi = get_mult_div_hi(c);
1006 jit_state_t *_jit = block->_jit;
1007 jit_node_t *branch, *to_end;
1008 u8 lo = 0, hi = 0, rs, rt, rflags = 0;
1009
1010 jit_note(__FILE__, __LINE__);
1011
1012 if (is_signed)
1013 rflags = REG_EXT;
1014 else
1015 rflags = REG_ZEXT;
1016
1017 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
1018 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
1019
1020 if (!op_flag_no_lo(flags))
1021 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
1022
1023 if (!op_flag_no_hi(flags))
1024 hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, 0);
1025
1026 /* Jump to special handler if dividing by zero */
1027 if (!no_check)
1028 branch = jit_beqi(rt, 0);
1029
1030 if (op_flag_no_lo(flags)) {
1031 if (is_signed)
1032 jit_remr(hi, rs, rt);
1033 else
1034 jit_remr_u(hi, rs, rt);
1035 } else if (op_flag_no_hi(flags)) {
1036 if (is_signed)
1037 jit_divr(lo, rs, rt);
1038 else
1039 jit_divr_u(lo, rs, rt);
1040 } else {
1041 if (is_signed)
1042 jit_qdivr(lo, hi, rs, rt);
1043 else
1044 jit_qdivr_u(lo, hi, rs, rt);
1045 }
1046
1047 if (!no_check) {
1048 /* Jump above the div-by-zero handler */
1049 to_end = jit_b();
1050
1051 jit_patch(branch);
1052
1053 if (!op_flag_no_lo(flags)) {
1054 if (is_signed) {
1055 jit_ltr(lo, rs, rt);
1056 jit_lshi(lo, lo, 1);
1057 jit_subi(lo, lo, 1);
1058 } else {
1059 jit_subi(lo, rt, 1);
1060 }
1061 }
1062
1063 if (!op_flag_no_hi(flags))
1064 jit_movr(hi, rs);
1065
1066 jit_patch(to_end);
1067 }
1068
1069 lightrec_free_reg(reg_cache, rs);
1070 lightrec_free_reg(reg_cache, rt);
1071
1072 if (!op_flag_no_lo(flags))
1073 lightrec_free_reg(reg_cache, lo);
1074
1075 if (!op_flag_no_hi(flags))
1076 lightrec_free_reg(reg_cache, hi);
1077}
1078
1079static void rec_special_MULT(struct lightrec_cstate *state,
1080 const struct block *block, u16 offset)
1081{
1082 _jit_name(block->_jit, __func__);
1083 rec_alu_mult(state, block, offset, true);
1084}
1085
1086static void rec_special_MULTU(struct lightrec_cstate *state,
1087 const struct block *block, u16 offset)
1088{
1089 _jit_name(block->_jit, __func__);
1090 rec_alu_mult(state, block, offset, false);
1091}
1092
1093static void rec_special_DIV(struct lightrec_cstate *state,
1094 const struct block *block, u16 offset)
1095{
1096 _jit_name(block->_jit, __func__);
1097 rec_alu_div(state, block, offset, true);
1098}
1099
1100static void rec_special_DIVU(struct lightrec_cstate *state,
1101 const struct block *block, u16 offset)
1102{
1103 _jit_name(block->_jit, __func__);
1104 rec_alu_div(state, block, offset, false);
1105}
1106
1107static void rec_alu_mv_lo_hi(struct lightrec_cstate *state,
1108 const struct block *block, u16 offset,
1109 u8 dst, u8 src)
1110{
1111 struct regcache *reg_cache = state->reg_cache;
1112 jit_state_t *_jit = block->_jit;
1113
1114 jit_note(__FILE__, __LINE__);
1115
1116 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
1117 src, dst, 0, REG_EXT, &src, &dst);
1118
1119 jit_extr_i(dst, src);
1120
1121 lightrec_free_reg(reg_cache, src);
1122 lightrec_free_reg(reg_cache, dst);
1123}
1124
1125static void rec_special_MFHI(struct lightrec_cstate *state,
1126 const struct block *block, u16 offset)
1127{
1128 union code c = block->opcode_list[offset].c;
1129
1130 _jit_name(block->_jit, __func__);
1131 rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_HI);
1132}
1133
1134static void rec_special_MTHI(struct lightrec_cstate *state,
1135 const struct block *block, u16 offset)
1136{
1137 union code c = block->opcode_list[offset].c;
1138
1139 _jit_name(block->_jit, __func__);
1140 rec_alu_mv_lo_hi(state, block, offset, REG_HI, c.r.rs);
1141}
1142
1143static void rec_special_MFLO(struct lightrec_cstate *state,
1144 const struct block *block, u16 offset)
1145{
1146 union code c = block->opcode_list[offset].c;
1147
1148 _jit_name(block->_jit, __func__);
1149 rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_LO);
1150}
1151
1152static void rec_special_MTLO(struct lightrec_cstate *state,
1153 const struct block *block, u16 offset)
1154{
1155 union code c = block->opcode_list[offset].c;
1156
1157 _jit_name(block->_jit, __func__);
1158 rec_alu_mv_lo_hi(state, block, offset, REG_LO, c.r.rs);
1159}
1160
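/* Call one of the C wrappers, with "arg" as its argument. The wrapper's
 * entry point is read from state->wrappers_eps[] (or reused from a
 * temporary register that already holds it), and JIT_R1 is kept unmapped
 * since the wrapper code uses it. */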
1161static void call_to_c_wrapper(struct lightrec_cstate *state,
1162 const struct block *block, u32 arg,
1163 enum c_wrappers wrapper)
1164{
1165 struct regcache *reg_cache = state->reg_cache;
1166 jit_state_t *_jit = block->_jit;
1167 s8 tmp, tmp2;
1168
1169 /* Make sure JIT_R1 is not mapped; it will be used in the C wrapper. */
1170 tmp2 = lightrec_alloc_reg(reg_cache, _jit, JIT_R1);
1171
1172 tmp = lightrec_get_reg_with_value(reg_cache,
1173 (intptr_t) state->state->wrappers_eps[wrapper]);
1174 if (tmp < 0) {
1175 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1176 jit_ldxi(tmp, LIGHTREC_REG_STATE,
1177 offsetof(struct lightrec_state, wrappers_eps[wrapper]));
1178
1179 lightrec_temp_set_value(reg_cache, tmp,
1180 (intptr_t) state->state->wrappers_eps[wrapper]);
1181 }
1182
1183 lightrec_free_reg(reg_cache, tmp2);
1184
1185#ifdef __mips__
1186 /* On MIPS, register t9 is always used as the target register for JALR.
1187 * Therefore if it does not contain the target address we must
1188 * invalidate it. */
1189 if (tmp != _T9)
1190 lightrec_unload_reg(reg_cache, _jit, _T9);
1191#endif
1192
1193 jit_prepare();
1194 jit_pushargi(arg);
1195
1196 lightrec_regcache_mark_live(reg_cache, _jit);
1197 jit_callr(tmp);
1198
1199 lightrec_free_reg(reg_cache, tmp);
1200 lightrec_regcache_mark_live(reg_cache, _jit);
1201}
1202
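/* Generic path for loads/stores that go through the lightrec_rw C wrapper:
 * write back the registers involved, then call the wrapper. "read_rt" is
 * set when the opcode writes rt (loads), "load_rt" when it only reads rt
 * (stores). */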
1203static void rec_io(struct lightrec_cstate *state,
1204 const struct block *block, u16 offset,
1205 bool load_rt, bool read_rt)
1206{
1207 struct regcache *reg_cache = state->reg_cache;
1208 jit_state_t *_jit = block->_jit;
1209 union code c = block->opcode_list[offset].c;
1210 u32 flags = block->opcode_list[offset].flags;
1211 bool is_tagged = LIGHTREC_FLAGS_GET_IO_MODE(flags);
1212 u32 lut_entry;
1213 u8 zero;
1214
1215 jit_note(__FILE__, __LINE__);
1216
1217 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
1218
1219 if (read_rt && likely(c.i.rt))
1220 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
1221 else if (load_rt)
1222 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
1223
1224 if (op_flag_load_delay(flags) && !state->no_load_delay) {
1225 /* Clear state->in_delay_slot_n. This notifies the lightrec_rw
1226 * wrapper that it should write the REG_TEMP register instead of
1227 * the actual output register of the opcode. */
1228 zero = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1229 jit_stxi_c(offsetof(struct lightrec_state, in_delay_slot_n),
1230 LIGHTREC_REG_STATE, zero);
1231 lightrec_free_reg(reg_cache, zero);
1232 }
1233
1234 if (is_tagged) {
1235 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_RW);
1236 } else {
1237 lut_entry = lightrec_get_lut_entry(block);
1238 call_to_c_wrapper(state, block, (lut_entry << 16) | offset,
1239 C_WRAPPER_RW_GENERIC);
1240 }
1241}
1242
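/* Address masks used by the direct memory access paths: the RAM mask
 * covers the four RAM mirrors when they are mapped, and the I/O mask
 * covers the 0x1f80xxxx hardware register area. */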
1243static u32 rec_ram_mask(struct lightrec_state *state)
1244{
1245 return (RAM_SIZE << (state->mirrors_mapped * 2)) - 1;
1246}
1247
1248static u32 rec_io_mask(const struct lightrec_state *state)
1249{
1250 u32 length = state->maps[PSX_MAP_HW_REGISTERS].length;
1251
1252 return 0x1f800000 | GENMASK(31 - clz32(length - 1), 0);
1253}
1254
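/* Emit a store to a directly-mapped memory area: the guest address is
 * masked with "addr_mask", offset by the host "addr_offset", byte-swapped
 * on big-endian hosts, and, when "invalidate" is set, the corresponding
 * code LUT entry is cleared so that any block at that address is
 * invalidated. */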
1255static void rec_store_memory(struct lightrec_cstate *cstate,
1256 const struct block *block,
1257 u16 offset, jit_code_t code,
1258 jit_code_t swap_code,
1259 uintptr_t addr_offset, u32 addr_mask,
1260 bool invalidate)
1261{
1262 const struct lightrec_state *state = cstate->state;
1263 struct regcache *reg_cache = cstate->reg_cache;
1264 struct opcode *op = &block->opcode_list[offset];
1265 jit_state_t *_jit = block->_jit;
1266 union code c = op->c;
1267 u8 rs, rt, tmp, tmp2, tmp3, addr_reg, addr_reg2;
1268 s16 imm = (s16)c.i.imm;
1269 s32 simm = (s32)imm << (1 - lut_is_32bit(state));
1270 s32 lut_offt = offsetof(struct lightrec_state, code_lut);
1271 bool no_mask = op_flag_no_mask(op->flags);
1272 bool add_imm = c.i.imm &&
1273 ((!state->mirrors_mapped && !no_mask) || (invalidate &&
1274 ((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt))));
1275 bool need_tmp = !no_mask || add_imm || invalidate;
1276 bool swc2 = c.i.op == OP_SWC2;
1277 u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
1278 s8 reg_imm;
1279
1280 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1281 if (need_tmp)
1282 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1283
1284 addr_reg = rs;
1285
1286 if (add_imm) {
1287 jit_addi(tmp, addr_reg, (s16)c.i.imm);
1288 lightrec_free_reg(reg_cache, rs);
1289 addr_reg = tmp;
1290 imm = 0;
1291 } else if (simm) {
1292 lut_offt += simm;
1293 }
1294
1295 if (!no_mask) {
1296 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1297 addr_mask);
1298
1299 jit_andr(tmp, addr_reg, reg_imm);
1300 addr_reg = tmp;
1301
1302 lightrec_free_reg(reg_cache, reg_imm);
1303 }
1304
1305 if (addr_offset) {
1306 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1307 addr_offset);
1308 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1309 jit_addr(tmp2, addr_reg, reg_imm);
1310 addr_reg2 = tmp2;
1311
1312 lightrec_free_reg(reg_cache, reg_imm);
1313 } else {
1314 addr_reg2 = addr_reg;
1315 }
1316
1317 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1318
1319 if (is_big_endian() && swap_code && in_reg) {
1320 tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
1321
1322 jit_new_node_ww(swap_code, tmp3, rt);
1323 jit_new_node_www(code, imm, addr_reg2, tmp3);
1324
1325 lightrec_free_reg(reg_cache, tmp3);
1326 } else {
1327 jit_new_node_www(code, imm, addr_reg2, rt);
1328 }
1329
1330 lightrec_free_reg(reg_cache, rt);
1331
1332 if (invalidate) {
1333 tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1334
1335 if (c.i.op != OP_SW) {
1336 jit_andi(tmp, addr_reg, ~3);
1337 addr_reg = tmp;
1338 }
1339
1340 if (!lut_is_32bit(state)) {
1341 jit_lshi(tmp, addr_reg, 1);
1342 addr_reg = tmp;
1343 }
1344
1345 if (addr_reg == rs && c.i.rs == 0) {
1346 addr_reg = LIGHTREC_REG_STATE;
1347 } else {
1348 jit_add_state(tmp, addr_reg);
1349 addr_reg = tmp;
1350 }
1351
1352 if (lut_is_32bit(state))
1353 jit_stxi_i(lut_offt, addr_reg, tmp3);
1354 else
1355 jit_stxi(lut_offt, addr_reg, tmp3);
1356
1357 lightrec_free_reg(reg_cache, tmp3);
1358 }
1359
1360 if (addr_offset)
1361 lightrec_free_reg(reg_cache, tmp2);
1362 if (need_tmp)
1363 lightrec_free_reg(reg_cache, tmp);
1364 lightrec_free_reg(reg_cache, rs);
1365}
1366
1367static void rec_store_ram(struct lightrec_cstate *cstate,
1368 const struct block *block,
1369 u16 offset, jit_code_t code,
1370 jit_code_t swap_code, bool invalidate)
1371{
1372 struct lightrec_state *state = cstate->state;
1373
1374 _jit_note(block->_jit, __FILE__, __LINE__);
1375
1376 return rec_store_memory(cstate, block, offset, code, swap_code,
1377 state->offset_ram, rec_ram_mask(state),
1378 invalidate);
1379}
1380
1381static void rec_store_scratch(struct lightrec_cstate *cstate,
1382 const struct block *block, u16 offset,
1383 jit_code_t code, jit_code_t swap_code)
1384{
1385 _jit_note(block->_jit, __FILE__, __LINE__);
1386
1387 return rec_store_memory(cstate, block, offset, code, swap_code,
1388 cstate->state->offset_scratch,
1389 0x1fffffff, false);
1390}
1391
1392static void rec_store_io(struct lightrec_cstate *cstate,
1393 const struct block *block, u16 offset,
1394 jit_code_t code, jit_code_t swap_code)
1395{
1396 _jit_note(block->_jit, __FILE__, __LINE__);
1397
1398 return rec_store_memory(cstate, block, offset, code, swap_code,
1399 cstate->state->offset_io,
1400 rec_io_mask(cstate->state), false);
1401}
1402
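/* Store to RAM or scratchpad when the target cannot be known at compile
 * time but no code invalidation is required. The address is converted to
 * KUNSEG, and bit 28 is tested at runtime to pick the RAM or scratchpad
 * host offset. */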
1403static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate,
1404 const struct block *block,
1405 u16 offset, jit_code_t code,
1406 jit_code_t swap_code)
1407{
1408 struct lightrec_state *state = cstate->state;
1409 struct regcache *reg_cache = cstate->reg_cache;
1410 union code c = block->opcode_list[offset].c;
1411 jit_state_t *_jit = block->_jit;
1412 jit_node_t *to_not_ram, *to_end;
1413 bool swc2 = c.i.op == OP_SWC2;
1414 u8 tmp, tmp2 = 0, rs, rt, in_reg = swc2 ? REG_TEMP : c.i.rt;
1415 u32 addr_mask;
1416 s32 reg_imm;
1417 s16 imm;
1418
1419 jit_note(__FILE__, __LINE__);
1420 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1421 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1422
1423 if (state->mirrors_mapped)
1424 addr_mask = 0x1f800000 | (4 * RAM_SIZE - 1);
1425 else
1426 addr_mask = 0x1f800000 | (RAM_SIZE - 1);
1427
1428 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
1429
1430 /* Convert to KUNSEG and avoid RAM mirrors */
1431 if (!state->mirrors_mapped && c.i.imm) {
1432 imm = 0;
1433 jit_addi(tmp, rs, (s16)c.i.imm);
1434 jit_andr(tmp, tmp, reg_imm);
1435 } else {
1436 imm = (s16)c.i.imm;
1437 jit_andr(tmp, rs, reg_imm);
1438 }
1439
1440 lightrec_free_reg(reg_cache, rs);
1441 lightrec_free_reg(reg_cache, reg_imm);
1442
1443 if (state->offset_ram != state->offset_scratch) {
1444 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1445
1446 to_not_ram = jit_bmsi(tmp, BIT(28));
1447
1448 jit_movi(tmp2, state->offset_ram);
1449
1450 to_end = jit_b();
1451 jit_patch(to_not_ram);
1452
1453 jit_movi(tmp2, state->offset_scratch);
1454 jit_patch(to_end);
1455 } else if (state->offset_ram) {
1456 tmp2 = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1457 state->offset_ram);
1458 }
1459
1460 if (state->offset_ram || state->offset_scratch) {
1461 jit_addr(tmp, tmp, tmp2);
1462 lightrec_free_reg(reg_cache, tmp2);
1463 }
1464
1465 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1466
1467 if (is_big_endian() && swap_code && in_reg) {
1468 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1469
1470 jit_new_node_ww(swap_code, tmp2, rt);
1471 jit_new_node_www(code, imm, tmp, tmp2);
1472
1473 lightrec_free_reg(reg_cache, tmp2);
1474 } else {
1475 jit_new_node_www(code, imm, tmp, rt);
1476 }
1477
1478 lightrec_free_reg(reg_cache, rt);
1479 lightrec_free_reg(reg_cache, tmp);
1480}
1481
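/* Same as above, but also clears the code LUT entry that corresponds to
 * the written address, in order to invalidate any block residing there. */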
1482static void rec_store_direct(struct lightrec_cstate *cstate, const struct block *block,
1483 u16 offset, jit_code_t code, jit_code_t swap_code)
1484{
1485 struct lightrec_state *state = cstate->state;
1486 u32 ram_size = state->mirrors_mapped ? RAM_SIZE * 4 : RAM_SIZE;
1487 struct regcache *reg_cache = cstate->reg_cache;
1488 union code c = block->opcode_list[offset].c;
1489 jit_state_t *_jit = block->_jit;
1490 jit_node_t *to_not_ram, *to_end;
1491 bool swc2 = c.i.op == OP_SWC2;
1492 u8 tmp, tmp2, tmp3, masked_reg, rs, rt;
1493 u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
1494 u32 addr_mask = 0x1f800000 | (ram_size - 1);
1495 s32 reg_imm;
1496
1497 jit_note(__FILE__, __LINE__);
1498
1499 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1500 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1501 tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1502
1503 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
1504
1505 /* Convert to KUNSEG and avoid RAM mirrors */
1506 if (c.i.imm) {
1507 jit_addi(tmp2, rs, (s16)c.i.imm);
1508 jit_andr(tmp2, tmp2, reg_imm);
1509 } else {
1510 jit_andr(tmp2, rs, reg_imm);
1511 }
1512
1513 lightrec_free_reg(reg_cache, rs);
1514 lightrec_free_reg(reg_cache, reg_imm);
1515 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1516
1517 if (state->offset_ram != state->offset_scratch) {
1518 to_not_ram = jit_bgti(tmp2, ram_size);
1519 masked_reg = tmp2;
1520 } else {
1521 jit_lti_u(tmp, tmp2, ram_size);
1522 jit_movnr(tmp, tmp2, tmp);
1523 masked_reg = tmp;
1524 }
1525
1526 /* Compute the offset to the code LUT */
1527 if (c.i.op == OP_SW)
1528 jit_andi(tmp, masked_reg, RAM_SIZE - 1);
1529 else
1530 jit_andi(tmp, masked_reg, (RAM_SIZE - 1) & ~3);
1531
1532 if (!lut_is_32bit(state))
1533 jit_lshi(tmp, tmp, 1);
1534 jit_add_state(tmp, tmp);
1535
1536 /* Write NULL to the code LUT to invalidate any block that's there */
1537 if (lut_is_32bit(state))
1538 jit_stxi_i(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
1539 else
1540 jit_stxi(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
1541
1542 if (state->offset_ram != state->offset_scratch) {
1543 jit_movi(tmp, state->offset_ram);
1544
1545 to_end = jit_b();
1546 jit_patch(to_not_ram);
1547 }
1548
1549 if (state->offset_ram || state->offset_scratch)
1550 jit_movi(tmp, state->offset_scratch);
1551
1552 if (state->offset_ram != state->offset_scratch)
1553 jit_patch(to_end);
1554
1555 if (state->offset_ram || state->offset_scratch)
1556 jit_addr(tmp2, tmp2, tmp);
1557
1558 lightrec_free_reg(reg_cache, tmp);
1559 lightrec_free_reg(reg_cache, tmp3);
1560
1561 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1562
1563 if (is_big_endian() && swap_code && in_reg) {
1564 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1565
1566 jit_new_node_ww(swap_code, tmp, rt);
1567 jit_new_node_www(code, 0, tmp2, tmp);
1568
1569 lightrec_free_reg(reg_cache, tmp);
1570 } else {
1571 jit_new_node_www(code, 0, tmp2, rt);
1572 }
1573
1574 lightrec_free_reg(reg_cache, rt);
1575 lightrec_free_reg(reg_cache, tmp2);
1576}
1577
1578static void rec_store(struct lightrec_cstate *state,
1579 const struct block *block, u16 offset,
1580 jit_code_t code, jit_code_t swap_code)
1581{
1582 u32 flags = block->opcode_list[offset].flags;
1583 u32 mode = LIGHTREC_FLAGS_GET_IO_MODE(flags);
1584 bool no_invalidate = op_flag_no_invalidate(flags) ||
1585 (state->state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY);
1586 union code c = block->opcode_list[offset].c;
1587 bool is_swc2 = c.i.op == OP_SWC2;
1588
1589 if (is_swc2) {
1590 switch (mode) {
1591 case LIGHTREC_IO_RAM:
1592 case LIGHTREC_IO_SCRATCH:
1593 case LIGHTREC_IO_DIRECT:
1594 case LIGHTREC_IO_DIRECT_HW:
1595 rec_cp2_do_mfc2(state, block, offset, c.i.rt, REG_TEMP);
1596 break;
1597 default:
1598 break;
1599 }
1600 }
1601
1602 switch (mode) {
1603 case LIGHTREC_IO_RAM:
1604 rec_store_ram(state, block, offset, code,
1605 swap_code, !no_invalidate);
1606 break;
1607 case LIGHTREC_IO_SCRATCH:
1608 rec_store_scratch(state, block, offset, code, swap_code);
1609 break;
1610 case LIGHTREC_IO_DIRECT:
1611 if (no_invalidate) {
1612 rec_store_direct_no_invalidate(state, block, offset,
1613 code, swap_code);
1614 } else {
1615 rec_store_direct(state, block, offset, code, swap_code);
1616 }
1617 break;
1618 case LIGHTREC_IO_DIRECT_HW:
1619 rec_store_io(state, block, offset, code, swap_code);
1620 break;
1621 default:
1622 rec_io(state, block, offset, true, false);
1623 return;
1624 }
1625
1626 if (is_swc2)
1627 lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP);
1628}
1629
1630static void rec_SB(struct lightrec_cstate *state,
1631 const struct block *block, u16 offset)
1632{
1633 _jit_name(block->_jit, __func__);
1634 rec_store(state, block, offset, jit_code_stxi_c, 0);
1635}
1636
1637static void rec_SH(struct lightrec_cstate *state,
1638 const struct block *block, u16 offset)
1639{
1640 _jit_name(block->_jit, __func__);
1641 rec_store(state, block, offset,
1642 jit_code_stxi_s, jit_code_bswapr_us);
1643}
1644
1645static void rec_SW(struct lightrec_cstate *state,
1646 const struct block *block, u16 offset)
 1648{
1649 union code c = block->opcode_list[offset].c;
1650
1651 _jit_name(block->_jit, c.i.op == OP_SWC2 ? "rec_SWC2" : "rec_SW");
1652 rec_store(state, block, offset,
1653 jit_code_stxi_i, jit_code_bswapr_ui);
1654}
1655
1656static void rec_SWL(struct lightrec_cstate *state,
1657 const struct block *block, u16 offset)
1658{
1659 _jit_name(block->_jit, __func__);
1660 rec_io(state, block, offset, true, false);
1661}
1662
1663static void rec_SWR(struct lightrec_cstate *state,
1664 const struct block *block, u16 offset)
1665{
1666 _jit_name(block->_jit, __func__);
1667 rec_io(state, block, offset, true, false);
1668}
1669
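/* Emit a load from a directly-mapped memory area. The result goes to rt,
 * or to REG_TEMP for LWC2 opcodes and for loads located in a delay slot
 * when load-delay emulation is active; loads to $zero are dropped. */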
1670static void rec_load_memory(struct lightrec_cstate *cstate,
1671 const struct block *block, u16 offset,
1672 jit_code_t code, jit_code_t swap_code, bool is_unsigned,
1673 uintptr_t addr_offset, u32 addr_mask)
1674{
1675 struct regcache *reg_cache = cstate->reg_cache;
1676 struct opcode *op = &block->opcode_list[offset];
1677 bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
1678 jit_state_t *_jit = block->_jit;
1679 u8 rs, rt, out_reg, addr_reg, flags = REG_EXT;
1680 bool no_mask = op_flag_no_mask(op->flags);
1681 union code c = op->c;
1682 s8 reg_imm;
1683 s16 imm;
1684
1685 if (load_delay || c.i.op == OP_LWC2)
1686 out_reg = REG_TEMP;
1687 else if (c.i.rt)
1688 out_reg = c.i.rt;
1689 else
1690 return;
1691
1692 if (is_unsigned)
1693 flags |= REG_ZEXT;
1694
1695 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1696 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
1697
1698 if (!cstate->state->mirrors_mapped && c.i.imm && !no_mask) {
1699 jit_addi(rt, rs, (s16)c.i.imm);
1700 addr_reg = rt;
1701 imm = 0;
1702 } else {
1703 addr_reg = rs;
1704 imm = (s16)c.i.imm;
1705 }
1706
1707 if (!no_mask) {
1708 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1709 addr_mask);
1710
1711 jit_andr(rt, addr_reg, reg_imm);
1712 addr_reg = rt;
1713
1714 lightrec_free_reg(reg_cache, reg_imm);
1715 }
1716
1717 if (addr_offset) {
1718 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1719 addr_offset);
1720
1721 jit_addr(rt, addr_reg, reg_imm);
1722 addr_reg = rt;
1723
1724 lightrec_free_reg(reg_cache, reg_imm);
1725 }
1726
1727 jit_new_node_www(code, rt, addr_reg, imm);
1728
1729 if (is_big_endian() && swap_code) {
1730 jit_new_node_ww(swap_code, rt, rt);
1731
1732 if (c.i.op == OP_LH)
1733 jit_extr_s(rt, rt);
1734 else if (c.i.op == OP_LW && __WORDSIZE == 64)
1735 jit_extr_i(rt, rt);
1736 }
1737
1738 lightrec_free_reg(reg_cache, rs);
1739 lightrec_free_reg(reg_cache, rt);
1740}
1741
1742static void rec_load_ram(struct lightrec_cstate *cstate,
1743 const struct block *block, u16 offset,
1744 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1745{
1746 _jit_note(block->_jit, __FILE__, __LINE__);
1747
1748 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1749 cstate->state->offset_ram, rec_ram_mask(cstate->state));
1750}
1751
1752static void rec_load_bios(struct lightrec_cstate *cstate,
1753 const struct block *block, u16 offset,
1754 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1755{
1756 _jit_note(block->_jit, __FILE__, __LINE__);
1757
1758 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1759 cstate->state->offset_bios, 0x1fffffff);
1760}
1761
1762static void rec_load_scratch(struct lightrec_cstate *cstate,
1763 const struct block *block, u16 offset,
1764 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1765{
1766 _jit_note(block->_jit, __FILE__, __LINE__);
1767
1768 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1769 cstate->state->offset_scratch, 0x1fffffff);
1770}
1771
1772static void rec_load_io(struct lightrec_cstate *cstate,
1773 const struct block *block, u16 offset,
1774 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1775{
1776 _jit_note(block->_jit, __FILE__, __LINE__);
1777
1778 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1779 cstate->state->offset_io, rec_io_mask(cstate->state));
1780}
1781
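/* Load whose target area (RAM, BIOS or scratchpad) is not known at compile
 * time: bits 28 and 22 of the address are tested at runtime to select the
 * proper host offset before performing the access. */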
1782static void rec_load_direct(struct lightrec_cstate *cstate,
1783 const struct block *block, u16 offset,
1784 jit_code_t code, jit_code_t swap_code,
1785 bool is_unsigned)
1786{
1787 struct lightrec_state *state = cstate->state;
1788 struct regcache *reg_cache = cstate->reg_cache;
1789 struct opcode *op = &block->opcode_list[offset];
1790 bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
1791 jit_state_t *_jit = block->_jit;
1792 jit_node_t *to_not_ram, *to_not_bios, *to_end, *to_end2;
1793 u8 tmp, rs, rt, out_reg, addr_reg, flags = REG_EXT;
1794 union code c = op->c;
1795 s32 addr_mask;
1796 u32 reg_imm;
1797 s8 offt_reg;
1798 s16 imm;
1799
1800 if (load_delay || c.i.op == OP_LWC2)
1801 out_reg = REG_TEMP;
1802 else if (c.i.rt)
1803 out_reg = c.i.rt;
1804 else
1805 return;
1806
1807 if (is_unsigned)
1808 flags |= REG_ZEXT;
1809
1810 jit_note(__FILE__, __LINE__);
1811 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1812 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
1813
1814 if ((state->offset_ram == state->offset_bios &&
1815 state->offset_ram == state->offset_scratch &&
1816 state->mirrors_mapped) || !c.i.imm) {
1817 addr_reg = rs;
1818 imm = (s16)c.i.imm;
1819 } else {
1820 jit_addi(rt, rs, (s16)c.i.imm);
1821 addr_reg = rt;
1822 imm = 0;
1823
1824 if (c.i.rs != c.i.rt)
1825 lightrec_free_reg(reg_cache, rs);
1826 }
1827
1828 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1829
1830 if (state->offset_ram == state->offset_bios &&
1831 state->offset_ram == state->offset_scratch) {
1832 if (!state->mirrors_mapped)
1833 addr_mask = 0x1f800000 | (RAM_SIZE - 1);
1834 else
1835 addr_mask = 0x1fffffff;
1836
1837 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1838 addr_mask);
1839 if (!state->mirrors_mapped) {
1840 jit_andi(tmp, addr_reg, BIT(28));
1841 jit_rshi_u(tmp, tmp, 28 - 22);
1842 jit_orr(tmp, tmp, reg_imm);
1843 jit_andr(rt, addr_reg, tmp);
1844 } else {
1845 jit_andr(rt, addr_reg, reg_imm);
1846 }
1847
1848 lightrec_free_reg(reg_cache, reg_imm);
1849
1850 if (state->offset_ram) {
1851 offt_reg = lightrec_get_reg_with_value(reg_cache,
1852 state->offset_ram);
1853 if (offt_reg < 0) {
1854 jit_movi(tmp, state->offset_ram);
1855 lightrec_temp_set_value(reg_cache, tmp,
1856 state->offset_ram);
1857 } else {
1858 lightrec_free_reg(reg_cache, tmp);
1859 tmp = offt_reg;
1860 }
1861 }
1862 } else {
1863 to_not_ram = jit_bmsi(addr_reg, BIT(28));
1864
1865 /* Convert to KUNSEG and avoid RAM mirrors */
1866 jit_andi(rt, addr_reg, RAM_SIZE - 1);
1867
1868 if (state->offset_ram)
1869 jit_movi(tmp, state->offset_ram);
1870
1871 to_end = jit_b();
1872
1873 jit_patch(to_not_ram);
1874
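		/* Address bit 22 tells the BIOS (0x1fc00000) apart from the
		 * scratchpad (0x1f800000). */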
1875 if (state->offset_bios != state->offset_scratch)
1876 to_not_bios = jit_bmci(addr_reg, BIT(22));
1877
1878 /* Convert to KUNSEG */
1879 jit_andi(rt, addr_reg, 0x1fc00000 | (BIOS_SIZE - 1));
1880
1881 jit_movi(tmp, state->offset_bios);
1882
1883 if (state->offset_bios != state->offset_scratch) {
1884 to_end2 = jit_b();
1885
1886 jit_patch(to_not_bios);
1887
1888 /* Convert to KUNSEG */
1889 jit_andi(rt, addr_reg, 0x1f800fff);
1890
1891 if (state->offset_scratch)
1892 jit_movi(tmp, state->offset_scratch);
1893
1894 jit_patch(to_end2);
1895 }
1896
1897 jit_patch(to_end);
1898 }
1899
1900 if (state->offset_ram || state->offset_bios || state->offset_scratch)
1901 jit_addr(rt, rt, tmp);
1902
1903 jit_new_node_www(code, rt, rt, imm);
1904
1905 if (is_big_endian() && swap_code) {
1906 jit_new_node_ww(swap_code, rt, rt);
1907
1908 if (c.i.op == OP_LH)
1909 jit_extr_s(rt, rt);
1910 else if (c.i.op == OP_LW && __WORDSIZE == 64)
1911 jit_extr_i(rt, rt);
1912 }
1913
1914 lightrec_free_reg(reg_cache, addr_reg);
1915 lightrec_free_reg(reg_cache, rt);
1916 lightrec_free_reg(reg_cache, tmp);
1917}
1918
1919static void rec_load(struct lightrec_cstate *state, const struct block *block,
1920 u16 offset, jit_code_t code, jit_code_t swap_code,
1921 bool is_unsigned)
1922{
1923 const struct opcode *op = &block->opcode_list[offset];
1924 u32 flags = op->flags;
1925
1926 switch (LIGHTREC_FLAGS_GET_IO_MODE(flags)) {
1927 case LIGHTREC_IO_RAM:
1928 rec_load_ram(state, block, offset, code, swap_code, is_unsigned);
1929 break;
1930 case LIGHTREC_IO_BIOS:
1931 rec_load_bios(state, block, offset, code, swap_code, is_unsigned);
1932 break;
1933 case LIGHTREC_IO_SCRATCH:
1934 rec_load_scratch(state, block, offset, code, swap_code, is_unsigned);
1935 break;
1936 case LIGHTREC_IO_DIRECT_HW:
1937 rec_load_io(state, block, offset, code, swap_code, is_unsigned);
1938 break;
1939 case LIGHTREC_IO_DIRECT:
1940 rec_load_direct(state, block, offset, code, swap_code, is_unsigned);
1941 break;
1942 default:
1943 rec_io(state, block, offset, false, true);
1944 return;
1945 }
1946
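	/* For LWC2 the value was loaded into REG_TEMP; move it into the
	 * target GTE data register and drop the temporary. */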
1947 if (op->i.op == OP_LWC2) {
1948 rec_cp2_do_mtc2(state, block, offset, op->i.rt, REG_TEMP);
1949 lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP);
1950 }
1951}
1952
1953static void rec_LB(struct lightrec_cstate *state, const struct block *block, u16 offset)
1954{
1955 _jit_name(block->_jit, __func__);
1956 rec_load(state, block, offset, jit_code_ldxi_c, 0, false);
1957}
1958
1959static void rec_LBU(struct lightrec_cstate *state, const struct block *block, u16 offset)
1960{
1961 _jit_name(block->_jit, __func__);
1962 rec_load(state, block, offset, jit_code_ldxi_uc, 0, true);
1963}
1964
1965static void rec_LH(struct lightrec_cstate *state, const struct block *block, u16 offset)
1966{
1967 jit_code_t code = is_big_endian() ? jit_code_ldxi_us : jit_code_ldxi_s;
1968
1969 _jit_name(block->_jit, __func__);
1970 rec_load(state, block, offset, code, jit_code_bswapr_us, false);
1971}
1972
1973static void rec_LHU(struct lightrec_cstate *state, const struct block *block, u16 offset)
1974{
1975 _jit_name(block->_jit, __func__);
1976 rec_load(state, block, offset, jit_code_ldxi_us, jit_code_bswapr_us, true);
1977}
1978
1979static void rec_LWL(struct lightrec_cstate *state, const struct block *block, u16 offset)
1980{
1981 _jit_name(block->_jit, __func__);
1982 rec_io(state, block, offset, true, true);
1983}
1984
1985static void rec_LWR(struct lightrec_cstate *state, const struct block *block, u16 offset)
1986{
1987 _jit_name(block->_jit, __func__);
1988 rec_io(state, block, offset, true, true);
1989}
1990
1991static void rec_LW(struct lightrec_cstate *state, const struct block *block, u16 offset)
1992{
1993 union code c = block->opcode_list[offset].c;
1994 jit_code_t code;
1995
1996 if (is_big_endian() && __WORDSIZE == 64)
1997 code = jit_code_ldxi_ui;
1998 else
1999 code = jit_code_ldxi_i;
2000
2001 _jit_name(block->_jit, c.i.op == OP_LWC2 ? "rec_LWC2" : "rec_LW");
2002 rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
2003}
2004
2005static void rec_exit_early(struct lightrec_cstate *state,
2006 const struct block *block, u16 offset,
2007 u32 exit_code, u32 pc)
2008{
2009 struct regcache *reg_cache = state->reg_cache;
2010 jit_state_t *_jit = block->_jit;
2011 u8 tmp;
2012
2013 _jit_note(block->_jit, __FILE__, __LINE__);
2014
2015 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2016
2017 jit_movi(tmp, exit_code);
2018 jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
2019 LIGHTREC_REG_STATE, tmp);
2020
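	/* Write back the number of cycles actually executed
	 * (target_cycle - remaining) into both current_cycle and
	 * target_cycle, and zero the in-register cycle counter. */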
2021 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2022 offsetof(struct lightrec_state, target_cycle));
2023 jit_subr(tmp, tmp, LIGHTREC_REG_CYCLE);
2024 jit_movi(LIGHTREC_REG_CYCLE, 0);
2025 jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
2026 LIGHTREC_REG_STATE, tmp);
2027 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
2028 LIGHTREC_REG_STATE, tmp);
2029
2030 lightrec_free_reg(reg_cache, tmp);
2031
2032 lightrec_emit_end_of_block(state, block, offset, -1, pc, 31, 0, true);
2033}
2034
2035static void rec_special_SYSCALL(struct lightrec_cstate *state,
2036 const struct block *block, u16 offset)
2037{
2038 _jit_name(block->_jit, __func__);
2039
2040	/* TODO: the return address should be "pc - 4" if we're in a delay slot */

2041 rec_exit_early(state, block, offset, LIGHTREC_EXIT_SYSCALL,
2042 get_ds_pc(block, offset, 0));
2043}
2044
2045static void rec_special_BREAK(struct lightrec_cstate *state,
2046 const struct block *block, u16 offset)
2047{
2048 _jit_name(block->_jit, __func__);
2049 rec_exit_early(state, block, offset, LIGHTREC_EXIT_BREAK,
2050 get_ds_pc(block, offset, 0));
2051}
2052
2053static void rec_mfc(struct lightrec_cstate *state, const struct block *block, u16 offset)
2054{
2055 struct regcache *reg_cache = state->reg_cache;
2056 union code c = block->opcode_list[offset].c;
2057 jit_state_t *_jit = block->_jit;
2058
2059 jit_note(__FILE__, __LINE__);
2060
2061 if (c.i.op != OP_SWC2)
2062 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
2063
2064 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MFC);
2065}
2066
2067static void rec_mtc(struct lightrec_cstate *state, const struct block *block, u16 offset)
2068{
2069 struct regcache *reg_cache = state->reg_cache;
2070 union code c = block->opcode_list[offset].c;
2071 jit_state_t *_jit = block->_jit;
2072
2073 jit_note(__FILE__, __LINE__);
2074 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
2075 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
2076 lightrec_clean_reg_if_loaded(reg_cache, _jit, REG_TEMP, false);
2077
2078 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MTC);
2079
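	/* A CP0 write to SR (reg 12) or Cause (reg 13) may unmask a pending
	 * interrupt, hence the early end of block below so that it can be
	 * serviced. */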
2080 if (c.i.op == OP_CP0 &&
2081 !op_flag_no_ds(block->opcode_list[offset].flags) &&
2082 (c.r.rd == 12 || c.r.rd == 13))
2083 lightrec_emit_end_of_block(state, block, offset, -1,
2084 get_ds_pc(block, offset, 1),
2085 0, 0, true);
2086}
2087
2088static void
2089rec_mfc0(struct lightrec_cstate *state, const struct block *block, u16 offset)
2090{
2091 struct regcache *reg_cache = state->reg_cache;
2092 union code c = block->opcode_list[offset].c;
2093 jit_state_t *_jit = block->_jit;
2094 u8 rt;
2095
2096 jit_note(__FILE__, __LINE__);
2097
2098 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, REG_EXT);
2099
2100 jit_ldxi_i(rt, LIGHTREC_REG_STATE,
2101 offsetof(struct lightrec_state, regs.cp0[c.r.rd]));
2102
2103 lightrec_free_reg(reg_cache, rt);
2104}
2105
2106static bool block_uses_icache(const struct lightrec_cstate *state,
2107 const struct block *block)
2108{
2109 const struct lightrec_mem_map *map = &state->state->maps[PSX_MAP_KERNEL_USER_RAM];
2110 u32 pc = kunseg(block->pc);
2111
2112 if (pc < map->pc || pc >= map->pc + map->length)
2113 return false;
2114
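	/* Cached code only runs from kuseg/kseg0 (top nibble below 0xa);
	 * kseg1 is uncached. */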
2115 return (block->pc >> 28) < 0xa;
2116}
2117
2118static void
2119rec_mtc0(struct lightrec_cstate *state, const struct block *block, u16 offset)
2120{
2121 struct regcache *reg_cache = state->reg_cache;
2122 const union code c = block->opcode_list[offset].c;
2123 jit_state_t *_jit = block->_jit;
2124 u8 rt, tmp = 0, tmp2, status;
2125 jit_node_t *to_end;
2126
2127 jit_note(__FILE__, __LINE__);
2128
2129 switch(c.r.rd) {
2130 case 1:
2131 case 4:
2132 case 8:
2133 case 14:
2134 case 15:
2135 /* Those registers are read-only */
2136 return;
2137 default:
2138 break;
2139 }
2140
2141 if (!block_uses_icache(state, block) && c.r.rd == 12) {
2142		/* If we are not running code from RAM through kuseg or
2143		 * kseg0, handle writes to the Status register in C, as the
2144		 * code may toggle bit 16, which isolates the cache. Code
2145		 * running from kuseg or kseg0 in RAM cannot do that. */
2146 rec_mtc(state, block, offset);
2147 return;
2148 }
2149
2150 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rt, 0);
2151
2152 if (c.r.rd != 13) {
2153 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[c.r.rd]),
2154 LIGHTREC_REG_STATE, rt);
2155 }
2156
2157 if (c.r.rd == 12 || c.r.rd == 13) {
2158 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2159 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2160 offsetof(struct lightrec_state, regs.cp0[13]));
2161
2162 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2163 }
2164
2165 if (c.r.rd == 12) {
2166 status = rt;
2167 } else if (c.r.rd == 13) {
2168 /* Cause = (Cause & ~0x0300) | (value & 0x0300) */
2169 jit_andi(tmp2, rt, 0x0300);
2170 jit_ori(tmp, tmp, 0x0300);
2171 jit_xori(tmp, tmp, 0x0300);
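		/* (the OR/XOR pair above clears bits 8-9 of Cause without
		 * needing a separate mask register) */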
2172 jit_orr(tmp, tmp, tmp2);
2173 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
2174 offsetof(struct lightrec_state, regs.cp0[12]));
2175 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[13]),
2176 LIGHTREC_REG_STATE, tmp);
2177 status = tmp2;
2178 }
2179
2180 if (c.r.rd == 12 || c.r.rd == 13) {
2181		/* Exit dynarec in case there's a software interrupt.
2182		 * exit_flags = !!(status & cause & 0x0300) & status; */
2183 jit_andr(tmp, tmp, status);
2184 jit_andi(tmp, tmp, 0x0300);
2185 jit_nei(tmp, tmp, 0);
2186 jit_andr(tmp, tmp, status);
2187 }
2188
2189 if (c.r.rd == 12) {
2190 /* Exit dynarec in case we unmask a hardware interrupt.
2191 * exit_flags = !(~status & 0x401) */
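		/* (0x401 covers SR bit 0, IEc, and bit 10, IM2: the mask bit
		 * of the PSX hardware interrupt line) */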
2192
2193 jit_comr(tmp2, status);
2194 jit_andi(tmp2, tmp2, 0x401);
2195 jit_eqi(tmp2, tmp2, 0);
2196 jit_orr(tmp, tmp, tmp2);
2197 }
2198
2199 lightrec_free_reg(reg_cache, rt);
2200
2201 if (c.r.rd == 12 || c.r.rd == 13) {
2202 to_end = jit_beqi(tmp, 0);
2203
2204 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
2205 offsetof(struct lightrec_state, target_cycle));
2206 jit_subr(tmp2, tmp2, LIGHTREC_REG_CYCLE);
2207 jit_movi(LIGHTREC_REG_CYCLE, 0);
2208 jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
2209 LIGHTREC_REG_STATE, tmp2);
2210 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
2211 LIGHTREC_REG_STATE, tmp2);
2212
2213
2214 jit_patch(to_end);
2215 }
2216
2217 if (!op_flag_no_ds(block->opcode_list[offset].flags) &&
2218 (c.r.rd == 12 || c.r.rd == 13)) {
2219 state->cycles += lightrec_cycles_of_opcode(state->state, c);
2220 lightrec_emit_eob(state, block, offset + 1);
2221 }
2222}
2223
2224static void rec_cp0_MFC0(struct lightrec_cstate *state,
2225 const struct block *block, u16 offset)
2226{
2227 _jit_name(block->_jit, __func__);
2228 rec_mfc0(state, block, offset);
2229}
2230
2231static void rec_cp0_CFC0(struct lightrec_cstate *state,
2232 const struct block *block, u16 offset)
2233{
2234 _jit_name(block->_jit, __func__);
2235 rec_mfc0(state, block, offset);
2236}
2237
2238static void rec_cp0_MTC0(struct lightrec_cstate *state,
2239 const struct block *block, u16 offset)
2240{
2241 _jit_name(block->_jit, __func__);
2242 rec_mtc0(state, block, offset);
2243}
2244
2245static void rec_cp0_CTC0(struct lightrec_cstate *state,
2246 const struct block *block, u16 offset)
2247{
2248 _jit_name(block->_jit, __func__);
2249 rec_mtc0(state, block, offset);
2250}
2251
2252static unsigned int cp2d_i_offset(u8 reg)
2253{
2254 return offsetof(struct lightrec_state, regs.cp2d[reg]);
2255}
2256
2257static unsigned int cp2d_s_offset(u8 reg)
2258{
2259 return cp2d_i_offset(reg) + is_big_endian() * 2;
2260}
2261
2262static unsigned int cp2c_i_offset(u8 reg)
2263{
2264 return offsetof(struct lightrec_state, regs.cp2c[reg]);
2265}
2266
2267static unsigned int cp2c_s_offset(u8 reg)
2268{
2269 return cp2c_i_offset(reg) + is_big_endian() * 2;
2270}
2271
2272static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
2273 const struct block *block, u16 offset,
2274 u8 reg, u8 out_reg)
2275{
2276 struct regcache *reg_cache = state->reg_cache;
2277 jit_state_t *_jit = block->_jit;
2278 const u32 zext_regs = 0x300f0080;
2279 u8 rt, tmp, tmp2, tmp3, out, flags;
2280 unsigned int i;
2281
2282 _jit_name(block->_jit, __func__);
2283
2284 if (state->state->ops.cop2_notify) {
2285 /* We must call cop2_notify, handle that in C. */
2286 rec_mfc(state, block, offset);
2287 return;
2288 }
2289
2290 flags = (zext_regs & BIT(reg)) ? REG_ZEXT : REG_EXT;
2291 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
2292
2293 if (reg == 15)
2294 reg = 14;
2295
2296 switch (reg) {
2297 case 1:
2298 case 3:
2299 case 5:
2300 case 8:
2301 case 9:
2302 case 10:
2303 case 11:
2304 jit_ldxi_s(rt, LIGHTREC_REG_STATE, cp2d_s_offset(reg));
2305 break;
2306 case 7:
2307 case 16:
2308 case 17:
2309 case 18:
2310 case 19:
2311 jit_ldxi_us(rt, LIGHTREC_REG_STATE, cp2d_s_offset(reg));
2312 break;
2313 case 28:
2314 case 29:
2315 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2316 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2317 tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
2318
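		/* Rebuild ORGB/IRGB from the IR1-IR3 registers (cp2d[9..11]):
		 * rt = sat5(IR1 >> 7) | sat5(IR2 >> 7) << 5
		 *      | sat5(IR3 >> 7) << 10,
		 * where sat5() clamps to [0, 0x1f]. */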
2319 for (i = 0; i < 3; i++) {
2320 out = i == 0 ? rt : tmp;
2321
2322 jit_ldxi_s(tmp, LIGHTREC_REG_STATE, cp2d_s_offset(9 + i));
2323 jit_movi(tmp2, 0x1f);
2324 jit_rshi(out, tmp, 7);
2325
2326 jit_ltr(tmp3, tmp2, out);
2327 jit_movnr(out, tmp2, tmp3);
2328
2329 jit_gei(tmp2, out, 0);
2330 jit_movzr(out, tmp2, tmp2);
2331
2332 if (i > 0) {
2333 jit_lshi(tmp, tmp, 5 * i);
2334 jit_orr(rt, rt, tmp);
2335 }
2336 }
2337
2338
2339 lightrec_free_reg(reg_cache, tmp);
2340 lightrec_free_reg(reg_cache, tmp2);
2341 lightrec_free_reg(reg_cache, tmp3);
2342 break;
2343 default:
2344 jit_ldxi_i(rt, LIGHTREC_REG_STATE, cp2d_i_offset(reg));
2345 break;
2346 }
2347
2348 lightrec_free_reg(reg_cache, rt);
2349}
2350
2351static void rec_cp2_basic_MFC2(struct lightrec_cstate *state,
2352 const struct block *block, u16 offset)
2353{
2354 const union code c = block->opcode_list[offset].c;
2355
2356 rec_cp2_do_mfc2(state, block, offset, c.r.rd, c.r.rt);
2357}
2358
2359static void rec_cp2_basic_CFC2(struct lightrec_cstate *state,
2360 const struct block *block, u16 offset)
2361{
2362 struct regcache *reg_cache = state->reg_cache;
2363 const union code c = block->opcode_list[offset].c;
2364 jit_state_t *_jit = block->_jit;
2365 u8 rt;
2366
2367 _jit_name(block->_jit, __func__);
2368
2369 if (state->state->ops.cop2_notify) {
2370 /* We must call cop2_notify, handle that in C. */
2371 rec_mfc(state, block, offset);
2372 return;
2373 }
2374
2375 switch (c.r.rd) {
2376 case 4:
2377 case 12:
2378 case 20:
2379 case 26:
2380 case 27:
2381 case 29:
2382 case 30:
2383 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_EXT);
2384 jit_ldxi_s(rt, LIGHTREC_REG_STATE, cp2c_s_offset(c.r.rd));
2385 break;
2386 default:
2387 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_ZEXT);
2388 jit_ldxi_ui(rt, LIGHTREC_REG_STATE, cp2c_i_offset(c.r.rd));
2389 break;
2390 }
2391
2392 lightrec_free_reg(reg_cache, rt);
2393}
2394
2395static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
2396 const struct block *block, u16 offset,
2397 u8 reg, u8 in_reg)
2398{
2399 struct regcache *reg_cache = state->reg_cache;
2400 jit_state_t *_jit = block->_jit;
2401 u8 rt, tmp, tmp2, flags = 0;
2402
2403 _jit_name(block->_jit, __func__);
2404
2405 if (state->state->ops.cop2_notify) {
2406 /* We must call cop2_notify, handle that in C. */
2407 rec_mtc(state, block, offset);
2408 return;
2409 }
2410
2411 if (reg == 31)
2412 return;
2413
2414 if (reg == 30)
2415 flags |= REG_EXT;
2416
2417 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, flags);
2418
2419 switch (reg) {
2420 case 15:
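		/* Writing SXYP (reg 15) pushes the screen XY FIFO:
		 * SXY0 <- SXY1, SXY1 <- SXY2, SXY2 <- new value. */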
2421 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2422 jit_ldxi_i(tmp, LIGHTREC_REG_STATE, cp2d_i_offset(13));
2423
2424 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2425 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE, cp2d_i_offset(14));
2426
2427 jit_stxi_i(cp2d_i_offset(12), LIGHTREC_REG_STATE, tmp);
2428 jit_stxi_i(cp2d_i_offset(13), LIGHTREC_REG_STATE, tmp2);
2429 jit_stxi_i(cp2d_i_offset(14), LIGHTREC_REG_STATE, rt);
2430
2431 lightrec_free_reg(reg_cache, tmp);
2432 lightrec_free_reg(reg_cache, tmp2);
2433 break;
2434 case 28:
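		/* IRGB write: expand each 5-bit colour field into IR1-IR3
		 * (cp2d[9..11]), scaled to the 0x000..0xf80 range. */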
2435 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2436
2437 jit_lshi(tmp, rt, 7);
2438 jit_andi(tmp, tmp, 0xf80);
2439 jit_stxi_s(cp2d_s_offset(9), LIGHTREC_REG_STATE, tmp);
2440
2441 jit_lshi(tmp, rt, 2);
2442 jit_andi(tmp, tmp, 0xf80);
2443 jit_stxi_s(cp2d_s_offset(10), LIGHTREC_REG_STATE, tmp);
2444
2445 jit_rshi(tmp, rt, 3);
2446 jit_andi(tmp, tmp, 0xf80);
2447 jit_stxi_s(cp2d_s_offset(11), LIGHTREC_REG_STATE, tmp);
2448
2449 lightrec_free_reg(reg_cache, tmp);
2450 break;
2451 case 30:
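		/* Writing LZCS (reg 30) also computes LZCR (reg 31): the
		 * number of leading bits equal to the sign bit. */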
2452 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2453
2454 /* if (rt < 0) rt = ~rt; */
2455 jit_rshi(tmp, rt, 31);
2456 jit_xorr(tmp, rt, tmp);
2457
2458 /* Count leading zeros */
2459 jit_clzr(tmp, tmp);
2460 if (__WORDSIZE != 32)
2461 jit_subi(tmp, tmp, __WORDSIZE - 32);
2462
2463 jit_stxi_i(cp2d_i_offset(31), LIGHTREC_REG_STATE, tmp);
2464
2465 lightrec_free_reg(reg_cache, tmp);
2466 fallthrough;
2467 default:
2468 jit_stxi_i(cp2d_i_offset(reg), LIGHTREC_REG_STATE, rt);
2469 break;
2470 }
2471
2472 lightrec_free_reg(reg_cache, rt);
2473}
2474
2475static void rec_cp2_basic_MTC2(struct lightrec_cstate *state,
2476 const struct block *block, u16 offset)
2477{
2478 const union code c = block->opcode_list[offset].c;
2479
2480 rec_cp2_do_mtc2(state, block, offset, c.r.rd, c.r.rt);
2481}
2482
2483static void rec_cp2_basic_CTC2(struct lightrec_cstate *state,
2484 const struct block *block, u16 offset)
2485{
2486 struct regcache *reg_cache = state->reg_cache;
2487 const union code c = block->opcode_list[offset].c;
2488 jit_state_t *_jit = block->_jit;
2489 u8 rt, tmp, tmp2;
2490
2491 _jit_name(block->_jit, __func__);
2492
2493 if (state->state->ops.cop2_notify) {
2494 /* We must call cop2_notify, handle that in C. */
2495 rec_mtc(state, block, offset);
2496 return;
2497 }
2498
2499 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
2500
2501 switch (c.r.rd) {
2502 case 4:
2503 case 12:
2504 case 20:
2505 case 26:
2506 case 27:
2507 case 29:
2508 case 30:
2509 jit_stxi_s(cp2c_s_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
2510 break;
2511 case 31:
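		/* FLAG register: only bits 12-30 are writable, and bit 31 is
		 * the logical OR of bits 13-18 and 23-30 (error summary). */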
2512 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2513 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2514
2515 jit_andi(tmp, rt, 0x7f87e000);
2516 jit_nei(tmp, tmp, 0);
2517 jit_lshi(tmp, tmp, 31);
2518
2519 jit_andi(tmp2, rt, 0x7ffff000);
2520 jit_orr(tmp, tmp2, tmp);
2521
2522 jit_stxi_i(cp2c_i_offset(31), LIGHTREC_REG_STATE, tmp);
2523
2524 lightrec_free_reg(reg_cache, tmp);
2525 lightrec_free_reg(reg_cache, tmp2);
2526 break;
2527
2528 default:
2529 jit_stxi_i(cp2c_i_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
2530 }
2531
2532 lightrec_free_reg(reg_cache, rt);
2533}
2534
2535static void rec_cp0_RFE(struct lightrec_cstate *state,
2536 const struct block *block, u16 offset)
2537{
2538 struct regcache *reg_cache = state->reg_cache;
2539 jit_state_t *_jit = block->_jit;
2540 u8 status, tmp;
2541
2542 jit_name(__func__);
2543 jit_note(__FILE__, __LINE__);
2544
2545 status = lightrec_alloc_reg_temp(reg_cache, _jit);
2546 jit_ldxi_i(status, LIGHTREC_REG_STATE,
2547 offsetof(struct lightrec_state, regs.cp0[12]));
2548
2549 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2550
2551	/* status = ((status >> 2) & 0xf) | (status & ~0xf); */
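	/* RFE pops the KU/IE stack held in the low bits of SR: the
	 * "previous" bits move into "current" and the "old" bits into
	 * "previous". */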
2552 jit_rshi(tmp, status, 2);
2553 jit_andi(tmp, tmp, 0xf);
2554 jit_andi(status, status, ~0xful);
2555 jit_orr(status, status, tmp);
2556
2557 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2558 offsetof(struct lightrec_state, regs.cp0[13]));
2559 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[12]),
2560 LIGHTREC_REG_STATE, status);
2561
2562 /* Exit dynarec in case there's a software interrupt.
2563 * exit_flags = !!(status & cause & 0x0300) & status; */
2564 jit_andr(tmp, tmp, status);
2565 jit_andi(tmp, tmp, 0x0300);
2566 jit_nei(tmp, tmp, 0);
2567 jit_andr(tmp, tmp, status);
2568 jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
2569 LIGHTREC_REG_STATE, tmp);
2570
2571 lightrec_free_reg(reg_cache, status);
2572 lightrec_free_reg(reg_cache, tmp);
2573}
2574
2575static void rec_CP(struct lightrec_cstate *state,
2576 const struct block *block, u16 offset)
2577{
2578 union code c = block->opcode_list[offset].c;
2579 jit_state_t *_jit = block->_jit;
2580
2581 jit_name(__func__);
2582 jit_note(__FILE__, __LINE__);
2583
2584 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_CP);
2585}
2586
2587static void rec_meta_MOV(struct lightrec_cstate *state,
2588 const struct block *block, u16 offset)
2589{
2590 struct regcache *reg_cache = state->reg_cache;
2591 const struct opcode *op = &block->opcode_list[offset];
2592 union code c = op->c;
2593 jit_state_t *_jit = block->_jit;
2594 bool unload_rd;
2595 bool unload_rs, discard_rs;
2596 u8 rs, rd;
2597
2598 _jit_name(block->_jit, __func__);
2599 jit_note(__FILE__, __LINE__);
2600
2601 unload_rs = OPT_EARLY_UNLOAD
2602 && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_UNLOAD;
2603 discard_rs = OPT_EARLY_UNLOAD
2604 && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_DISCARD;
2605
2606 if ((unload_rs || discard_rs) && c.m.rs) {
2607 /* If the source register is going to be unloaded or discarded,
2608 * then we can simply mark its host register as now pointing to
2609 * the destination register. */
2610 pr_debug("Remap %s to %s at offset 0x%x\n",
2611 lightrec_reg_name(c.m.rs), lightrec_reg_name(c.m.rd),
2612 offset << 2);
2613 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2614 lightrec_remap_reg(reg_cache, _jit, rs, c.m.rd, discard_rs);
2615 lightrec_free_reg(reg_cache, rs);
2616 return;
2617 }
2618
2619 unload_rd = OPT_EARLY_UNLOAD
2620 && LIGHTREC_FLAGS_GET_RD(op->flags) == LIGHTREC_REG_UNLOAD;
2621
2622 if (c.m.rs && !lightrec_reg_is_loaded(reg_cache, c.m.rs)) {
2623 /* The source register is not yet loaded - we can load its value
2624 * from the register cache directly into the target register. */
2625 rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT);
2626
2627 jit_ldxi_i(rd, LIGHTREC_REG_STATE,
2628 offsetof(struct lightrec_state, regs.gpr) + (c.m.rs << 2));
2629
2630 lightrec_free_reg(reg_cache, rd);
2631 } else if (unload_rd) {
2632 /* If the destination register will be unloaded right after the
2633 * MOV meta-opcode, we don't actually need to write any host
2634 * register - we can just store the source register directly to
2635 * the register cache, at the offset corresponding to the
2636 * destination register. */
2637 lightrec_discard_reg_if_loaded(reg_cache, c.m.rd);
2638
2639 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2640
2641 jit_stxi_i(offsetof(struct lightrec_state, regs.gpr)
2642 + (c.m.rd << 2), LIGHTREC_REG_STATE, rs);
2643
2644 lightrec_free_reg(reg_cache, rs);
2645 } else {
2646 if (c.m.rs)
2647 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2648
2649 rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT);
2650
2651 if (c.m.rs == 0) {
2652 jit_movi(rd, 0);
2653 } else {
2654 jit_extr_i(rd, rs);
2655 lightrec_free_reg(reg_cache, rs);
2656 }
2657
2658 lightrec_free_reg(reg_cache, rd);
2659 }
2660}
2661
2662static void rec_meta_EXTC_EXTS(struct lightrec_cstate *state,
2663 const struct block *block,
2664 u16 offset)
2665{
2666 struct regcache *reg_cache = state->reg_cache;
2667 union code c = block->opcode_list[offset].c;
2668 jit_state_t *_jit = block->_jit;
2669 u8 rs, rd;
2670
2671 _jit_name(block->_jit, __func__);
2672 jit_note(__FILE__, __LINE__);
2673
2674 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
2675 c.m.rs, c.m.rd, 0, REG_EXT, &rs, &rd);
2676
2677 if (c.m.op == OP_META_EXTC)
2678 jit_extr_c(rd, rs);
2679 else
2680 jit_extr_s(rd, rs);
2681
2682 lightrec_free_reg(reg_cache, rs);
2683 lightrec_free_reg(reg_cache, rd);
2684}
2685
2686static void rec_meta_MULT2(struct lightrec_cstate *state,
2687 const struct block *block,
2688 u16 offset)
2689{
2690 struct regcache *reg_cache = state->reg_cache;
2691 union code c = block->opcode_list[offset].c;
2692 jit_state_t *_jit = block->_jit;
2693 u8 reg_lo = get_mult_div_lo(c);
2694 u8 reg_hi = get_mult_div_hi(c);
2695 u32 flags = block->opcode_list[offset].flags;
2696 bool is_signed = c.i.op == OP_META_MULT2;
2697 u8 rs, lo, hi, rflags = 0, hiflags = 0;
2698 unsigned int i;
2699
2700 if (!op_flag_no_hi(flags) && c.r.op < 32) {
2701 rflags = is_signed ? REG_EXT : REG_ZEXT;
2702 hiflags = is_signed ? REG_EXT : (REG_EXT | REG_ZEXT);
2703 }
2704
2705 _jit_name(block->_jit, __func__);
2706 jit_note(__FILE__, __LINE__);
2707
2708 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, rflags);
2709
2710 /*
2711 * We must handle the case where one of the output registers is our rs
2712	 * input register. Thankfully, computing LO/HI can be done in any
2713 * order. Here, we make sure that the computation that overwrites the
2714 * input register is always performed last.
2715 */
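	/* e.g. if LO is also the rs input, the first iteration computes HI
	 * and the second computes LO, so rs is read before being clobbered. */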
2716 for (i = 0; i < 2; i++) {
2717 if ((!i ^ (reg_lo == c.i.rs)) && !op_flag_no_lo(flags)) {
2718 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
2719
2720 if (c.r.op < 32)
2721 jit_lshi(lo, rs, c.r.op);
2722 else
2723 jit_movi(lo, 0);
2724
2725 lightrec_free_reg(reg_cache, lo);
2726 continue;
2727 }
2728
2729 if ((!!i ^ (reg_lo == c.i.rs)) && !op_flag_no_hi(flags)) {
2730 hi = lightrec_alloc_reg_out(reg_cache, _jit,
2731 reg_hi, hiflags);
2732
2733 if (c.r.op >= 32)
2734 jit_lshi(hi, rs, c.r.op - 32);
2735 else if (is_signed)
2736 jit_rshi(hi, rs, 32 - c.r.op);
2737 else
2738 jit_rshi_u(hi, rs, 32 - c.r.op);
2739
2740 lightrec_free_reg(reg_cache, hi);
2741 }
2742 }
2743
2744 lightrec_free_reg(reg_cache, rs);
2748}
2749
2750static void rec_meta_COM(struct lightrec_cstate *state,
2751 const struct block *block, u16 offset)
2752{
2753 struct regcache *reg_cache = state->reg_cache;
2754 union code c = block->opcode_list[offset].c;
2755 jit_state_t *_jit = block->_jit;
2756 u8 rd, rs, flags;
2757
2758 jit_note(__FILE__, __LINE__);
2759
2760 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
2761 c.m.rs, c.m.rd, 0, 0, &rs, &rd);
2762
2763 flags = lightrec_get_reg_in_flags(reg_cache, rs);
2764
2765 lightrec_set_reg_out_flags(reg_cache, rd,
2766 flags & REG_EXT);
2767
2768 jit_comr(rd, rs);
2769
2770 lightrec_free_reg(reg_cache, rs);
2771 lightrec_free_reg(reg_cache, rd);
2772}
2773
2774static void unknown_opcode(struct lightrec_cstate *state,
2775 const struct block *block, u16 offset)
2776{
2777 rec_exit_early(state, block, offset, LIGHTREC_EXIT_UNKNOWN_OP,
2778 block->pc + (offset << 2));
2779}
2780
2781static const lightrec_rec_func_t rec_standard[64] = {
2782 SET_DEFAULT_ELM(rec_standard, unknown_opcode),
2783 [OP_SPECIAL] = rec_SPECIAL,
2784 [OP_REGIMM] = rec_REGIMM,
2785 [OP_J] = rec_J,
2786 [OP_JAL] = rec_JAL,
2787 [OP_BEQ] = rec_BEQ,
2788 [OP_BNE] = rec_BNE,
2789 [OP_BLEZ] = rec_BLEZ,
2790 [OP_BGTZ] = rec_BGTZ,
2791 [OP_ADDI] = rec_ADDI,
2792 [OP_ADDIU] = rec_ADDIU,
2793 [OP_SLTI] = rec_SLTI,
2794 [OP_SLTIU] = rec_SLTIU,
2795 [OP_ANDI] = rec_ANDI,
2796 [OP_ORI] = rec_ORI,
2797 [OP_XORI] = rec_XORI,
2798 [OP_LUI] = rec_LUI,
2799 [OP_CP0] = rec_CP0,
2800 [OP_CP2] = rec_CP2,
2801 [OP_LB] = rec_LB,
2802 [OP_LH] = rec_LH,
2803 [OP_LWL] = rec_LWL,
2804 [OP_LW] = rec_LW,
2805 [OP_LBU] = rec_LBU,
2806 [OP_LHU] = rec_LHU,
2807 [OP_LWR] = rec_LWR,
2808 [OP_SB] = rec_SB,
2809 [OP_SH] = rec_SH,
2810 [OP_SWL] = rec_SWL,
2811 [OP_SW] = rec_SW,
2812 [OP_SWR] = rec_SWR,
2813 [OP_LWC2] = rec_LW,
2814 [OP_SWC2] = rec_SW,
2815
2816 [OP_META] = rec_META,
2817 [OP_META_MULT2] = rec_meta_MULT2,
2818 [OP_META_MULTU2] = rec_meta_MULT2,
2819};
2820
2821static const lightrec_rec_func_t rec_special[64] = {
2822 SET_DEFAULT_ELM(rec_special, unknown_opcode),
2823 [OP_SPECIAL_SLL] = rec_special_SLL,
2824 [OP_SPECIAL_SRL] = rec_special_SRL,
2825 [OP_SPECIAL_SRA] = rec_special_SRA,
2826 [OP_SPECIAL_SLLV] = rec_special_SLLV,
2827 [OP_SPECIAL_SRLV] = rec_special_SRLV,
2828 [OP_SPECIAL_SRAV] = rec_special_SRAV,
2829 [OP_SPECIAL_JR] = rec_special_JR,
2830 [OP_SPECIAL_JALR] = rec_special_JALR,
2831 [OP_SPECIAL_SYSCALL] = rec_special_SYSCALL,
2832 [OP_SPECIAL_BREAK] = rec_special_BREAK,
2833 [OP_SPECIAL_MFHI] = rec_special_MFHI,
2834 [OP_SPECIAL_MTHI] = rec_special_MTHI,
2835 [OP_SPECIAL_MFLO] = rec_special_MFLO,
2836 [OP_SPECIAL_MTLO] = rec_special_MTLO,
2837 [OP_SPECIAL_MULT] = rec_special_MULT,
2838 [OP_SPECIAL_MULTU] = rec_special_MULTU,
2839 [OP_SPECIAL_DIV] = rec_special_DIV,
2840 [OP_SPECIAL_DIVU] = rec_special_DIVU,
2841 [OP_SPECIAL_ADD] = rec_special_ADD,
2842 [OP_SPECIAL_ADDU] = rec_special_ADDU,
2843 [OP_SPECIAL_SUB] = rec_special_SUB,
2844 [OP_SPECIAL_SUBU] = rec_special_SUBU,
2845 [OP_SPECIAL_AND] = rec_special_AND,
2846 [OP_SPECIAL_OR] = rec_special_OR,
2847 [OP_SPECIAL_XOR] = rec_special_XOR,
2848 [OP_SPECIAL_NOR] = rec_special_NOR,
2849 [OP_SPECIAL_SLT] = rec_special_SLT,
2850 [OP_SPECIAL_SLTU] = rec_special_SLTU,
2851};
2852
2853static const lightrec_rec_func_t rec_regimm[64] = {
2854 SET_DEFAULT_ELM(rec_regimm, unknown_opcode),
2855 [OP_REGIMM_BLTZ] = rec_regimm_BLTZ,
2856 [OP_REGIMM_BGEZ] = rec_regimm_BGEZ,
2857 [OP_REGIMM_BLTZAL] = rec_regimm_BLTZAL,
2858 [OP_REGIMM_BGEZAL] = rec_regimm_BGEZAL,
2859};
2860
2861static const lightrec_rec_func_t rec_cp0[64] = {
2862 SET_DEFAULT_ELM(rec_cp0, rec_CP),
2863 [OP_CP0_MFC0] = rec_cp0_MFC0,
2864 [OP_CP0_CFC0] = rec_cp0_CFC0,
2865 [OP_CP0_MTC0] = rec_cp0_MTC0,
2866 [OP_CP0_CTC0] = rec_cp0_CTC0,
2867 [OP_CP0_RFE] = rec_cp0_RFE,
2868};
2869
2870static const lightrec_rec_func_t rec_cp2_basic[64] = {
2871 SET_DEFAULT_ELM(rec_cp2_basic, rec_CP),
2872 [OP_CP2_BASIC_MFC2] = rec_cp2_basic_MFC2,
2873 [OP_CP2_BASIC_CFC2] = rec_cp2_basic_CFC2,
2874 [OP_CP2_BASIC_MTC2] = rec_cp2_basic_MTC2,
2875 [OP_CP2_BASIC_CTC2] = rec_cp2_basic_CTC2,
2876};
2877
2878static const lightrec_rec_func_t rec_meta[64] = {
2879 SET_DEFAULT_ELM(rec_meta, unknown_opcode),
2880 [OP_META_MOV] = rec_meta_MOV,
2881 [OP_META_EXTC] = rec_meta_EXTC_EXTS,
2882 [OP_META_EXTS] = rec_meta_EXTC_EXTS,
2883 [OP_META_COM] = rec_meta_COM,
2884};
2885
2886static void rec_SPECIAL(struct lightrec_cstate *state,
2887 const struct block *block, u16 offset)
2888{
2889 union code c = block->opcode_list[offset].c;
2890 lightrec_rec_func_t f = rec_special[c.r.op];
2891
2892 if (!HAS_DEFAULT_ELM && unlikely(!f))
2893 unknown_opcode(state, block, offset);
2894 else
2895 (*f)(state, block, offset);
2896}
2897
2898static void rec_REGIMM(struct lightrec_cstate *state,
2899 const struct block *block, u16 offset)
2900{
2901 union code c = block->opcode_list[offset].c;
2902 lightrec_rec_func_t f = rec_regimm[c.r.rt];
2903
2904 if (!HAS_DEFAULT_ELM && unlikely(!f))
2905 unknown_opcode(state, block, offset);
2906 else
2907 (*f)(state, block, offset);
2908}
2909
2910static void rec_CP0(struct lightrec_cstate *state,
2911 const struct block *block, u16 offset)
2912{
2913 union code c = block->opcode_list[offset].c;
2914 lightrec_rec_func_t f = rec_cp0[c.r.rs];
2915
2916 if (!HAS_DEFAULT_ELM && unlikely(!f))
2917 rec_CP(state, block, offset);
2918 else
2919 (*f)(state, block, offset);
2920}
2921
2922static void rec_CP2(struct lightrec_cstate *state,
2923 const struct block *block, u16 offset)
2924{
2925 union code c = block->opcode_list[offset].c;
2926
2927 if (c.r.op == OP_CP2_BASIC) {
2928 lightrec_rec_func_t f = rec_cp2_basic[c.r.rs];
2929
2930 if (HAS_DEFAULT_ELM || likely(f)) {
2931 (*f)(state, block, offset);
2932 return;
2933 }
2934 }
2935
2936 rec_CP(state, block, offset);
2937}
2938
2939static void rec_META(struct lightrec_cstate *state,
2940 const struct block *block, u16 offset)
2941{
2942 union code c = block->opcode_list[offset].c;
2943 lightrec_rec_func_t f = rec_meta[c.m.op];
2944
2945 if (!HAS_DEFAULT_ELM && unlikely(!f))
2946 unknown_opcode(state, block, offset);
2947 else
2948 (*f)(state, block, offset);
2949}
2950
2951void lightrec_rec_opcode(struct lightrec_cstate *state,
2952 const struct block *block, u16 offset)
2953{
2954 struct regcache *reg_cache = state->reg_cache;
2955 struct lightrec_branch_target *target;
2956 const struct opcode *op = &block->opcode_list[offset];
2957 jit_state_t *_jit = block->_jit;
2958 lightrec_rec_func_t f;
2959 u16 unload_offset;
2960
2961 if (op_flag_sync(op->flags)) {
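		/* Sync point: flush the accumulated cycle count and dirty
		 * registers, reset the register cache, and record an indirect
		 * branch target so local branches can jump here. */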
2962 if (state->cycles)
2963 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
2964 state->cycles = 0;
2965
2966 lightrec_storeback_regs(reg_cache, _jit);
2967 lightrec_regcache_reset(reg_cache);
2968
2969 pr_debug("Adding branch target at offset 0x%x\n", offset << 2);
2970 target = &state->targets[state->nb_targets++];
2971 target->offset = offset;
2972 target->label = jit_indirect();
2973 }
2974
2975 if (likely(op->opcode)) {
2976 f = rec_standard[op->i.op];
2977
2978 if (!HAS_DEFAULT_ELM && unlikely(!f))
2979 unknown_opcode(state, block, offset);
2980 else
2981 (*f)(state, block, offset);
2982 }
2983
2984 if (OPT_EARLY_UNLOAD) {
2985 unload_offset = offset +
2986 (has_delay_slot(op->c) && !op_flag_no_ds(op->flags));
2987
2988 lightrec_do_early_unload(state, block, unload_offset);
2989 }
2990
2991 state->no_load_delay = false;
2992}