[pcsx_rearmed.git] / deps / lightrec / emitter.c
1// SPDX-License-Identifier: LGPL-2.1-or-later
2/*
3 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
4 */
5
6#include "blockcache.h"
7#include "debug.h"
8#include "disassembler.h"
9#include "emitter.h"
10#include "lightning-wrapper.h"
11#include "optimizer.h"
12#include "regcache.h"
13
14#include <stdbool.h>
15#include <stddef.h>
16
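/* Each opcode is recompiled by a handler with this signature; `offset` is the
 * index of the opcode in block->opcode_list (i.e. PC = block->pc + (offset << 2)). */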
17typedef void (*lightrec_rec_func_t)(struct lightrec_cstate *, const struct block *, u16);
18
19/* Forward declarations */
20static void rec_SPECIAL(struct lightrec_cstate *state, const struct block *block, u16 offset);
21static void rec_REGIMM(struct lightrec_cstate *state, const struct block *block, u16 offset);
22static void rec_CP0(struct lightrec_cstate *state, const struct block *block, u16 offset);
23static void rec_CP2(struct lightrec_cstate *state, const struct block *block, u16 offset);
24static void rec_META(struct lightrec_cstate *state, const struct block *block, u16 offset);
25static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
26 const struct block *block, u16 offset, u8 reg, u8 in_reg);
27static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
28 const struct block *block, u16 offset,
29 u8 reg, u8 out_reg);
30
31static void
32lightrec_jump_to_fn(jit_state_t *_jit, void (*fn)(void))
33{
34 /* Prevent jit_jmpi() from using our cycles register as a temporary */
35 jit_live(LIGHTREC_REG_CYCLE);
36
37 jit_patch_abs(jit_jmpi(), fn);
38}
39
40static void
41lightrec_jump_to_eob(struct lightrec_cstate *state, jit_state_t *_jit)
42{
43 lightrec_jump_to_fn(_jit, state->state->eob_wrapper_func);
44}
45
46static void
47lightrec_jump_to_ds_check(struct lightrec_cstate *state, jit_state_t *_jit)
48{
49 lightrec_jump_to_fn(_jit, state->state->ds_check_func);
50}
51
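/* Write the link (return) address into the given register: $ra for JAL and
 * the BxxAL opcodes, or the rd register of a JALR. */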
52static void update_ra_register(struct regcache *reg_cache, jit_state_t *_jit,
53 u8 ra_reg, u32 pc, u32 link)
54{
55 u8 link_reg;
56
57 link_reg = lightrec_alloc_reg_out(reg_cache, _jit, ra_reg, 0);
58 lightrec_load_imm(reg_cache, _jit, link_reg, pc, link);
59 lightrec_free_reg(reg_cache, link_reg);
60}
61
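/* Emit the end-of-block sequence: write the link register if needed, load the
 * new PC (from a register or an immediate), recompile the delay slot, flush
 * dirty registers, update the cycle counter, then jump to the dispatcher. */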
62static void lightrec_emit_end_of_block(struct lightrec_cstate *state,
63 const struct block *block, u16 offset,
64 s8 reg_new_pc, u32 imm, u8 ra_reg,
65 u32 link, bool update_cycles)
66{
67 struct regcache *reg_cache = state->reg_cache;
68 jit_state_t *_jit = block->_jit;
69 const struct opcode *op = &block->opcode_list[offset],
70 *ds = get_delay_slot(block->opcode_list, offset);
71 u32 cycles = state->cycles + lightrec_cycles_of_opcode(state->state, op->c);
72
73 jit_note(__FILE__, __LINE__);
74
75 if (link && ra_reg != reg_new_pc)
76 update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
77
78 if (reg_new_pc < 0)
79 lightrec_load_next_pc_imm(reg_cache, _jit, block->pc, imm);
80 else
81 lightrec_load_next_pc(reg_cache, _jit, reg_new_pc);
82
83 if (link && ra_reg == reg_new_pc) {
 84 /* Handle the special case JALR $rX, $rX (rd == rs): the target PC
 85 * must be the old value of the register, which is why the link value
 86 * is only written after the new PC has been loaded. */
87 update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
88 }
89
90 if (has_delay_slot(op->c) &&
91 !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) {
92 cycles += lightrec_cycles_of_opcode(state->state, ds->c);
93
94 /* Recompile the delay slot */
95 if (ds->c.opcode)
96 lightrec_rec_opcode(state, block, offset + 1);
97 }
98
99 /* Clean the remaining registers */
100 lightrec_clean_regs(reg_cache, _jit);
101
102 if (cycles && update_cycles) {
103 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
104 pr_debug("EOB: %u cycles\n", cycles);
105 }
106
107 if (op_flag_load_delay(ds->flags)
108 && opcode_is_load(ds->c) && !state->no_load_delay) {
109 /* If the delay slot is a load opcode, its target register
110 * will be written after the first opcode of the target is
111 * executed. Handle this by jumping to a special section of
112 * the dispatcher. It expects the loaded value to be in
113 * REG_TEMP, and the target register number to be in JIT_V1.*/
114 jit_movi(JIT_V1, ds->c.i.rt);
115
116 lightrec_jump_to_ds_check(state, _jit);
117 } else {
118 lightrec_jump_to_eob(state, _jit);
119 }
120
121 lightrec_regcache_reset(reg_cache);
122}
123
124void lightrec_emit_jump_to_interpreter(struct lightrec_cstate *state,
125 const struct block *block, u16 offset)
126{
127 struct regcache *reg_cache = state->reg_cache;
128 jit_state_t *_jit = block->_jit;
129
130 lightrec_clean_regs(reg_cache, _jit);
131
132 /* Call the interpreter with the block's address in JIT_V1 and the
133 * PC (which might have an offset) in JIT_V0. */
134 lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
135 block->pc + (offset << 2));
136 if (lightrec_store_next_pc()) {
137 jit_stxi_i(offsetof(struct lightrec_state, next_pc),
138 LIGHTREC_REG_STATE, JIT_V0);
139 }
140
141 jit_movi(JIT_V1, (uintptr_t)block);
142
143 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
144 lightrec_jump_to_fn(_jit, state->state->interpreter_func);
145}
146
147static void lightrec_emit_eob(struct lightrec_cstate *state,
148 const struct block *block, u16 offset)
149{
150 struct regcache *reg_cache = state->reg_cache;
151 jit_state_t *_jit = block->_jit;
152
153 lightrec_clean_regs(reg_cache, _jit);
154
155 lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
156 block->pc + (offset << 2));
157 if (lightrec_store_next_pc()) {
158 jit_stxi_i(offsetof(struct lightrec_state, next_pc),
159 LIGHTREC_REG_STATE, JIT_V0);
160 }
161
162 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
163
164 lightrec_jump_to_eob(state, _jit);
165}
166
167static void rec_special_JR(struct lightrec_cstate *state, const struct block *block, u16 offset)
168{
169 union code c = block->opcode_list[offset].c;
170
171 _jit_name(block->_jit, __func__);
172 lightrec_emit_end_of_block(state, block, offset, c.r.rs, 0, 31, 0, true);
173}
174
175static void rec_special_JALR(struct lightrec_cstate *state, const struct block *block, u16 offset)
176{
177 union code c = block->opcode_list[offset].c;
178
179 _jit_name(block->_jit, __func__);
180 lightrec_emit_end_of_block(state, block, offset, c.r.rs, 0, c.r.rd,
181 get_branch_pc(block, offset, 2), true);
182}
183
184static void rec_J(struct lightrec_cstate *state, const struct block *block, u16 offset)
185{
186 union code c = block->opcode_list[offset].c;
187
188 _jit_name(block->_jit, __func__);
189 lightrec_emit_end_of_block(state, block, offset, -1,
190 (block->pc & 0xf0000000) | (c.j.imm << 2),
191 31, 0, true);
192}
193
194static void rec_JAL(struct lightrec_cstate *state, const struct block *block, u16 offset)
195{
196 union code c = block->opcode_list[offset].c;
197
198 _jit_name(block->_jit, __func__);
199 lightrec_emit_end_of_block(state, block, offset, -1,
200 (block->pc & 0xf0000000) | (c.j.imm << 2),
201 31, get_branch_pc(block, offset, 2), true);
202}
203
204static void lightrec_do_early_unload(struct lightrec_cstate *state,
205 const struct block *block, u16 offset)
206{
207 struct regcache *reg_cache = state->reg_cache;
208 const struct opcode *op = &block->opcode_list[offset];
209 jit_state_t *_jit = block->_jit;
210 unsigned int i;
211 u8 reg;
212 struct {
213 u8 reg, op;
214 } reg_ops[3] = {
215 { op->r.rd, LIGHTREC_FLAGS_GET_RD(op->flags), },
216 { op->i.rt, LIGHTREC_FLAGS_GET_RT(op->flags), },
217 { op->i.rs, LIGHTREC_FLAGS_GET_RS(op->flags), },
218 };
219
220 for (i = 0; i < ARRAY_SIZE(reg_ops); i++) {
221 reg = reg_ops[i].reg;
222
223 switch (reg_ops[i].op) {
224 case LIGHTREC_REG_UNLOAD:
225 lightrec_clean_reg_if_loaded(reg_cache, _jit, reg, true);
226 break;
227
228 case LIGHTREC_REG_DISCARD:
229 lightrec_discard_reg_if_loaded(reg_cache, reg);
230 break;
231
232 case LIGHTREC_REG_CLEAN:
233 lightrec_clean_reg_if_loaded(reg_cache, _jit, reg, false);
234 break;
235 default:
236 break;
 237 }
238 }
239}
240
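/* Generic conditional-branch emitter. `code` is the native comparison that
 * skips the taken path (the branch-not-taken test), `code2` is its inverse,
 * used when a forward local branch can be emitted directly (no_indirection). */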
241static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 offset,
242 jit_code_t code, jit_code_t code2, u32 link, bool unconditional, bool bz)
243{
244 struct regcache *reg_cache = state->reg_cache;
245 struct native_register *regs_backup;
246 jit_state_t *_jit = block->_jit;
247 struct lightrec_branch *branch;
248 const struct opcode *op = &block->opcode_list[offset],
249 *ds = get_delay_slot(block->opcode_list, offset);
250 jit_node_t *addr;
251 bool is_forward = (s16)op->i.imm >= 0;
252 int op_cycles = lightrec_cycles_of_opcode(state->state, op->c);
253 u32 target_offset, cycles = state->cycles + op_cycles;
254 bool no_indirection = false;
255 u32 next_pc;
256 u8 rs, rt;
257
258 jit_note(__FILE__, __LINE__);
259
260 if (!op_flag_no_ds(op->flags))
261 cycles += lightrec_cycles_of_opcode(state->state, ds->c);
262
263 state->cycles = -op_cycles;
264
265 if (!unconditional) {
266 rs = lightrec_alloc_reg_in(reg_cache, _jit, op->i.rs, REG_EXT);
267 rt = bz ? 0 : lightrec_alloc_reg_in(reg_cache,
268 _jit, op->i.rt, REG_EXT);
269
270 /* Unload dead registers before evaluating the branch */
271 if (OPT_EARLY_UNLOAD)
272 lightrec_do_early_unload(state, block, offset);
273
274 if (op_flag_local_branch(op->flags) &&
275 (op_flag_no_ds(op->flags) || !ds->opcode) &&
276 is_forward && !lightrec_has_dirty_regs(reg_cache))
277 no_indirection = true;
278
279 if (no_indirection)
280 pr_debug("Using no indirection for branch at offset 0x%hx\n", offset << 2);
281 }
282
283 if (cycles)
284 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
285
286 if (!unconditional) {
287 /* Generate the branch opcode */
288 if (!no_indirection)
289 addr = jit_new_node_pww(code, NULL, rs, rt);
290
291 lightrec_free_regs(reg_cache);
292 regs_backup = lightrec_regcache_enter_branch(reg_cache);
293 }
294
295 if (op_flag_local_branch(op->flags)) {
296 /* Recompile the delay slot */
297 if (!op_flag_no_ds(op->flags) && ds->opcode) {
298 /* Never handle load delays with local branches. */
299 state->no_load_delay = true;
300 lightrec_rec_opcode(state, block, offset + 1);
301 }
302
303 if (link)
304 update_ra_register(reg_cache, _jit, 31, block->pc, link);
305
306 /* Clean remaining registers */
307 lightrec_clean_regs(reg_cache, _jit);
308
309 target_offset = offset + 1 + (s16)op->i.imm
310 - !!op_flag_no_ds(op->flags);
311 pr_debug("Adding local branch to offset 0x%x\n",
312 target_offset << 2);
313 branch = &state->local_branches[
314 state->nb_local_branches++];
315
316 branch->target = target_offset;
317
318 if (no_indirection)
319 branch->branch = jit_new_node_pww(code2, NULL, rs, rt);
320 else if (is_forward)
321 branch->branch = jit_b();
322 else
323 branch->branch = jit_bgti(LIGHTREC_REG_CYCLE, 0);
324 }
325
326 if (!op_flag_local_branch(op->flags) || !is_forward) {
327 next_pc = get_branch_pc(block, offset, 1 + (s16)op->i.imm);
328 state->no_load_delay = op_flag_local_branch(op->flags);
329 lightrec_emit_end_of_block(state, block, offset, -1, next_pc,
330 31, link, false);
331 }
332
333 if (!unconditional) {
334 if (!no_indirection)
335 jit_patch(addr);
336
337 lightrec_regcache_leave_branch(reg_cache, regs_backup);
338
339 if (bz && link)
340 update_ra_register(reg_cache, _jit, 31, block->pc, link);
341
342 if (!op_flag_no_ds(op->flags) && ds->opcode) {
343 state->no_load_delay = true;
344 lightrec_rec_opcode(state, block, offset + 1);
345 }
346 }
347}
348
349static void rec_BNE(struct lightrec_cstate *state,
350 const struct block *block, u16 offset)
351{
352 union code c = block->opcode_list[offset].c;
353
354 _jit_name(block->_jit, __func__);
355
356 if (c.i.rt == 0)
357 rec_b(state, block, offset, jit_code_beqi, jit_code_bnei, 0, false, true);
358 else
359 rec_b(state, block, offset, jit_code_beqr, jit_code_bner, 0, false, false);
360}
361
362static void rec_BEQ(struct lightrec_cstate *state,
363 const struct block *block, u16 offset)
364{
365 union code c = block->opcode_list[offset].c;
366
367 _jit_name(block->_jit, __func__);
368
369 if (c.i.rt == 0)
370 rec_b(state, block, offset, jit_code_bnei, jit_code_beqi, 0, c.i.rs == 0, true);
371 else
372 rec_b(state, block, offset, jit_code_bner, jit_code_beqr, 0, c.i.rs == c.i.rt, false);
373}
374
375static void rec_BLEZ(struct lightrec_cstate *state,
376 const struct block *block, u16 offset)
377{
378 union code c = block->opcode_list[offset].c;
379
380 _jit_name(block->_jit, __func__);
381 rec_b(state, block, offset, jit_code_bgti, jit_code_blei, 0, c.i.rs == 0, true);
382}
383
384static void rec_BGTZ(struct lightrec_cstate *state,
385 const struct block *block, u16 offset)
386{
387 _jit_name(block->_jit, __func__);
388 rec_b(state, block, offset, jit_code_blei, jit_code_bgti, 0, false, true);
389}
390
391static void rec_regimm_BLTZ(struct lightrec_cstate *state,
392 const struct block *block, u16 offset)
393{
394 _jit_name(block->_jit, __func__);
395 rec_b(state, block, offset, jit_code_bgei, jit_code_blti, 0, false, true);
396}
397
398static void rec_regimm_BLTZAL(struct lightrec_cstate *state,
399 const struct block *block, u16 offset)
400{
401 _jit_name(block->_jit, __func__);
402 rec_b(state, block, offset, jit_code_bgei, jit_code_blti,
403 get_branch_pc(block, offset, 2), false, true);
404}
405
406static void rec_regimm_BGEZ(struct lightrec_cstate *state,
407 const struct block *block, u16 offset)
408{
409 union code c = block->opcode_list[offset].c;
410
411 _jit_name(block->_jit, __func__);
412 rec_b(state, block, offset, jit_code_blti, jit_code_bgei, 0, !c.i.rs, true);
413}
414
415static void rec_regimm_BGEZAL(struct lightrec_cstate *state,
416 const struct block *block, u16 offset)
417{
418 const struct opcode *op = &block->opcode_list[offset];
419 _jit_name(block->_jit, __func__);
420 rec_b(state, block, offset, jit_code_blti, jit_code_bgei,
421 get_branch_pc(block, offset, 2),
422 !op->i.rs, true);
423}
424
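/* Allocate an input/output register pair. With OPT_EARLY_UNLOAD, if the input
 * register dies on this opcode it is remapped in place as the output register,
 * which saves one native register. */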
425static void rec_alloc_rs_rd(struct regcache *reg_cache,
426 jit_state_t *_jit,
427 const struct opcode *op,
428 u8 rs, u8 rd,
429 u8 in_flags, u8 out_flags,
430 u8 *rs_out, u8 *rd_out)
431{
432 bool unload, discard;
433 u32 unload_flags;
434
435 if (OPT_EARLY_UNLOAD) {
436 unload_flags = LIGHTREC_FLAGS_GET_RS(op->flags);
437 unload = unload_flags == LIGHTREC_REG_UNLOAD;
438 discard = unload_flags == LIGHTREC_REG_DISCARD;
439 }
440
441 if (OPT_EARLY_UNLOAD && rs && rd != rs && (unload || discard)) {
442 rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags);
443 lightrec_remap_reg(reg_cache, _jit, rs, rd, discard);
444 lightrec_set_reg_out_flags(reg_cache, rs, out_flags);
445 rd = rs;
446 } else {
447 rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags);
448 rd = lightrec_alloc_reg_out(reg_cache, _jit, rd, out_flags);
449 }
450
451 *rs_out = rs;
452 *rd_out = rd;
453}
454
455static void rec_alu_imm(struct lightrec_cstate *state, const struct block *block,
456 u16 offset, jit_code_t code, bool slti)
457{
458 struct regcache *reg_cache = state->reg_cache;
459 union code c = block->opcode_list[offset].c;
460 jit_state_t *_jit = block->_jit;
461 u8 rs, rt, out_flags = REG_EXT;
462
463 if (slti)
464 out_flags |= REG_ZEXT;
465
466 jit_note(__FILE__, __LINE__);
467
468 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
469 c.i.rs, c.i.rt, REG_EXT, out_flags, &rs, &rt);
470
471 jit_new_node_www(code, rt, rs, (s32)(s16) c.i.imm);
472
473 lightrec_free_reg(reg_cache, rs);
474 lightrec_free_reg(reg_cache, rt);
475}
476
477static void rec_alu_special(struct lightrec_cstate *state, const struct block *block,
478 u16 offset, jit_code_t code, bool out_ext)
479{
480 struct regcache *reg_cache = state->reg_cache;
481 union code c = block->opcode_list[offset].c;
482 jit_state_t *_jit = block->_jit;
483 u8 rd, rt, rs;
484
485 jit_note(__FILE__, __LINE__);
486
487 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, REG_EXT);
488 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
489 c.r.rs, c.r.rd, REG_EXT,
490 out_ext ? REG_EXT | REG_ZEXT : 0, &rs, &rd);
491
492 jit_new_node_www(code, rd, rs, rt);
493
494 lightrec_free_reg(reg_cache, rs);
495 lightrec_free_reg(reg_cache, rt);
496 lightrec_free_reg(reg_cache, rd);
497}
498
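/* Variable shifts (SLLV/SRLV/SRAV): only the low 5 bits of $rs are used as
 * the shift amount, hence the explicit masking with 0x1f below. */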
499static void rec_alu_shiftv(struct lightrec_cstate *state, const struct block *block,
500 u16 offset, jit_code_t code)
501{
502 struct regcache *reg_cache = state->reg_cache;
503 union code c = block->opcode_list[offset].c;
504 jit_state_t *_jit = block->_jit;
505 u8 rd, rt, rs, temp, flags = 0;
506
507 jit_note(__FILE__, __LINE__);
508
509 if (code == jit_code_rshr)
510 flags = REG_EXT;
511 else if (code == jit_code_rshr_u)
512 flags = REG_ZEXT;
513
514 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0);
515 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
516 c.r.rt, c.r.rd, flags, flags, &rt, &rd);
517
518 if (rt != rd) {
519 jit_andi(rd, rs, 0x1f);
520 jit_new_node_www(code, rd, rt, rd);
521 } else {
522 temp = lightrec_alloc_reg_temp(reg_cache, _jit);
523 jit_andi(temp, rs, 0x1f);
524 jit_new_node_www(code, rd, rt, temp);
525 lightrec_free_reg(reg_cache, temp);
526 }
527
528 lightrec_free_reg(reg_cache, rs);
529 lightrec_free_reg(reg_cache, rt);
530 lightrec_free_reg(reg_cache, rd);
531}
532
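/* Emit a move-immediate, used by ADDIU when the source is $zero or when the
 * opcode was merged with a preceding LUI (LIGHTREC_MOVI flag), in which case
 * the LUI immediate saved in movi_temp is folded in. */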
533static void rec_movi(struct lightrec_cstate *state,
534 const struct block *block, u16 offset)
535{
536 struct regcache *reg_cache = state->reg_cache;
537 union code c = block->opcode_list[offset].c;
538 jit_state_t *_jit = block->_jit;
539 u16 flags = REG_EXT;
540 s32 value = (s32)(s16) c.i.imm;
541 u8 rt;
542
543 if (block->opcode_list[offset].flags & LIGHTREC_MOVI)
544 value += (s32)((u32)state->movi_temp[c.i.rt] << 16);
545
546 if (value >= 0)
547 flags |= REG_ZEXT;
548
549 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
550
551 jit_movi(rt, value);
552
553 lightrec_free_reg(reg_cache, rt);
554}
555
556static void rec_ADDIU(struct lightrec_cstate *state,
557 const struct block *block, u16 offset)
558{
559 const struct opcode *op = &block->opcode_list[offset];
560
561 _jit_name(block->_jit, __func__);
562
563 if (op->i.rs && !(op->flags & LIGHTREC_MOVI))
564 rec_alu_imm(state, block, offset, jit_code_addi, false);
565 else
566 rec_movi(state, block, offset);
567}
568
569static void rec_ADDI(struct lightrec_cstate *state,
570 const struct block *block, u16 offset)
571{
572 /* TODO: Handle the exception? */
573 _jit_name(block->_jit, __func__);
574 rec_ADDIU(state, block, offset);
575}
576
577static void rec_SLTIU(struct lightrec_cstate *state,
578 const struct block *block, u16 offset)
579{
580 _jit_name(block->_jit, __func__);
581 rec_alu_imm(state, block, offset, jit_code_lti_u, true);
582}
583
584static void rec_SLTI(struct lightrec_cstate *state,
585 const struct block *block, u16 offset)
586{
587 _jit_name(block->_jit, __func__);
588 rec_alu_imm(state, block, offset, jit_code_lti, true);
589}
590
591static void rec_ANDI(struct lightrec_cstate *state,
592 const struct block *block, u16 offset)
593{
594 struct regcache *reg_cache = state->reg_cache;
595 union code c = block->opcode_list[offset].c;
596 jit_state_t *_jit = block->_jit;
597 u8 rs, rt;
598
599 _jit_name(block->_jit, __func__);
600 jit_note(__FILE__, __LINE__);
601
602 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
603 c.i.rs, c.i.rt, 0, REG_EXT | REG_ZEXT, &rs, &rt);
604
605 /* PSX code uses ANDI 0xff / ANDI 0xffff a lot, which are basically
606 * casts to uint8_t / uint16_t. */
607 if (c.i.imm == 0xff)
608 jit_extr_uc(rt, rs);
609 else if (c.i.imm == 0xffff)
610 jit_extr_us(rt, rs);
611 else
612 jit_andi(rt, rs, (u32)(u16) c.i.imm);
613
614 lightrec_free_reg(reg_cache, rs);
615 lightrec_free_reg(reg_cache, rt);
616}
617
618static void rec_alu_or_xor(struct lightrec_cstate *state, const struct block *block,
619 u16 offset, jit_code_t code)
620{
621 struct regcache *reg_cache = state->reg_cache;
622 union code c = block->opcode_list[offset].c;
623 jit_state_t *_jit = block->_jit;
624 u8 rs, rt, flags;
625
626 jit_note(__FILE__, __LINE__);
627
628 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
629 c.i.rs, c.i.rt, 0, 0, &rs, &rt);
630
631 flags = lightrec_get_reg_in_flags(reg_cache, rs);
632 lightrec_set_reg_out_flags(reg_cache, rt, flags);
633
634 jit_new_node_www(code, rt, rs, (u32)(u16) c.i.imm);
635
636 lightrec_free_reg(reg_cache, rs);
637 lightrec_free_reg(reg_cache, rt);
638}
639
640
641static void rec_ORI(struct lightrec_cstate *state,
642 const struct block *block, u16 offset)
643{
644 const struct opcode *op = &block->opcode_list[offset];
645 struct regcache *reg_cache = state->reg_cache;
646 jit_state_t *_jit = block->_jit;
647 s32 val;
648 u8 rt;
649
650 _jit_name(_jit, __func__);
651
652 if (op->flags & LIGHTREC_MOVI) {
653 rt = lightrec_alloc_reg_out(reg_cache, _jit, op->i.rt, REG_EXT);
654
655 val = ((u32)state->movi_temp[op->i.rt] << 16) | op->i.imm;
656 jit_movi(rt, val);
657
658 lightrec_free_reg(reg_cache, rt);
659 } else {
660 rec_alu_or_xor(state, block, offset, jit_code_ori);
661 }
662}
663
664static void rec_XORI(struct lightrec_cstate *state,
665 const struct block *block, u16 offset)
666{
667 _jit_name(block->_jit, __func__);
668 rec_alu_or_xor(state, block, offset, jit_code_xori);
669}
670
671static void rec_LUI(struct lightrec_cstate *state,
672 const struct block *block, u16 offset)
673{
674 struct regcache *reg_cache = state->reg_cache;
675 union code c = block->opcode_list[offset].c;
676 jit_state_t *_jit = block->_jit;
677 u8 rt, flags = REG_EXT;
678
679 if (block->opcode_list[offset].flags & LIGHTREC_MOVI) {
680 state->movi_temp[c.i.rt] = c.i.imm;
681 return;
682 }
683
684 jit_name(__func__);
685 jit_note(__FILE__, __LINE__);
686
687 if (!(c.i.imm & BIT(15)))
688 flags |= REG_ZEXT;
689
690 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
691
692 jit_movi(rt, (s32)(c.i.imm << 16));
693
694 lightrec_free_reg(reg_cache, rt);
695}
696
697static void rec_special_ADDU(struct lightrec_cstate *state,
698 const struct block *block, u16 offset)
699{
700 _jit_name(block->_jit, __func__);
701 rec_alu_special(state, block, offset, jit_code_addr, false);
702}
703
704static void rec_special_ADD(struct lightrec_cstate *state,
705 const struct block *block, u16 offset)
706{
707 /* TODO: Handle the exception? */
708 _jit_name(block->_jit, __func__);
709 rec_alu_special(state, block, offset, jit_code_addr, false);
710}
711
712static void rec_special_SUBU(struct lightrec_cstate *state,
713 const struct block *block, u16 offset)
714{
715 _jit_name(block->_jit, __func__);
716 rec_alu_special(state, block, offset, jit_code_subr, false);
717}
718
719static void rec_special_SUB(struct lightrec_cstate *state,
720 const struct block *block, u16 offset)
721{
722 /* TODO: Handle the exception? */
723 _jit_name(block->_jit, __func__);
724 rec_alu_special(state, block, offset, jit_code_subr, false);
725}
726
727static void rec_special_AND(struct lightrec_cstate *state,
728 const struct block *block, u16 offset)
729{
730 struct regcache *reg_cache = state->reg_cache;
731 union code c = block->opcode_list[offset].c;
732 jit_state_t *_jit = block->_jit;
733 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd;
734
735 _jit_name(block->_jit, __func__);
736 jit_note(__FILE__, __LINE__);
737
738 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
739 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
740 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
741
742 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
743 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
744
745 /* Z(rd) = Z(rs) | Z(rt) */
746 flags_rd = REG_ZEXT & (flags_rs | flags_rt);
747
748 /* E(rd) = (E(rt) & Z(rt)) | (E(rs) & Z(rs)) | (E(rs) & E(rt)) */
749 if (((flags_rs & REG_EXT) && (flags_rt & REG_ZEXT)) ||
750 ((flags_rt & REG_EXT) && (flags_rs & REG_ZEXT)) ||
751 (REG_EXT & flags_rs & flags_rt))
752 flags_rd |= REG_EXT;
753
754 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
755
756 jit_andr(rd, rs, rt);
757
758 lightrec_free_reg(reg_cache, rs);
759 lightrec_free_reg(reg_cache, rt);
760 lightrec_free_reg(reg_cache, rd);
761}
762
763static void rec_special_or_nor(struct lightrec_cstate *state,
764 const struct block *block, u16 offset, bool nor)
765{
766 struct regcache *reg_cache = state->reg_cache;
767 union code c = block->opcode_list[offset].c;
768 jit_state_t *_jit = block->_jit;
769 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd = 0;
770
771 jit_note(__FILE__, __LINE__);
772
773 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
774 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
775 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
776
777 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
778 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
779
780 /* or: Z(rd) = Z(rs) & Z(rt)
781 * nor: Z(rd) = 0 */
782 if (!nor)
783 flags_rd = REG_ZEXT & flags_rs & flags_rt;
784
785 /* E(rd) = E(rs) & E(rt) */
786 if (REG_EXT & flags_rs & flags_rt)
787 flags_rd |= REG_EXT;
788
789 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
790
791 jit_orr(rd, rs, rt);
792
793 if (nor)
794 jit_comr(rd, rd);
795
796 lightrec_free_reg(reg_cache, rs);
797 lightrec_free_reg(reg_cache, rt);
798 lightrec_free_reg(reg_cache, rd);
799}
800
801static void rec_special_OR(struct lightrec_cstate *state,
802 const struct block *block, u16 offset)
803{
804 _jit_name(block->_jit, __func__);
805 rec_special_or_nor(state, block, offset, false);
806}
807
808static void rec_special_NOR(struct lightrec_cstate *state,
809 const struct block *block, u16 offset)
810{
811 _jit_name(block->_jit, __func__);
812 rec_special_or_nor(state, block, offset, true);
813}
814
815static void rec_special_XOR(struct lightrec_cstate *state,
816 const struct block *block, u16 offset)
817{
818 struct regcache *reg_cache = state->reg_cache;
819 union code c = block->opcode_list[offset].c;
820 jit_state_t *_jit = block->_jit;
821 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd;
822
823 _jit_name(block->_jit, __func__);
824
825 jit_note(__FILE__, __LINE__);
826
827 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
828 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
829 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
830
831 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
832 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
833
834 /* Z(rd) = Z(rs) & Z(rt) */
835 flags_rd = REG_ZEXT & flags_rs & flags_rt;
836
837 /* E(rd) = E(rs) & E(rt) */
838 flags_rd |= REG_EXT & flags_rs & flags_rt;
839
840 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
841
842 jit_xorr(rd, rs, rt);
843
844 lightrec_free_reg(reg_cache, rs);
845 lightrec_free_reg(reg_cache, rt);
846 lightrec_free_reg(reg_cache, rd);
847}
848
849static void rec_special_SLTU(struct lightrec_cstate *state,
850 const struct block *block, u16 offset)
851{
852 _jit_name(block->_jit, __func__);
853 rec_alu_special(state, block, offset, jit_code_ltr_u, true);
854}
855
856static void rec_special_SLT(struct lightrec_cstate *state,
857 const struct block *block, u16 offset)
858{
859 _jit_name(block->_jit, __func__);
860 rec_alu_special(state, block, offset, jit_code_ltr, true);
861}
862
863static void rec_special_SLLV(struct lightrec_cstate *state,
864 const struct block *block, u16 offset)
865{
866 _jit_name(block->_jit, __func__);
867 rec_alu_shiftv(state, block, offset, jit_code_lshr);
868}
869
870static void rec_special_SRLV(struct lightrec_cstate *state,
871 const struct block *block, u16 offset)
872{
873 _jit_name(block->_jit, __func__);
874 rec_alu_shiftv(state, block, offset, jit_code_rshr_u);
875}
876
877static void rec_special_SRAV(struct lightrec_cstate *state,
878 const struct block *block, u16 offset)
879{
880 _jit_name(block->_jit, __func__);
881 rec_alu_shiftv(state, block, offset, jit_code_rshr);
882}
883
884static void rec_alu_shift(struct lightrec_cstate *state, const struct block *block,
885 u16 offset, jit_code_t code)
886{
887 struct regcache *reg_cache = state->reg_cache;
888 union code c = block->opcode_list[offset].c;
889 jit_state_t *_jit = block->_jit;
890 u8 rd, rt, flags = 0, out_flags = 0;
891
892 jit_note(__FILE__, __LINE__);
893
894 if (code == jit_code_rshi)
895 flags = REG_EXT;
896 else if (code == jit_code_rshi_u)
897 flags = REG_ZEXT;
898
 899 /* The input reg is zero-extended; if we SRL by at least one bit, we
 900 * know the output reg will be both zero-extended and sign-extended. */
901 out_flags = flags;
902 if (code == jit_code_rshi_u && c.r.imm)
903 out_flags |= REG_EXT;
904
905 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
906 c.r.rt, c.r.rd, flags, out_flags, &rt, &rd);
907
908 jit_new_node_www(code, rd, rt, c.r.imm);
909
910 lightrec_free_reg(reg_cache, rt);
911 lightrec_free_reg(reg_cache, rd);
912}
913
914static void rec_special_SLL(struct lightrec_cstate *state,
915 const struct block *block, u16 offset)
916{
917 _jit_name(block->_jit, __func__);
918 rec_alu_shift(state, block, offset, jit_code_lshi);
919}
920
921static void rec_special_SRL(struct lightrec_cstate *state,
922 const struct block *block, u16 offset)
923{
924 _jit_name(block->_jit, __func__);
925 rec_alu_shift(state, block, offset, jit_code_rshi_u);
926}
927
928static void rec_special_SRA(struct lightrec_cstate *state,
929 const struct block *block, u16 offset)
930{
931 _jit_name(block->_jit, __func__);
932 rec_alu_shift(state, block, offset, jit_code_rshi);
933}
934
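/* MULT/MULTU: the 64-bit product goes to HI (upper half) and LO (lower half).
 * Halves that the optimizer proved unused (no_lo/no_hi flags) are skipped. */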
935static void rec_alu_mult(struct lightrec_cstate *state,
936 const struct block *block, u16 offset, bool is_signed)
937{
938 struct regcache *reg_cache = state->reg_cache;
939 union code c = block->opcode_list[offset].c;
940 u32 flags = block->opcode_list[offset].flags;
941 u8 reg_lo = get_mult_div_lo(c);
942 u8 reg_hi = get_mult_div_hi(c);
943 jit_state_t *_jit = block->_jit;
944 u8 lo, hi, rs, rt, rflags = 0;
945
946 jit_note(__FILE__, __LINE__);
947
948 if (is_signed)
949 rflags = REG_EXT;
950 else
951 rflags = REG_ZEXT;
952
953 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
954 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
955
956 if (!op_flag_no_lo(flags))
957 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
958 else if (__WORDSIZE == 32)
959 lo = lightrec_alloc_reg_temp(reg_cache, _jit);
960
961 if (!op_flag_no_hi(flags))
962 hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, REG_EXT);
963
964 if (__WORDSIZE == 32) {
 965 /* On 32-bit systems, do a 32*32->64 bit operation, or a 32*32->32 bit
 966 * operation if the MULT was detected as 32-bit only. */
967 if (!op_flag_no_hi(flags)) {
968 if (is_signed)
969 jit_qmulr(lo, hi, rs, rt);
970 else
971 jit_qmulr_u(lo, hi, rs, rt);
972 } else {
973 jit_mulr(lo, rs, rt);
974 }
975 } else {
976 /* On 64-bit systems, do a 64*64->64 bit operation. */
977 if (op_flag_no_lo(flags)) {
978 jit_mulr(hi, rs, rt);
979 jit_rshi(hi, hi, 32);
980 } else {
981 jit_mulr(lo, rs, rt);
982
 983 /* The 64-bit output value is in $lo; store the upper 32 bits in $hi */
984 if (!op_flag_no_hi(flags))
985 jit_rshi(hi, lo, 32);
986 }
987 }
988
989 lightrec_free_reg(reg_cache, rs);
990 lightrec_free_reg(reg_cache, rt);
991 if (!op_flag_no_lo(flags) || __WORDSIZE == 32)
992 lightrec_free_reg(reg_cache, lo);
993 if (!op_flag_no_hi(flags))
994 lightrec_free_reg(reg_cache, hi);
995}
996
997static void rec_alu_div(struct lightrec_cstate *state,
998 const struct block *block, u16 offset, bool is_signed)
999{
1000 struct regcache *reg_cache = state->reg_cache;
1001 union code c = block->opcode_list[offset].c;
1002 u32 flags = block->opcode_list[offset].flags;
1003 bool no_check = op_flag_no_div_check(flags);
1004 u8 reg_lo = get_mult_div_lo(c);
1005 u8 reg_hi = get_mult_div_hi(c);
1006 jit_state_t *_jit = block->_jit;
1007 jit_node_t *branch, *to_end;
1008 u8 lo = 0, hi = 0, rs, rt, rflags = 0;
1009
1010 jit_note(__FILE__, __LINE__);
1011
1012 if (is_signed)
1013 rflags = REG_EXT;
1014 else
1015 rflags = REG_ZEXT;
1016
1017 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
1018 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
1019
1020 if (!op_flag_no_lo(flags))
1021 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
1022
1023 if (!op_flag_no_hi(flags))
1024 hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, 0);
1025
1026 /* Jump to special handler if dividing by zero */
1027 if (!no_check)
1028 branch = jit_beqi(rt, 0);
1029
1030 if (op_flag_no_lo(flags)) {
1031 if (is_signed)
1032 jit_remr(hi, rs, rt);
1033 else
1034 jit_remr_u(hi, rs, rt);
1035 } else if (op_flag_no_hi(flags)) {
1036 if (is_signed)
1037 jit_divr(lo, rs, rt);
1038 else
1039 jit_divr_u(lo, rs, rt);
1040 } else {
1041 if (is_signed)
1042 jit_qdivr(lo, hi, rs, rt);
1043 else
1044 jit_qdivr_u(lo, hi, rs, rt);
1045 }
1046
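	/* Division by zero does not trap on the PSX CPU: emulate the MIPS result,
	 * i.e. HI = dividend, and LO = 0xffffffff for DIVU, or +/-1 for DIV
	 * depending on the sign of the dividend. */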
1047 if (!no_check) {
 1048 /* Jump over the div-by-zero handler */
1049 to_end = jit_b();
1050
1051 jit_patch(branch);
1052
1053 if (!op_flag_no_lo(flags)) {
1054 if (is_signed) {
1055 jit_ltr(lo, rs, rt);
1056 jit_lshi(lo, lo, 1);
1057 jit_subi(lo, lo, 1);
1058 } else {
1059 jit_subi(lo, rt, 1);
1060 }
1061 }
1062
1063 if (!op_flag_no_hi(flags))
1064 jit_movr(hi, rs);
1065
1066 jit_patch(to_end);
1067 }
1068
1069 lightrec_free_reg(reg_cache, rs);
1070 lightrec_free_reg(reg_cache, rt);
1071
1072 if (!op_flag_no_lo(flags))
1073 lightrec_free_reg(reg_cache, lo);
1074
1075 if (!op_flag_no_hi(flags))
1076 lightrec_free_reg(reg_cache, hi);
1077}
1078
1079static void rec_special_MULT(struct lightrec_cstate *state,
1080 const struct block *block, u16 offset)
1081{
1082 _jit_name(block->_jit, __func__);
1083 rec_alu_mult(state, block, offset, true);
1084}
1085
1086static void rec_special_MULTU(struct lightrec_cstate *state,
1087 const struct block *block, u16 offset)
1088{
1089 _jit_name(block->_jit, __func__);
1090 rec_alu_mult(state, block, offset, false);
1091}
1092
1093static void rec_special_DIV(struct lightrec_cstate *state,
1094 const struct block *block, u16 offset)
1095{
1096 _jit_name(block->_jit, __func__);
1097 rec_alu_div(state, block, offset, true);
1098}
1099
1100static void rec_special_DIVU(struct lightrec_cstate *state,
1101 const struct block *block, u16 offset)
1102{
1103 _jit_name(block->_jit, __func__);
1104 rec_alu_div(state, block, offset, false);
1105}
1106
1107static void rec_alu_mv_lo_hi(struct lightrec_cstate *state,
1108 const struct block *block, u16 offset,
1109 u8 dst, u8 src)
1110{
1111 struct regcache *reg_cache = state->reg_cache;
1112 jit_state_t *_jit = block->_jit;
1113
1114 jit_note(__FILE__, __LINE__);
1115
1116 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
1117 src, dst, 0, REG_EXT, &src, &dst);
1118
1119 jit_extr_i(dst, src);
1120
1121 lightrec_free_reg(reg_cache, src);
1122 lightrec_free_reg(reg_cache, dst);
1123}
1124
1125static void rec_special_MFHI(struct lightrec_cstate *state,
1126 const struct block *block, u16 offset)
1127{
1128 union code c = block->opcode_list[offset].c;
1129
1130 _jit_name(block->_jit, __func__);
1131 rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_HI);
1132}
1133
1134static void rec_special_MTHI(struct lightrec_cstate *state,
1135 const struct block *block, u16 offset)
1136{
1137 union code c = block->opcode_list[offset].c;
1138
1139 _jit_name(block->_jit, __func__);
1140 rec_alu_mv_lo_hi(state, block, offset, REG_HI, c.r.rs);
1141}
1142
1143static void rec_special_MFLO(struct lightrec_cstate *state,
1144 const struct block *block, u16 offset)
1145{
1146 union code c = block->opcode_list[offset].c;
1147
1148 _jit_name(block->_jit, __func__);
1149 rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_LO);
1150}
1151
1152static void rec_special_MTLO(struct lightrec_cstate *state,
1153 const struct block *block, u16 offset)
1154{
1155 union code c = block->opcode_list[offset].c;
1156
1157 _jit_name(block->_jit, __func__);
1158 rec_alu_mv_lo_hi(state, block, offset, REG_LO, c.r.rs);
1159}
1160
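/* Emit a call to one of the C wrapper trampolines. The wrapper's entry point
 * is loaded from lightrec_state (and kept cached in a temporary register when
 * possible); `arg` is passed as the single argument. */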
1161static void call_to_c_wrapper(struct lightrec_cstate *state,
1162 const struct block *block, u32 arg,
1163 enum c_wrappers wrapper)
1164{
1165 struct regcache *reg_cache = state->reg_cache;
1166 jit_state_t *_jit = block->_jit;
1167 s8 tmp, tmp2;
1168
1169 /* Make sure JIT_R1 is not mapped; it will be used in the C wrapper. */
1170 tmp2 = lightrec_alloc_reg(reg_cache, _jit, JIT_R1);
1171
1172 tmp = lightrec_get_reg_with_value(reg_cache,
1173 (intptr_t) state->state->wrappers_eps[wrapper]);
1174 if (tmp < 0) {
1175 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1176 jit_ldxi(tmp, LIGHTREC_REG_STATE,
1177 offsetof(struct lightrec_state, wrappers_eps[wrapper]));
1178
1179 lightrec_temp_set_value(reg_cache, tmp,
1180 (intptr_t) state->state->wrappers_eps[wrapper]);
1181 }
1182
1183 lightrec_free_reg(reg_cache, tmp2);
1184
1185#ifdef __mips__
1186 /* On MIPS, register t9 is always used as the target register for JALR.
1187 * Therefore if it does not contain the target address we must
1188 * invalidate it. */
1189 if (tmp != _T9)
1190 lightrec_unload_reg(reg_cache, _jit, _T9);
1191#endif
1192
1193 jit_prepare();
1194 jit_pushargi(arg);
1195
1196 lightrec_regcache_mark_live(reg_cache, _jit);
1197 jit_callr(tmp);
1198
1199 lightrec_free_reg(reg_cache, tmp);
1200 lightrec_regcache_mark_live(reg_cache, _jit);
1201}
1202
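/* Slow path for loads/stores that could not be mapped to a direct access:
 * flush the involved registers and call the C read/write wrapper. `load_rt`
 * is set for stores (the wrapper reads the register's value), `read_rt` for
 * loads (the wrapper writes the target register, so it is also unloaded). */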
1203static void rec_io(struct lightrec_cstate *state,
1204 const struct block *block, u16 offset,
1205 bool load_rt, bool read_rt)
1206{
1207 struct regcache *reg_cache = state->reg_cache;
1208 jit_state_t *_jit = block->_jit;
1209 union code c = block->opcode_list[offset].c;
1210 u32 flags = block->opcode_list[offset].flags;
1211 bool is_tagged = LIGHTREC_FLAGS_GET_IO_MODE(flags);
1212 u32 lut_entry;
1213 u8 zero;
1214
1215 jit_note(__FILE__, __LINE__);
1216
1217 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
1218
1219 if (read_rt && likely(c.i.rt))
1220 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
1221 else if (load_rt)
1222 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
1223
1224 if (op_flag_load_delay(flags) && !state->no_load_delay) {
1225 /* Clear state->in_delay_slot_n. This notifies the lightrec_rw
1226 * wrapper that it should write the REG_TEMP register instead of
1227 * the actual output register of the opcode. */
1228 zero = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1229 jit_stxi_c(offsetof(struct lightrec_state, in_delay_slot_n),
1230 LIGHTREC_REG_STATE, zero);
1231 lightrec_free_reg(reg_cache, zero);
1232 }
1233
1234 if (is_tagged) {
1235 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_RW);
1236 } else {
1237 lut_entry = lightrec_get_lut_entry(block);
1238 call_to_c_wrapper(state, block, (lut_entry << 16) | offset,
1239 C_WRAPPER_RW_GENERIC);
1240 }
1241}
1242
1243static u32 rec_ram_mask(const struct lightrec_state *state)
1244{
1245 return (RAM_SIZE << (state->mirrors_mapped * 2)) - 1;
1246}
1247
1248static u32 rec_io_mask(const struct lightrec_state *state)
1249{
1250 u32 length = state->maps[PSX_MAP_HW_REGISTERS].length;
1251
1252 return 0x1f800000 | GENMASK(31 - clz32(length - 1), 0);
1253}
1254
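/* Fast path for stores whose target memory map is known at compile time:
 * mask the guest address, add the host offset of the map, then store directly.
 * When `invalidate` is set, the corresponding code LUT entry is cleared so any
 * block previously compiled at that address gets recompiled. */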
1255static void rec_store_memory(struct lightrec_cstate *cstate,
1256 const struct block *block,
1257 u16 offset, jit_code_t code,
1258 jit_code_t swap_code,
1259 uintptr_t addr_offset, u32 addr_mask,
1260 bool invalidate)
1261{
1262 const struct lightrec_state *state = cstate->state;
1263 struct regcache *reg_cache = cstate->reg_cache;
1264 struct opcode *op = &block->opcode_list[offset];
1265 jit_state_t *_jit = block->_jit;
1266 union code c = op->c;
1267 u8 rs, rt, tmp = 0, tmp2 = 0, tmp3, addr_reg, addr_reg2;
1268 s16 imm = (s16)c.i.imm;
1269 s32 simm = (s32)imm << (1 - lut_is_32bit(state));
1270 s32 lut_offt = offsetof(struct lightrec_state, code_lut);
1271 bool no_mask = op_flag_no_mask(op->flags);
1272 bool add_imm = c.i.imm &&
1273 ((!state->mirrors_mapped && !no_mask) || (invalidate &&
1274 ((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt))));
1275 bool need_tmp = !no_mask || add_imm || invalidate;
1276 bool swc2 = c.i.op == OP_SWC2;
1277 u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
1278 s8 reg_imm;
1279
1280 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1281 if (need_tmp)
1282 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1283
1284 addr_reg = rs;
1285
1286 if (add_imm) {
1287 jit_addi(tmp, addr_reg, (s16)c.i.imm);
1288 lightrec_free_reg(reg_cache, rs);
1289 addr_reg = tmp;
1290 imm = 0;
1291 } else if (simm) {
1292 lut_offt += simm;
1293 }
1294
1295 if (!no_mask) {
1296 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1297 addr_mask);
1298
1299 jit_andr(tmp, addr_reg, reg_imm);
1300 addr_reg = tmp;
1301
1302 lightrec_free_reg(reg_cache, reg_imm);
1303 }
1304
1305 if (addr_offset) {
1306 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1307 addr_offset);
1308 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1309 jit_addr(tmp2, addr_reg, reg_imm);
1310 addr_reg2 = tmp2;
1311
1312 lightrec_free_reg(reg_cache, reg_imm);
1313 } else {
1314 addr_reg2 = addr_reg;
1315 }
1316
1317 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1318
1319 if (is_big_endian() && swap_code && in_reg) {
1320 tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
1321
1322 jit_new_node_ww(swap_code, tmp3, rt);
1323 jit_new_node_www(code, imm, addr_reg2, tmp3);
1324
1325 lightrec_free_reg(reg_cache, tmp3);
1326 } else {
1327 jit_new_node_www(code, imm, addr_reg2, rt);
1328 }
1329
1330 lightrec_free_reg(reg_cache, rt);
1331
1332 if (invalidate) {
1333 tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1334
1335 if (c.i.op != OP_SW) {
1336 jit_andi(tmp, addr_reg, ~3);
1337 addr_reg = tmp;
1338 }
1339
1340 if (!lut_is_32bit(state)) {
1341 jit_lshi(tmp, addr_reg, 1);
1342 addr_reg = tmp;
1343 }
1344
1345 if (addr_reg == rs && c.i.rs == 0) {
1346 addr_reg = LIGHTREC_REG_STATE;
1347 } else {
1348 jit_add_state(tmp, addr_reg);
1349 addr_reg = tmp;
1350 }
1351
1352 if (lut_is_32bit(state))
1353 jit_stxi_i(lut_offt, addr_reg, tmp3);
1354 else
1355 jit_stxi(lut_offt, addr_reg, tmp3);
1356
1357 lightrec_free_reg(reg_cache, tmp3);
1358 }
1359
1360 if (addr_offset)
1361 lightrec_free_reg(reg_cache, tmp2);
1362 if (need_tmp)
1363 lightrec_free_reg(reg_cache, tmp);
1364 lightrec_free_reg(reg_cache, rs);
1365}
1366
1367static void rec_store_ram(struct lightrec_cstate *cstate,
1368 const struct block *block,
1369 u16 offset, jit_code_t code,
1370 jit_code_t swap_code, bool invalidate)
1371{
1372 const struct lightrec_state *state = cstate->state;
1373
1374 _jit_note(block->_jit, __FILE__, __LINE__);
1375
1376 return rec_store_memory(cstate, block, offset, code, swap_code,
1377 state->offset_ram, rec_ram_mask(state),
1378 invalidate);
1379}
1380
1381static void rec_store_scratch(struct lightrec_cstate *cstate,
1382 const struct block *block, u16 offset,
1383 jit_code_t code, jit_code_t swap_code)
1384{
1385 _jit_note(block->_jit, __FILE__, __LINE__);
1386
1387 return rec_store_memory(cstate, block, offset, code, swap_code,
1388 cstate->state->offset_scratch,
1389 0x1fffffff, false);
1390}
1391
1392static void rec_store_io(struct lightrec_cstate *cstate,
1393 const struct block *block, u16 offset,
1394 jit_code_t code, jit_code_t swap_code)
1395{
1396 _jit_note(block->_jit, __FILE__, __LINE__);
1397
1398 return rec_store_memory(cstate, block, offset, code, swap_code,
1399 cstate->state->offset_io,
1400 rec_io_mask(cstate->state), false);
1401}
1402
1403static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate,
1404 const struct block *block,
1405 u16 offset, jit_code_t code,
1406 jit_code_t swap_code)
1407{
1408 const struct lightrec_state *state = cstate->state;
1409 struct regcache *reg_cache = cstate->reg_cache;
1410 union code c = block->opcode_list[offset].c;
1411 jit_state_t *_jit = block->_jit;
1412 jit_node_t *to_not_ram, *to_end;
1413 bool swc2 = c.i.op == OP_SWC2;
1414 u8 tmp, tmp2 = 0, rs, rt, in_reg = swc2 ? REG_TEMP : c.i.rt;
1415 u32 addr_mask;
1416 s32 reg_imm;
1417 s16 imm;
1418
1419 jit_note(__FILE__, __LINE__);
1420 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1421 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1422
1423 if (state->mirrors_mapped)
1424 addr_mask = 0x1f800000 | (4 * RAM_SIZE - 1);
1425 else
1426 addr_mask = 0x1f800000 | (RAM_SIZE - 1);
1427
1428 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
1429
1430 /* Convert to KUNSEG and avoid RAM mirrors */
1431 if (!state->mirrors_mapped && c.i.imm) {
1432 imm = 0;
1433 jit_addi(tmp, rs, (s16)c.i.imm);
1434 jit_andr(tmp, tmp, reg_imm);
1435 } else {
1436 imm = (s16)c.i.imm;
1437 jit_andr(tmp, rs, reg_imm);
1438 }
1439
1440 lightrec_free_reg(reg_cache, rs);
1441 lightrec_free_reg(reg_cache, reg_imm);
1442
1443 if (state->offset_ram != state->offset_scratch) {
1444 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1445
1446 to_not_ram = jit_bmsi(tmp, BIT(28));
1447
1448 jit_movi(tmp2, state->offset_ram);
1449
1450 to_end = jit_b();
1451 jit_patch(to_not_ram);
1452
1453 jit_movi(tmp2, state->offset_scratch);
1454 jit_patch(to_end);
1455 } else if (state->offset_ram) {
1456 tmp2 = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1457 state->offset_ram);
1458 }
1459
1460 if (state->offset_ram || state->offset_scratch) {
1461 jit_addr(tmp, tmp, tmp2);
1462 lightrec_free_reg(reg_cache, tmp2);
1463 }
1464
1465 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1466
1467 if (is_big_endian() && swap_code && in_reg) {
1468 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1469
1470 jit_new_node_ww(swap_code, tmp2, rt);
1471 jit_new_node_www(code, imm, tmp, tmp2);
1472
1473 lightrec_free_reg(reg_cache, tmp2);
1474 } else {
1475 jit_new_node_www(code, imm, tmp, rt);
1476 }
1477
1478 lightrec_free_reg(reg_cache, rt);
1479 lightrec_free_reg(reg_cache, tmp);
1480}
1481
1482static void rec_store_direct(struct lightrec_cstate *cstate, const struct block *block,
1483 u16 offset, jit_code_t code, jit_code_t swap_code)
1484{
1485 const struct lightrec_state *state = cstate->state;
1486 u32 ram_size = state->mirrors_mapped ? RAM_SIZE * 4 : RAM_SIZE;
1487 struct regcache *reg_cache = cstate->reg_cache;
1488 union code c = block->opcode_list[offset].c;
1489 jit_state_t *_jit = block->_jit;
1490 jit_node_t *to_not_ram, *to_end;
1491 bool swc2 = c.i.op == OP_SWC2;
1492 u8 tmp, tmp2, tmp3, masked_reg, rs, rt;
1493 u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
1494 u32 addr_mask = 0x1f800000 | (ram_size - 1);
1495 bool different_offsets = state->offset_ram != state->offset_scratch;
1496 s32 reg_imm;
1497
1498 jit_note(__FILE__, __LINE__);
1499
1500 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1501 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1502 tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1503
1504 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
1505
1506 /* Convert to KUNSEG and avoid RAM mirrors */
1507 if (c.i.imm) {
1508 jit_addi(tmp2, rs, (s16)c.i.imm);
1509 jit_andr(tmp2, tmp2, reg_imm);
1510 } else {
1511 jit_andr(tmp2, rs, reg_imm);
1512 }
1513
1514 lightrec_free_reg(reg_cache, rs);
1515 lightrec_free_reg(reg_cache, reg_imm);
1516 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1517
1518 if (different_offsets) {
1519 to_not_ram = jit_bgti(tmp2, ram_size);
1520 masked_reg = tmp2;
1521 } else {
1522 jit_lti_u(tmp, tmp2, ram_size);
1523 jit_movnr(tmp, tmp2, tmp);
1524 masked_reg = tmp;
1525 }
1526
1527 /* Compute the offset to the code LUT */
1528 if (c.i.op == OP_SW)
1529 jit_andi(tmp, masked_reg, RAM_SIZE - 1);
1530 else
1531 jit_andi(tmp, masked_reg, (RAM_SIZE - 1) & ~3);
1532
1533 if (!lut_is_32bit(state))
1534 jit_lshi(tmp, tmp, 1);
1535 jit_add_state(tmp, tmp);
1536
1537 /* Write NULL to the code LUT to invalidate any block that's there */
1538 if (lut_is_32bit(state))
1539 jit_stxi_i(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
1540 else
1541 jit_stxi(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
1542
1543 if (different_offsets) {
1544 jit_movi(tmp, state->offset_ram);
1545
1546 to_end = jit_b();
1547 jit_patch(to_not_ram);
1548 }
1549
1550 if (state->offset_ram || state->offset_scratch)
1551 jit_movi(tmp, state->offset_scratch);
1552
1553 if (different_offsets)
1554 jit_patch(to_end);
1555
1556 if (state->offset_ram || state->offset_scratch)
1557 jit_addr(tmp2, tmp2, tmp);
1558
1559 lightrec_free_reg(reg_cache, tmp);
1560 lightrec_free_reg(reg_cache, tmp3);
1561
1562 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1563
1564 if (is_big_endian() && swap_code && in_reg) {
1565 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1566
1567 jit_new_node_ww(swap_code, tmp, rt);
1568 jit_new_node_www(code, 0, tmp2, tmp);
1569
1570 lightrec_free_reg(reg_cache, tmp);
1571 } else {
1572 jit_new_node_www(code, 0, tmp2, rt);
1573 }
1574
1575 lightrec_free_reg(reg_cache, rt);
1576 lightrec_free_reg(reg_cache, tmp2);
1577}
1578
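/* Dispatch a store according to the I/O mode tagged by the optimizer. For SWC2
 * the CP2 register is first copied to REG_TEMP with rec_cp2_do_mfc2(), as the
 * store helpers read the value to store from REG_TEMP. */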
1579static void rec_store(struct lightrec_cstate *state,
1580 const struct block *block, u16 offset,
1581 jit_code_t code, jit_code_t swap_code)
1582{
1583 u32 flags = block->opcode_list[offset].flags;
1584 u32 mode = LIGHTREC_FLAGS_GET_IO_MODE(flags);
1585 bool no_invalidate = op_flag_no_invalidate(flags) ||
1586 (state->state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY);
1587 union code c = block->opcode_list[offset].c;
1588 bool is_swc2 = c.i.op == OP_SWC2;
1589
1590 if (is_swc2) {
1591 switch (mode) {
1592 case LIGHTREC_IO_RAM:
1593 case LIGHTREC_IO_SCRATCH:
1594 case LIGHTREC_IO_DIRECT:
1595 case LIGHTREC_IO_DIRECT_HW:
1596 rec_cp2_do_mfc2(state, block, offset, c.i.rt, REG_TEMP);
1597 break;
1598 default:
1599 break;
1600 }
1601 }
1602
1603 switch (mode) {
1604 case LIGHTREC_IO_RAM:
1605 rec_store_ram(state, block, offset, code,
1606 swap_code, !no_invalidate);
1607 break;
1608 case LIGHTREC_IO_SCRATCH:
1609 rec_store_scratch(state, block, offset, code, swap_code);
1610 break;
1611 case LIGHTREC_IO_DIRECT:
1612 if (no_invalidate) {
1613 rec_store_direct_no_invalidate(state, block, offset,
1614 code, swap_code);
1615 } else {
1616 rec_store_direct(state, block, offset, code, swap_code);
1617 }
1618 break;
1619 case LIGHTREC_IO_DIRECT_HW:
1620 rec_store_io(state, block, offset, code, swap_code);
1621 break;
1622 default:
1623 rec_io(state, block, offset, true, false);
1624 return;
1625 }
1626
1627 if (is_swc2)
1628 lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP);
1629}
1630
1631static void rec_SB(struct lightrec_cstate *state,
1632 const struct block *block, u16 offset)
1633{
1634 _jit_name(block->_jit, __func__);
1635 rec_store(state, block, offset, jit_code_stxi_c, 0);
1636}
1637
1638static void rec_SH(struct lightrec_cstate *state,
1639 const struct block *block, u16 offset)
1640{
1641 _jit_name(block->_jit, __func__);
1642 rec_store(state, block, offset,
1643 jit_code_stxi_s, jit_code_bswapr_us);
1644}
1645
1646static void rec_SW(struct lightrec_cstate *state,
1647 const struct block *block, u16 offset)
1648
1649{
1650 union code c = block->opcode_list[offset].c;
1651
1652 _jit_name(block->_jit, c.i.op == OP_SWC2 ? "rec_SWC2" : "rec_SW");
1653 rec_store(state, block, offset,
1654 jit_code_stxi_i, jit_code_bswapr_ui);
1655}
1656
1657static void rec_SWL(struct lightrec_cstate *state,
1658 const struct block *block, u16 offset)
1659{
1660 _jit_name(block->_jit, __func__);
1661 rec_io(state, block, offset, true, false);
1662}
1663
1664static void rec_SWR(struct lightrec_cstate *state,
1665 const struct block *block, u16 offset)
1666{
1667 _jit_name(block->_jit, __func__);
1668 rec_io(state, block, offset, true, false);
1669}
1670
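/* Fast path for loads whose target memory map is known at compile time: mask
 * the guest address into the map, add the host offset, then load. On big-endian
 * hosts the value is byte-swapped and re-extended afterwards. */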
1671static void rec_load_memory(struct lightrec_cstate *cstate,
1672 const struct block *block, u16 offset,
1673 jit_code_t code, jit_code_t swap_code, bool is_unsigned,
1674 uintptr_t addr_offset, u32 addr_mask)
1675{
1676 struct regcache *reg_cache = cstate->reg_cache;
1677 struct opcode *op = &block->opcode_list[offset];
1678 bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
1679 jit_state_t *_jit = block->_jit;
1680 u8 rs, rt, out_reg, addr_reg, flags = REG_EXT;
1681 bool no_mask = op_flag_no_mask(op->flags);
1682 union code c = op->c;
1683 s8 reg_imm;
1684 s16 imm;
1685
1686 if (load_delay || c.i.op == OP_LWC2)
1687 out_reg = REG_TEMP;
1688 else if (c.i.rt)
1689 out_reg = c.i.rt;
1690 else
1691 return;
1692
1693 if (is_unsigned)
1694 flags |= REG_ZEXT;
1695
1696 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1697 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
1698
1699 if (!cstate->state->mirrors_mapped && c.i.imm && !no_mask) {
1700 jit_addi(rt, rs, (s16)c.i.imm);
1701 addr_reg = rt;
1702 imm = 0;
1703 } else {
1704 addr_reg = rs;
1705 imm = (s16)c.i.imm;
1706 }
1707
1708 if (!no_mask) {
1709 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1710 addr_mask);
1711
1712 jit_andr(rt, addr_reg, reg_imm);
1713 addr_reg = rt;
1714
1715 lightrec_free_reg(reg_cache, reg_imm);
1716 }
1717
1718 if (addr_offset) {
1719 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1720 addr_offset);
1721
1722 jit_addr(rt, addr_reg, reg_imm);
1723 addr_reg = rt;
1724
1725 lightrec_free_reg(reg_cache, reg_imm);
1726 }
1727
1728 jit_new_node_www(code, rt, addr_reg, imm);
1729
1730 if (is_big_endian() && swap_code) {
1731 jit_new_node_ww(swap_code, rt, rt);
1732
1733 if (c.i.op == OP_LH)
1734 jit_extr_s(rt, rt);
1735 else if (c.i.op == OP_LW && __WORDSIZE == 64)
1736 jit_extr_i(rt, rt);
1737 }
1738
1739 lightrec_free_reg(reg_cache, rs);
1740 lightrec_free_reg(reg_cache, rt);
1741}
1742
1743static void rec_load_ram(struct lightrec_cstate *cstate,
1744 const struct block *block, u16 offset,
1745 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1746{
1747 _jit_note(block->_jit, __FILE__, __LINE__);
1748
1749 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1750 cstate->state->offset_ram, rec_ram_mask(cstate->state));
1751}
1752
1753static void rec_load_bios(struct lightrec_cstate *cstate,
1754 const struct block *block, u16 offset,
1755 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1756{
1757 _jit_note(block->_jit, __FILE__, __LINE__);
1758
1759 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1760 cstate->state->offset_bios, 0x1fffffff);
1761}
1762
1763static void rec_load_scratch(struct lightrec_cstate *cstate,
1764 const struct block *block, u16 offset,
1765 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1766{
1767 _jit_note(block->_jit, __FILE__, __LINE__);
1768
1769 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1770 cstate->state->offset_scratch, 0x1fffffff);
1771}
1772
1773static void rec_load_io(struct lightrec_cstate *cstate,
1774 const struct block *block, u16 offset,
1775 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1776{
1777 _jit_note(block->_jit, __FILE__, __LINE__);
1778
1779 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1780 cstate->state->offset_io, rec_io_mask(cstate->state));
1781}
1782
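/* Direct load whose address may point to RAM, BIOS or scratchpad: test bits 28
 * and 22 of the address at run time to select the right mask and host offset. */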
1783static void rec_load_direct(struct lightrec_cstate *cstate,
1784 const struct block *block, u16 offset,
1785 jit_code_t code, jit_code_t swap_code,
1786 bool is_unsigned)
1787{
1788 const struct lightrec_state *state = cstate->state;
1789 struct regcache *reg_cache = cstate->reg_cache;
1790 struct opcode *op = &block->opcode_list[offset];
1791 bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
1792 jit_state_t *_jit = block->_jit;
1793 jit_node_t *to_not_ram, *to_not_bios, *to_end, *to_end2;
1794 u8 tmp, rs, rt, out_reg, addr_reg, flags = REG_EXT;
1795 bool different_offsets = state->offset_bios != state->offset_scratch;
1796 union code c = op->c;
1797 s32 addr_mask;
1798 u32 reg_imm;
1799 s8 offt_reg;
1800 s16 imm;
1801
1802 if (load_delay || c.i.op == OP_LWC2)
1803 out_reg = REG_TEMP;
1804 else if (c.i.rt)
1805 out_reg = c.i.rt;
1806 else
1807 return;
1808
1809 if (is_unsigned)
1810 flags |= REG_ZEXT;
1811
1812 jit_note(__FILE__, __LINE__);
1813 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1814 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
1815
1816 if ((state->offset_ram == state->offset_bios &&
1817 state->offset_ram == state->offset_scratch &&
1818 state->mirrors_mapped) || !c.i.imm) {
1819 addr_reg = rs;
1820 imm = (s16)c.i.imm;
1821 } else {
1822 jit_addi(rt, rs, (s16)c.i.imm);
1823 addr_reg = rt;
1824 imm = 0;
1825
1826 if (c.i.rs != c.i.rt)
1827 lightrec_free_reg(reg_cache, rs);
1828 }
1829
1830 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1831
1832 if (state->offset_ram == state->offset_bios &&
1833 state->offset_ram == state->offset_scratch) {
1834 if (!state->mirrors_mapped)
1835 addr_mask = 0x1f800000 | (RAM_SIZE - 1);
1836 else
1837 addr_mask = 0x1fffffff;
1838
1839 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1840 addr_mask);
1841 if (!state->mirrors_mapped) {
1842 jit_andi(tmp, addr_reg, BIT(28));
1843 jit_rshi_u(tmp, tmp, 28 - 22);
1844 jit_orr(tmp, tmp, reg_imm);
1845 jit_andr(rt, addr_reg, tmp);
1846 } else {
1847 jit_andr(rt, addr_reg, reg_imm);
1848 }
1849
1850 lightrec_free_reg(reg_cache, reg_imm);
1851
1852 if (state->offset_ram) {
1853 offt_reg = lightrec_get_reg_with_value(reg_cache,
1854 state->offset_ram);
1855 if (offt_reg < 0) {
1856 jit_movi(tmp, state->offset_ram);
1857 lightrec_temp_set_value(reg_cache, tmp,
1858 state->offset_ram);
1859 } else {
1860 lightrec_free_reg(reg_cache, tmp);
1861 tmp = offt_reg;
1862 }
1863 }
1864 } else {
1865 to_not_ram = jit_bmsi(addr_reg, BIT(28));
1866
1867 /* Convert to KUNSEG and avoid RAM mirrors */
1868 jit_andi(rt, addr_reg, RAM_SIZE - 1);
1869
1870 if (state->offset_ram)
1871 jit_movi(tmp, state->offset_ram);
1872
1873 to_end = jit_b();
1874
1875 jit_patch(to_not_ram);
1876
1877 if (different_offsets)
1878 to_not_bios = jit_bmci(addr_reg, BIT(22));
1879
1880 /* Convert to KUNSEG */
1881 jit_andi(rt, addr_reg, 0x1fc00000 | (BIOS_SIZE - 1));
1882
1883 jit_movi(tmp, state->offset_bios);
1884
1885 if (different_offsets) {
1886 to_end2 = jit_b();
1887
1888 jit_patch(to_not_bios);
1889
1890 /* Convert to KUNSEG */
1891 jit_andi(rt, addr_reg, 0x1f800fff);
1892
1893 if (state->offset_scratch)
1894 jit_movi(tmp, state->offset_scratch);
1895
1896 jit_patch(to_end2);
1897 }
1898
1899 jit_patch(to_end);
1900 }
1901
1902 if (state->offset_ram || state->offset_bios || state->offset_scratch)
1903 jit_addr(rt, rt, tmp);
1904
1905 jit_new_node_www(code, rt, rt, imm);
1906
1907 if (is_big_endian() && swap_code) {
1908 jit_new_node_ww(swap_code, rt, rt);
1909
1910 if (c.i.op == OP_LH)
1911 jit_extr_s(rt, rt);
1912 else if (c.i.op == OP_LW && __WORDSIZE == 64)
1913 jit_extr_i(rt, rt);
1914 }
1915
1916 lightrec_free_reg(reg_cache, addr_reg);
1917 lightrec_free_reg(reg_cache, rt);
1918 lightrec_free_reg(reg_cache, tmp);
1919}
1920
1921static void rec_load(struct lightrec_cstate *state, const struct block *block,
1922 u16 offset, jit_code_t code, jit_code_t swap_code,
1923 bool is_unsigned)
1924{
1925 const struct opcode *op = &block->opcode_list[offset];
1926 u32 flags = op->flags;
1927
1928 switch (LIGHTREC_FLAGS_GET_IO_MODE(flags)) {
1929 case LIGHTREC_IO_RAM:
1930 rec_load_ram(state, block, offset, code, swap_code, is_unsigned);
1931 break;
1932 case LIGHTREC_IO_BIOS:
1933 rec_load_bios(state, block, offset, code, swap_code, is_unsigned);
1934 break;
1935 case LIGHTREC_IO_SCRATCH:
1936 rec_load_scratch(state, block, offset, code, swap_code, is_unsigned);
1937 break;
1938 case LIGHTREC_IO_DIRECT_HW:
1939 rec_load_io(state, block, offset, code, swap_code, is_unsigned);
1940 break;
1941 case LIGHTREC_IO_DIRECT:
1942 rec_load_direct(state, block, offset, code, swap_code, is_unsigned);
1943 break;
1944 default:
1945 rec_io(state, block, offset, false, true);
1946 return;
1947 }
1948
1949 if (op->i.op == OP_LWC2) {
1950 rec_cp2_do_mtc2(state, block, offset, op->i.rt, REG_TEMP);
1951 lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP);
1952 }
1953}
1954
1955static void rec_LB(struct lightrec_cstate *state, const struct block *block, u16 offset)
1956{
1957 _jit_name(block->_jit, __func__);
1958 rec_load(state, block, offset, jit_code_ldxi_c, 0, false);
1959}
1960
1961static void rec_LBU(struct lightrec_cstate *state, const struct block *block, u16 offset)
1962{
1963 _jit_name(block->_jit, __func__);
1964 rec_load(state, block, offset, jit_code_ldxi_uc, 0, true);
1965}
1966
1967static void rec_LH(struct lightrec_cstate *state, const struct block *block, u16 offset)
1968{
1969 jit_code_t code = is_big_endian() ? jit_code_ldxi_us : jit_code_ldxi_s;
1970
1971 _jit_name(block->_jit, __func__);
1972 rec_load(state, block, offset, code, jit_code_bswapr_us, false);
1973}
1974
1975static void rec_LHU(struct lightrec_cstate *state, const struct block *block, u16 offset)
1976{
1977 _jit_name(block->_jit, __func__);
1978 rec_load(state, block, offset, jit_code_ldxi_us, jit_code_bswapr_us, true);
1979}
1980
1981static void rec_LWL(struct lightrec_cstate *state, const struct block *block, u16 offset)
1982{
1983 _jit_name(block->_jit, __func__);
1984 rec_io(state, block, offset, true, true);
1985}
1986
1987static void rec_LWR(struct lightrec_cstate *state, const struct block *block, u16 offset)
1988{
1989 _jit_name(block->_jit, __func__);
1990 rec_io(state, block, offset, true, true);
1991}
1992
1993static void rec_LW(struct lightrec_cstate *state, const struct block *block, u16 offset)
1994{
1995 union code c = block->opcode_list[offset].c;
1996 jit_code_t code;
1997
1998 if (is_big_endian() && __WORDSIZE == 64)
1999 code = jit_code_ldxi_ui;
2000 else
2001 code = jit_code_ldxi_i;
2002
2003 _jit_name(block->_jit, c.i.op == OP_LWC2 ? "rec_LWC2" : "rec_LW");
2004 rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
2005}
2006
2007static void rec_exit_early(struct lightrec_cstate *state,
2008 const struct block *block, u16 offset,
2009 u32 exit_code, u32 pc)
2010{
2011 struct regcache *reg_cache = state->reg_cache;
2012 jit_state_t *_jit = block->_jit;
2013 u8 tmp;
2014
2015 _jit_note(block->_jit, __FILE__, __LINE__);
2016
2017 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2018
2019 jit_movi(tmp, exit_code);
2020 jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
2021 LIGHTREC_REG_STATE, tmp);
2022
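/* Flush the cycle counters: compute the cycles elapsed so far
 * (target_cycle minus the remaining cycles), zero the remaining-cycles
 * register, and store the result as both target_cycle and
 * current_cycle, so the main loop will stop once this block returns. */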
2023 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2024 offsetof(struct lightrec_state, target_cycle));
2025 jit_subr(tmp, tmp, LIGHTREC_REG_CYCLE);
2026 jit_movi(LIGHTREC_REG_CYCLE, 0);
2027 jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
2028 LIGHTREC_REG_STATE, tmp);
2029 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
2030 LIGHTREC_REG_STATE, tmp);
2031
2032 lightrec_free_reg(reg_cache, tmp);
2033
2034 lightrec_emit_end_of_block(state, block, offset, -1, pc, 31, 0, true);
2035}
2036
2037static void rec_special_SYSCALL(struct lightrec_cstate *state,
2038 const struct block *block, u16 offset)
2039{
2040 _jit_name(block->_jit, __func__);
2041
2042 /* TODO: the return address should be "pc - 4" if we're in a delay slot */
2043 rec_exit_early(state, block, offset, LIGHTREC_EXIT_SYSCALL,
2044 get_ds_pc(block, offset, 0));
2045}
2046
2047static void rec_special_BREAK(struct lightrec_cstate *state,
2048 const struct block *block, u16 offset)
2049{
2050 _jit_name(block->_jit, __func__);
2051 rec_exit_early(state, block, offset, LIGHTREC_EXIT_BREAK,
2052 get_ds_pc(block, offset, 0));
2053}
2054
2055static void rec_mfc(struct lightrec_cstate *state, const struct block *block, u16 offset)
2056{
2057 struct regcache *reg_cache = state->reg_cache;
2058 union code c = block->opcode_list[offset].c;
2059 jit_state_t *_jit = block->_jit;
2060
2061 jit_note(__FILE__, __LINE__);
2062
2063 if (c.i.op != OP_SWC2)
2064 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
2065
2066 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MFC);
2067}
2068
2069static void rec_mtc(struct lightrec_cstate *state, const struct block *block, u16 offset)
2070{
2071 struct regcache *reg_cache = state->reg_cache;
2072 union code c = block->opcode_list[offset].c;
2073 jit_state_t *_jit = block->_jit;
2074
2075 jit_note(__FILE__, __LINE__);
2076 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
2077 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
2078 lightrec_clean_reg_if_loaded(reg_cache, _jit, REG_TEMP, false);
2079
2080 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MTC);
2081
2082 if (c.i.op == OP_CP0 &&
2083 !op_flag_no_ds(block->opcode_list[offset].flags) &&
2084 (c.r.rd == 12 || c.r.rd == 13))
2085 lightrec_emit_end_of_block(state, block, offset, -1,
2086 get_ds_pc(block, offset, 1),
2087 0, 0, true);
2088}
2089
2090static void
2091rec_mfc0(struct lightrec_cstate *state, const struct block *block, u16 offset)
2092{
2093 struct regcache *reg_cache = state->reg_cache;
2094 union code c = block->opcode_list[offset].c;
2095 jit_state_t *_jit = block->_jit;
2096 u8 rt;
2097
2098 jit_note(__FILE__, __LINE__);
2099
2100 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, REG_EXT);
2101
2102 jit_ldxi_i(rt, LIGHTREC_REG_STATE,
2103 offsetof(struct lightrec_state, regs.cp0[c.r.rd]));
2104
2105 lightrec_free_reg(reg_cache, rt);
2106}
2107
2108static bool block_uses_icache(const struct lightrec_cstate *state,
2109 const struct block *block)
2110{
2111 const struct lightrec_mem_map *map = &state->state->maps[PSX_MAP_KERNEL_USER_RAM];
2112 u32 pc = kunseg(block->pc);
2113
2114 if (pc < map->pc || pc >= map->pc + map->length)
2115 return false;
2116
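/* Segments 0x0-0x7 (kuseg) and 0x8-0x9 (kseg0) are cached;
 * 0xa (kseg1) is uncached, so blocks there don't go through the
 * instruction cache. */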
2117 return (block->pc >> 28) < 0xa;
2118}
2119
2120static void
2121rec_mtc0(struct lightrec_cstate *state, const struct block *block, u16 offset)
2122{
2123 struct regcache *reg_cache = state->reg_cache;
2124 const union code c = block->opcode_list[offset].c;
2125 jit_state_t *_jit = block->_jit;
2126 u8 rt, tmp = 0, tmp2, status;
2127 jit_node_t *to_end;
2128
2129 jit_note(__FILE__, __LINE__);
2130
2131 switch(c.r.rd) {
2132 case 1:
2133 case 4:
2134 case 8:
2135 case 14:
2136 case 15:
2137 /* Those registers are read-only */
2138 return;
2139 default:
2140 break;
2141 }
2142
2143 if (!block_uses_icache(state, block) && c.r.rd == 12) {
2144 /* If we are not running code from RAM through kuseg or
2145 * kseg0, handle writes to the Status register in C, as the
2146 * code may toggle bit 16, which isolates the cache. Code
2147 * running from kuseg or kseg0 in RAM cannot do that. */
2148 rec_mtc(state, block, offset);
2149 return;
2150 }
2151
2152 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rt, 0);
2153
2154 if (c.r.rd != 13) {
2155 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[c.r.rd]),
2156 LIGHTREC_REG_STATE, rt);
2157 }
2158
2159 if (c.r.rd == 12 || c.r.rd == 13) {
2160 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2161 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2162 offsetof(struct lightrec_state, regs.cp0[13]));
2163
2164 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2165 }
2166
2167 if (c.r.rd == 12) {
2168 status = rt;
2169 } else if (c.r.rd == 13) {
2170 /* Cause = (Cause & ~0x0300) | (value & 0x0300) */
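/* The ori/xori pair below clears bits 8-9 of the old Cause value
 * without needing a second immediate for ~0x0300. */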
2171 jit_andi(tmp2, rt, 0x0300);
2172 jit_ori(tmp, tmp, 0x0300);
2173 jit_xori(tmp, tmp, 0x0300);
2174 jit_orr(tmp, tmp, tmp2);
2175 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
2176 offsetof(struct lightrec_state, regs.cp0[12]));
2177 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[13]),
2178 LIGHTREC_REG_STATE, tmp);
2179 status = tmp2;
2180 }
2181
2182 if (c.r.rd == 12 || c.r.rd == 13) {
2183 /* Exit dynarec in case there's a software interrupt.
2184 * exit_flags = !!(status & tmp & 0x0300) & status; */
2185 jit_andr(tmp, tmp, status);
2186 jit_andi(tmp, tmp, 0x0300);
2187 jit_nei(tmp, tmp, 0);
2188 jit_andr(tmp, tmp, status);
2189 }
2190
2191 if (c.r.rd == 12) {
2192 /* Exit dynarec in case we unmask a hardware interrupt.
2193 * exit_flags = !(~status & 0x401) */
2194
2195 jit_comr(tmp2, status);
2196 jit_andi(tmp2, tmp2, 0x401);
2197 jit_eqi(tmp2, tmp2, 0);
2198 jit_orr(tmp, tmp, tmp2);
2199 }
2200
2201 lightrec_free_reg(reg_cache, rt);
2202
2203 if (c.r.rd == 12 || c.r.rd == 13) {
2204 to_end = jit_beqi(tmp, 0);
2205
2206 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
2207 offsetof(struct lightrec_state, target_cycle));
2208 jit_subr(tmp2, tmp2, LIGHTREC_REG_CYCLE);
2209 jit_movi(LIGHTREC_REG_CYCLE, 0);
2210 jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
2211 LIGHTREC_REG_STATE, tmp2);
2212 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
2213 LIGHTREC_REG_STATE, tmp2);
2214
2216 jit_patch(to_end);
2217 }
2218
2219 if (!op_flag_no_ds(block->opcode_list[offset].flags) &&
2220 (c.r.rd == 12 || c.r.rd == 13)) {
2221 state->cycles += lightrec_cycles_of_opcode(state->state, c);
2222 lightrec_emit_eob(state, block, offset + 1);
2223 }
2224}
2225
2226static void rec_cp0_MFC0(struct lightrec_cstate *state,
2227 const struct block *block, u16 offset)
2228{
2229 _jit_name(block->_jit, __func__);
2230 rec_mfc0(state, block, offset);
2231}
2232
2233static void rec_cp0_CFC0(struct lightrec_cstate *state,
2234 const struct block *block, u16 offset)
2235{
2236 _jit_name(block->_jit, __func__);
2237 rec_mfc0(state, block, offset);
2238}
2239
2240static void rec_cp0_MTC0(struct lightrec_cstate *state,
2241 const struct block *block, u16 offset)
2242{
2243 _jit_name(block->_jit, __func__);
2244 rec_mtc0(state, block, offset);
2245}
2246
2247static void rec_cp0_CTC0(struct lightrec_cstate *state,
2248 const struct block *block, u16 offset)
2249{
2250 _jit_name(block->_jit, __func__);
2251 rec_mtc0(state, block, offset);
2252}
2253
2254static unsigned int cp2d_i_offset(u8 reg)
2255{
2256 return offsetof(struct lightrec_state, regs.cp2d[reg]);
2257}
2258
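/* Offset of the low 16-bit half of the register: on big-endian hosts
 * the low half lives 2 bytes further in. */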
2259static unsigned int cp2d_s_offset(u8 reg)
2260{
2261 return cp2d_i_offset(reg) + is_big_endian() * 2;
2262}
2263
2264static unsigned int cp2c_i_offset(u8 reg)
2265{
2266 return offsetof(struct lightrec_state, regs.cp2c[reg]);
2267}
2268
2269static unsigned int cp2c_s_offset(u8 reg)
2270{
2271 return cp2c_i_offset(reg) + is_big_endian() * 2;
2272}
2273
2274static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
2275 const struct block *block, u16 offset,
2276 u8 reg, u8 out_reg)
2277{
2278 struct regcache *reg_cache = state->reg_cache;
2279 jit_state_t *_jit = block->_jit;
2280 const u32 zext_regs = 0x300f0080;
2281 u8 rt, tmp, tmp2, tmp3, out, flags;
2282 unsigned int i;
2283
2284 _jit_name(block->_jit, __func__);
2285
2286 if (state->state->ops.cop2_notify) {
2287 /* We must call cop2_notify, handle that in C. */
2288 rec_mfc(state, block, offset);
2289 return;
2290 }
2291
2292 flags = (zext_regs & BIT(reg)) ? REG_ZEXT : REG_EXT;
2293 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
2294
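/* Reading SXYP (reg 15) returns SXY2 (reg 14). */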
2295 if (reg == 15)
2296 reg = 14;
2297
2298 switch (reg) {
2299 case 1:
2300 case 3:
2301 case 5:
2302 case 8:
2303 case 9:
2304 case 10:
2305 case 11:
2306 jit_ldxi_s(rt, LIGHTREC_REG_STATE, cp2d_s_offset(reg));
2307 break;
2308 case 7:
2309 case 16:
2310 case 17:
2311 case 18:
2312 case 19:
2313 jit_ldxi_us(rt, LIGHTREC_REG_STATE, cp2d_s_offset(reg));
2314 break;
2315 case 28:
2316 case 29:
2317 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2318 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2319 tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
2320
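/* IRGB/ORGB read-back: divide IR1-IR3 (cp2d[9..11]) by 0x80,
 * saturate each component to the 0..0x1f range, then pack the three
 * 5-bit fields into the result. */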
2321 for (i = 0; i < 3; i++) {
2322 out = i == 0 ? rt : tmp;
2323
2324 jit_ldxi_s(tmp, LIGHTREC_REG_STATE, cp2d_s_offset(9 + i));
2325 jit_movi(tmp2, 0x1f);
2326 jit_rshi(out, tmp, 7);
2327
2328 jit_ltr(tmp3, tmp2, out);
2329 jit_movnr(out, tmp2, tmp3);
2330
2331 jit_gei(tmp2, out, 0);
2332 jit_movzr(out, tmp2, tmp2);
2333
2334 if (i > 0) {
2335 jit_lshi(tmp, tmp, 5 * i);
2336 jit_orr(rt, rt, tmp);
2337 }
2338 }
2339
2341 lightrec_free_reg(reg_cache, tmp);
2342 lightrec_free_reg(reg_cache, tmp2);
2343 lightrec_free_reg(reg_cache, tmp3);
2344 break;
2345 default:
2346 jit_ldxi_i(rt, LIGHTREC_REG_STATE, cp2d_i_offset(reg));
2347 break;
2348 }
2349
2350 lightrec_free_reg(reg_cache, rt);
2351}
2352
2353static void rec_cp2_basic_MFC2(struct lightrec_cstate *state,
2354 const struct block *block, u16 offset)
2355{
2356 const union code c = block->opcode_list[offset].c;
2357
2358 rec_cp2_do_mfc2(state, block, offset, c.r.rd, c.r.rt);
2359}
2360
2361static void rec_cp2_basic_CFC2(struct lightrec_cstate *state,
2362 const struct block *block, u16 offset)
2363{
2364 struct regcache *reg_cache = state->reg_cache;
2365 const union code c = block->opcode_list[offset].c;
2366 jit_state_t *_jit = block->_jit;
2367 u8 rt;
2368
2369 _jit_name(block->_jit, __func__);
2370
2371 if (state->state->ops.cop2_notify) {
2372 /* We must call cop2_notify, handle that in C. */
2373 rec_mfc(state, block, offset);
2374 return;
2375 }
2376
2377 switch (c.r.rd) {
2378 case 4:
2379 case 12:
2380 case 20:
2381 case 26:
2382 case 27:
2383 case 29:
2384 case 30:
2385 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_EXT);
2386 jit_ldxi_s(rt, LIGHTREC_REG_STATE, cp2c_s_offset(c.r.rd));
2387 break;
2388 default:
2389 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_ZEXT);
2390 jit_ldxi_ui(rt, LIGHTREC_REG_STATE, cp2c_i_offset(c.r.rd));
2391 break;
2392 }
2393
2394 lightrec_free_reg(reg_cache, rt);
2395}
2396
2397static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
2398 const struct block *block, u16 offset,
2399 u8 reg, u8 in_reg)
2400{
2401 struct regcache *reg_cache = state->reg_cache;
2402 jit_state_t *_jit = block->_jit;
2403 u8 rt, tmp, tmp2, flags = 0;
2404
2405 _jit_name(block->_jit, __func__);
2406
2407 if (state->state->ops.cop2_notify) {
2408 /* We must call cop2_notify, handle that in C. */
2409 rec_mtc(state, block, offset);
2410 return;
2411 }
2412
2413 if (reg == 31)
2414 return;
2415
2416 if (reg == 30)
2417 flags |= REG_EXT;
2418
2419 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, flags);
2420
2421 switch (reg) {
2422 case 15:
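/* Writing SXYP pushes onto the screen XY FIFO:
 * SXY0 <- SXY1, SXY1 <- SXY2, SXY2 <- new value. */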
2423 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2424 jit_ldxi_i(tmp, LIGHTREC_REG_STATE, cp2d_i_offset(13));
2425
2426 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2427 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE, cp2d_i_offset(14));
2428
2429 jit_stxi_i(cp2d_i_offset(12), LIGHTREC_REG_STATE, tmp);
2430 jit_stxi_i(cp2d_i_offset(13), LIGHTREC_REG_STATE, tmp2);
2431 jit_stxi_i(cp2d_i_offset(14), LIGHTREC_REG_STATE, rt);
2432
2433 lightrec_free_reg(reg_cache, tmp);
2434 lightrec_free_reg(reg_cache, tmp2);
2435 break;
2436 case 28:
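/* IRGB write: expand each 5-bit colour field into IR1-IR3
 * (component * 0x80, i.e. shifted into bits 7-11). */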
2437 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2438
2439 jit_lshi(tmp, rt, 7);
2440 jit_andi(tmp, tmp, 0xf80);
2441 jit_stxi_s(cp2d_s_offset(9), LIGHTREC_REG_STATE, tmp);
2442
2443 jit_lshi(tmp, rt, 2);
2444 jit_andi(tmp, tmp, 0xf80);
2445 jit_stxi_s(cp2d_s_offset(10), LIGHTREC_REG_STATE, tmp);
2446
2447 jit_rshi(tmp, rt, 3);
2448 jit_andi(tmp, tmp, 0xf80);
2449 jit_stxi_s(cp2d_s_offset(11), LIGHTREC_REG_STATE, tmp);
2450
2451 lightrec_free_reg(reg_cache, tmp);
2452 break;
2453 case 30:
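/* LZCS write: count the leading zero bits (or, for negative values,
 * leading one bits) and store the count in LZCR (cp2d[31]), then fall
 * through to store the written value itself. */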
2454 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2455
2456 /* if (rt < 0) rt = ~rt; */
2457 jit_rshi(tmp, rt, 31);
2458 jit_xorr(tmp, rt, tmp);
2459
2460 /* Count leading zeros */
2461 jit_clzr(tmp, tmp);
2462 if (__WORDSIZE != 32)
2463 jit_subi(tmp, tmp, __WORDSIZE - 32);
2464
2465 jit_stxi_i(cp2d_i_offset(31), LIGHTREC_REG_STATE, tmp);
2466
2467 lightrec_free_reg(reg_cache, tmp);
2468 fallthrough;
2469 default:
2470 jit_stxi_i(cp2d_i_offset(reg), LIGHTREC_REG_STATE, rt);
2471 break;
2472 }
2473
2474 lightrec_free_reg(reg_cache, rt);
2475}
2476
2477static void rec_cp2_basic_MTC2(struct lightrec_cstate *state,
2478 const struct block *block, u16 offset)
2479{
2480 const union code c = block->opcode_list[offset].c;
2481
2482 rec_cp2_do_mtc2(state, block, offset, c.r.rd, c.r.rt);
2483}
2484
2485static void rec_cp2_basic_CTC2(struct lightrec_cstate *state,
2486 const struct block *block, u16 offset)
2487{
2488 struct regcache *reg_cache = state->reg_cache;
2489 const union code c = block->opcode_list[offset].c;
2490 jit_state_t *_jit = block->_jit;
2491 u8 rt, tmp, tmp2;
2492
2493 _jit_name(block->_jit, __func__);
2494
2495 if (state->state->ops.cop2_notify) {
2496 /* We must call cop2_notify, handle that in C. */
2497 rec_mtc(state, block, offset);
2498 return;
2499 }
2500
2501 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
2502
2503 switch (c.r.rd) {
2504 case 4:
2505 case 12:
2506 case 20:
2507 case 26:
2508 case 27:
2509 case 29:
2510 case 30:
2511 jit_stxi_s(cp2c_s_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
2512 break;
2513 case 31:
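/* FLAG register: only bits 12-30 are writable; bit 31 is recomputed
 * as the OR of the error bits (mask 0x7f87e000). */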
2514 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2515 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2516
2517 jit_andi(tmp, rt, 0x7f87e000);
2518 jit_nei(tmp, tmp, 0);
2519 jit_lshi(tmp, tmp, 31);
2520
2521 jit_andi(tmp2, rt, 0x7ffff000);
2522 jit_orr(tmp, tmp2, tmp);
2523
2524 jit_stxi_i(cp2c_i_offset(31), LIGHTREC_REG_STATE, tmp);
2525
2526 lightrec_free_reg(reg_cache, tmp);
2527 lightrec_free_reg(reg_cache, tmp2);
2528 break;
2529
2530 default:
2531 jit_stxi_i(cp2c_i_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
2532 }
2533
2534 lightrec_free_reg(reg_cache, rt);
2535}
2536
2537static void rec_cp0_RFE(struct lightrec_cstate *state,
2538 const struct block *block, u16 offset)
2539{
2540 struct regcache *reg_cache = state->reg_cache;
2541 jit_state_t *_jit = block->_jit;
2542 u8 status, tmp;
2543
2544 jit_name(__func__);
2545 jit_note(__FILE__, __LINE__);
2546
2547 status = lightrec_alloc_reg_temp(reg_cache, _jit);
2548 jit_ldxi_i(status, LIGHTREC_REG_STATE,
2549 offsetof(struct lightrec_state, regs.cp0[12]));
2550
2551 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2552
2553 /* status = ((status >> 2) & 0xf) | status & ~0xf; */
2554 jit_rshi(tmp, status, 2);
2555 jit_andi(tmp, tmp, 0xf);
2556 jit_andi(status, status, ~0xful);
2557 jit_orr(status, status, tmp);
2558
2559 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2560 offsetof(struct lightrec_state, regs.cp0[13]));
2561 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[12]),
2562 LIGHTREC_REG_STATE, status);
2563
2564 /* Exit dynarec in case there's a software interrupt.
2565 * exit_flags = !!(status & cause & 0x0300) & status; */
2566 jit_andr(tmp, tmp, status);
2567 jit_andi(tmp, tmp, 0x0300);
2568 jit_nei(tmp, tmp, 0);
2569 jit_andr(tmp, tmp, status);
2570 jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
2571 LIGHTREC_REG_STATE, tmp);
2572
2573 lightrec_free_reg(reg_cache, status);
2574 lightrec_free_reg(reg_cache, tmp);
2575}
2576
2577static void rec_CP(struct lightrec_cstate *state,
2578 const struct block *block, u16 offset)
2579{
2580 union code c = block->opcode_list[offset].c;
2581 jit_state_t *_jit = block->_jit;
2582
2583 jit_name(__func__);
2584 jit_note(__FILE__, __LINE__);
2585
2586 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_CP);
2587}
2588
2589static void rec_meta_MOV(struct lightrec_cstate *state,
2590 const struct block *block, u16 offset)
2591{
2592 struct regcache *reg_cache = state->reg_cache;
2593 const struct opcode *op = &block->opcode_list[offset];
2594 union code c = op->c;
2595 jit_state_t *_jit = block->_jit;
2596 bool unload_rd;
2597 bool unload_rs, discard_rs;
2598 u8 rs, rd;
2599
2600 _jit_name(block->_jit, __func__);
2601 jit_note(__FILE__, __LINE__);
2602
2603 unload_rs = OPT_EARLY_UNLOAD
2604 && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_UNLOAD;
2605 discard_rs = OPT_EARLY_UNLOAD
2606 && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_DISCARD;
2607
2608 if ((unload_rs || discard_rs) && c.m.rs) {
2609 /* If the source register is going to be unloaded or discarded,
2610 * then we can simply mark its host register as now pointing to
2611 * the destination register. */
2612 pr_debug("Remap %s to %s at offset 0x%x\n",
2613 lightrec_reg_name(c.m.rs), lightrec_reg_name(c.m.rd),
2614 offset << 2);
2615 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2616 lightrec_remap_reg(reg_cache, _jit, rs, c.m.rd, discard_rs);
2617 lightrec_free_reg(reg_cache, rs);
2618 return;
2619 }
2620
2621 unload_rd = OPT_EARLY_UNLOAD
2622 && LIGHTREC_FLAGS_GET_RD(op->flags) == LIGHTREC_REG_UNLOAD;
2623
2624 if (c.m.rs && !lightrec_reg_is_loaded(reg_cache, c.m.rs)) {
2625 /* The source register is not yet loaded - we can load its value
2626 * from the register cache directly into the target register. */
2627 rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT);
2628
2629 jit_ldxi_i(rd, LIGHTREC_REG_STATE,
2630 offsetof(struct lightrec_state, regs.gpr) + (c.m.rs << 2));
2631
2632 lightrec_free_reg(reg_cache, rd);
2633 } else if (unload_rd) {
2634 /* If the destination register will be unloaded right after the
2635 * MOV meta-opcode, we don't actually need to write any host
2636 * register - we can just store the source register directly to
2637 * the register cache, at the offset corresponding to the
2638 * destination register. */
2639 lightrec_discard_reg_if_loaded(reg_cache, c.m.rd);
2640
2641 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2642
2643 jit_stxi_i(offsetof(struct lightrec_state, regs.gpr)
2644 + (c.m.rd << 2), LIGHTREC_REG_STATE, rs);
2645
2646 lightrec_free_reg(reg_cache, rs);
2647 } else {
2648 if (c.m.rs)
2649 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2650
2651 rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT);
2652
2653 if (c.m.rs == 0) {
2654 jit_movi(rd, 0);
2655 } else {
2656 jit_extr_i(rd, rs);
2657 lightrec_free_reg(reg_cache, rs);
2658 }
2659
2660 lightrec_free_reg(reg_cache, rd);
2661 }
2662}
2663
2664static void rec_meta_EXTC_EXTS(struct lightrec_cstate *state,
2665 const struct block *block,
2666 u16 offset)
2667{
2668 struct regcache *reg_cache = state->reg_cache;
2669 union code c = block->opcode_list[offset].c;
2670 jit_state_t *_jit = block->_jit;
2671 u8 rs, rd;
2672
2673 _jit_name(block->_jit, __func__);
2674 jit_note(__FILE__, __LINE__);
2675
2676 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
2677 c.m.rs, c.m.rd, 0, REG_EXT, &rs, &rd);
2678
2679 if (c.m.op == OP_META_EXTC)
2680 jit_extr_c(rd, rs);
2681 else
2682 jit_extr_s(rd, rs);
2683
2684 lightrec_free_reg(reg_cache, rs);
2685 lightrec_free_reg(reg_cache, rd);
2686}
2687
2688static void rec_meta_MULT2(struct lightrec_cstate *state,
2689 const struct block *block,
2690 u16 offset)
2691{
2692 struct regcache *reg_cache = state->reg_cache;
2693 union code c = block->opcode_list[offset].c;
2694 jit_state_t *_jit = block->_jit;
2695 u8 reg_lo = get_mult_div_lo(c);
2696 u8 reg_hi = get_mult_div_hi(c);
2697 u32 flags = block->opcode_list[offset].flags;
2698 bool is_signed = c.i.op == OP_META_MULT2;
2699 u8 rs, lo, hi, rflags = 0, hiflags = 0;
2700 unsigned int i;
2701
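/* META_MULT2/MULTU2 (generated by the optimizer for multiplications
 * by a power of two): c.r.op holds the shift amount, LO receives the
 * low 32 bits of the result and HI the high 32 bits. */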
2702 if (!op_flag_no_hi(flags) && c.r.op < 32) {
2703 rflags = is_signed ? REG_EXT : REG_ZEXT;
2704 hiflags = is_signed ? REG_EXT : (REG_EXT | REG_ZEXT);
2705 }
2706
2707 _jit_name(block->_jit, __func__);
2708 jit_note(__FILE__, __LINE__);
2709
2710 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, rflags);
2711
2712 /*
2713 * We must handle the case where one of the output registers is our rs
2714 * input register. Thankfully, computing LO/HI can be done in any
2715 * order. Here, we make sure that the computation that overwrites the
2716 * input register is always performed last.
2717 */
2718 for (i = 0; i < 2; i++) {
2719 if ((!i ^ (reg_lo == c.i.rs)) && !op_flag_no_lo(flags)) {
2720 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
2721
2722 if (c.r.op < 32)
2723 jit_lshi(lo, rs, c.r.op);
2724 else
2725 jit_movi(lo, 0);
2726
2727 lightrec_free_reg(reg_cache, lo);
2728 continue;
2729 }
2730
2731 if ((!!i ^ (reg_lo == c.i.rs)) && !op_flag_no_hi(flags)) {
2732 hi = lightrec_alloc_reg_out(reg_cache, _jit,
2733 reg_hi, hiflags);
2734
2735 if (c.r.op >= 32) {
2736 jit_lshi(hi, rs, c.r.op - 32);
2737 } else if (is_signed) {
2738 if (c.r.op)
2739 jit_rshi(hi, rs, 32 - c.r.op);
2740 else
2741 jit_rshi(hi, rs, 31);
2742 } else {
2743 if (c.r.op)
2744 jit_rshi_u(hi, rs, 32 - c.r.op);
2745 else
2746 jit_movi(hi, 0);
2747 }
2748
2749 lightrec_free_reg(reg_cache, hi);
2750 }
2751 }
2752
2753 lightrec_free_reg(reg_cache, rs);
2754
2755 _jit_name(block->_jit, __func__);
2756 jit_note(__FILE__, __LINE__);
2757}
2758
2759static void rec_meta_COM(struct lightrec_cstate *state,
2760 const struct block *block, u16 offset)
2761{
2762 struct regcache *reg_cache = state->reg_cache;
2763 union code c = block->opcode_list[offset].c;
2764 jit_state_t *_jit = block->_jit;
2765 u8 rd, rs, flags;
2766
2767 jit_note(__FILE__, __LINE__);
2768
2769 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
2770 c.m.rs, c.m.rd, 0, 0, &rs, &rd);
2771
2772 flags = lightrec_get_reg_in_flags(reg_cache, rs);
2773
2774 lightrec_set_reg_out_flags(reg_cache, rd,
2775 flags & REG_EXT);
2776
2777 jit_comr(rd, rs);
2778
2779 lightrec_free_reg(reg_cache, rs);
2780 lightrec_free_reg(reg_cache, rd);
2781}
2782
2783static void unknown_opcode(struct lightrec_cstate *state,
2784 const struct block *block, u16 offset)
2785{
2786 rec_exit_early(state, block, offset, LIGHTREC_EXIT_UNKNOWN_OP,
2787 block->pc + (offset << 2));
2788}
2789
2790static const lightrec_rec_func_t rec_standard[64] = {
2791 SET_DEFAULT_ELM(rec_standard, unknown_opcode),
2792 [OP_SPECIAL] = rec_SPECIAL,
2793 [OP_REGIMM] = rec_REGIMM,
2794 [OP_J] = rec_J,
2795 [OP_JAL] = rec_JAL,
2796 [OP_BEQ] = rec_BEQ,
2797 [OP_BNE] = rec_BNE,
2798 [OP_BLEZ] = rec_BLEZ,
2799 [OP_BGTZ] = rec_BGTZ,
2800 [OP_ADDI] = rec_ADDI,
2801 [OP_ADDIU] = rec_ADDIU,
2802 [OP_SLTI] = rec_SLTI,
2803 [OP_SLTIU] = rec_SLTIU,
2804 [OP_ANDI] = rec_ANDI,
2805 [OP_ORI] = rec_ORI,
2806 [OP_XORI] = rec_XORI,
2807 [OP_LUI] = rec_LUI,
2808 [OP_CP0] = rec_CP0,
2809 [OP_CP2] = rec_CP2,
2810 [OP_LB] = rec_LB,
2811 [OP_LH] = rec_LH,
2812 [OP_LWL] = rec_LWL,
2813 [OP_LW] = rec_LW,
2814 [OP_LBU] = rec_LBU,
2815 [OP_LHU] = rec_LHU,
2816 [OP_LWR] = rec_LWR,
2817 [OP_SB] = rec_SB,
2818 [OP_SH] = rec_SH,
2819 [OP_SWL] = rec_SWL,
2820 [OP_SW] = rec_SW,
2821 [OP_SWR] = rec_SWR,
2822 [OP_LWC2] = rec_LW,
2823 [OP_SWC2] = rec_SW,
2824
2825 [OP_META] = rec_META,
2826 [OP_META_MULT2] = rec_meta_MULT2,
2827 [OP_META_MULTU2] = rec_meta_MULT2,
2828};
2829
2830static const lightrec_rec_func_t rec_special[64] = {
2831 SET_DEFAULT_ELM(rec_special, unknown_opcode),
2832 [OP_SPECIAL_SLL] = rec_special_SLL,
2833 [OP_SPECIAL_SRL] = rec_special_SRL,
2834 [OP_SPECIAL_SRA] = rec_special_SRA,
2835 [OP_SPECIAL_SLLV] = rec_special_SLLV,
2836 [OP_SPECIAL_SRLV] = rec_special_SRLV,
2837 [OP_SPECIAL_SRAV] = rec_special_SRAV,
2838 [OP_SPECIAL_JR] = rec_special_JR,
2839 [OP_SPECIAL_JALR] = rec_special_JALR,
2840 [OP_SPECIAL_SYSCALL] = rec_special_SYSCALL,
2841 [OP_SPECIAL_BREAK] = rec_special_BREAK,
2842 [OP_SPECIAL_MFHI] = rec_special_MFHI,
2843 [OP_SPECIAL_MTHI] = rec_special_MTHI,
2844 [OP_SPECIAL_MFLO] = rec_special_MFLO,
2845 [OP_SPECIAL_MTLO] = rec_special_MTLO,
2846 [OP_SPECIAL_MULT] = rec_special_MULT,
2847 [OP_SPECIAL_MULTU] = rec_special_MULTU,
2848 [OP_SPECIAL_DIV] = rec_special_DIV,
2849 [OP_SPECIAL_DIVU] = rec_special_DIVU,
2850 [OP_SPECIAL_ADD] = rec_special_ADD,
2851 [OP_SPECIAL_ADDU] = rec_special_ADDU,
2852 [OP_SPECIAL_SUB] = rec_special_SUB,
2853 [OP_SPECIAL_SUBU] = rec_special_SUBU,
2854 [OP_SPECIAL_AND] = rec_special_AND,
2855 [OP_SPECIAL_OR] = rec_special_OR,
2856 [OP_SPECIAL_XOR] = rec_special_XOR,
2857 [OP_SPECIAL_NOR] = rec_special_NOR,
2858 [OP_SPECIAL_SLT] = rec_special_SLT,
2859 [OP_SPECIAL_SLTU] = rec_special_SLTU,
2860};
2861
2862static const lightrec_rec_func_t rec_regimm[64] = {
2863 SET_DEFAULT_ELM(rec_regimm, unknown_opcode),
2864 [OP_REGIMM_BLTZ] = rec_regimm_BLTZ,
2865 [OP_REGIMM_BGEZ] = rec_regimm_BGEZ,
2866 [OP_REGIMM_BLTZAL] = rec_regimm_BLTZAL,
2867 [OP_REGIMM_BGEZAL] = rec_regimm_BGEZAL,
2868};
2869
2870static const lightrec_rec_func_t rec_cp0[64] = {
2871 SET_DEFAULT_ELM(rec_cp0, rec_CP),
2872 [OP_CP0_MFC0] = rec_cp0_MFC0,
2873 [OP_CP0_CFC0] = rec_cp0_CFC0,
2874 [OP_CP0_MTC0] = rec_cp0_MTC0,
2875 [OP_CP0_CTC0] = rec_cp0_CTC0,
2876 [OP_CP0_RFE] = rec_cp0_RFE,
2877};
2878
2879static const lightrec_rec_func_t rec_cp2_basic[64] = {
2880 SET_DEFAULT_ELM(rec_cp2_basic, rec_CP),
2881 [OP_CP2_BASIC_MFC2] = rec_cp2_basic_MFC2,
2882 [OP_CP2_BASIC_CFC2] = rec_cp2_basic_CFC2,
2883 [OP_CP2_BASIC_MTC2] = rec_cp2_basic_MTC2,
2884 [OP_CP2_BASIC_CTC2] = rec_cp2_basic_CTC2,
2885};
2886
2887static const lightrec_rec_func_t rec_meta[64] = {
2888 SET_DEFAULT_ELM(rec_meta, unknown_opcode),
2889 [OP_META_MOV] = rec_meta_MOV,
2890 [OP_META_EXTC] = rec_meta_EXTC_EXTS,
2891 [OP_META_EXTS] = rec_meta_EXTC_EXTS,
2892 [OP_META_COM] = rec_meta_COM,
2893};
2894
2895static void rec_SPECIAL(struct lightrec_cstate *state,
2896 const struct block *block, u16 offset)
2897{
2898 union code c = block->opcode_list[offset].c;
2899 lightrec_rec_func_t f = rec_special[c.r.op];
2900
2901 if (!HAS_DEFAULT_ELM && unlikely(!f))
2902 unknown_opcode(state, block, offset);
2903 else
2904 (*f)(state, block, offset);
2905}
2906
2907static void rec_REGIMM(struct lightrec_cstate *state,
2908 const struct block *block, u16 offset)
2909{
2910 union code c = block->opcode_list[offset].c;
2911 lightrec_rec_func_t f = rec_regimm[c.r.rt];
2912
2913 if (!HAS_DEFAULT_ELM && unlikely(!f))
2914 unknown_opcode(state, block, offset);
2915 else
2916 (*f)(state, block, offset);
2917}
2918
2919static void rec_CP0(struct lightrec_cstate *state,
2920 const struct block *block, u16 offset)
2921{
2922 union code c = block->opcode_list[offset].c;
2923 lightrec_rec_func_t f = rec_cp0[c.r.rs];
2924
2925 if (!HAS_DEFAULT_ELM && unlikely(!f))
2926 rec_CP(state, block, offset);
2927 else
2928 (*f)(state, block, offset);
2929}
2930
2931static void rec_CP2(struct lightrec_cstate *state,
2932 const struct block *block, u16 offset)
2933{
2934 union code c = block->opcode_list[offset].c;
2935
2936 if (c.r.op == OP_CP2_BASIC) {
2937 lightrec_rec_func_t f = rec_cp2_basic[c.r.rs];
2938
2939 if (HAS_DEFAULT_ELM || likely(f)) {
2940 (*f)(state, block, offset);
2941 return;
2942 }
2943 }
2944
2945 rec_CP(state, block, offset);
2946}
2947
2948static void rec_META(struct lightrec_cstate *state,
2949 const struct block *block, u16 offset)
2950{
2951 union code c = block->opcode_list[offset].c;
2952 lightrec_rec_func_t f = rec_meta[c.m.op];
2953
2954 if (!HAS_DEFAULT_ELM && unlikely(!f))
2955 unknown_opcode(state, block, offset);
2956 else
2957 (*f)(state, block, offset);
2958}
2959
2960void lightrec_rec_opcode(struct lightrec_cstate *state,
2961 const struct block *block, u16 offset)
2962{
2963 struct regcache *reg_cache = state->reg_cache;
2964 struct lightrec_branch_target *target;
2965 const struct opcode *op = &block->opcode_list[offset];
2966 jit_state_t *_jit = block->_jit;
2967 lightrec_rec_func_t f;
2968 u16 unload_offset;
2969
2970 if (op_flag_sync(op->flags)) {
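/* Sync point: flush the cycle counter and the register cache, and
 * register an indirect branch target so local branches can jump
 * here. */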
2971 if (state->cycles)
2972 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
2973 state->cycles = 0;
2974
2975 lightrec_storeback_regs(reg_cache, _jit);
2976 lightrec_regcache_reset(reg_cache);
2977
2978 pr_debug("Adding branch target at offset 0x%x\n", offset << 2);
2979 target = &state->targets[state->nb_targets++];
2980 target->offset = offset;
2981 target->label = jit_indirect();
2982 }
2983
2984 if (likely(op->opcode)) {
2985 f = rec_standard[op->i.op];
2986
2987 if (!HAS_DEFAULT_ELM && unlikely(!f))
2988 unknown_opcode(state, block, offset);
2989 else
2990 (*f)(state, block, offset);
2991 }
2992
2993 if (OPT_EARLY_UNLOAD) {
2994 unload_offset = offset +
2995 (has_delay_slot(op->c) && !op_flag_no_ds(op->flags));
2996
2997 lightrec_do_early_unload(state, block, unload_offset);
2998 }
2999
3000 state->no_load_delay = false;
3001}