// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
 */

#include "blockcache.h"
#include "disassembler.h"
#include "lightning-wrapper.h"
#include "optimizer.h"

#define LIGHTNING_UNALIGNED_32BIT 4

typedef void (*lightrec_rec_func_t)(struct lightrec_cstate *, const struct block *, u16);
/* Forward declarations */
static void rec_SPECIAL(struct lightrec_cstate *state, const struct block *block, u16 offset);
static void rec_REGIMM(struct lightrec_cstate *state, const struct block *block, u16 offset);
static void rec_CP0(struct lightrec_cstate *state, const struct block *block, u16 offset);
static void rec_CP2(struct lightrec_cstate *state, const struct block *block, u16 offset);
static void rec_META(struct lightrec_cstate *state, const struct block *block, u16 offset);
static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
const struct block *block, u16 offset, u8 reg, u8 in_reg);
static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
const struct block *block, u16 offset,
u8 reg, u8 out_reg);
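
/*
 * Every rec_* handler below shares the same signature (see the
 * lightrec_rec_func_t typedef above): it receives the compilation state,
 * the block being recompiled, and the offset (in opcodes) of the current
 * instruction inside block->opcode_list. Handlers emit GNU Lightning nodes
 * into block->_jit.
 *
 * The helpers that follow emit the absolute jumps back to the dispatcher:
 * either the regular end-of-block wrapper, or the delay-slot-check entry
 * point used when a load delay must be resolved by the dispatcher.
 */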
34 lightrec_jump_to_fn(jit_state_t *_jit, void (*fn)(void))
36 /* Prevent jit_jmpi() from using our cycles register as a temporary */
37 jit_live(LIGHTREC_REG_CYCLE);
39 jit_patch_abs(jit_jmpi(), fn);
43 lightrec_jump_to_eob(struct lightrec_cstate *state, jit_state_t *_jit)
45 lightrec_jump_to_fn(_jit, state->state->eob_wrapper_func);
49 lightrec_jump_to_ds_check(struct lightrec_cstate *state, jit_state_t *_jit)
51 lightrec_jump_to_fn(_jit, state->state->ds_check_func);
54 static void update_ra_register(struct regcache *reg_cache, jit_state_t *_jit,
55 u8 ra_reg, u32 pc, u32 link)
59 link_reg = lightrec_alloc_reg_out(reg_cache, _jit, ra_reg, 0);
60 lightrec_load_imm(reg_cache, _jit, link_reg, pc, link);
61 lightrec_free_reg(reg_cache, link_reg);
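
/*
 * lightrec_emit_end_of_block() terminates the current block: it writes the
 * link (return address) register if requested, loads the target PC (either
 * the "imm" constant or the runtime value of reg_new_pc), recompiles the
 * branch's delay slot when needed, flushes dirty guest registers, subtracts
 * the accumulated cycle count, and finally jumps back to the dispatcher
 * (or to its delay-slot-check entry point when the delay slot is a load).
 */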
64 static void lightrec_emit_end_of_block(struct lightrec_cstate *state,
65 const struct block *block, u16 offset,
66 s8 reg_new_pc, u32 imm, u8 ra_reg,
67 u32 link, bool update_cycles)
69 struct regcache *reg_cache = state->reg_cache;
70 jit_state_t *_jit = block->_jit;
71 const struct opcode *op = &block->opcode_list[offset],
72 *ds = get_delay_slot(block->opcode_list, offset);
73 u32 cycles = state->cycles + lightrec_cycles_of_opcode(state->state, op->c);
74 bool has_ds = has_delay_slot(op->c);
76 jit_note(__FILE__, __LINE__);
78 if (link && ra_reg != reg_new_pc)
79 update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
82 lightrec_load_next_pc_imm(reg_cache, _jit, block->pc, imm);
84 lightrec_load_next_pc(reg_cache, _jit, reg_new_pc);
86 if (link && ra_reg == reg_new_pc) {
/* Handle the special case: JALR $r0, $r0
 * In that case the target PC should be the old value of the
 * register. */
update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
93 if (has_ds && !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) {
94 cycles += lightrec_cycles_of_opcode(state->state, ds->c);
96 /* Recompile the delay slot */
98 lightrec_rec_opcode(state, block, offset + 1);
101 /* Clean the remaining registers */
102 lightrec_clean_regs(reg_cache, _jit);
104 if (cycles && update_cycles) {
105 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
106 pr_debug("EOB: %u cycles\n", cycles);
109 if (has_ds && op_flag_load_delay(ds->flags)
110 && opcode_is_load(ds->c) && !state->no_load_delay) {
111 /* If the delay slot is a load opcode, its target register
112 * will be written after the first opcode of the target is
113 * executed. Handle this by jumping to a special section of
114 * the dispatcher. It expects the loaded value to be in
115 * REG_TEMP, and the target register number to be in JIT_V1.*/
116 jit_movi(JIT_V1, ds->c.i.rt);
118 lightrec_jump_to_ds_check(state, _jit);
120 lightrec_jump_to_eob(state, _jit);
123 lightrec_regcache_reset(reg_cache);
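
/*
 * Fallback path: bail out to the C interpreter for opcodes or blocks that
 * cannot be recompiled. As described in the comment below, the interpreter
 * entry point expects the block pointer in JIT_V1 and the PC of the
 * offending opcode in JIT_V0.
 */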
126 void lightrec_emit_jump_to_interpreter(struct lightrec_cstate *state,
127 const struct block *block, u16 offset)
129 struct regcache *reg_cache = state->reg_cache;
130 jit_state_t *_jit = block->_jit;
132 lightrec_clean_regs(reg_cache, _jit);
134 /* Call the interpreter with the block's address in JIT_V1 and the
135 * PC (which might have an offset) in JIT_V0. */
136 lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
137 block->pc + (offset << 2));
138 if (lightrec_store_next_pc()) {
139 jit_stxi_i(offsetof(struct lightrec_state, next_pc),
140 LIGHTREC_REG_STATE, JIT_V0);
143 jit_movi(JIT_V1, (uintptr_t)block);
145 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
146 lightrec_jump_to_fn(_jit, state->state->interpreter_func);
149 static void lightrec_emit_eob(struct lightrec_cstate *state,
150 const struct block *block, u16 offset)
152 struct regcache *reg_cache = state->reg_cache;
153 jit_state_t *_jit = block->_jit;
155 lightrec_clean_regs(reg_cache, _jit);
157 lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
158 block->pc + (offset << 2));
159 if (lightrec_store_next_pc()) {
160 jit_stxi_i(offsetof(struct lightrec_state, next_pc),
161 LIGHTREC_REG_STATE, JIT_V0);
164 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
166 lightrec_jump_to_eob(state, _jit);
169 static void rec_special_JR(struct lightrec_cstate *state, const struct block *block, u16 offset)
171 union code c = block->opcode_list[offset].c;
173 _jit_name(block->_jit, __func__);
174 lightrec_emit_end_of_block(state, block, offset, c.r.rs, 0, 31, 0, true);
177 static void rec_special_JALR(struct lightrec_cstate *state, const struct block *block, u16 offset)
179 union code c = block->opcode_list[offset].c;
181 _jit_name(block->_jit, __func__);
182 lightrec_emit_end_of_block(state, block, offset, c.r.rs, 0, c.r.rd,
183 get_branch_pc(block, offset, 2), true);
186 static void rec_J(struct lightrec_cstate *state, const struct block *block, u16 offset)
188 union code c = block->opcode_list[offset].c;
190 _jit_name(block->_jit, __func__);
lightrec_emit_end_of_block(state, block, offset, -1,
(block->pc & 0xf0000000) | (c.j.imm << 2),
31, 0, true);
196 static void rec_JAL(struct lightrec_cstate *state, const struct block *block, u16 offset)
198 union code c = block->opcode_list[offset].c;
200 _jit_name(block->_jit, __func__);
201 lightrec_emit_end_of_block(state, block, offset, -1,
202 (block->pc & 0xf0000000) | (c.j.imm << 2),
203 31, get_branch_pc(block, offset, 2), true);
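
/*
 * With OPT_EARLY_UNLOAD, the optimizer (optimizer.h) is expected to tag each
 * operand of an opcode with LIGHTREC_REG_UNLOAD, DISCARD or CLEAN, so that
 * the host register holding a guest register can be written back or dropped
 * as soon as the guest register is known to be dead, instead of staying
 * mapped until the end of the block.
 */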
206 static void lightrec_do_early_unload(struct lightrec_cstate *state,
207 const struct block *block, u16 offset)
209 struct regcache *reg_cache = state->reg_cache;
210 const struct opcode *op = &block->opcode_list[offset];
211 jit_state_t *_jit = block->_jit;
217 { op->r.rd, LIGHTREC_FLAGS_GET_RD(op->flags), },
218 { op->i.rt, LIGHTREC_FLAGS_GET_RT(op->flags), },
219 { op->i.rs, LIGHTREC_FLAGS_GET_RS(op->flags), },
222 for (i = 0; i < ARRAY_SIZE(reg_ops); i++) {
223 reg = reg_ops[i].reg;
225 switch (reg_ops[i].op) {
226 case LIGHTREC_REG_UNLOAD:
227 lightrec_clean_reg_if_loaded(reg_cache, _jit, reg, true);
230 case LIGHTREC_REG_DISCARD:
231 lightrec_discard_reg_if_loaded(reg_cache, reg);
234 case LIGHTREC_REG_CLEAN:
235 lightrec_clean_reg_if_loaded(reg_cache, _jit, reg, false);
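
/*
 * rec_b() is the common helper for all conditional branches. Note that
 * "code" is the inverted condition: it is used to branch over the taken
 * path, so the not-taken case simply falls through. "code2" is the original
 * condition, used when a local (same-block) branch can jump straight to its
 * target without going through the dispatcher. "bz" means the branch
 * compares a single register against zero, "link" is the return address to
 * write (0 for none), and "unconditional" skips the test entirely.
 * For example, rec_BNE passes jit_code_beqi/beqr as "code", so the inline
 * taken path is skipped exactly when the two operands are equal.
 */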
243 static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 offset,
244 jit_code_t code, jit_code_t code2, u32 link, bool unconditional, bool bz)
246 struct regcache *reg_cache = state->reg_cache;
247 struct native_register *regs_backup;
248 jit_state_t *_jit = block->_jit;
249 struct lightrec_branch *branch;
250 const struct opcode *op = &block->opcode_list[offset],
251 *ds = get_delay_slot(block->opcode_list, offset);
253 bool is_forward = (s16)op->i.imm >= 0;
254 int op_cycles = lightrec_cycles_of_opcode(state->state, op->c);
255 u32 target_offset, cycles = state->cycles + op_cycles;
256 bool no_indirection = false;
260 jit_note(__FILE__, __LINE__);
262 if (!op_flag_no_ds(op->flags))
263 cycles += lightrec_cycles_of_opcode(state->state, ds->c);
265 state->cycles = -op_cycles;
267 if (!unconditional) {
268 rs = lightrec_alloc_reg_in(reg_cache, _jit, op->i.rs, REG_EXT);
269 rt = bz ? 0 : lightrec_alloc_reg_in(reg_cache,
270 _jit, op->i.rt, REG_EXT);
272 /* Unload dead registers before evaluating the branch */
273 if (OPT_EARLY_UNLOAD)
274 lightrec_do_early_unload(state, block, offset);
276 if (op_flag_local_branch(op->flags) &&
277 (op_flag_no_ds(op->flags) || !ds->opcode) &&
278 is_forward && !lightrec_has_dirty_regs(reg_cache))
279 no_indirection = true;
282 pr_debug("Using no indirection for branch at offset 0x%hx\n", offset << 2);
286 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
288 if (!unconditional) {
289 /* Generate the branch opcode */
291 addr = jit_new_node_pww(code, NULL, rs, rt);
293 lightrec_free_regs(reg_cache);
294 regs_backup = lightrec_regcache_enter_branch(reg_cache);
297 if (op_flag_local_branch(op->flags)) {
298 /* Recompile the delay slot */
299 if (!op_flag_no_ds(op->flags) && ds->opcode) {
300 /* Never handle load delays with local branches. */
301 state->no_load_delay = true;
302 lightrec_rec_opcode(state, block, offset + 1);
306 update_ra_register(reg_cache, _jit, 31, block->pc, link);
308 /* Clean remaining registers */
309 lightrec_clean_regs(reg_cache, _jit);
311 target_offset = offset + 1 + (s16)op->i.imm
312 - !!op_flag_no_ds(op->flags);
pr_debug("Adding local branch to offset 0x%x\n",
target_offset << 2);
branch = &state->local_branches[
state->nb_local_branches++];
318 branch->target = target_offset;
321 branch->branch = jit_new_node_pww(code2, NULL, rs, rt);
323 branch->branch = jit_b();
325 branch->branch = jit_bgti(LIGHTREC_REG_CYCLE, 0);
328 if (!op_flag_local_branch(op->flags) || !is_forward) {
329 next_pc = get_branch_pc(block, offset, 1 + (s16)op->i.imm);
330 state->no_load_delay = op_flag_local_branch(op->flags);
331 lightrec_emit_end_of_block(state, block, offset, -1, next_pc,
335 if (!unconditional) {
339 lightrec_regcache_leave_branch(reg_cache, regs_backup);
342 update_ra_register(reg_cache, _jit, 31, block->pc, link);
344 if (!op_flag_no_ds(op->flags) && ds->opcode) {
345 state->no_load_delay = true;
346 lightrec_rec_opcode(state, block, offset + 1);
351 static void rec_BNE(struct lightrec_cstate *state,
352 const struct block *block, u16 offset)
354 union code c = block->opcode_list[offset].c;
356 _jit_name(block->_jit, __func__);
if (c.i.rt == 0)
rec_b(state, block, offset, jit_code_beqi, jit_code_bnei, 0, false, true);
else
rec_b(state, block, offset, jit_code_beqr, jit_code_bner, 0, false, false);
364 static void rec_BEQ(struct lightrec_cstate *state,
365 const struct block *block, u16 offset)
367 union code c = block->opcode_list[offset].c;
369 _jit_name(block->_jit, __func__);
if (c.i.rt == 0)
rec_b(state, block, offset, jit_code_bnei, jit_code_beqi, 0, c.i.rs == 0, true);
else
rec_b(state, block, offset, jit_code_bner, jit_code_beqr, 0, c.i.rs == c.i.rt, false);
377 static void rec_BLEZ(struct lightrec_cstate *state,
378 const struct block *block, u16 offset)
380 union code c = block->opcode_list[offset].c;
382 _jit_name(block->_jit, __func__);
383 rec_b(state, block, offset, jit_code_bgti, jit_code_blei, 0, c.i.rs == 0, true);
386 static void rec_BGTZ(struct lightrec_cstate *state,
387 const struct block *block, u16 offset)
389 _jit_name(block->_jit, __func__);
390 rec_b(state, block, offset, jit_code_blei, jit_code_bgti, 0, false, true);
393 static void rec_regimm_BLTZ(struct lightrec_cstate *state,
394 const struct block *block, u16 offset)
396 _jit_name(block->_jit, __func__);
397 rec_b(state, block, offset, jit_code_bgei, jit_code_blti, 0, false, true);
400 static void rec_regimm_BLTZAL(struct lightrec_cstate *state,
401 const struct block *block, u16 offset)
403 _jit_name(block->_jit, __func__);
404 rec_b(state, block, offset, jit_code_bgei, jit_code_blti,
405 get_branch_pc(block, offset, 2), false, true);
408 static void rec_regimm_BGEZ(struct lightrec_cstate *state,
409 const struct block *block, u16 offset)
411 union code c = block->opcode_list[offset].c;
413 _jit_name(block->_jit, __func__);
414 rec_b(state, block, offset, jit_code_blti, jit_code_bgei, 0, !c.i.rs, true);
417 static void rec_regimm_BGEZAL(struct lightrec_cstate *state,
418 const struct block *block, u16 offset)
420 const struct opcode *op = &block->opcode_list[offset];
421 _jit_name(block->_jit, __func__);
rec_b(state, block, offset, jit_code_blti, jit_code_bgei,
get_branch_pc(block, offset, 2),
!op->i.rs, true);
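
/*
 * rec_alloc_rs_rd() allocates the input (rs) and output (rd) registers for
 * a two-operand opcode. When early unload is enabled and rs dies with this
 * opcode, the host register that already holds rs is simply remapped to rd,
 * instead of allocating (and possibly spilling) a second host register.
 */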
static void rec_alloc_rs_rd(struct regcache *reg_cache,
jit_state_t *_jit,
const struct opcode *op,
u8 rs, u8 rd,
u8 in_flags, u8 out_flags,
u8 *rs_out, u8 *rd_out)
434 bool unload, discard;
437 if (OPT_EARLY_UNLOAD) {
438 unload_flags = LIGHTREC_FLAGS_GET_RS(op->flags);
439 unload = unload_flags == LIGHTREC_REG_UNLOAD;
440 discard = unload_flags == LIGHTREC_REG_DISCARD;
443 if (OPT_EARLY_UNLOAD && rs && rd != rs && (unload || discard)) {
444 rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags);
445 lightrec_remap_reg(reg_cache, _jit, rs, rd, discard);
446 lightrec_set_reg_out_flags(reg_cache, rs, out_flags);
449 rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags);
450 rd = lightrec_alloc_reg_out(reg_cache, _jit, rd, out_flags);
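
/*
 * The REG_EXT / REG_ZEXT flags track whether the 32-bit guest value held in
 * a host register is known to be sign-extended (EXT) or zero-extended
 * (ZEXT) to the full host word, which lets the emitter skip redundant
 * extensions on 64-bit hosts. The comments in rec_special_AND/OR/XOR below
 * use E() and Z() as a shorthand for these two properties.
 */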
457 static void rec_alu_imm(struct lightrec_cstate *state, const struct block *block,
458 u16 offset, jit_code_t code, bool slti)
460 struct regcache *reg_cache = state->reg_cache;
461 union code c = block->opcode_list[offset].c;
462 jit_state_t *_jit = block->_jit;
463 u8 rs, rt, out_flags = REG_EXT;
if (slti)
out_flags |= REG_ZEXT;
468 jit_note(__FILE__, __LINE__);
470 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
471 c.i.rs, c.i.rt, REG_EXT, out_flags, &rs, &rt);
473 jit_new_node_www(code, rt, rs, (s32)(s16) c.i.imm);
475 lightrec_free_reg(reg_cache, rs);
476 lightrec_free_reg(reg_cache, rt);
479 static void rec_alu_special(struct lightrec_cstate *state, const struct block *block,
480 u16 offset, jit_code_t code, bool out_ext)
482 struct regcache *reg_cache = state->reg_cache;
483 union code c = block->opcode_list[offset].c;
484 jit_state_t *_jit = block->_jit;
487 jit_note(__FILE__, __LINE__);
489 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, REG_EXT);
490 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
491 c.r.rs, c.r.rd, REG_EXT,
492 out_ext ? REG_EXT | REG_ZEXT : 0, &rs, &rd);
494 jit_new_node_www(code, rd, rs, rt);
496 lightrec_free_reg(reg_cache, rs);
497 lightrec_free_reg(reg_cache, rt);
498 lightrec_free_reg(reg_cache, rd);
501 static void rec_alu_shiftv(struct lightrec_cstate *state, const struct block *block,
502 u16 offset, jit_code_t code)
504 struct regcache *reg_cache = state->reg_cache;
505 union code c = block->opcode_list[offset].c;
506 jit_state_t *_jit = block->_jit;
507 u8 rd, rt, rs, temp, flags = 0;
509 jit_note(__FILE__, __LINE__);
if (code == jit_code_rshr)
flags = REG_EXT;
else if (code == jit_code_rshr_u)
flags = REG_ZEXT;
516 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0);
517 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
518 c.r.rt, c.r.rd, flags, flags, &rt, &rd);
521 jit_andi(rd, rs, 0x1f);
522 jit_new_node_www(code, rd, rt, rd);
524 temp = lightrec_alloc_reg_temp(reg_cache, _jit);
525 jit_andi(temp, rs, 0x1f);
526 jit_new_node_www(code, rd, rt, temp);
527 lightrec_free_reg(reg_cache, temp);
530 lightrec_free_reg(reg_cache, rs);
531 lightrec_free_reg(reg_cache, rt);
532 lightrec_free_reg(reg_cache, rd);
535 static void rec_movi(struct lightrec_cstate *state,
536 const struct block *block, u16 offset)
538 struct regcache *reg_cache = state->reg_cache;
539 union code c = block->opcode_list[offset].c;
540 jit_state_t *_jit = block->_jit;
542 s32 value = (s32)(s16) c.i.imm;
545 if (block->opcode_list[offset].flags & LIGHTREC_MOVI)
546 value += (s32)((u32)state->movi_temp[c.i.rt] << 16);
551 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
555 lightrec_free_reg(reg_cache, rt);
558 static void rec_ADDIU(struct lightrec_cstate *state,
559 const struct block *block, u16 offset)
561 const struct opcode *op = &block->opcode_list[offset];
563 _jit_name(block->_jit, __func__);
565 if (op->i.rs && !(op->flags & LIGHTREC_MOVI))
566 rec_alu_imm(state, block, offset, jit_code_addi, false);
568 rec_movi(state, block, offset);
571 static void rec_ADDI(struct lightrec_cstate *state,
572 const struct block *block, u16 offset)
574 /* TODO: Handle the exception? */
575 _jit_name(block->_jit, __func__);
576 rec_ADDIU(state, block, offset);
579 static void rec_SLTIU(struct lightrec_cstate *state,
580 const struct block *block, u16 offset)
582 _jit_name(block->_jit, __func__);
583 rec_alu_imm(state, block, offset, jit_code_lti_u, true);
586 static void rec_SLTI(struct lightrec_cstate *state,
587 const struct block *block, u16 offset)
589 _jit_name(block->_jit, __func__);
590 rec_alu_imm(state, block, offset, jit_code_lti, true);
593 static void rec_ANDI(struct lightrec_cstate *state,
594 const struct block *block, u16 offset)
596 struct regcache *reg_cache = state->reg_cache;
597 union code c = block->opcode_list[offset].c;
598 jit_state_t *_jit = block->_jit;
601 _jit_name(block->_jit, __func__);
602 jit_note(__FILE__, __LINE__);
604 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
605 c.i.rs, c.i.rt, 0, REG_EXT | REG_ZEXT, &rs, &rt);
607 /* PSX code uses ANDI 0xff / ANDI 0xffff a lot, which are basically
608 * casts to uint8_t / uint16_t. */
611 else if (c.i.imm == 0xffff)
614 jit_andi(rt, rs, (u32)(u16) c.i.imm);
616 lightrec_free_reg(reg_cache, rs);
617 lightrec_free_reg(reg_cache, rt);
620 static void rec_alu_or_xor(struct lightrec_cstate *state, const struct block *block,
621 u16 offset, jit_code_t code)
623 struct regcache *reg_cache = state->reg_cache;
624 union code c = block->opcode_list[offset].c;
625 jit_state_t *_jit = block->_jit;
628 jit_note(__FILE__, __LINE__);
630 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
631 c.i.rs, c.i.rt, 0, 0, &rs, &rt);
633 flags = lightrec_get_reg_in_flags(reg_cache, rs);
634 lightrec_set_reg_out_flags(reg_cache, rt, flags);
636 jit_new_node_www(code, rt, rs, (u32)(u16) c.i.imm);
638 lightrec_free_reg(reg_cache, rs);
639 lightrec_free_reg(reg_cache, rt);
643 static void rec_ORI(struct lightrec_cstate *state,
644 const struct block *block, u16 offset)
646 const struct opcode *op = &block->opcode_list[offset];
647 struct regcache *reg_cache = state->reg_cache;
648 jit_state_t *_jit = block->_jit;
652 _jit_name(_jit, __func__);
654 if (op->flags & LIGHTREC_MOVI) {
655 rt = lightrec_alloc_reg_out(reg_cache, _jit, op->i.rt, REG_EXT);
657 val = ((u32)state->movi_temp[op->i.rt] << 16) | op->i.imm;
660 lightrec_free_reg(reg_cache, rt);
662 rec_alu_or_xor(state, block, offset, jit_code_ori);
666 static void rec_XORI(struct lightrec_cstate *state,
667 const struct block *block, u16 offset)
669 _jit_name(block->_jit, __func__);
670 rec_alu_or_xor(state, block, offset, jit_code_xori);
673 static void rec_LUI(struct lightrec_cstate *state,
674 const struct block *block, u16 offset)
676 struct regcache *reg_cache = state->reg_cache;
677 union code c = block->opcode_list[offset].c;
678 jit_state_t *_jit = block->_jit;
679 u8 rt, flags = REG_EXT;
681 if (block->opcode_list[offset].flags & LIGHTREC_MOVI) {
682 state->movi_temp[c.i.rt] = c.i.imm;
687 jit_note(__FILE__, __LINE__);
if (!(c.i.imm & BIT(15)))
flags |= REG_ZEXT;
692 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
694 jit_movi(rt, (s32)(c.i.imm << 16));
696 lightrec_free_reg(reg_cache, rt);
699 static void rec_special_ADDU(struct lightrec_cstate *state,
700 const struct block *block, u16 offset)
702 _jit_name(block->_jit, __func__);
703 rec_alu_special(state, block, offset, jit_code_addr, false);
706 static void rec_special_ADD(struct lightrec_cstate *state,
707 const struct block *block, u16 offset)
709 /* TODO: Handle the exception? */
710 _jit_name(block->_jit, __func__);
711 rec_alu_special(state, block, offset, jit_code_addr, false);
714 static void rec_special_SUBU(struct lightrec_cstate *state,
715 const struct block *block, u16 offset)
717 _jit_name(block->_jit, __func__);
718 rec_alu_special(state, block, offset, jit_code_subr, false);
721 static void rec_special_SUB(struct lightrec_cstate *state,
722 const struct block *block, u16 offset)
724 /* TODO: Handle the exception? */
725 _jit_name(block->_jit, __func__);
726 rec_alu_special(state, block, offset, jit_code_subr, false);
729 static void rec_special_AND(struct lightrec_cstate *state,
730 const struct block *block, u16 offset)
732 struct regcache *reg_cache = state->reg_cache;
733 union code c = block->opcode_list[offset].c;
734 jit_state_t *_jit = block->_jit;
735 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd;
737 _jit_name(block->_jit, __func__);
738 jit_note(__FILE__, __LINE__);
740 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
741 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
742 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
744 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
745 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
747 /* Z(rd) = Z(rs) | Z(rt) */
748 flags_rd = REG_ZEXT & (flags_rs | flags_rt);
750 /* E(rd) = (E(rt) & Z(rt)) | (E(rs) & Z(rs)) | (E(rs) & E(rt)) */
751 if (((flags_rs & REG_EXT) && (flags_rt & REG_ZEXT)) ||
752 ((flags_rt & REG_EXT) && (flags_rs & REG_ZEXT)) ||
753 (REG_EXT & flags_rs & flags_rt))
756 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
758 jit_andr(rd, rs, rt);
760 lightrec_free_reg(reg_cache, rs);
761 lightrec_free_reg(reg_cache, rt);
762 lightrec_free_reg(reg_cache, rd);
765 static void rec_special_or_nor(struct lightrec_cstate *state,
766 const struct block *block, u16 offset, bool nor)
768 struct regcache *reg_cache = state->reg_cache;
769 union code c = block->opcode_list[offset].c;
770 jit_state_t *_jit = block->_jit;
771 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd = 0;
773 jit_note(__FILE__, __LINE__);
775 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
776 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
777 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
779 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
780 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
/* or: Z(rd) = Z(rs) & Z(rt)
 * nor: Z(rd) = 0 */
if (!nor)
flags_rd = REG_ZEXT & flags_rs & flags_rt;

/* E(rd) = E(rs) & E(rt) */
if (REG_EXT & flags_rs & flags_rt)
flags_rd |= REG_EXT;

lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
798 lightrec_free_reg(reg_cache, rs);
799 lightrec_free_reg(reg_cache, rt);
800 lightrec_free_reg(reg_cache, rd);
803 static void rec_special_OR(struct lightrec_cstate *state,
804 const struct block *block, u16 offset)
806 _jit_name(block->_jit, __func__);
807 rec_special_or_nor(state, block, offset, false);
810 static void rec_special_NOR(struct lightrec_cstate *state,
811 const struct block *block, u16 offset)
813 _jit_name(block->_jit, __func__);
814 rec_special_or_nor(state, block, offset, true);
817 static void rec_special_XOR(struct lightrec_cstate *state,
818 const struct block *block, u16 offset)
820 struct regcache *reg_cache = state->reg_cache;
821 union code c = block->opcode_list[offset].c;
822 jit_state_t *_jit = block->_jit;
823 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd;
825 _jit_name(block->_jit, __func__);
827 jit_note(__FILE__, __LINE__);
829 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
830 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
831 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
833 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
834 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
836 /* Z(rd) = Z(rs) & Z(rt) */
837 flags_rd = REG_ZEXT & flags_rs & flags_rt;
839 /* E(rd) = E(rs) & E(rt) */
840 flags_rd |= REG_EXT & flags_rs & flags_rt;
842 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
844 jit_xorr(rd, rs, rt);
846 lightrec_free_reg(reg_cache, rs);
847 lightrec_free_reg(reg_cache, rt);
848 lightrec_free_reg(reg_cache, rd);
851 static void rec_special_SLTU(struct lightrec_cstate *state,
852 const struct block *block, u16 offset)
854 _jit_name(block->_jit, __func__);
855 rec_alu_special(state, block, offset, jit_code_ltr_u, true);
858 static void rec_special_SLT(struct lightrec_cstate *state,
859 const struct block *block, u16 offset)
861 _jit_name(block->_jit, __func__);
862 rec_alu_special(state, block, offset, jit_code_ltr, true);
865 static void rec_special_SLLV(struct lightrec_cstate *state,
866 const struct block *block, u16 offset)
868 _jit_name(block->_jit, __func__);
869 rec_alu_shiftv(state, block, offset, jit_code_lshr);
872 static void rec_special_SRLV(struct lightrec_cstate *state,
873 const struct block *block, u16 offset)
875 _jit_name(block->_jit, __func__);
876 rec_alu_shiftv(state, block, offset, jit_code_rshr_u);
879 static void rec_special_SRAV(struct lightrec_cstate *state,
880 const struct block *block, u16 offset)
882 _jit_name(block->_jit, __func__);
883 rec_alu_shiftv(state, block, offset, jit_code_rshr);
886 static void rec_alu_shift(struct lightrec_cstate *state, const struct block *block,
887 u16 offset, jit_code_t code)
889 struct regcache *reg_cache = state->reg_cache;
890 union code c = block->opcode_list[offset].c;
891 jit_state_t *_jit = block->_jit;
892 u8 rd, rt, flags = 0, out_flags = 0;
894 jit_note(__FILE__, __LINE__);
if (code == jit_code_rshi)
flags = REG_EXT;
else if (code == jit_code_rshi_u)
flags = REG_ZEXT;

/* The input reg is zero-extended; if we SRL by at least one bit, we know
 * the output reg will be both zero-extended and sign-extended. */
904 if (code == jit_code_rshi_u && c.r.imm)
905 out_flags |= REG_EXT;
907 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
908 c.r.rt, c.r.rd, flags, out_flags, &rt, &rd);
910 jit_new_node_www(code, rd, rt, c.r.imm);
912 lightrec_free_reg(reg_cache, rt);
913 lightrec_free_reg(reg_cache, rd);
916 static void rec_special_SLL(struct lightrec_cstate *state,
917 const struct block *block, u16 offset)
919 _jit_name(block->_jit, __func__);
920 rec_alu_shift(state, block, offset, jit_code_lshi);
923 static void rec_special_SRL(struct lightrec_cstate *state,
924 const struct block *block, u16 offset)
926 _jit_name(block->_jit, __func__);
927 rec_alu_shift(state, block, offset, jit_code_rshi_u);
930 static void rec_special_SRA(struct lightrec_cstate *state,
931 const struct block *block, u16 offset)
933 _jit_name(block->_jit, __func__);
934 rec_alu_shift(state, block, offset, jit_code_rshi);
937 static void rec_alu_mult(struct lightrec_cstate *state,
938 const struct block *block, u16 offset, bool is_signed)
940 struct regcache *reg_cache = state->reg_cache;
941 union code c = block->opcode_list[offset].c;
942 u32 flags = block->opcode_list[offset].flags;
943 u8 reg_lo = get_mult_div_lo(c);
944 u8 reg_hi = get_mult_div_hi(c);
945 jit_state_t *_jit = block->_jit;
946 u8 lo, hi, rs, rt, rflags = 0;
947 bool no_lo = op_flag_no_lo(flags);
948 bool no_hi = op_flag_no_hi(flags);
950 jit_note(__FILE__, __LINE__);
957 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
958 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
961 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
964 hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, REG_EXT);
966 if (__WORDSIZE == 32) {
/* On 32-bit systems, do a 32*32->64 bit operation, or a 32*32->32 bit
 * operation if the MULT was detected as 32-bit only. */
971 jit_hmulr(hi, rs, rt);
973 jit_hmulr_u(hi, rs, rt);
975 jit_mulr(lo, rs, rt);
976 } else if (is_signed) {
977 jit_qmulr(lo, hi, rs, rt);
979 jit_qmulr_u(lo, hi, rs, rt);
982 /* On 64-bit systems, do a 64*64->64 bit operation. */
984 jit_mulr(hi, rs, rt);
985 jit_rshi(hi, hi, 32);
987 jit_mulr(lo, rs, rt);
989 /* The 64-bit output value is in $lo, store the upper 32 bits in $hi */
991 jit_rshi(hi, lo, 32);
995 lightrec_free_reg(reg_cache, rs);
996 lightrec_free_reg(reg_cache, rt);
998 lightrec_free_reg(reg_cache, lo);
1000 lightrec_free_reg(reg_cache, hi);
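
/*
 * DIV/DIVU never trap on MIPS: dividing by zero leaves defined results
 * instead. The handler below branches over the normal divide when rt == 0
 * and materializes those results by hand: for DIV, LO = 1 if rs < 0,
 * LO = -1 otherwise, and HI = rs; for DIVU, LO = 0xffffffff and HI = rs.
 * The ltr/lshi/subi sequence in the signed case computes (rs < 0) * 2 - 1,
 * since rt is known to be zero at that point.
 */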
1003 static void rec_alu_div(struct lightrec_cstate *state,
1004 const struct block *block, u16 offset, bool is_signed)
1006 struct regcache *reg_cache = state->reg_cache;
1007 union code c = block->opcode_list[offset].c;
1008 u32 flags = block->opcode_list[offset].flags;
1009 bool no_check = op_flag_no_div_check(flags);
1010 u8 reg_lo = get_mult_div_lo(c);
1011 u8 reg_hi = get_mult_div_hi(c);
1012 jit_state_t *_jit = block->_jit;
1013 jit_node_t *branch, *to_end;
1014 u8 lo = 0, hi = 0, rs, rt, rflags = 0;
1016 jit_note(__FILE__, __LINE__);
1023 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
1024 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
1026 if (!op_flag_no_lo(flags))
1027 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
1029 if (!op_flag_no_hi(flags))
1030 hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, 0);
1032 /* Jump to special handler if dividing by zero */
1034 branch = jit_beqi(rt, 0);
1036 if (op_flag_no_lo(flags)) {
1038 jit_remr(hi, rs, rt);
1040 jit_remr_u(hi, rs, rt);
1041 } else if (op_flag_no_hi(flags)) {
1043 jit_divr(lo, rs, rt);
1045 jit_divr_u(lo, rs, rt);
1048 jit_qdivr(lo, hi, rs, rt);
1050 jit_qdivr_u(lo, hi, rs, rt);
1054 /* Jump above the div-by-zero handler */
1059 if (!op_flag_no_lo(flags)) {
1061 jit_ltr(lo, rs, rt);
1062 jit_lshi(lo, lo, 1);
1063 jit_subi(lo, lo, 1);
1065 jit_subi(lo, rt, 1);
1069 if (!op_flag_no_hi(flags))
1075 lightrec_free_reg(reg_cache, rs);
1076 lightrec_free_reg(reg_cache, rt);
1078 if (!op_flag_no_lo(flags))
1079 lightrec_free_reg(reg_cache, lo);
1081 if (!op_flag_no_hi(flags))
1082 lightrec_free_reg(reg_cache, hi);
1085 static void rec_special_MULT(struct lightrec_cstate *state,
1086 const struct block *block, u16 offset)
1088 _jit_name(block->_jit, __func__);
1089 rec_alu_mult(state, block, offset, true);
1092 static void rec_special_MULTU(struct lightrec_cstate *state,
1093 const struct block *block, u16 offset)
1095 _jit_name(block->_jit, __func__);
1096 rec_alu_mult(state, block, offset, false);
1099 static void rec_special_DIV(struct lightrec_cstate *state,
1100 const struct block *block, u16 offset)
1102 _jit_name(block->_jit, __func__);
1103 rec_alu_div(state, block, offset, true);
1106 static void rec_special_DIVU(struct lightrec_cstate *state,
1107 const struct block *block, u16 offset)
1109 _jit_name(block->_jit, __func__);
1110 rec_alu_div(state, block, offset, false);
1113 static void rec_alu_mv_lo_hi(struct lightrec_cstate *state,
1114 const struct block *block, u16 offset,
1117 struct regcache *reg_cache = state->reg_cache;
1118 jit_state_t *_jit = block->_jit;
1120 jit_note(__FILE__, __LINE__);
1122 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
1123 src, dst, 0, REG_EXT, &src, &dst);
1125 jit_extr_i(dst, src);
1127 lightrec_free_reg(reg_cache, src);
1128 lightrec_free_reg(reg_cache, dst);
1131 static void rec_special_MFHI(struct lightrec_cstate *state,
1132 const struct block *block, u16 offset)
1134 union code c = block->opcode_list[offset].c;
1136 _jit_name(block->_jit, __func__);
1137 rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_HI);
1140 static void rec_special_MTHI(struct lightrec_cstate *state,
1141 const struct block *block, u16 offset)
1143 union code c = block->opcode_list[offset].c;
1145 _jit_name(block->_jit, __func__);
1146 rec_alu_mv_lo_hi(state, block, offset, REG_HI, c.r.rs);
1149 static void rec_special_MFLO(struct lightrec_cstate *state,
1150 const struct block *block, u16 offset)
1152 union code c = block->opcode_list[offset].c;
1154 _jit_name(block->_jit, __func__);
1155 rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_LO);
1158 static void rec_special_MTLO(struct lightrec_cstate *state,
1159 const struct block *block, u16 offset)
1161 union code c = block->opcode_list[offset].c;
1163 _jit_name(block->_jit, __func__);
1164 rec_alu_mv_lo_hi(state, block, offset, REG_LO, c.r.rs);
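
/*
 * call_to_c_wrapper() emits a call into one of the C wrapper trampolines
 * (enum c_wrappers). The trampoline's entry point is loaded from
 * lightrec_state->wrappers_eps[wrapper], and the register cache tries to
 * reuse a temporary that already holds that address. JIT_R1 is kept
 * unmapped here because the wrapper code uses it, presumably to carry
 * "arg" over to the C side.
 */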
1167 static void call_to_c_wrapper(struct lightrec_cstate *state,
1168 const struct block *block, u32 arg,
1169 enum c_wrappers wrapper)
1171 struct regcache *reg_cache = state->reg_cache;
1172 jit_state_t *_jit = block->_jit;
1175 /* Make sure JIT_R1 is not mapped; it will be used in the C wrapper. */
1176 tmp2 = lightrec_alloc_reg(reg_cache, _jit, JIT_R1);
1178 tmp = lightrec_get_reg_with_value(reg_cache,
1179 (intptr_t) state->state->wrappers_eps[wrapper]);
1181 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1182 jit_ldxi(tmp, LIGHTREC_REG_STATE,
1183 offsetof(struct lightrec_state, wrappers_eps[wrapper]));
1185 lightrec_temp_set_value(reg_cache, tmp,
1186 (intptr_t) state->state->wrappers_eps[wrapper]);
1189 lightrec_free_reg(reg_cache, tmp2);
/* On MIPS, register t9 is always used as the target register for JALR.
 * Therefore, if it does not contain the target address, we must
 * invalidate it. */
lightrec_unload_reg(reg_cache, _jit, _T9);
1202 lightrec_regcache_mark_live(reg_cache, _jit);
1205 lightrec_free_reg(reg_cache, tmp);
1206 lightrec_regcache_mark_live(reg_cache, _jit);
1209 static void rec_io(struct lightrec_cstate *state,
1210 const struct block *block, u16 offset,
1211 bool load_rt, bool read_rt)
1213 struct regcache *reg_cache = state->reg_cache;
1214 jit_state_t *_jit = block->_jit;
1215 union code c = block->opcode_list[offset].c;
1216 u32 flags = block->opcode_list[offset].flags;
1217 bool is_tagged = LIGHTREC_FLAGS_GET_IO_MODE(flags);
1221 jit_note(__FILE__, __LINE__);
1223 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
1225 if (read_rt && likely(c.i.rt))
1226 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
1228 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
1230 if (op_flag_load_delay(flags) && !state->no_load_delay) {
1231 /* Clear state->in_delay_slot_n. This notifies the lightrec_rw
1232 * wrapper that it should write the REG_TEMP register instead of
1233 * the actual output register of the opcode. */
1234 zero = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1235 jit_stxi_c(offsetof(struct lightrec_state, in_delay_slot_n),
1236 LIGHTREC_REG_STATE, zero);
1237 lightrec_free_reg(reg_cache, zero);
1241 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_RW);
1243 lut_entry = lightrec_get_lut_entry(block);
1244 call_to_c_wrapper(state, block, (lut_entry << 16) | offset,
1245 C_WRAPPER_RW_GENERIC);
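
/*
 * Address-mask helpers for the direct memory access paths below. The guest
 * address is ANDed with one of these masks to strip the segment bits and
 * fold mirrors, then the map's host offset is added to obtain a real host
 * pointer. For instance, assuming a 0x2000-byte hardware-register map,
 * rec_io_mask() yields 0x1f801fff (0x1f800000 OR'd with a GENMASK covering
 * the rounded-up length).
 */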
1249 static u32 rec_ram_mask(const struct lightrec_state *state)
1251 return (RAM_SIZE << (state->mirrors_mapped * 2)) - 1;
1254 static u32 rec_io_mask(const struct lightrec_state *state)
1256 u32 length = state->maps[PSX_MAP_HW_REGISTERS].length;
1258 return 0x1f800000 | GENMASK(31 - clz32(length - 1), 0);
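
/*
 * Fast path for stores whose target map is known at compile time: mask the
 * address, add the host offset, and emit the store directly. When
 * "invalidate" is set, the corresponding entry of the code LUT is cleared
 * as well, so that any block previously compiled from that word gets
 * detected as modified and recompiled. Byte-swapping (swap_code) is only
 * emitted on big-endian hosts, and OP_META_SWU goes through Lightning's
 * unaligned store helper.
 */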
1261 static void rec_store_memory(struct lightrec_cstate *cstate,
1262 const struct block *block,
1263 u16 offset, jit_code_t code,
1264 jit_code_t swap_code,
1265 uintptr_t addr_offset, u32 addr_mask,
1268 const struct lightrec_state *state = cstate->state;
1269 struct regcache *reg_cache = cstate->reg_cache;
1270 struct opcode *op = &block->opcode_list[offset];
1271 jit_state_t *_jit = block->_jit;
1272 union code c = op->c;
1273 u8 rs, rt, tmp = 0, tmp2 = 0, tmp3, addr_reg, addr_reg2;
1274 s16 imm = (s16)c.i.imm;
1275 s32 simm = (s32)imm << (1 - lut_is_32bit(state));
1276 s32 lut_offt = offsetof(struct lightrec_state, code_lut);
1277 bool no_mask = op_flag_no_mask(op->flags);
1278 bool add_imm = c.i.imm &&
1279 (c.i.op == OP_META_SWU
1280 || (!state->mirrors_mapped && !no_mask) || (invalidate &&
1281 ((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt))));
1282 bool need_tmp = !no_mask || add_imm || invalidate;
1283 bool swc2 = c.i.op == OP_SWC2;
1284 u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
1287 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1289 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1294 jit_addi(tmp, addr_reg, (s16)c.i.imm);
1295 lightrec_free_reg(reg_cache, rs);
1303 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1306 jit_andr(tmp, addr_reg, reg_imm);
1309 lightrec_free_reg(reg_cache, reg_imm);
1313 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1315 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1316 jit_addr(tmp2, addr_reg, reg_imm);
1319 lightrec_free_reg(reg_cache, reg_imm);
1321 addr_reg2 = addr_reg;
1324 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1326 if (is_big_endian() && swap_code && in_reg) {
1327 tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
1329 jit_new_node_ww(swap_code, tmp3, rt);
1331 if (c.i.op == OP_META_SWU)
1332 jit_unstr(addr_reg2, tmp3, LIGHTNING_UNALIGNED_32BIT);
1334 jit_new_node_www(code, imm, addr_reg2, tmp3);
1336 lightrec_free_reg(reg_cache, tmp3);
1337 } else if (c.i.op == OP_META_SWU) {
1338 jit_unstr(addr_reg2, rt, LIGHTNING_UNALIGNED_32BIT);
1340 jit_new_node_www(code, imm, addr_reg2, rt);
1343 lightrec_free_reg(reg_cache, rt);
1346 tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1348 if (c.i.op != OP_SW) {
1349 jit_andi(tmp, addr_reg, ~3);
1353 if (!lut_is_32bit(state)) {
1354 jit_lshi(tmp, addr_reg, 1);
1358 if (addr_reg == rs && c.i.rs == 0) {
1359 addr_reg = LIGHTREC_REG_STATE;
1361 jit_add_state(tmp, addr_reg);
1365 if (lut_is_32bit(state))
1366 jit_stxi_i(lut_offt, addr_reg, tmp3);
1368 jit_stxi(lut_offt, addr_reg, tmp3);
1370 lightrec_free_reg(reg_cache, tmp3);
1374 lightrec_free_reg(reg_cache, tmp2);
1376 lightrec_free_reg(reg_cache, tmp);
1377 lightrec_free_reg(reg_cache, rs);
1380 static void rec_store_ram(struct lightrec_cstate *cstate,
1381 const struct block *block,
1382 u16 offset, jit_code_t code,
1383 jit_code_t swap_code, bool invalidate)
1385 const struct lightrec_state *state = cstate->state;
1387 _jit_note(block->_jit, __FILE__, __LINE__);
1389 return rec_store_memory(cstate, block, offset, code, swap_code,
1390 state->offset_ram, rec_ram_mask(state),
1394 static void rec_store_scratch(struct lightrec_cstate *cstate,
1395 const struct block *block, u16 offset,
1396 jit_code_t code, jit_code_t swap_code)
1398 _jit_note(block->_jit, __FILE__, __LINE__);
1400 return rec_store_memory(cstate, block, offset, code, swap_code,
1401 cstate->state->offset_scratch,
1405 static void rec_store_io(struct lightrec_cstate *cstate,
1406 const struct block *block, u16 offset,
1407 jit_code_t code, jit_code_t swap_code)
1409 _jit_note(block->_jit, __FILE__, __LINE__);
1411 return rec_store_memory(cstate, block, offset, code, swap_code,
1412 cstate->state->offset_io,
1413 rec_io_mask(cstate->state), false);
1416 static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate,
1417 const struct block *block,
1418 u16 offset, jit_code_t code,
1419 jit_code_t swap_code)
1421 const struct lightrec_state *state = cstate->state;
1422 struct regcache *reg_cache = cstate->reg_cache;
1423 union code c = block->opcode_list[offset].c;
1424 jit_state_t *_jit = block->_jit;
1425 jit_node_t *to_not_ram, *to_end;
1426 bool swc2 = c.i.op == OP_SWC2;
1427 u8 tmp, tmp2 = 0, rs, rt, in_reg = swc2 ? REG_TEMP : c.i.rt;
1432 jit_note(__FILE__, __LINE__);
1433 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1434 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1436 if (state->mirrors_mapped)
1437 addr_mask = 0x1f800000 | (4 * RAM_SIZE - 1);
1439 addr_mask = 0x1f800000 | (RAM_SIZE - 1);
1441 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
1443 /* Convert to KUNSEG and avoid RAM mirrors */
1444 if ((c.i.op == OP_META_SWU || !state->mirrors_mapped) && c.i.imm) {
1446 jit_addi(tmp, rs, (s16)c.i.imm);
1447 jit_andr(tmp, tmp, reg_imm);
1450 jit_andr(tmp, rs, reg_imm);
1453 lightrec_free_reg(reg_cache, rs);
1454 lightrec_free_reg(reg_cache, reg_imm);
1456 if (state->offset_ram != state->offset_scratch) {
1457 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1459 to_not_ram = jit_bmsi(tmp, BIT(28));
1461 jit_movi(tmp2, state->offset_ram);
1464 jit_patch(to_not_ram);
1466 jit_movi(tmp2, state->offset_scratch);
1468 } else if (state->offset_ram) {
1469 tmp2 = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1473 if (state->offset_ram || state->offset_scratch) {
1474 jit_addr(tmp, tmp, tmp2);
1475 lightrec_free_reg(reg_cache, tmp2);
1478 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1480 if (is_big_endian() && swap_code && in_reg) {
1481 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1483 jit_new_node_ww(swap_code, tmp2, rt);
1485 if (c.i.op == OP_META_SWU)
1486 jit_unstr(tmp, tmp2, LIGHTNING_UNALIGNED_32BIT);
1488 jit_new_node_www(code, imm, tmp, tmp2);
1490 lightrec_free_reg(reg_cache, tmp2);
1491 } else if (c.i.op == OP_META_SWU) {
1492 jit_unstr(tmp, rt, LIGHTNING_UNALIGNED_32BIT);
1494 jit_new_node_www(code, imm, tmp, rt);
1497 lightrec_free_reg(reg_cache, rt);
1498 lightrec_free_reg(reg_cache, tmp);
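
/*
 * Same as rec_store_direct_no_invalidate(), but this variant also clears
 * the code LUT entry matching the stored address. The address is
 * classified at run time: RAM accesses get the LUT invalidation and
 * state->offset_ram, anything above the RAM range is rebased with
 * state->offset_scratch instead.
 */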
1501 static void rec_store_direct(struct lightrec_cstate *cstate, const struct block *block,
1502 u16 offset, jit_code_t code, jit_code_t swap_code)
1504 const struct lightrec_state *state = cstate->state;
1505 u32 ram_size = state->mirrors_mapped ? RAM_SIZE * 4 : RAM_SIZE;
1506 struct regcache *reg_cache = cstate->reg_cache;
1507 union code c = block->opcode_list[offset].c;
1508 jit_state_t *_jit = block->_jit;
1509 jit_node_t *to_not_ram, *to_end;
1510 bool swc2 = c.i.op == OP_SWC2;
1511 u8 tmp, tmp2, tmp3, masked_reg, rs, rt;
1512 u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
1513 u32 addr_mask = 0x1f800000 | (ram_size - 1);
1514 bool different_offsets = state->offset_ram != state->offset_scratch;
1517 jit_note(__FILE__, __LINE__);
1519 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1520 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1521 tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1523 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
1525 /* Convert to KUNSEG and avoid RAM mirrors */
1527 jit_addi(tmp2, rs, (s16)c.i.imm);
1528 jit_andr(tmp2, tmp2, reg_imm);
1530 jit_andr(tmp2, rs, reg_imm);
1533 lightrec_free_reg(reg_cache, rs);
1534 lightrec_free_reg(reg_cache, reg_imm);
1535 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1537 if (different_offsets) {
1538 to_not_ram = jit_bgti(tmp2, ram_size);
1541 jit_lti_u(tmp, tmp2, ram_size);
1542 jit_movnr(tmp, tmp2, tmp);
1546 /* Compute the offset to the code LUT */
1547 if (c.i.op == OP_SW)
1548 jit_andi(tmp, masked_reg, RAM_SIZE - 1);
1550 jit_andi(tmp, masked_reg, (RAM_SIZE - 1) & ~3);
1552 if (!lut_is_32bit(state))
1553 jit_lshi(tmp, tmp, 1);
1554 jit_add_state(tmp, tmp);
1556 /* Write NULL to the code LUT to invalidate any block that's there */
1557 if (lut_is_32bit(state))
1558 jit_stxi_i(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
1560 jit_stxi(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
1562 if (c.i.op == OP_META_SWU) {
1563 /* With a SWU opcode, we might have touched the following 32-bit
1564 * word, so invalidate it as well */
1565 if (lut_is_32bit(state)) {
1566 jit_stxi_i(offsetof(struct lightrec_state, code_lut) + 4,
1569 jit_stxi(offsetof(struct lightrec_state, code_lut)
1570 + sizeof(uintptr_t), tmp, tmp3);
1574 if (different_offsets) {
1575 jit_movi(tmp, state->offset_ram);
1578 jit_patch(to_not_ram);
1581 if (state->offset_ram || state->offset_scratch)
1582 jit_movi(tmp, state->offset_scratch);
1584 if (different_offsets)
1587 if (state->offset_ram || state->offset_scratch)
1588 jit_addr(tmp2, tmp2, tmp);
1590 lightrec_free_reg(reg_cache, tmp);
1591 lightrec_free_reg(reg_cache, tmp3);
1593 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1595 if (is_big_endian() && swap_code && in_reg) {
1596 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1598 jit_new_node_ww(swap_code, tmp, rt);
1600 if (c.i.op == OP_META_SWU)
1601 jit_unstr(tmp2, tmp, LIGHTNING_UNALIGNED_32BIT);
1603 jit_new_node_www(code, 0, tmp2, tmp);
1605 lightrec_free_reg(reg_cache, tmp);
1606 } else if (c.i.op == OP_META_SWU) {
1607 jit_unstr(tmp2, rt, LIGHTNING_UNALIGNED_32BIT);
1609 jit_new_node_www(code, 0, tmp2, rt);
1612 lightrec_free_reg(reg_cache, rt);
1613 lightrec_free_reg(reg_cache, tmp2);
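
/*
 * Top-level store dispatcher: each store is annotated (via
 * LIGHTREC_FLAGS_GET_IO_MODE) with the memory region it targets, and the
 * matching specialized emitter is picked here; untagged stores fall back to
 * the generic C read/write wrapper. SWC2 stores first copy the CP2 data
 * register into REG_TEMP through rec_cp2_do_mfc2(), since the value does
 * not live in a guest GPR.
 */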
1616 static void rec_store(struct lightrec_cstate *state,
1617 const struct block *block, u16 offset,
1618 jit_code_t code, jit_code_t swap_code)
1620 u32 flags = block->opcode_list[offset].flags;
1621 u32 mode = LIGHTREC_FLAGS_GET_IO_MODE(flags);
1622 bool no_invalidate = op_flag_no_invalidate(flags) ||
1623 (state->state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY);
1624 union code c = block->opcode_list[offset].c;
1625 bool is_swc2 = c.i.op == OP_SWC2;
1629 case LIGHTREC_IO_RAM:
1630 case LIGHTREC_IO_SCRATCH:
1631 case LIGHTREC_IO_DIRECT:
1632 case LIGHTREC_IO_DIRECT_HW:
1633 rec_cp2_do_mfc2(state, block, offset, c.i.rt, REG_TEMP);
1641 case LIGHTREC_IO_RAM:
1642 rec_store_ram(state, block, offset, code,
1643 swap_code, !no_invalidate);
1645 case LIGHTREC_IO_SCRATCH:
1646 rec_store_scratch(state, block, offset, code, swap_code);
1648 case LIGHTREC_IO_DIRECT:
1649 if (no_invalidate) {
1650 rec_store_direct_no_invalidate(state, block, offset,
1653 rec_store_direct(state, block, offset, code, swap_code);
1656 case LIGHTREC_IO_DIRECT_HW:
1657 rec_store_io(state, block, offset, code, swap_code);
1660 rec_io(state, block, offset, true, false);
1665 lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP);
1668 static void rec_SB(struct lightrec_cstate *state,
1669 const struct block *block, u16 offset)
1671 _jit_name(block->_jit, __func__);
1672 rec_store(state, block, offset, jit_code_stxi_c, 0);
1675 static void rec_SH(struct lightrec_cstate *state,
1676 const struct block *block, u16 offset)
1678 _jit_name(block->_jit, __func__);
1679 rec_store(state, block, offset,
1680 jit_code_stxi_s, jit_code_bswapr_us);
1683 static void rec_SW(struct lightrec_cstate *state,
1684 const struct block *block, u16 offset)
1687 union code c = block->opcode_list[offset].c;
1689 _jit_name(block->_jit, c.i.op == OP_SWC2 ? "rec_SWC2" : "rec_SW");
1690 rec_store(state, block, offset,
1691 jit_code_stxi_i, jit_code_bswapr_ui);
1694 static void rec_SWL(struct lightrec_cstate *state,
1695 const struct block *block, u16 offset)
1697 _jit_name(block->_jit, __func__);
1698 rec_io(state, block, offset, true, false);
1701 static void rec_SWR(struct lightrec_cstate *state,
1702 const struct block *block, u16 offset)
1704 _jit_name(block->_jit, __func__);
1705 rec_io(state, block, offset, true, false);
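
/*
 * Load counterpart of rec_store_memory(). When the opcode sits in a delay
 * slot with an active load delay, or is an LWC2, the value is loaded into
 * REG_TEMP instead of the architectural target register; the dispatcher
 * (or rec_cp2_do_mtc2() for LWC2) then moves it to its real destination.
 */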
1708 static void rec_load_memory(struct lightrec_cstate *cstate,
1709 const struct block *block, u16 offset,
1710 jit_code_t code, jit_code_t swap_code, bool is_unsigned,
1711 uintptr_t addr_offset, u32 addr_mask)
1713 struct regcache *reg_cache = cstate->reg_cache;
1714 struct opcode *op = &block->opcode_list[offset];
1715 bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
1716 jit_state_t *_jit = block->_jit;
1717 u8 rs, rt, out_reg, addr_reg, flags = REG_EXT;
1718 bool no_mask = op_flag_no_mask(op->flags);
1719 union code c = op->c;
1723 if (load_delay || c.i.op == OP_LWC2)
1733 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1734 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
1736 if ((op->i.op == OP_META_LWU && c.i.imm)
1737 || (!cstate->state->mirrors_mapped && c.i.imm && !no_mask)) {
1738 jit_addi(rt, rs, (s16)c.i.imm);
1746 if (op->i.op == OP_META_LWU)
1747 imm = LIGHTNING_UNALIGNED_32BIT;
1750 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1753 jit_andr(rt, addr_reg, reg_imm);
1756 lightrec_free_reg(reg_cache, reg_imm);
1760 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1763 jit_addr(rt, addr_reg, reg_imm);
1766 lightrec_free_reg(reg_cache, reg_imm);
1769 jit_new_node_www(code, rt, addr_reg, imm);
1771 if (is_big_endian() && swap_code) {
1772 jit_new_node_ww(swap_code, rt, rt);
1774 if (c.i.op == OP_LH)
1776 else if (c.i.op == OP_LW && __WORDSIZE == 64)
1780 lightrec_free_reg(reg_cache, rs);
1781 lightrec_free_reg(reg_cache, rt);
1784 static void rec_load_ram(struct lightrec_cstate *cstate,
1785 const struct block *block, u16 offset,
1786 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1788 _jit_note(block->_jit, __FILE__, __LINE__);
1790 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1791 cstate->state->offset_ram, rec_ram_mask(cstate->state));
1794 static void rec_load_bios(struct lightrec_cstate *cstate,
1795 const struct block *block, u16 offset,
1796 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1798 _jit_note(block->_jit, __FILE__, __LINE__);
1800 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1801 cstate->state->offset_bios, 0x1fffffff);
1804 static void rec_load_scratch(struct lightrec_cstate *cstate,
1805 const struct block *block, u16 offset,
1806 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1808 _jit_note(block->_jit, __FILE__, __LINE__);
1810 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1811 cstate->state->offset_scratch, 0x1fffffff);
1814 static void rec_load_io(struct lightrec_cstate *cstate,
1815 const struct block *block, u16 offset,
1816 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1818 _jit_note(block->_jit, __FILE__, __LINE__);
1820 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1821 cstate->state->offset_io, rec_io_mask(cstate->state));
1824 static void rec_load_direct(struct lightrec_cstate *cstate,
1825 const struct block *block, u16 offset,
1826 jit_code_t code, jit_code_t swap_code,
1829 const struct lightrec_state *state = cstate->state;
1830 struct regcache *reg_cache = cstate->reg_cache;
1831 struct opcode *op = &block->opcode_list[offset];
1832 bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
1833 jit_state_t *_jit = block->_jit;
1834 jit_node_t *to_not_ram, *to_not_bios, *to_end, *to_end2;
1835 u8 tmp, rs, rt, out_reg, addr_reg, flags = REG_EXT;
1836 bool different_offsets = state->offset_bios != state->offset_scratch;
1837 union code c = op->c;
1843 if (load_delay || c.i.op == OP_LWC2)
1853 jit_note(__FILE__, __LINE__);
1854 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1855 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
1857 if ((state->offset_ram == state->offset_bios &&
1858 state->offset_ram == state->offset_scratch &&
1859 state->mirrors_mapped && c.i.op != OP_META_LWU)
1864 jit_addi(rt, rs, (s16)c.i.imm);
1868 if (c.i.rs != c.i.rt)
1869 lightrec_free_reg(reg_cache, rs);
1872 if (op->i.op == OP_META_LWU)
1873 imm = LIGHTNING_UNALIGNED_32BIT;
1875 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1877 if (state->offset_ram == state->offset_bios &&
1878 state->offset_ram == state->offset_scratch) {
1879 if (!state->mirrors_mapped)
1880 addr_mask = 0x1f800000 | (RAM_SIZE - 1);
1882 addr_mask = 0x1fffffff;
1884 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1886 if (!state->mirrors_mapped) {
1887 jit_andi(tmp, addr_reg, BIT(28));
1888 jit_rshi_u(tmp, tmp, 28 - 22);
1889 jit_orr(tmp, tmp, reg_imm);
1890 jit_andr(rt, addr_reg, tmp);
1892 jit_andr(rt, addr_reg, reg_imm);
1895 lightrec_free_reg(reg_cache, reg_imm);
1897 if (state->offset_ram) {
1898 offt_reg = lightrec_get_reg_with_value(reg_cache,
1901 jit_movi(tmp, state->offset_ram);
1902 lightrec_temp_set_value(reg_cache, tmp,
1905 lightrec_free_reg(reg_cache, tmp);
1910 to_not_ram = jit_bmsi(addr_reg, BIT(28));
1912 /* Convert to KUNSEG and avoid RAM mirrors */
1913 jit_andi(rt, addr_reg, RAM_SIZE - 1);
1915 if (state->offset_ram)
1916 jit_movi(tmp, state->offset_ram);
1920 jit_patch(to_not_ram);
1922 if (different_offsets)
1923 to_not_bios = jit_bmci(addr_reg, BIT(22));
1925 /* Convert to KUNSEG */
1926 jit_andi(rt, addr_reg, 0x1fc00000 | (BIOS_SIZE - 1));
1928 jit_movi(tmp, state->offset_bios);
1930 if (different_offsets) {
1933 jit_patch(to_not_bios);
1935 /* Convert to KUNSEG */
1936 jit_andi(rt, addr_reg, 0x1f800fff);
1938 if (state->offset_scratch)
1939 jit_movi(tmp, state->offset_scratch);
1947 if (state->offset_ram || state->offset_bios || state->offset_scratch)
1948 jit_addr(rt, rt, tmp);
1950 jit_new_node_www(code, rt, rt, imm);
1952 if (is_big_endian() && swap_code) {
1953 jit_new_node_ww(swap_code, rt, rt);
1955 if (c.i.op == OP_LH)
1957 else if (c.i.op == OP_LW && __WORDSIZE == 64)
1961 lightrec_free_reg(reg_cache, addr_reg);
1962 lightrec_free_reg(reg_cache, rt);
1963 lightrec_free_reg(reg_cache, tmp);
1966 static void rec_load(struct lightrec_cstate *state, const struct block *block,
1967 u16 offset, jit_code_t code, jit_code_t swap_code,
1970 const struct opcode *op = &block->opcode_list[offset];
1971 u32 flags = op->flags;
1973 switch (LIGHTREC_FLAGS_GET_IO_MODE(flags)) {
1974 case LIGHTREC_IO_RAM:
1975 rec_load_ram(state, block, offset, code, swap_code, is_unsigned);
1977 case LIGHTREC_IO_BIOS:
1978 rec_load_bios(state, block, offset, code, swap_code, is_unsigned);
1980 case LIGHTREC_IO_SCRATCH:
1981 rec_load_scratch(state, block, offset, code, swap_code, is_unsigned);
1983 case LIGHTREC_IO_DIRECT_HW:
1984 rec_load_io(state, block, offset, code, swap_code, is_unsigned);
1986 case LIGHTREC_IO_DIRECT:
1987 rec_load_direct(state, block, offset, code, swap_code, is_unsigned);
1990 rec_io(state, block, offset, false, true);
1994 if (op->i.op == OP_LWC2) {
1995 rec_cp2_do_mtc2(state, block, offset, op->i.rt, REG_TEMP);
1996 lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP);
2000 static void rec_LB(struct lightrec_cstate *state, const struct block *block, u16 offset)
2002 _jit_name(block->_jit, __func__);
2003 rec_load(state, block, offset, jit_code_ldxi_c, 0, false);
2006 static void rec_LBU(struct lightrec_cstate *state, const struct block *block, u16 offset)
2008 _jit_name(block->_jit, __func__);
2009 rec_load(state, block, offset, jit_code_ldxi_uc, 0, true);
2012 static void rec_LH(struct lightrec_cstate *state, const struct block *block, u16 offset)
2014 jit_code_t code = is_big_endian() ? jit_code_ldxi_us : jit_code_ldxi_s;
2016 _jit_name(block->_jit, __func__);
2017 rec_load(state, block, offset, code, jit_code_bswapr_us, false);
2020 static void rec_LHU(struct lightrec_cstate *state, const struct block *block, u16 offset)
2022 _jit_name(block->_jit, __func__);
2023 rec_load(state, block, offset, jit_code_ldxi_us, jit_code_bswapr_us, true);
2026 static void rec_LWL(struct lightrec_cstate *state, const struct block *block, u16 offset)
2028 _jit_name(block->_jit, __func__);
2029 rec_io(state, block, offset, true, true);
2032 static void rec_LWR(struct lightrec_cstate *state, const struct block *block, u16 offset)
2034 _jit_name(block->_jit, __func__);
2035 rec_io(state, block, offset, true, true);
2038 static void rec_LW(struct lightrec_cstate *state, const struct block *block, u16 offset)
2040 union code c = block->opcode_list[offset].c;
2043 if (is_big_endian() && __WORDSIZE == 64)
2044 code = jit_code_ldxi_ui;
2046 code = jit_code_ldxi_i;
2048 _jit_name(block->_jit, c.i.op == OP_LWC2 ? "rec_LWC2" : "rec_LW");
2049 rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
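
/*
 * rec_exit_early() is used by SYSCALL, BREAK and similar opcodes that must
 * hand control back to the main loop immediately: it stores the exit code
 * in state->exit_flags, collapses target_cycle/current_cycle so that the
 * cycle budget is exhausted, and then ends the block at the given PC.
 */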
2052 static void rec_exit_early(struct lightrec_cstate *state,
2053 const struct block *block, u16 offset,
2054 u32 exit_code, u32 pc)
2056 struct regcache *reg_cache = state->reg_cache;
2057 jit_state_t *_jit = block->_jit;
2060 _jit_note(block->_jit, __FILE__, __LINE__);
2062 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2064 jit_movi(tmp, exit_code);
2065 jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
2066 LIGHTREC_REG_STATE, tmp);
2068 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2069 offsetof(struct lightrec_state, target_cycle));
2070 jit_subr(tmp, tmp, LIGHTREC_REG_CYCLE);
2071 jit_movi(LIGHTREC_REG_CYCLE, 0);
2072 jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
2073 LIGHTREC_REG_STATE, tmp);
2074 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
2075 LIGHTREC_REG_STATE, tmp);
2077 lightrec_free_reg(reg_cache, tmp);
2079 lightrec_emit_end_of_block(state, block, offset, -1, pc, 31, 0, true);
2082 static void rec_special_SYSCALL(struct lightrec_cstate *state,
2083 const struct block *block, u16 offset)
2085 _jit_name(block->_jit, __func__);
/* TODO: the return address should be "pc - 4" if we're in a delay slot */
2088 rec_exit_early(state, block, offset, LIGHTREC_EXIT_SYSCALL,
2089 get_ds_pc(block, offset, 0));
2092 static void rec_special_BREAK(struct lightrec_cstate *state,
2093 const struct block *block, u16 offset)
2095 _jit_name(block->_jit, __func__);
2096 rec_exit_early(state, block, offset, LIGHTREC_EXIT_BREAK,
2097 get_ds_pc(block, offset, 0));
2100 static void rec_mfc(struct lightrec_cstate *state, const struct block *block, u16 offset)
2102 struct regcache *reg_cache = state->reg_cache;
2103 union code c = block->opcode_list[offset].c;
2104 jit_state_t *_jit = block->_jit;
2106 jit_note(__FILE__, __LINE__);
2108 if (c.i.op != OP_SWC2)
2109 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
2111 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MFC);
2114 static void rec_mtc(struct lightrec_cstate *state, const struct block *block, u16 offset)
2116 struct regcache *reg_cache = state->reg_cache;
2117 union code c = block->opcode_list[offset].c;
2118 jit_state_t *_jit = block->_jit;
2120 jit_note(__FILE__, __LINE__);
2121 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
2122 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
2123 lightrec_clean_reg_if_loaded(reg_cache, _jit, REG_TEMP, false);
2125 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MTC);
2127 if (c.i.op == OP_CP0 &&
2128 !op_flag_no_ds(block->opcode_list[offset].flags) &&
2129 (c.r.rd == 12 || c.r.rd == 13))
2130 lightrec_emit_end_of_block(state, block, offset, -1,
2131 get_ds_pc(block, offset, 1),
2136 rec_mfc0(struct lightrec_cstate *state, const struct block *block, u16 offset)
2138 struct regcache *reg_cache = state->reg_cache;
2139 union code c = block->opcode_list[offset].c;
2140 jit_state_t *_jit = block->_jit;
2143 jit_note(__FILE__, __LINE__);
2145 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, REG_EXT);
2147 jit_ldxi_i(rt, LIGHTREC_REG_STATE,
2148 offsetof(struct lightrec_state, regs.cp0[c.r.rd]));
2150 lightrec_free_reg(reg_cache, rt);
2153 static bool block_uses_icache(const struct lightrec_cstate *state,
2154 const struct block *block)
2156 const struct lightrec_mem_map *map = &state->state->maps[PSX_MAP_KERNEL_USER_RAM];
2157 u32 pc = kunseg(block->pc);
2159 if (pc < map->pc || pc >= map->pc + map->length)
2162 return (block->pc >> 28) < 0xa;
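/* A PC whose top nibble is below 0xa lies in kuseg or kseg0, which
 * are cached; kseg1 (0xa0000000-0xbfffffff) bypasses the i-cache. */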
2166 rec_mtc0(struct lightrec_cstate *state, const struct block *block, u16 offset)
2168 struct regcache *reg_cache = state->reg_cache;
2169 const union code c = block->opcode_list[offset].c;
2170 jit_state_t *_jit = block->_jit;
2171 u8 rt, tmp = 0, tmp2, status;
2174 jit_note(__FILE__, __LINE__);
2182 /* Those registers are read-only */
2188 if (!block_uses_icache(state, block) && c.r.rd == 12) {
2189 /* If we are not running code from RAM through kuseg or
2190 * kseg0, handle writes to the Status register in C, as the
2191 * code may toggle bit 16, which isolates the cache. Code
2192 * running from kuseg or kseg0 in RAM cannot do that. */
2193 rec_mtc(state, block, offset);
2197 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rt, 0);
2200 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[c.r.rd]),
2201 LIGHTREC_REG_STATE, rt);
2204 if (c.r.rd == 12 || c.r.rd == 13) {
2205 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2206 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2207 offsetof(struct lightrec_state, regs.cp0[13]));
2209 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2214 } else if (c.r.rd == 13) {
2215 /* Cause = (Cause & ~0x0300) | (value & 0x0300) */
2216 jit_andi(tmp2, rt, 0x0300);
2217 jit_ori(tmp, tmp, 0x0300);
2218 jit_xori(tmp, tmp, 0x0300);
2219 jit_orr(tmp, tmp, tmp2);
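/* Setting then toggling bits 8-9 clears them, i.e.
 * (Cause | 0x0300) ^ 0x0300 == Cause & ~0x0300, presumably to avoid
 * materializing the wide inverted mask; the new bits extracted into
 * tmp2 are then merged in. */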
2220 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
2221 offsetof(struct lightrec_state, regs.cp0[12]));
2222 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[13]),
2223 LIGHTREC_REG_STATE, tmp);
2227 if (c.r.rd == 12 || c.r.rd == 13) {
2228 /* Exit dynarec in case there's a software interrupt.
2229 * exit_flags = !!(status & cause & 0x0300) & status; */
2230 jit_andr(tmp, tmp, status);
2231 jit_andi(tmp, tmp, 0x0300);
2232 jit_nei(tmp, tmp, 0);
2233 jit_andr(tmp, tmp, status);
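/* Bits 8-9 are the software interrupt mask/pending pair; the final
 * AND with Status leaves a non-zero result only when its bit 0
 * (IEc, the current interrupt enable) is also set, so the block only
 * exits when the interrupt can actually be taken. */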
2237 /* Exit dynarec in case we unmask a hardware interrupt.
2238 * exit_flags = !(~status & 0x401) */
2240 jit_comr(tmp2, status);
2241 jit_andi(tmp2, tmp2, 0x401);
2242 jit_eqi(tmp2, tmp2, 0);
2243 jit_orr(tmp, tmp, tmp2);
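/* 0x401 covers IEc (bit 0) and IM2 (bit 10), the mask for the
 * console's hardware interrupt line; ~status & 0x401 == 0 means both
 * are now set, so a pending hardware interrupt could fire. */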
2246 lightrec_free_reg(reg_cache, rt);
2248 if (c.r.rd == 12 || c.r.rd == 13) {
2249 to_end = jit_beqi(tmp, 0);
2251 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
2252 offsetof(struct lightrec_state, target_cycle));
2253 jit_subr(tmp2, tmp2, LIGHTREC_REG_CYCLE);
2254 jit_movi(LIGHTREC_REG_CYCLE, 0);
2255 jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
2256 LIGHTREC_REG_STATE, tmp2);
2257 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
2258 LIGHTREC_REG_STATE, tmp2);
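/* Same bookkeeping as rec_exit_early(): report the cycles already
 * spent and zero the remaining budget so the block stops as soon as
 * possible. */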
2264 if (!op_flag_no_ds(block->opcode_list[offset].flags) &&
2265 (c.r.rd == 12 || c.r.rd == 13)) {
2266 state->cycles += lightrec_cycles_of_opcode(state->state, c);
2267 lightrec_emit_eob(state, block, offset + 1);
2271 static void rec_cp0_MFC0(struct lightrec_cstate *state,
2272 const struct block *block, u16 offset)
2274 _jit_name(block->_jit, __func__);
2275 rec_mfc0(state, block, offset);
2278 static void rec_cp0_CFC0(struct lightrec_cstate *state,
2279 const struct block *block, u16 offset)
2281 _jit_name(block->_jit, __func__);
2282 rec_mfc0(state, block, offset);
2285 static void rec_cp0_MTC0(struct lightrec_cstate *state,
2286 const struct block *block, u16 offset)
2288 _jit_name(block->_jit, __func__);
2289 rec_mtc0(state, block, offset);
2292 static void rec_cp0_CTC0(struct lightrec_cstate *state,
2293 const struct block *block, u16 offset)
2295 _jit_name(block->_jit, __func__);
2296 rec_mtc0(state, block, offset);
2299 static unsigned int cp2d_i_offset(u8 reg)
2301 return offsetof(struct lightrec_state, regs.cp2d[reg]);
2304 static unsigned int cp2d_s_offset(u8 reg)
2306 return cp2d_i_offset(reg) + is_big_endian() * 2;
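/* 16-bit accesses must target the low half of the 32-bit slot, which
 * sits at byte offset 2 on big-endian hosts and 0 on little-endian
 * ones; same trick for cp2c_s_offset() below. */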
2309 static unsigned int cp2c_i_offset(u8 reg)
2311 return offsetof(struct lightrec_state, regs.cp2c[reg]);
2314 static unsigned int cp2c_s_offset(u8 reg)
2316 return cp2c_i_offset(reg) + is_big_endian() * 2;
2319 static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
2320 const struct block *block, u16 offset,
2323 struct regcache *reg_cache = state->reg_cache;
2324 jit_state_t *_jit = block->_jit;
2325 const u32 zext_regs = 0x300f0080;
2326 u8 rt, tmp, tmp2, tmp3, out, flags;
2329 _jit_name(block->_jit, __func__);
2331 if (state->state->ops.cop2_notify) {
2332 /* We must call cop2_notify, handle that in C. */
2333 rec_mfc(state, block, offset);
2337 flags = (zext_regs & BIT(reg)) ? REG_ZEXT : REG_EXT;
2338 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
2351 jit_ldxi_s(rt, LIGHTREC_REG_STATE, cp2d_s_offset(reg));
2358 jit_ldxi_us(rt, LIGHTREC_REG_STATE, cp2d_s_offset(reg));
2362 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2363 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2364 tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
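/* Rebuild the packed colour value (presumably the IRGB/ORGB case;
 * the case labels above were lost) from IR1-IR3: each component is
 * divided by 0x80, clamped to [0, 0x1f], then packed into
 * consecutive 5-bit fields of the result. */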
2366 for (i = 0; i < 3; i++) {
2367 out = i == 0 ? rt : tmp;
2369 jit_ldxi_s(tmp, LIGHTREC_REG_STATE, cp2d_s_offset(9 + i));
2370 jit_movi(tmp2, 0x1f);
2371 jit_rshi(out, tmp, 7);
2373 jit_ltr(tmp3, tmp2, out);
2374 jit_movnr(out, tmp2, tmp3);
2376 jit_gei(tmp2, out, 0);
2377 jit_movzr(out, tmp2, tmp2);
2380 jit_lshi(tmp, tmp, 5 * i);
2381 jit_orr(rt, rt, tmp);
2386 lightrec_free_reg(reg_cache, tmp);
2387 lightrec_free_reg(reg_cache, tmp2);
2388 lightrec_free_reg(reg_cache, tmp3);
2391 jit_ldxi_i(rt, LIGHTREC_REG_STATE, cp2d_i_offset(reg));
2395 lightrec_free_reg(reg_cache, rt);
2398 static void rec_cp2_basic_MFC2(struct lightrec_cstate *state,
2399 const struct block *block, u16 offset)
2401 const union code c = block->opcode_list[offset].c;
2403 rec_cp2_do_mfc2(state, block, offset, c.r.rd, c.r.rt);
2406 static void rec_cp2_basic_CFC2(struct lightrec_cstate *state,
2407 const struct block *block, u16 offset)
2409 struct regcache *reg_cache = state->reg_cache;
2410 const union code c = block->opcode_list[offset].c;
2411 jit_state_t *_jit = block->_jit;
2414 _jit_name(block->_jit, __func__);
2416 if (state->state->ops.cop2_notify) {
2417 /* We must call cop2_notify, handle that in C. */
2418 rec_mfc(state, block, offset);
2430 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_EXT);
2431 jit_ldxi_s(rt, LIGHTREC_REG_STATE, cp2c_s_offset(c.r.rd));
2434 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_ZEXT);
2435 jit_ldxi_ui(rt, LIGHTREC_REG_STATE, cp2c_i_offset(c.r.rd));
2439 lightrec_free_reg(reg_cache, rt);
2442 static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
2443 const struct block *block, u16 offset,
2446 struct regcache *reg_cache = state->reg_cache;
2447 jit_state_t *_jit = block->_jit;
2448 u8 rt, tmp, tmp2, flags = 0;
2450 _jit_name(block->_jit, __func__);
2452 if (state->state->ops.cop2_notify) {
2453 /* We must call cop2_notify, handle that in C. */
2454 rec_mtc(state, block, offset);
2464 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, flags);
2468 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2469 jit_ldxi_i(tmp, LIGHTREC_REG_STATE, cp2d_i_offset(13));
2471 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2472 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE, cp2d_i_offset(14));
2474 jit_stxi_i(cp2d_i_offset(12), LIGHTREC_REG_STATE, tmp);
2475 jit_stxi_i(cp2d_i_offset(13), LIGHTREC_REG_STATE, tmp2);
2476 jit_stxi_i(cp2d_i_offset(14), LIGHTREC_REG_STATE, rt);
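/* This is the screen XY FIFO push performed when writing SXYP
 * (reg 15, assuming the usual GTE layout for the lost case label):
 * SXY0 <- SXY1, SXY1 <- SXY2, SXY2 <- new value. */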
2478 lightrec_free_reg(reg_cache, tmp);
2479 lightrec_free_reg(reg_cache, tmp2);
2482 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
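/* Writing IRGB expands the three 5-bit colour fields into IR1-IR3,
 * each scaled by 0x80: the masked shifts below compute
 * (v & 0x1f) << 7, ((v >> 5) & 0x1f) << 7 and
 * ((v >> 10) & 0x1f) << 7 respectively. */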
2484 jit_lshi(tmp, rt, 7);
2485 jit_andi(tmp, tmp, 0xf80);
2486 jit_stxi_s(cp2d_s_offset(9), LIGHTREC_REG_STATE, tmp);
2488 jit_lshi(tmp, rt, 2);
2489 jit_andi(tmp, tmp, 0xf80);
2490 jit_stxi_s(cp2d_s_offset(10), LIGHTREC_REG_STATE, tmp);
2492 jit_rshi(tmp, rt, 3);
2493 jit_andi(tmp, tmp, 0xf80);
2494 jit_stxi_s(cp2d_s_offset(11), LIGHTREC_REG_STATE, tmp);
2496 lightrec_free_reg(reg_cache, tmp);
2499 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2501 /* if (rt < 0) rt = ~rt; */
2502 jit_rshi(tmp, rt, 31);
2503 jit_xorr(tmp, rt, tmp);
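/* Branchless version of the comment above: an arithmetic shift by 31
 * yields 0 or ~0, so the XOR gives rt for positive values and ~rt
 * for negative ones, and a single leading-zero count then works for
 * both signs of LZCS. */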
2505 /* Count leading zeros */
2506 jit_clzr(tmp, tmp);
2507 if (__WORDSIZE != 32)
2508 jit_subi(tmp, tmp, __WORDSIZE - 32);
2510 jit_stxi_i(cp2d_i_offset(31), LIGHTREC_REG_STATE, tmp);
2512 lightrec_free_reg(reg_cache, tmp);
2515 jit_stxi_i(cp2d_i_offset(reg), LIGHTREC_REG_STATE, rt);
2519 lightrec_free_reg(reg_cache, rt);
2522 static void rec_cp2_basic_MTC2(struct lightrec_cstate *state,
2523 const struct block *block, u16 offset)
2525 const union code c = block->opcode_list[offset].c;
2527 rec_cp2_do_mtc2(state, block, offset, c.r.rd, c.r.rt);
2530 static void rec_cp2_basic_CTC2(struct lightrec_cstate *state,
2531 const struct block *block, u16 offset)
2533 struct regcache *reg_cache = state->reg_cache;
2534 const union code c = block->opcode_list[offset].c;
2535 jit_state_t *_jit = block->_jit;
2538 _jit_name(block->_jit, __func__);
2540 if (state->state->ops.cop2_notify) {
2541 /* We must call cop2_notify, handle that in C. */
2542 rec_mtc(state, block, offset);
2546 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
2556 jit_stxi_s(cp2c_s_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
2559 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2560 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2562 jit_andi(tmp, rt, 0x7f87e000);
2563 jit_nei(tmp, tmp, 0);
2564 jit_lshi(tmp, tmp, 31);
2566 jit_andi(tmp2, rt, 0x7ffff000);
2567 jit_orr(tmp, tmp2, tmp);
2569 jit_stxi_i(cp2c_i_offset(31), LIGHTREC_REG_STATE, tmp);
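/* Control reg 31 is the FLAG register: bits 12-30 are taken from the
 * written value, and bit 31 (the error summary bit) is recomputed as
 * "any bit of 0x7f87e000 set", which is what the nei + shift above
 * produce. */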
2571 lightrec_free_reg(reg_cache, tmp);
2572 lightrec_free_reg(reg_cache, tmp2);
2576 jit_stxi_i(cp2c_i_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
2579 lightrec_free_reg(reg_cache, rt);
2582 static void rec_cp0_RFE(struct lightrec_cstate *state,
2583 const struct block *block, u16 offset)
2585 struct regcache *reg_cache = state->reg_cache;
2586 jit_state_t *_jit = block->_jit;
2590 jit_note(__FILE__, __LINE__);
2592 status = lightrec_alloc_reg_temp(reg_cache, _jit);
2593 jit_ldxi_i(status, LIGHTREC_REG_STATE,
2594 offsetof(struct lightrec_state, regs.cp0[12]));
2596 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2598 /* status = ((status >> 2) & 0xf) | status & ~0xf; */
2599 jit_rshi(tmp, status, 2);
2600 jit_andi(tmp, tmp, 0xf);
2601 jit_andi(status, status, ~0xful);
2602 jit_orr(status, status, tmp);
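/* RFE pops the KUc/IEc stack: the "previous" pair (bits 2-3) becomes
 * "current" (bits 0-1) and the "old" pair (bits 4-5) becomes
 * "previous", while the rest of Status is left untouched. */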
2604 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2605 offsetof(struct lightrec_state, regs.cp0[13]));
2606 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[12]),
2607 LIGHTREC_REG_STATE, status);
2609 /* Exit dynarec in case there's a software interrupt.
2610 * exit_flags = !!(status & cause & 0x0300) & status; */
2611 jit_andr(tmp, tmp, status);
2612 jit_andi(tmp, tmp, 0x0300);
2613 jit_nei(tmp, tmp, 0);
2614 jit_andr(tmp, tmp, status);
2615 jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
2616 LIGHTREC_REG_STATE, tmp);
2618 lightrec_free_reg(reg_cache, status);
2619 lightrec_free_reg(reg_cache, tmp);
2622 static void rec_CP(struct lightrec_cstate *state,
2623 const struct block *block, u16 offset)
2625 union code c = block->opcode_list[offset].c;
2626 jit_state_t *_jit = block->_jit;
2629 jit_note(__FILE__, __LINE__);
2631 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_CP);
2634 static void rec_meta_MOV(struct lightrec_cstate *state,
2635 const struct block *block, u16 offset)
2637 struct regcache *reg_cache = state->reg_cache;
2638 const struct opcode *op = &block->opcode_list[offset];
2639 union code c = op->c;
2640 jit_state_t *_jit = block->_jit;
2642 bool unload_rs, discard_rs;
2645 _jit_name(block->_jit, __func__);
2646 jit_note(__FILE__, __LINE__);
2648 unload_rs = OPT_EARLY_UNLOAD
2649 && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_UNLOAD;
2650 discard_rs = OPT_EARLY_UNLOAD
2651 && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_DISCARD;
2653 if ((unload_rs || discard_rs) && c.m.rs) {
2654 /* If the source register is going to be unloaded or discarded,
2655 * then we can simply mark its host register as now pointing to
2656 * the destination register. */
2657 pr_debug("Remap %s to %s at offset 0x%x\n",
2658 lightrec_reg_name(c.m.rs), lightrec_reg_name(c.m.rd),
2660 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2661 lightrec_remap_reg(reg_cache, _jit, rs, c.m.rd, discard_rs);
2662 lightrec_free_reg(reg_cache, rs);
2666 unload_rd = OPT_EARLY_UNLOAD
2667 && LIGHTREC_FLAGS_GET_RD(op->flags) == LIGHTREC_REG_UNLOAD;
2669 if (c.m.rs && !lightrec_reg_is_loaded(reg_cache, c.m.rs)) {
2670 /* The source register is not yet loaded - we can load its value
2671 * from the register cache directly into the target register. */
2672 rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT);
2674 jit_ldxi_i(rd, LIGHTREC_REG_STATE,
2675 offsetof(struct lightrec_state, regs.gpr) + (c.m.rs << 2));
2677 lightrec_free_reg(reg_cache, rd);
2678 } else if (unload_rd) {
2679 /* If the destination register will be unloaded right after the
2680 * MOV meta-opcode, we don't actually need to write any host
2681 * register - we can just store the source register directly to
2682 * the register cache, at the offset corresponding to the
2683 * destination register. */
2684 lightrec_discard_reg_if_loaded(reg_cache, c.m.rd);
2686 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2688 jit_stxi_i(offsetof(struct lightrec_state, regs.gpr)
2689 + (c.m.rd << 2), LIGHTREC_REG_STATE, rs);
2691 lightrec_free_reg(reg_cache, rs);
2694 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2696 rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT);
2702 lightrec_free_reg(reg_cache, rs);
2705 lightrec_free_reg(reg_cache, rd);
2709 static void rec_meta_EXTC_EXTS(struct lightrec_cstate *state,
2710 const struct block *block,
2713 struct regcache *reg_cache = state->reg_cache;
2714 union code c = block->opcode_list[offset].c;
2715 jit_state_t *_jit = block->_jit;
2718 _jit_name(block->_jit, __func__);
2719 jit_note(__FILE__, __LINE__);
2721 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
2722 c.m.rs, c.m.rd, 0, REG_EXT, &rs, &rd);
2724 if (c.m.op == OP_META_EXTC)
2729 lightrec_free_reg(reg_cache, rs);
2730 lightrec_free_reg(reg_cache, rd);
2733 static void rec_meta_MULT2(struct lightrec_cstate *state,
2734 const struct block *block,
2737 struct regcache *reg_cache = state->reg_cache;
2738 union code c = block->opcode_list[offset].c;
2739 jit_state_t *_jit = block->_jit;
2740 u8 reg_lo = get_mult_div_lo(c);
2741 u8 reg_hi = get_mult_div_hi(c);
2742 u32 flags = block->opcode_list[offset].flags;
2743 bool is_signed = c.i.op == OP_META_MULT2;
2744 u8 rs, lo, hi, rflags = 0, hiflags = 0;
2747 if (!op_flag_no_hi(flags) && c.r.op < 32) {
2748 rflags = is_signed ? REG_EXT : REG_ZEXT;
2749 hiflags = is_signed ? REG_EXT : (REG_EXT | REG_ZEXT);
2752 _jit_name(block->_jit, __func__);
2753 jit_note(__FILE__, __LINE__);
2755 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, rflags);
2758 * We must handle the case where one of the output registers is our rs
2759 * input register. Thankfully, computing LO/HI can be done in any
2760 * order. Here, we make sure that the computation that overwrites the
2761 * input register is always performed last.
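* (OP_META_MULT2 appears to stand for a multiplication by 1 << op:
* LO receives rs << op and HI the upper 32 bits of the 64-bit
* product, hence the complementary shifts below.)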
2763 for (i = 0; i < 2; i++) {
2764 if ((!i ^ (reg_lo == c.i.rs)) && !op_flag_no_lo(flags)) {
2765 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
2768 jit_lshi(lo, rs, c.r.op);
2772 lightrec_free_reg(reg_cache, lo);
2776 if ((!!i ^ (reg_lo == c.i.rs)) && !op_flag_no_hi(flags)) {
2777 hi = lightrec_alloc_reg_out(reg_cache, _jit,
2781 jit_lshi(hi, rs, c.r.op - 32);
2782 } else if (is_signed) {
2784 jit_rshi(hi, rs, 32 - c.r.op);
2786 jit_rshi(hi, rs, 31);
2789 jit_rshi_u(hi, rs, 32 - c.r.op);
2794 lightrec_free_reg(reg_cache, hi);
2798 lightrec_free_reg(reg_cache, rs);
2800 _jit_name(block->_jit, __func__);
2801 jit_note(__FILE__, __LINE__);
2804 static void rec_meta_COM(struct lightrec_cstate *state,
2805 const struct block *block, u16 offset)
2807 struct regcache *reg_cache = state->reg_cache;
2808 union code c = block->opcode_list[offset].c;
2809 jit_state_t *_jit = block->_jit;
2812 jit_note(__FILE__, __LINE__);
2814 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
2815 c.m.rs, c.m.rd, 0, 0, &rs, &rd);
2817 flags = lightrec_get_reg_in_flags(reg_cache, rs);
2819 lightrec_set_reg_out_flags(reg_cache, rd,
2824 lightrec_free_reg(reg_cache, rs);
2825 lightrec_free_reg(reg_cache, rd);
2828 static void rec_meta_LWU(struct lightrec_cstate *state,
2829 const struct block *block,
2834 if (is_big_endian() && __WORDSIZE == 64)
2835 code = jit_code_unldr_u;
2836 else
2837 code = jit_code_unldr;
2839 _jit_name(block->_jit, __func__);
2840 rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
2843 static void rec_meta_SWU(struct lightrec_cstate *state,
2844 const struct block *block,
2847 _jit_name(block->_jit, __func__);
2848 rec_store(state, block, offset, jit_code_unstr, jit_code_bswapr_ui);
2851 static void unknown_opcode(struct lightrec_cstate *state,
2852 const struct block *block, u16 offset)
2854 rec_exit_early(state, block, offset, LIGHTREC_EXIT_UNKNOWN_OP,
2855 block->pc + (offset << 2));
2858 static const lightrec_rec_func_t rec_standard[64] = {
2859 SET_DEFAULT_ELM(rec_standard, unknown_opcode),
2860 [OP_SPECIAL] = rec_SPECIAL,
2861 [OP_REGIMM] = rec_REGIMM,
2866 [OP_BLEZ] = rec_BLEZ,
2867 [OP_BGTZ] = rec_BGTZ,
2868 [OP_ADDI] = rec_ADDI,
2869 [OP_ADDIU] = rec_ADDIU,
2870 [OP_SLTI] = rec_SLTI,
2871 [OP_SLTIU] = rec_SLTIU,
2872 [OP_ANDI] = rec_ANDI,
2874 [OP_XORI] = rec_XORI,
2893 [OP_META] = rec_META,
2894 [OP_META_MULT2] = rec_meta_MULT2,
2895 [OP_META_MULTU2] = rec_meta_MULT2,
2896 [OP_META_LWU] = rec_meta_LWU,
2897 [OP_META_SWU] = rec_meta_SWU,
2900 static const lightrec_rec_func_t rec_special[64] = {
2901 SET_DEFAULT_ELM(rec_special, unknown_opcode),
2902 [OP_SPECIAL_SLL] = rec_special_SLL,
2903 [OP_SPECIAL_SRL] = rec_special_SRL,
2904 [OP_SPECIAL_SRA] = rec_special_SRA,
2905 [OP_SPECIAL_SLLV] = rec_special_SLLV,
2906 [OP_SPECIAL_SRLV] = rec_special_SRLV,
2907 [OP_SPECIAL_SRAV] = rec_special_SRAV,
2908 [OP_SPECIAL_JR] = rec_special_JR,
2909 [OP_SPECIAL_JALR] = rec_special_JALR,
2910 [OP_SPECIAL_SYSCALL] = rec_special_SYSCALL,
2911 [OP_SPECIAL_BREAK] = rec_special_BREAK,
2912 [OP_SPECIAL_MFHI] = rec_special_MFHI,
2913 [OP_SPECIAL_MTHI] = rec_special_MTHI,
2914 [OP_SPECIAL_MFLO] = rec_special_MFLO,
2915 [OP_SPECIAL_MTLO] = rec_special_MTLO,
2916 [OP_SPECIAL_MULT] = rec_special_MULT,
2917 [OP_SPECIAL_MULTU] = rec_special_MULTU,
2918 [OP_SPECIAL_DIV] = rec_special_DIV,
2919 [OP_SPECIAL_DIVU] = rec_special_DIVU,
2920 [OP_SPECIAL_ADD] = rec_special_ADD,
2921 [OP_SPECIAL_ADDU] = rec_special_ADDU,
2922 [OP_SPECIAL_SUB] = rec_special_SUB,
2923 [OP_SPECIAL_SUBU] = rec_special_SUBU,
2924 [OP_SPECIAL_AND] = rec_special_AND,
2925 [OP_SPECIAL_OR] = rec_special_OR,
2926 [OP_SPECIAL_XOR] = rec_special_XOR,
2927 [OP_SPECIAL_NOR] = rec_special_NOR,
2928 [OP_SPECIAL_SLT] = rec_special_SLT,
2929 [OP_SPECIAL_SLTU] = rec_special_SLTU,
2932 static const lightrec_rec_func_t rec_regimm[64] = {
2933 SET_DEFAULT_ELM(rec_regimm, unknown_opcode),
2934 [OP_REGIMM_BLTZ] = rec_regimm_BLTZ,
2935 [OP_REGIMM_BGEZ] = rec_regimm_BGEZ,
2936 [OP_REGIMM_BLTZAL] = rec_regimm_BLTZAL,
2937 [OP_REGIMM_BGEZAL] = rec_regimm_BGEZAL,
2940 static const lightrec_rec_func_t rec_cp0[64] = {
2941 SET_DEFAULT_ELM(rec_cp0, rec_CP),
2942 [OP_CP0_MFC0] = rec_cp0_MFC0,
2943 [OP_CP0_CFC0] = rec_cp0_CFC0,
2944 [OP_CP0_MTC0] = rec_cp0_MTC0,
2945 [OP_CP0_CTC0] = rec_cp0_CTC0,
2946 [OP_CP0_RFE] = rec_cp0_RFE,
2949 static const lightrec_rec_func_t rec_cp2_basic[64] = {
2950 SET_DEFAULT_ELM(rec_cp2_basic, rec_CP),
2951 [OP_CP2_BASIC_MFC2] = rec_cp2_basic_MFC2,
2952 [OP_CP2_BASIC_CFC2] = rec_cp2_basic_CFC2,
2953 [OP_CP2_BASIC_MTC2] = rec_cp2_basic_MTC2,
2954 [OP_CP2_BASIC_CTC2] = rec_cp2_basic_CTC2,
2957 static const lightrec_rec_func_t rec_meta[64] = {
2958 SET_DEFAULT_ELM(rec_meta, unknown_opcode),
2959 [OP_META_MOV] = rec_meta_MOV,
2960 [OP_META_EXTC] = rec_meta_EXTC_EXTS,
2961 [OP_META_EXTS] = rec_meta_EXTC_EXTS,
2962 [OP_META_COM] = rec_meta_COM,
2965 static void rec_SPECIAL(struct lightrec_cstate *state,
2966 const struct block *block, u16 offset)
2968 union code c = block->opcode_list[offset].c;
2969 lightrec_rec_func_t f = rec_special[c.r.op];
2971 if (!HAS_DEFAULT_ELM && unlikely(!f))
2972 unknown_opcode(state, block, offset);
2974 (*f)(state, block, offset);
2977 static void rec_REGIMM(struct lightrec_cstate *state,
2978 const struct block *block, u16 offset)
2980 union code c = block->opcode_list[offset].c;
2981 lightrec_rec_func_t f = rec_regimm[c.r.rt];
2983 if (!HAS_DEFAULT_ELM && unlikely(!f))
2984 unknown_opcode(state, block, offset);
2986 (*f)(state, block, offset);
2989 static void rec_CP0(struct lightrec_cstate *state,
2990 const struct block *block, u16 offset)
2992 union code c = block->opcode_list[offset].c;
2993 lightrec_rec_func_t f = rec_cp0[c.r.rs];
2995 if (!HAS_DEFAULT_ELM && unlikely(!f))
2996 rec_CP(state, block, offset);
2998 (*f)(state, block, offset);
3001 static void rec_CP2(struct lightrec_cstate *state,
3002 const struct block *block, u16 offset)
3004 union code c = block->opcode_list[offset].c;
3006 if (c.r.op == OP_CP2_BASIC) {
3007 lightrec_rec_func_t f = rec_cp2_basic[c.r.rs];
3009 if (HAS_DEFAULT_ELM || likely(f)) {
3010 (*f)(state, block, offset);
3015 rec_CP(state, block, offset);
3018 static void rec_META(struct lightrec_cstate *state,
3019 const struct block *block, u16 offset)
3021 union code c = block->opcode_list[offset].c;
3022 lightrec_rec_func_t f = rec_meta[c.m.op];
3024 if (!HAS_DEFAULT_ELM && unlikely(!f))
3025 unknown_opcode(state, block, offset);
3027 (*f)(state, block, offset);
3030 void lightrec_rec_opcode(struct lightrec_cstate *state,
3031 const struct block *block, u16 offset)
3033 struct regcache *reg_cache = state->reg_cache;
3034 struct lightrec_branch_target *target;
3035 const struct opcode *op = &block->opcode_list[offset];
3036 jit_state_t *_jit = block->_jit;
3037 lightrec_rec_func_t f;
3040 if (op_flag_sync(op->flags)) {
3042 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
3045 lightrec_storeback_regs(reg_cache, _jit);
3046 lightrec_regcache_reset(reg_cache);
3048 pr_debug("Adding branch target at offset 0x%x\n", offset << 2);
3049 target = &state->targets[state->nb_targets++];
3050 target->offset = offset;
3051 target->label = jit_indirect();
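/* jit_indirect() records a label that local branches emitted
 * elsewhere in the block can jump to, which is presumably only valid
 * because the register cache was stored back and reset just above,
 * so the target assumes no live register mapping. */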
3054 if (likely(op->opcode)) {
3055 f = rec_standard[op->i.op];
3057 if (!HAS_DEFAULT_ELM && unlikely(!f))
3058 unknown_opcode(state, block, offset);
3060 (*f)(state, block, offset);
3063 if (OPT_EARLY_UNLOAD) {
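/* If the opcode has a delay slot that was recompiled along with it,
 * run the early unload pass at the delay slot's offset instead, so
 * that registers the delay slot still reads are not dropped too
 * early. */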
3064 unload_offset = offset +
3065 (has_delay_slot(op->c) && !op_flag_no_ds(op->flags));
3067 lightrec_do_early_unload(state, block, unload_offset);
3070 state->no_load_delay = false;