1// SPDX-License-Identifier: LGPL-2.1-or-later
2/*
3 * Copyright (C) 2014-2021 Paul Cercueil <paul@crapouillou.net>
4 */
5
6#include "blockcache.h"
7#include "debug.h"
8#include "disassembler.h"
9#include "emitter.h"
10#include "lightning-wrapper.h"
11#include "optimizer.h"
12#include "regcache.h"
13
14#include <stdbool.h>
15#include <stddef.h>
16
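/* Each rec_* handler below emits native code, through the GNU Lightning
 * wrapper, for one MIPS opcode located at the given offset (in words)
 * within the block being compiled. */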
17typedef void (*lightrec_rec_func_t)(struct lightrec_cstate *, const struct block *, u16);
18
19/* Forward declarations */
20static void rec_SPECIAL(struct lightrec_cstate *state, const struct block *block, u16 offset);
21static void rec_REGIMM(struct lightrec_cstate *state, const struct block *block, u16 offset);
22static void rec_CP0(struct lightrec_cstate *state, const struct block *block, u16 offset);
23static void rec_CP2(struct lightrec_cstate *state, const struct block *block, u16 offset);
24static void rec_META(struct lightrec_cstate *state, const struct block *block, u16 offset);
25static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
26 const struct block *block, u16 offset, u8 reg, u8 in_reg);
27static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
28 const struct block *block, u16 offset,
29 u8 reg, u8 out_reg);
30
31static void unknown_opcode(struct lightrec_cstate *state, const struct block *block, u16 offset)
32{
33 pr_warn("Unknown opcode: 0x%08x at PC 0x%08x\n",
34 block->opcode_list[offset].c.opcode,
35 block->pc + (offset << 2));
36}
37
38static void
39lightrec_jump_to_fn(jit_state_t *_jit, void (*fn)(void))
40{
41 /* Prevent jit_jmpi() from using our cycles register as a temporary */
42 jit_live(LIGHTREC_REG_CYCLE);
43
44 jit_patch_abs(jit_jmpi(), fn);
45}
46
47static void
48lightrec_jump_to_eob(struct lightrec_cstate *state, jit_state_t *_jit)
49{
50 lightrec_jump_to_fn(_jit, state->state->eob_wrapper_func);
51}
52
53static void
54lightrec_jump_to_ds_check(struct lightrec_cstate *state, jit_state_t *_jit)
55{
56 lightrec_jump_to_fn(_jit, state->state->ds_check_func);
57}
58
59static void update_ra_register(struct regcache *reg_cache, jit_state_t *_jit,
60 u8 ra_reg, u32 pc, u32 link)
61{
62 u8 link_reg;
63
64 link_reg = lightrec_alloc_reg_out(reg_cache, _jit, ra_reg, 0);
65 lightrec_load_imm(reg_cache, _jit, link_reg, pc, link);
66 lightrec_free_reg(reg_cache, link_reg);
67}
68
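/* Emit the end-of-block sequence: write the return address if the opcode
 * links, load the next PC (from a register or an immediate), optionally
 * recompile the delay slot, flush dirty guest registers, subtract the
 * consumed cycles, then jump either to the load-delay dispatcher or to
 * the standard end-of-block wrapper. */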
69static void lightrec_emit_end_of_block(struct lightrec_cstate *state,
70 const struct block *block, u16 offset,
71 s8 reg_new_pc, u32 imm, u8 ra_reg,
72 u32 link, bool update_cycles)
73{
74 struct regcache *reg_cache = state->reg_cache;
75 jit_state_t *_jit = block->_jit;
76 const struct opcode *op = &block->opcode_list[offset],
77 *ds = get_delay_slot(block->opcode_list, offset);
78 u32 cycles = state->cycles + lightrec_cycles_of_opcode(state->state, op->c);
79
80 jit_note(__FILE__, __LINE__);
81
82 if (link && ra_reg != reg_new_pc)
83 update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
84
85 if (reg_new_pc < 0)
86 lightrec_load_next_pc_imm(reg_cache, _jit, block->pc, imm);
87 else
88 lightrec_load_next_pc(reg_cache, _jit, reg_new_pc);
89
90 if (link && ra_reg == reg_new_pc) {
91 /* Handle the special case: JALR $r0, $r0
92 * In that case the target PC should be the old value of the
93 * register. */
94 update_ra_register(reg_cache, _jit, ra_reg, block->pc, link);
95 }
96
97 if (has_delay_slot(op->c) &&
98 !op_flag_no_ds(op->flags) && !op_flag_local_branch(op->flags)) {
99 cycles += lightrec_cycles_of_opcode(state->state, ds->c);
100
101 /* Recompile the delay slot */
102 if (ds->c.opcode)
103 lightrec_rec_opcode(state, block, offset + 1);
104 }
105
106 /* Clean the remaining registers */
107 lightrec_clean_regs(reg_cache, _jit);
108
109 if (cycles && update_cycles) {
110 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
111 pr_debug("EOB: %u cycles\n", cycles);
112 }
113
114 if (op_flag_load_delay(ds->flags)
115 && opcode_is_load(ds->c) && !state->no_load_delay) {
116 /* If the delay slot is a load opcode, its target register
117 * will be written after the first opcode of the target is
118 * executed. Handle this by jumping to a special section of
119 * the dispatcher. It expects the loaded value to be in
120 * REG_TEMP, and the target register number to be in JIT_V1. */
121 jit_movi(JIT_V1, ds->c.i.rt);
122
123 lightrec_jump_to_ds_check(state, _jit);
124 } else {
125 lightrec_jump_to_eob(state, _jit);
126 }
127
128 lightrec_regcache_reset(reg_cache);
129}
130
131void lightrec_emit_jump_to_interpreter(struct lightrec_cstate *state,
132 const struct block *block, u16 offset)
133{
134 struct regcache *reg_cache = state->reg_cache;
135 jit_state_t *_jit = block->_jit;
136
137 lightrec_clean_regs(reg_cache, _jit);
138
139 /* Call the interpreter with the block's address in JIT_V1 and the
140 * PC (which might have an offset) in JIT_V0. */
141 lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
142 block->pc + (offset << 2));
143 if (lightrec_store_next_pc()) {
144 jit_stxi_i(offsetof(struct lightrec_state, next_pc),
145 LIGHTREC_REG_STATE, JIT_V0);
146 }
147
148 jit_movi(JIT_V1, (uintptr_t)block);
149
150 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
151 lightrec_jump_to_fn(_jit, state->state->interpreter_func);
152}
153
154static void lightrec_emit_eob(struct lightrec_cstate *state,
155 const struct block *block, u16 offset)
156{
157 struct regcache *reg_cache = state->reg_cache;
158 jit_state_t *_jit = block->_jit;
159
160 lightrec_clean_regs(reg_cache, _jit);
161
162 lightrec_load_imm(reg_cache, _jit, JIT_V0, block->pc,
163 block->pc + (offset << 2));
164 if (lightrec_store_next_pc()) {
165 jit_stxi_i(offsetof(struct lightrec_state, next_pc),
166 LIGHTREC_REG_STATE, JIT_V0);
167 }
168
169 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
170
171 lightrec_jump_to_eob(state, _jit);
172}
173
174static void rec_special_JR(struct lightrec_cstate *state, const struct block *block, u16 offset)
175{
176 union code c = block->opcode_list[offset].c;
177
178 _jit_name(block->_jit, __func__);
179 lightrec_emit_end_of_block(state, block, offset, c.r.rs, 0, 31, 0, true);
180}
181
182static void rec_special_JALR(struct lightrec_cstate *state, const struct block *block, u16 offset)
183{
184 union code c = block->opcode_list[offset].c;
185
186 _jit_name(block->_jit, __func__);
187 lightrec_emit_end_of_block(state, block, offset, c.r.rs, 0, c.r.rd,
188 get_branch_pc(block, offset, 2), true);
189}
190
191static void rec_J(struct lightrec_cstate *state, const struct block *block, u16 offset)
192{
193 union code c = block->opcode_list[offset].c;
194
195 _jit_name(block->_jit, __func__);
196 lightrec_emit_end_of_block(state, block, offset, -1,
197 (block->pc & 0xf0000000) | (c.j.imm << 2),
198 31, 0, true);
199}
200
201static void rec_JAL(struct lightrec_cstate *state, const struct block *block, u16 offset)
202{
203 union code c = block->opcode_list[offset].c;
204
205 _jit_name(block->_jit, __func__);
206 lightrec_emit_end_of_block(state, block, offset, -1,
207 (block->pc & 0xf0000000) | (c.j.imm << 2),
208 31, get_branch_pc(block, offset, 2), true);
209}
210
211static void lightrec_do_early_unload(struct lightrec_cstate *state,
212 const struct block *block, u16 offset)
213{
214 struct regcache *reg_cache = state->reg_cache;
215 const struct opcode *op = &block->opcode_list[offset];
216 jit_state_t *_jit = block->_jit;
217 unsigned int i;
218 u8 reg;
219 struct {
220 u8 reg, op;
221 } reg_ops[3] = {
222 { op->r.rd, LIGHTREC_FLAGS_GET_RD(op->flags), },
223 { op->i.rt, LIGHTREC_FLAGS_GET_RT(op->flags), },
224 { op->i.rs, LIGHTREC_FLAGS_GET_RS(op->flags), },
225 };
226
227 for (i = 0; i < ARRAY_SIZE(reg_ops); i++) {
228 reg = reg_ops[i].reg;
229
230 switch (reg_ops[i].op) {
231 case LIGHTREC_REG_UNLOAD:
232 lightrec_clean_reg_if_loaded(reg_cache, _jit, reg, true);
233 break;
234
235 case LIGHTREC_REG_DISCARD:
236 lightrec_discard_reg_if_loaded(reg_cache, reg);
237 break;
238
239 case LIGHTREC_REG_CLEAN:
240 lightrec_clean_reg_if_loaded(reg_cache, _jit, reg, false);
241 break;
242 default:
243 break;
244 }
245 }
246}
247
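/* Generic conditional-branch emitter. 'code' is the inverted test used to
 * skip over the branch-taken path (e.g. BNE passes beqr/beqi here), while
 * 'code2' is the original condition, used only when the branch can jump
 * directly to a local target without any indirection. */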
248static void rec_b(struct lightrec_cstate *state, const struct block *block, u16 offset,
249 jit_code_t code, jit_code_t code2, u32 link, bool unconditional, bool bz)
250{
251 struct regcache *reg_cache = state->reg_cache;
252 struct native_register *regs_backup;
253 jit_state_t *_jit = block->_jit;
254 struct lightrec_branch *branch;
255 const struct opcode *op = &block->opcode_list[offset],
256 *ds = get_delay_slot(block->opcode_list, offset);
257 jit_node_t *addr;
258 bool is_forward = (s16)op->i.imm >= 0;
259 int op_cycles = lightrec_cycles_of_opcode(state->state, op->c);
260 u32 target_offset, cycles = state->cycles + op_cycles;
261 bool no_indirection = false;
262 u32 next_pc;
263 u8 rs, rt;
264
265 jit_note(__FILE__, __LINE__);
266
267 if (!op_flag_no_ds(op->flags))
268 cycles += lightrec_cycles_of_opcode(state->state, ds->c);
269
270 state->cycles = -op_cycles;
271
272 if (!unconditional) {
273 rs = lightrec_alloc_reg_in(reg_cache, _jit, op->i.rs, REG_EXT);
274 rt = bz ? 0 : lightrec_alloc_reg_in(reg_cache,
275 _jit, op->i.rt, REG_EXT);
276
277 /* Unload dead registers before evaluating the branch */
278 if (OPT_EARLY_UNLOAD)
279 lightrec_do_early_unload(state, block, offset);
280
281 if (op_flag_local_branch(op->flags) &&
282 (op_flag_no_ds(op->flags) || !ds->opcode) &&
283 is_forward && !lightrec_has_dirty_regs(reg_cache))
284 no_indirection = true;
285
286 if (no_indirection)
287 pr_debug("Using no indirection for branch at offset 0x%hx\n", offset << 2);
288 }
289
290 if (cycles)
291 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, cycles);
292
293 if (!unconditional) {
294 /* Generate the branch opcode */
295 if (!no_indirection)
296 addr = jit_new_node_pww(code, NULL, rs, rt);
297
298 lightrec_free_regs(reg_cache);
299 regs_backup = lightrec_regcache_enter_branch(reg_cache);
300 }
301
302 if (op_flag_local_branch(op->flags)) {
303 /* Recompile the delay slot */
304 if (!op_flag_no_ds(op->flags) && ds->opcode) {
305 /* Never handle load delays with local branches. */
306 state->no_load_delay = true;
307 lightrec_rec_opcode(state, block, offset + 1);
308 }
309
310 if (link)
311 update_ra_register(reg_cache, _jit, 31, block->pc, link);
312
313 /* Clean remaining registers */
314 lightrec_clean_regs(reg_cache, _jit);
315
316 target_offset = offset + 1 + (s16)op->i.imm
317 - !!op_flag_no_ds(op->flags);
318 pr_debug("Adding local branch to offset 0x%x\n",
319 target_offset << 2);
320 branch = &state->local_branches[
321 state->nb_local_branches++];
322
323 branch->target = target_offset;
324
325 if (no_indirection)
326 branch->branch = jit_new_node_pww(code2, NULL, rs, rt);
327 else if (is_forward)
328 branch->branch = jit_b();
329 else
330 branch->branch = jit_bgti(LIGHTREC_REG_CYCLE, 0);
331 }
332
333 if (!op_flag_local_branch(op->flags) || !is_forward) {
334 next_pc = get_branch_pc(block, offset, 1 + (s16)op->i.imm);
335 state->no_load_delay = op_flag_local_branch(op->flags);
336 lightrec_emit_end_of_block(state, block, offset, -1, next_pc,
337 31, link, false);
338 }
339
340 if (!unconditional) {
341 if (!no_indirection)
342 jit_patch(addr);
343
344 lightrec_regcache_leave_branch(reg_cache, regs_backup);
345
346 if (bz && link)
347 update_ra_register(reg_cache, _jit, 31, block->pc, link);
348
349 if (!op_flag_no_ds(op->flags) && ds->opcode) {
350 state->no_load_delay = true;
351 lightrec_rec_opcode(state, block, offset + 1);
352 }
353 }
354}
355
356static void rec_BNE(struct lightrec_cstate *state,
357 const struct block *block, u16 offset)
358{
359 union code c = block->opcode_list[offset].c;
360
361 _jit_name(block->_jit, __func__);
362
363 if (c.i.rt == 0)
364 rec_b(state, block, offset, jit_code_beqi, jit_code_bnei, 0, false, true);
365 else
366 rec_b(state, block, offset, jit_code_beqr, jit_code_bner, 0, false, false);
367}
368
369static void rec_BEQ(struct lightrec_cstate *state,
370 const struct block *block, u16 offset)
371{
372 union code c = block->opcode_list[offset].c;
373
374 _jit_name(block->_jit, __func__);
375
376 if (c.i.rt == 0)
377 rec_b(state, block, offset, jit_code_bnei, jit_code_beqi, 0, c.i.rs == 0, true);
378 else
379 rec_b(state, block, offset, jit_code_bner, jit_code_beqr, 0, c.i.rs == c.i.rt, false);
380}
381
382static void rec_BLEZ(struct lightrec_cstate *state,
383 const struct block *block, u16 offset)
384{
385 union code c = block->opcode_list[offset].c;
386
387 _jit_name(block->_jit, __func__);
388 rec_b(state, block, offset, jit_code_bgti, jit_code_blei, 0, c.i.rs == 0, true);
389}
390
391static void rec_BGTZ(struct lightrec_cstate *state,
392 const struct block *block, u16 offset)
393{
394 _jit_name(block->_jit, __func__);
395 rec_b(state, block, offset, jit_code_blei, jit_code_bgti, 0, false, true);
396}
397
398static void rec_regimm_BLTZ(struct lightrec_cstate *state,
399 const struct block *block, u16 offset)
400{
401 _jit_name(block->_jit, __func__);
402 rec_b(state, block, offset, jit_code_bgei, jit_code_blti, 0, false, true);
403}
404
405static void rec_regimm_BLTZAL(struct lightrec_cstate *state,
406 const struct block *block, u16 offset)
407{
408 _jit_name(block->_jit, __func__);
409 rec_b(state, block, offset, jit_code_bgei, jit_code_blti,
410 get_branch_pc(block, offset, 2), false, true);
411}
412
413static void rec_regimm_BGEZ(struct lightrec_cstate *state,
414 const struct block *block, u16 offset)
415{
416 union code c = block->opcode_list[offset].c;
417
418 _jit_name(block->_jit, __func__);
419 rec_b(state, block, offset, jit_code_blti, jit_code_bgei, 0, !c.i.rs, true);
420}
421
422static void rec_regimm_BGEZAL(struct lightrec_cstate *state,
423 const struct block *block, u16 offset)
424{
425 const struct opcode *op = &block->opcode_list[offset];
426 _jit_name(block->_jit, __func__);
427 rec_b(state, block, offset, jit_code_blti, jit_code_bgei,
428 get_branch_pc(block, offset, 2),
429 !op->i.rs, true);
430}
431
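/* Allocate the input (rs) and output (rd) host registers for an ALU opcode.
 * When early-unload knows that rs dies here, its host register is remapped
 * to rd instead of allocating a second one, which saves a move. */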
432static void rec_alloc_rs_rd(struct regcache *reg_cache,
433 jit_state_t *_jit,
434 const struct opcode *op,
435 u8 rs, u8 rd,
436 u8 in_flags, u8 out_flags,
437 u8 *rs_out, u8 *rd_out)
438{
439 bool unload, discard;
440 u32 unload_flags;
441
442 if (OPT_EARLY_UNLOAD) {
443 unload_flags = LIGHTREC_FLAGS_GET_RS(op->flags);
444 unload = unload_flags == LIGHTREC_REG_UNLOAD;
445 discard = unload_flags == LIGHTREC_REG_DISCARD;
446 }
447
448 if (OPT_EARLY_UNLOAD && rs && rd != rs && (unload || discard)) {
449 rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags);
450 lightrec_remap_reg(reg_cache, _jit, rs, rd, discard);
451 lightrec_set_reg_out_flags(reg_cache, rs, out_flags);
452 rd = rs;
453 } else {
454 rs = lightrec_alloc_reg_in(reg_cache, _jit, rs, in_flags);
455 rd = lightrec_alloc_reg_out(reg_cache, _jit, rd, out_flags);
456 }
457
458 *rs_out = rs;
459 *rd_out = rd;
460}
461
462static void rec_alu_imm(struct lightrec_cstate *state, const struct block *block,
463 u16 offset, jit_code_t code, bool slti)
464{
465 struct regcache *reg_cache = state->reg_cache;
466 union code c = block->opcode_list[offset].c;
467 jit_state_t *_jit = block->_jit;
468 u8 rs, rt, out_flags = REG_EXT;
469
470 if (slti)
471 out_flags |= REG_ZEXT;
472
473 jit_note(__FILE__, __LINE__);
474
475 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
476 c.i.rs, c.i.rt, REG_EXT, out_flags, &rs, &rt);
477
478 jit_new_node_www(code, rt, rs, (s32)(s16) c.i.imm);
479
480 lightrec_free_reg(reg_cache, rs);
481 lightrec_free_reg(reg_cache, rt);
482}
483
484static void rec_alu_special(struct lightrec_cstate *state, const struct block *block,
485 u16 offset, jit_code_t code, bool out_ext)
486{
487 struct regcache *reg_cache = state->reg_cache;
488 union code c = block->opcode_list[offset].c;
489 jit_state_t *_jit = block->_jit;
490 u8 rd, rt, rs;
491
492 jit_note(__FILE__, __LINE__);
493
494 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, REG_EXT);
495 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
496 c.r.rs, c.r.rd, REG_EXT,
497 out_ext ? REG_EXT | REG_ZEXT : 0, &rs, &rd);
498
499 jit_new_node_www(code, rd, rs, rt);
500
501 lightrec_free_reg(reg_cache, rs);
502 lightrec_free_reg(reg_cache, rt);
503 lightrec_free_reg(reg_cache, rd);
504}
505
506static void rec_alu_shiftv(struct lightrec_cstate *state, const struct block *block,
507 u16 offset, jit_code_t code)
508{
509 struct regcache *reg_cache = state->reg_cache;
510 union code c = block->opcode_list[offset].c;
511 jit_state_t *_jit = block->_jit;
512 u8 rd, rt, rs, temp, flags = 0;
513
514 jit_note(__FILE__, __LINE__);
515
516 if (code == jit_code_rshr)
517 flags = REG_EXT;
518 else if (code == jit_code_rshr_u)
519 flags = REG_ZEXT;
520
521 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, 0);
522 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
523 c.r.rt, c.r.rd, flags, flags, &rt, &rd);
524
525 if (rt != rd) {
526 jit_andi(rd, rs, 0x1f);
527 jit_new_node_www(code, rd, rt, rd);
528 } else {
529 temp = lightrec_alloc_reg_temp(reg_cache, _jit);
530 jit_andi(temp, rs, 0x1f);
531 jit_new_node_www(code, rd, rt, temp);
532 lightrec_free_reg(reg_cache, temp);
533 }
534
535 lightrec_free_reg(reg_cache, rs);
536 lightrec_free_reg(reg_cache, rt);
537 lightrec_free_reg(reg_cache, rd);
538}
539
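/* Load a sign-extended immediate into rt. When the opcode was flagged
 * LIGHTREC_MOVI, the upper half that rec_LUI saved in movi_temp is merged
 * in, so a fused LUI/ADDIU pair becomes a single jit_movi. */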
540static void rec_movi(struct lightrec_cstate *state,
541 const struct block *block, u16 offset)
542{
543 struct regcache *reg_cache = state->reg_cache;
544 union code c = block->opcode_list[offset].c;
545 jit_state_t *_jit = block->_jit;
546 u16 flags = REG_EXT;
547 s32 value = (s32)(s16) c.i.imm;
548 u8 rt;
549
550 if (block->opcode_list[offset].flags & LIGHTREC_MOVI)
551 value += (s32)((u32)state->movi_temp[c.i.rt] << 16);
552
553 if (value >= 0)
554 flags |= REG_ZEXT;
555
556 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
557
558 jit_movi(rt, value);
559
560 lightrec_free_reg(reg_cache, rt);
561}
562
563static void rec_ADDIU(struct lightrec_cstate *state,
564 const struct block *block, u16 offset)
565{
566 const struct opcode *op = &block->opcode_list[offset];
567
568 _jit_name(block->_jit, __func__);
569
570 if (op->i.rs && !(op->flags & LIGHTREC_MOVI))
571 rec_alu_imm(state, block, offset, jit_code_addi, false);
572 else
573 rec_movi(state, block, offset);
574}
575
576static void rec_ADDI(struct lightrec_cstate *state,
577 const struct block *block, u16 offset)
578{
579 /* TODO: Handle the exception? */
580 _jit_name(block->_jit, __func__);
581 rec_ADDIU(state, block, offset);
582}
583
584static void rec_SLTIU(struct lightrec_cstate *state,
585 const struct block *block, u16 offset)
586{
587 _jit_name(block->_jit, __func__);
588 rec_alu_imm(state, block, offset, jit_code_lti_u, true);
589}
590
591static void rec_SLTI(struct lightrec_cstate *state,
592 const struct block *block, u16 offset)
593{
594 _jit_name(block->_jit, __func__);
595 rec_alu_imm(state, block, offset, jit_code_lti, true);
596}
597
598static void rec_ANDI(struct lightrec_cstate *state,
599 const struct block *block, u16 offset)
600{
601 struct regcache *reg_cache = state->reg_cache;
602 union code c = block->opcode_list[offset].c;
603 jit_state_t *_jit = block->_jit;
604 u8 rs, rt;
605
606 _jit_name(block->_jit, __func__);
607 jit_note(__FILE__, __LINE__);
608
609 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
610 c.i.rs, c.i.rt, 0, REG_EXT | REG_ZEXT, &rs, &rt);
611
612 /* PSX code uses ANDI 0xff / ANDI 0xffff a lot, which are basically
613 * casts to uint8_t / uint16_t. */
614 if (c.i.imm == 0xff)
615 jit_extr_uc(rt, rs);
616 else if (c.i.imm == 0xffff)
617 jit_extr_us(rt, rs);
618 else
619 jit_andi(rt, rs, (u32)(u16) c.i.imm);
620
621 lightrec_free_reg(reg_cache, rs);
622 lightrec_free_reg(reg_cache, rt);
623}
624
625static void rec_alu_or_xor(struct lightrec_cstate *state, const struct block *block,
626 u16 offset, jit_code_t code)
627{
628 struct regcache *reg_cache = state->reg_cache;
629 union code c = block->opcode_list[offset].c;
630 jit_state_t *_jit = block->_jit;
631 u8 rs, rt, flags;
632
633 jit_note(__FILE__, __LINE__);
634
635 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
636 c.i.rs, c.i.rt, 0, 0, &rs, &rt);
637
638 flags = lightrec_get_reg_in_flags(reg_cache, rs);
639 lightrec_set_reg_out_flags(reg_cache, rt, flags);
640
641 jit_new_node_www(code, rt, rs, (u32)(u16) c.i.imm);
642
643 lightrec_free_reg(reg_cache, rs);
644 lightrec_free_reg(reg_cache, rt);
645}
646
647
648static void rec_ORI(struct lightrec_cstate *state,
649 const struct block *block, u16 offset)
650{
651 const struct opcode *op = &block->opcode_list[offset];
652 struct regcache *reg_cache = state->reg_cache;
653 jit_state_t *_jit = block->_jit;
654 s32 val;
655 u8 rt;
656
657 _jit_name(_jit, __func__);
658
659 if (op->flags & LIGHTREC_MOVI) {
660 rt = lightrec_alloc_reg_out(reg_cache, _jit, op->i.rt, REG_EXT);
661
662 val = ((u32)state->movi_temp[op->i.rt] << 16) | op->i.imm;
663 jit_movi(rt, val);
664
665 lightrec_free_reg(reg_cache, rt);
666 } else {
667 rec_alu_or_xor(state, block, offset, jit_code_ori);
668 }
669}
670
671static void rec_XORI(struct lightrec_cstate *state,
672 const struct block *block, u16 offset)
673{
674 _jit_name(block->_jit, __func__);
675 rec_alu_or_xor(state, block, offset, jit_code_xori);
676}
677
678static void rec_LUI(struct lightrec_cstate *state,
679 const struct block *block, u16 offset)
680{
681 struct regcache *reg_cache = state->reg_cache;
682 union code c = block->opcode_list[offset].c;
683 jit_state_t *_jit = block->_jit;
684 u8 rt, flags = REG_EXT;
685
686 if (block->opcode_list[offset].flags & LIGHTREC_MOVI) {
687 state->movi_temp[c.i.rt] = c.i.imm;
688 return;
689 }
690
691 jit_name(__func__);
692 jit_note(__FILE__, __LINE__);
693
694 if (!(c.i.imm & BIT(15)))
695 flags |= REG_ZEXT;
696
697 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, flags);
698
699 jit_movi(rt, (s32)(c.i.imm << 16));
700
701 lightrec_free_reg(reg_cache, rt);
702}
703
704static void rec_special_ADDU(struct lightrec_cstate *state,
705 const struct block *block, u16 offset)
706{
707 _jit_name(block->_jit, __func__);
708 rec_alu_special(state, block, offset, jit_code_addr, false);
709}
710
711static void rec_special_ADD(struct lightrec_cstate *state,
712 const struct block *block, u16 offset)
713{
714 /* TODO: Handle the exception? */
715 _jit_name(block->_jit, __func__);
716 rec_alu_special(state, block, offset, jit_code_addr, false);
717}
718
719static void rec_special_SUBU(struct lightrec_cstate *state,
720 const struct block *block, u16 offset)
721{
722 _jit_name(block->_jit, __func__);
723 rec_alu_special(state, block, offset, jit_code_subr, false);
724}
725
726static void rec_special_SUB(struct lightrec_cstate *state,
727 const struct block *block, u16 offset)
728{
729 /* TODO: Handle the exception? */
730 _jit_name(block->_jit, __func__);
731 rec_alu_special(state, block, offset, jit_code_subr, false);
732}
733
734static void rec_special_AND(struct lightrec_cstate *state,
735 const struct block *block, u16 offset)
736{
737 struct regcache *reg_cache = state->reg_cache;
738 union code c = block->opcode_list[offset].c;
739 jit_state_t *_jit = block->_jit;
740 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd;
741
742 _jit_name(block->_jit, __func__);
743 jit_note(__FILE__, __LINE__);
744
745 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
746 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
747 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
748
749 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
750 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
751
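 /* In the equations below, Z(x) means "x is known zero-extended" (REG_ZEXT)
 * and E(x) means "x is known sign-extended" (REG_EXT); the output flags of
 * rd are derived from those of rs and rt. */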
752 /* Z(rd) = Z(rs) | Z(rt) */
753 flags_rd = REG_ZEXT & (flags_rs | flags_rt);
754
755 /* E(rd) = (E(rt) & Z(rt)) | (E(rs) & Z(rs)) | (E(rs) & E(rt)) */
756 if (((flags_rs & REG_EXT) && (flags_rt & REG_ZEXT)) ||
757 ((flags_rt & REG_EXT) && (flags_rs & REG_ZEXT)) ||
758 (REG_EXT & flags_rs & flags_rt))
759 flags_rd |= REG_EXT;
760
761 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
762
763 jit_andr(rd, rs, rt);
764
765 lightrec_free_reg(reg_cache, rs);
766 lightrec_free_reg(reg_cache, rt);
767 lightrec_free_reg(reg_cache, rd);
768}
769
770static void rec_special_or_nor(struct lightrec_cstate *state,
771 const struct block *block, u16 offset, bool nor)
772{
773 struct regcache *reg_cache = state->reg_cache;
774 union code c = block->opcode_list[offset].c;
775 jit_state_t *_jit = block->_jit;
776 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd = 0;
777
778 jit_note(__FILE__, __LINE__);
779
780 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
781 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
782 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
783
784 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
785 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
786
787 /* or: Z(rd) = Z(rs) & Z(rt)
788 * nor: Z(rd) = 0 */
789 if (!nor)
790 flags_rd = REG_ZEXT & flags_rs & flags_rt;
791
792 /* E(rd) = E(rs) & E(rt) */
793 if (REG_EXT & flags_rs & flags_rt)
794 flags_rd |= REG_EXT;
795
796 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
797
798 jit_orr(rd, rs, rt);
799
800 if (nor)
801 jit_comr(rd, rd);
802
803 lightrec_free_reg(reg_cache, rs);
804 lightrec_free_reg(reg_cache, rt);
805 lightrec_free_reg(reg_cache, rd);
806}
807
808static void rec_special_OR(struct lightrec_cstate *state,
809 const struct block *block, u16 offset)
810{
811 _jit_name(block->_jit, __func__);
812 rec_special_or_nor(state, block, offset, false);
813}
814
815static void rec_special_NOR(struct lightrec_cstate *state,
816 const struct block *block, u16 offset)
817{
818 _jit_name(block->_jit, __func__);
819 rec_special_or_nor(state, block, offset, true);
820}
821
822static void rec_special_XOR(struct lightrec_cstate *state,
823 const struct block *block, u16 offset)
824{
825 struct regcache *reg_cache = state->reg_cache;
826 union code c = block->opcode_list[offset].c;
827 jit_state_t *_jit = block->_jit;
828 u8 rd, rt, rs, flags_rs, flags_rt, flags_rd;
829
830 _jit_name(block->_jit, __func__);
831
832 jit_note(__FILE__, __LINE__);
833
834 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
835 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
836 c.r.rs, c.r.rd, 0, 0, &rs, &rd);
837
838 flags_rs = lightrec_get_reg_in_flags(reg_cache, rs);
839 flags_rt = lightrec_get_reg_in_flags(reg_cache, rt);
840
841 /* Z(rd) = Z(rs) & Z(rt) */
842 flags_rd = REG_ZEXT & flags_rs & flags_rt;
843
844 /* E(rd) = E(rs) & E(rt) */
845 flags_rd |= REG_EXT & flags_rs & flags_rt;
846
847 lightrec_set_reg_out_flags(reg_cache, rd, flags_rd);
848
849 jit_xorr(rd, rs, rt);
850
851 lightrec_free_reg(reg_cache, rs);
852 lightrec_free_reg(reg_cache, rt);
853 lightrec_free_reg(reg_cache, rd);
854}
855
856static void rec_special_SLTU(struct lightrec_cstate *state,
857 const struct block *block, u16 offset)
858{
859 _jit_name(block->_jit, __func__);
860 rec_alu_special(state, block, offset, jit_code_ltr_u, true);
861}
862
863static void rec_special_SLT(struct lightrec_cstate *state,
864 const struct block *block, u16 offset)
865{
866 _jit_name(block->_jit, __func__);
867 rec_alu_special(state, block, offset, jit_code_ltr, true);
868}
869
870static void rec_special_SLLV(struct lightrec_cstate *state,
871 const struct block *block, u16 offset)
872{
873 _jit_name(block->_jit, __func__);
874 rec_alu_shiftv(state, block, offset, jit_code_lshr);
875}
876
877static void rec_special_SRLV(struct lightrec_cstate *state,
878 const struct block *block, u16 offset)
879{
880 _jit_name(block->_jit, __func__);
881 rec_alu_shiftv(state, block, offset, jit_code_rshr_u);
882}
883
884static void rec_special_SRAV(struct lightrec_cstate *state,
885 const struct block *block, u16 offset)
886{
887 _jit_name(block->_jit, __func__);
888 rec_alu_shiftv(state, block, offset, jit_code_rshr);
889}
890
891static void rec_alu_shift(struct lightrec_cstate *state, const struct block *block,
892 u16 offset, jit_code_t code)
893{
894 struct regcache *reg_cache = state->reg_cache;
895 union code c = block->opcode_list[offset].c;
896 jit_state_t *_jit = block->_jit;
897 u8 rd, rt, flags = 0, out_flags = 0;
898
899 jit_note(__FILE__, __LINE__);
900
901 if (code == jit_code_rshi)
902 flags = REG_EXT;
903 else if (code == jit_code_rshi_u)
904 flags = REG_ZEXT;
905
906 /* The input reg is zero-extended; if we SRL by at least one bit, we know
907 * the output reg will be both zero-extended and sign-extended. */
908 out_flags = flags;
909 if (code == jit_code_rshi_u && c.r.imm)
910 out_flags |= REG_EXT;
911
912 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
913 c.r.rt, c.r.rd, flags, out_flags, &rt, &rd);
914
915 jit_new_node_www(code, rd, rt, c.r.imm);
916
917 lightrec_free_reg(reg_cache, rt);
918 lightrec_free_reg(reg_cache, rd);
919}
920
921static void rec_special_SLL(struct lightrec_cstate *state,
922 const struct block *block, u16 offset)
923{
924 _jit_name(block->_jit, __func__);
925 rec_alu_shift(state, block, offset, jit_code_lshi);
926}
927
928static void rec_special_SRL(struct lightrec_cstate *state,
929 const struct block *block, u16 offset)
930{
931 _jit_name(block->_jit, __func__);
932 rec_alu_shift(state, block, offset, jit_code_rshi_u);
933}
934
935static void rec_special_SRA(struct lightrec_cstate *state,
936 const struct block *block, u16 offset)
937{
938 _jit_name(block->_jit, __func__);
939 rec_alu_shift(state, block, offset, jit_code_rshi);
940}
941
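/* MULT/MULTU: compute the 64-bit product of rs and rt into HI/LO. On 32-bit
 * hosts this uses Lightning's qmulr/qmulr_u (or a plain 32x32 multiply when
 * the optimizer proved HI is unused); on 64-bit hosts a single 64-bit
 * multiply is emitted and HI is extracted with a 32-bit arithmetic shift. */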
942static void rec_alu_mult(struct lightrec_cstate *state,
943 const struct block *block, u16 offset, bool is_signed)
944{
945 struct regcache *reg_cache = state->reg_cache;
946 union code c = block->opcode_list[offset].c;
947 u32 flags = block->opcode_list[offset].flags;
948 u8 reg_lo = get_mult_div_lo(c);
949 u8 reg_hi = get_mult_div_hi(c);
950 jit_state_t *_jit = block->_jit;
951 u8 lo, hi, rs, rt, rflags = 0;
952
953 jit_note(__FILE__, __LINE__);
954
955 if (is_signed)
956 rflags = REG_EXT;
957 else
958 rflags = REG_ZEXT;
959
960 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
961 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
962
963 if (!op_flag_no_lo(flags))
964 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
965 else if (__WORDSIZE == 32)
966 lo = lightrec_alloc_reg_temp(reg_cache, _jit);
967
968 if (!op_flag_no_hi(flags))
969 hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, REG_EXT);
970
971 if (__WORDSIZE == 32) {
972 /* On 32-bit systems, do a 32*32->64 bit operation, or a 32*32->32 bit
973 * operation if the MULT was detected as 32-bit only. */
974 if (!op_flag_no_hi(flags)) {
975 if (is_signed)
976 jit_qmulr(lo, hi, rs, rt);
977 else
978 jit_qmulr_u(lo, hi, rs, rt);
979 } else {
980 jit_mulr(lo, rs, rt);
981 }
982 } else {
983 /* On 64-bit systems, do a 64*64->64 bit operation. */
984 if (op_flag_no_lo(flags)) {
985 jit_mulr(hi, rs, rt);
986 jit_rshi(hi, hi, 32);
987 } else {
988 jit_mulr(lo, rs, rt);
989
990 /* The 64-bit output value is in $lo; store the upper 32 bits in $hi */
991 if (!op_flag_no_hi(flags))
992 jit_rshi(hi, lo, 32);
993 }
994 }
995
996 lightrec_free_reg(reg_cache, rs);
997 lightrec_free_reg(reg_cache, rt);
998 if (!op_flag_no_lo(flags) || __WORDSIZE == 32)
999 lightrec_free_reg(reg_cache, lo);
1000 if (!op_flag_no_hi(flags))
1001 lightrec_free_reg(reg_cache, hi);
1002}
1003
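/* DIV/DIVU: compute LO = rs / rt and HI = rs % rt. The MIPS divider does not
 * trap on a zero divisor, so unless the optimizer proved the divisor
 * non-zero, an inline check reproduces the architected results instead:
 * HI = rs, and LO = 0xffffffff for DIVU, or -1/+1 for DIV depending on
 * whether rs is non-negative or negative. */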
1004static void rec_alu_div(struct lightrec_cstate *state,
1005 const struct block *block, u16 offset, bool is_signed)
1006{
1007 struct regcache *reg_cache = state->reg_cache;
1008 union code c = block->opcode_list[offset].c;
1009 u32 flags = block->opcode_list[offset].flags;
1010 bool no_check = op_flag_no_div_check(flags);
1011 u8 reg_lo = get_mult_div_lo(c);
1012 u8 reg_hi = get_mult_div_hi(c);
1013 jit_state_t *_jit = block->_jit;
1014 jit_node_t *branch, *to_end;
1015 u8 lo = 0, hi = 0, rs, rt, rflags = 0;
1016
1017 jit_note(__FILE__, __LINE__);
1018
1019 if (is_signed)
1020 rflags = REG_EXT;
1021 else
1022 rflags = REG_ZEXT;
1023
1024 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rs, rflags);
1025 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, rflags);
1026
1027 if (!op_flag_no_lo(flags))
1028 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
1029
1030 if (!op_flag_no_hi(flags))
1031 hi = lightrec_alloc_reg_out(reg_cache, _jit, reg_hi, 0);
1032
1033 /* Jump to special handler if dividing by zero */
1034 if (!no_check)
1035 branch = jit_beqi(rt, 0);
1036
1037 if (op_flag_no_lo(flags)) {
1038 if (is_signed)
1039 jit_remr(hi, rs, rt);
1040 else
1041 jit_remr_u(hi, rs, rt);
1042 } else if (op_flag_no_hi(flags)) {
1043 if (is_signed)
1044 jit_divr(lo, rs, rt);
1045 else
1046 jit_divr_u(lo, rs, rt);
1047 } else {
1048 if (is_signed)
1049 jit_qdivr(lo, hi, rs, rt);
1050 else
1051 jit_qdivr_u(lo, hi, rs, rt);
1052 }
1053
1054 if (!no_check) {
1055 /* Jump above the div-by-zero handler */
1056 to_end = jit_b();
1057
1058 jit_patch(branch);
1059
1060 if (!op_flag_no_lo(flags)) {
1061 if (is_signed) {
1062 jit_ltr(lo, rs, rt);
1063 jit_lshi(lo, lo, 1);
1064 jit_subi(lo, lo, 1);
1065 } else {
1066 jit_subi(lo, rt, 1);
1067 }
1068 }
1069
1070 if (!op_flag_no_hi(flags))
1071 jit_movr(hi, rs);
1072
1073 jit_patch(to_end);
1074 }
1075
1076 lightrec_free_reg(reg_cache, rs);
1077 lightrec_free_reg(reg_cache, rt);
1078
1079 if (!op_flag_no_lo(flags))
1080 lightrec_free_reg(reg_cache, lo);
1081
1082 if (!op_flag_no_hi(flags))
1083 lightrec_free_reg(reg_cache, hi);
1084}
1085
1086static void rec_special_MULT(struct lightrec_cstate *state,
1087 const struct block *block, u16 offset)
1088{
1089 _jit_name(block->_jit, __func__);
1090 rec_alu_mult(state, block, offset, true);
1091}
1092
1093static void rec_special_MULTU(struct lightrec_cstate *state,
1094 const struct block *block, u16 offset)
1095{
1096 _jit_name(block->_jit, __func__);
1097 rec_alu_mult(state, block, offset, false);
1098}
1099
1100static void rec_special_DIV(struct lightrec_cstate *state,
1101 const struct block *block, u16 offset)
1102{
1103 _jit_name(block->_jit, __func__);
1104 rec_alu_div(state, block, offset, true);
1105}
1106
1107static void rec_special_DIVU(struct lightrec_cstate *state,
1108 const struct block *block, u16 offset)
1109{
1110 _jit_name(block->_jit, __func__);
1111 rec_alu_div(state, block, offset, false);
1112}
1113
1114static void rec_alu_mv_lo_hi(struct lightrec_cstate *state,
1115 const struct block *block, u16 offset,
1116 u8 dst, u8 src)
1117{
1118 struct regcache *reg_cache = state->reg_cache;
1119 jit_state_t *_jit = block->_jit;
1120
1121 jit_note(__FILE__, __LINE__);
1122
1123 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
1124 src, dst, 0, REG_EXT, &src, &dst);
1125
1126 jit_extr_i(dst, src);
1127
1128 lightrec_free_reg(reg_cache, src);
1129 lightrec_free_reg(reg_cache, dst);
1130}
1131
1132static void rec_special_MFHI(struct lightrec_cstate *state,
1133 const struct block *block, u16 offset)
1134{
1135 union code c = block->opcode_list[offset].c;
1136
1137 _jit_name(block->_jit, __func__);
1138 rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_HI);
1139}
1140
1141static void rec_special_MTHI(struct lightrec_cstate *state,
1142 const struct block *block, u16 offset)
1143{
1144 union code c = block->opcode_list[offset].c;
1145
1146 _jit_name(block->_jit, __func__);
1147 rec_alu_mv_lo_hi(state, block, offset, REG_HI, c.r.rs);
1148}
1149
1150static void rec_special_MFLO(struct lightrec_cstate *state,
1151 const struct block *block, u16 offset)
1152{
1153 union code c = block->opcode_list[offset].c;
1154
1155 _jit_name(block->_jit, __func__);
1156 rec_alu_mv_lo_hi(state, block, offset, c.r.rd, REG_LO);
1157}
1158
1159static void rec_special_MTLO(struct lightrec_cstate *state,
1160 const struct block *block, u16 offset)
1161{
1162 union code c = block->opcode_list[offset].c;
1163
1164 _jit_name(block->_jit, __func__);
1165 rec_alu_mv_lo_hi(state, block, offset, REG_LO, c.r.rs);
1166}
1167
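/* Emit a call to one of the C wrapper trampolines. The trampoline address is
 * loaded from state->wrappers_eps[] (reusing a temporary that already holds
 * it when possible) and called with 'arg' as its only pushed argument;
 * JIT_R1 is kept unmapped because the wrapper code uses it. */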
1168static void call_to_c_wrapper(struct lightrec_cstate *state,
1169 const struct block *block, u32 arg,
1170 enum c_wrappers wrapper)
1171{
1172 struct regcache *reg_cache = state->reg_cache;
1173 jit_state_t *_jit = block->_jit;
1174 s8 tmp, tmp2;
1175
1176 /* Make sure JIT_R1 is not mapped; it will be used in the C wrapper. */
1177 tmp2 = lightrec_alloc_reg(reg_cache, _jit, JIT_R1);
1178
1179 tmp = lightrec_get_reg_with_value(reg_cache,
1180 (intptr_t) state->state->wrappers_eps[wrapper]);
1181 if (tmp < 0) {
1182 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1183 jit_ldxi(tmp, LIGHTREC_REG_STATE,
1184 offsetof(struct lightrec_state, wrappers_eps[wrapper]));
1185
1186 lightrec_temp_set_value(reg_cache, tmp,
1187 (intptr_t) state->state->wrappers_eps[wrapper]);
1188 }
1189
1190 lightrec_free_reg(reg_cache, tmp2);
1191
1192#ifdef __mips__
1193 /* On MIPS, register t9 is always used as the target register for JALR.
1194 * Therefore if it does not contain the target address we must
1195 * invalidate it. */
1196 if (tmp != _T9)
1197 lightrec_unload_reg(reg_cache, _jit, _T9);
1198#endif
1199
1200 jit_prepare();
1201 jit_pushargi(arg);
1202
1203 lightrec_regcache_mark_live(reg_cache, _jit);
1204 jit_callr(tmp);
1205
1206 lightrec_free_reg(reg_cache, tmp);
1207 lightrec_regcache_mark_live(reg_cache, _jit);
1208}
1209
1210static void rec_io(struct lightrec_cstate *state,
1211 const struct block *block, u16 offset,
1212 bool load_rt, bool read_rt)
1213{
1214 struct regcache *reg_cache = state->reg_cache;
1215 jit_state_t *_jit = block->_jit;
1216 union code c = block->opcode_list[offset].c;
1217 u32 flags = block->opcode_list[offset].flags;
1218 bool is_tagged = LIGHTREC_FLAGS_GET_IO_MODE(flags);
1219 u32 lut_entry;
1220 u8 zero;
1221
1222 jit_note(__FILE__, __LINE__);
1223
1224 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
1225
1226 if (read_rt && likely(c.i.rt))
1227 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
1228 else if (load_rt)
1229 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
1230
1231 if (op_flag_load_delay(flags) && !state->no_load_delay) {
1232 /* Clear state->in_delay_slot_n. This notifies the lightrec_rw
1233 * wrapper that it should write the REG_TEMP register instead of
1234 * the actual output register of the opcode. */
1235 zero = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1236 jit_stxi_c(offsetof(struct lightrec_state, in_delay_slot_n),
1237 LIGHTREC_REG_STATE, zero);
1238 lightrec_free_reg(reg_cache, zero);
1239 }
1240
1241 if (is_tagged) {
1242 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_RW);
1243 } else {
1244 lut_entry = lightrec_get_lut_entry(block);
1245 call_to_c_wrapper(state, block, (lut_entry << 16) | offset,
1246 C_WRAPPER_RW_GENERIC);
1247 }
1248}
1249
1250static u32 rec_ram_mask(struct lightrec_state *state)
1251{
1252 return (RAM_SIZE << (state->mirrors_mapped * 2)) - 1;
1253}
1254
1255static u32 rec_io_mask(const struct lightrec_state *state)
1256{
1257 u32 length = state->maps[PSX_MAP_HW_REGISTERS].length;
1258
1259 return 0x1f800000 | GENMASK(31 - clz32(length - 1), 0);
1260}
1261
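/* Direct store to a known memory map (RAM, scratchpad or I/O). The guest
 * address is masked into the map, the host offset is added, and the value is
 * stored (byteswapped first on big-endian hosts). When 'invalidate' is set,
 * NULL is also written to the code LUT entry covering the stored word, so
 * any block compiled from that address gets invalidated. */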
1262static void rec_store_memory(struct lightrec_cstate *cstate,
1263 const struct block *block,
1264 u16 offset, jit_code_t code,
1265 jit_code_t swap_code,
1266 uintptr_t addr_offset, u32 addr_mask,
1267 bool invalidate)
1268{
1269 const struct lightrec_state *state = cstate->state;
1270 struct regcache *reg_cache = cstate->reg_cache;
1271 struct opcode *op = &block->opcode_list[offset];
1272 jit_state_t *_jit = block->_jit;
1273 union code c = op->c;
1274 u8 rs, rt, tmp, tmp2, tmp3, addr_reg, addr_reg2;
1275 s16 imm = (s16)c.i.imm;
1276 s32 simm = (s32)imm << (1 - lut_is_32bit(state));
1277 s32 lut_offt = offsetof(struct lightrec_state, code_lut);
1278 bool no_mask = op_flag_no_mask(op->flags);
1279 bool add_imm = c.i.imm &&
1280 ((!state->mirrors_mapped && !no_mask) || (invalidate &&
1281 ((imm & 0x3) || simm + lut_offt != (s16)(simm + lut_offt))));
1282 bool need_tmp = !no_mask || add_imm || invalidate;
1283 bool swc2 = c.i.op == OP_SWC2;
1284 u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
1285 s8 reg_imm;
1286
1287 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1288 if (need_tmp)
1289 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1290
1291 addr_reg = rs;
1292
1293 if (add_imm) {
1294 jit_addi(tmp, addr_reg, (s16)c.i.imm);
1295 lightrec_free_reg(reg_cache, rs);
1296 addr_reg = tmp;
1297 imm = 0;
1298 } else if (simm) {
1299 lut_offt += simm;
1300 }
1301
1302 if (!no_mask) {
1303 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1304 addr_mask);
1305
1306 jit_andr(tmp, addr_reg, reg_imm);
1307 addr_reg = tmp;
1308
1309 lightrec_free_reg(reg_cache, reg_imm);
1310 }
1311
1312 if (addr_offset) {
1313 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1314 addr_offset);
1315 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1316 jit_addr(tmp2, addr_reg, reg_imm);
1317 addr_reg2 = tmp2;
1318
1319 lightrec_free_reg(reg_cache, reg_imm);
1320 } else {
1321 addr_reg2 = addr_reg;
1322 }
1323
1324 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1325
1326 if (is_big_endian() && swap_code && in_reg) {
1327 tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
1328
1329 jit_new_node_ww(swap_code, tmp3, rt);
1330 jit_new_node_www(code, imm, addr_reg2, tmp3);
1331
1332 lightrec_free_reg(reg_cache, tmp3);
1333 } else {
1334 jit_new_node_www(code, imm, addr_reg2, rt);
1335 }
1336
1337 lightrec_free_reg(reg_cache, rt);
1338
1339 if (invalidate) {
1340 tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1341
1342 if (c.i.op != OP_SW) {
1343 jit_andi(tmp, addr_reg, ~3);
1344 addr_reg = tmp;
1345 }
1346
1347 if (!lut_is_32bit(state)) {
1348 jit_lshi(tmp, addr_reg, 1);
1349 addr_reg = tmp;
1350 }
1351
1352 if (addr_reg == rs && c.i.rs == 0) {
1353 addr_reg = LIGHTREC_REG_STATE;
1354 } else {
1355 jit_add_state(tmp, addr_reg);
1356 addr_reg = tmp;
1357 }
1358
1359 if (lut_is_32bit(state))
1360 jit_stxi_i(lut_offt, addr_reg, tmp3);
1361 else
1362 jit_stxi(lut_offt, addr_reg, tmp3);
1363
1364 lightrec_free_reg(reg_cache, tmp3);
1365 }
1366
1367 if (addr_offset)
1368 lightrec_free_reg(reg_cache, tmp2);
1369 if (need_tmp)
1370 lightrec_free_reg(reg_cache, tmp);
1371 lightrec_free_reg(reg_cache, rs);
1372}
1373
1374static void rec_store_ram(struct lightrec_cstate *cstate,
1375 const struct block *block,
1376 u16 offset, jit_code_t code,
1377 jit_code_t swap_code, bool invalidate)
1378{
1379 struct lightrec_state *state = cstate->state;
1380
1381 _jit_note(block->_jit, __FILE__, __LINE__);
1382
1383 return rec_store_memory(cstate, block, offset, code, swap_code,
1384 state->offset_ram, rec_ram_mask(state),
1385 invalidate);
1386}
1387
1388static void rec_store_scratch(struct lightrec_cstate *cstate,
1389 const struct block *block, u16 offset,
1390 jit_code_t code, jit_code_t swap_code)
1391{
1392 _jit_note(block->_jit, __FILE__, __LINE__);
1393
1394 return rec_store_memory(cstate, block, offset, code, swap_code,
1395 cstate->state->offset_scratch,
1396 0x1fffffff, false);
1397}
1398
1399static void rec_store_io(struct lightrec_cstate *cstate,
1400 const struct block *block, u16 offset,
1401 jit_code_t code, jit_code_t swap_code)
1402{
1403 _jit_note(block->_jit, __FILE__, __LINE__);
1404
1405 return rec_store_memory(cstate, block, offset, code, swap_code,
1406 cstate->state->offset_io,
1407 rec_io_mask(cstate->state), false);
1408}
1409
1410static void rec_store_direct_no_invalidate(struct lightrec_cstate *cstate,
1411 const struct block *block,
1412 u16 offset, jit_code_t code,
1413 jit_code_t swap_code)
1414{
1415 struct lightrec_state *state = cstate->state;
1416 struct regcache *reg_cache = cstate->reg_cache;
1417 union code c = block->opcode_list[offset].c;
1418 jit_state_t *_jit = block->_jit;
1419 jit_node_t *to_not_ram, *to_end;
1420 bool swc2 = c.i.op == OP_SWC2;
1421 u8 tmp, tmp2 = 0, rs, rt, in_reg = swc2 ? REG_TEMP : c.i.rt;
1422 u32 addr_mask;
1423 s32 reg_imm;
1424 s16 imm;
1425
1426 jit_note(__FILE__, __LINE__);
1427 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1428 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1429
1430 if (state->mirrors_mapped)
1431 addr_mask = 0x1f800000 | (4 * RAM_SIZE - 1);
1432 else
1433 addr_mask = 0x1f800000 | (RAM_SIZE - 1);
1434
1435 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
1436
1437 /* Convert to KUNSEG and avoid RAM mirrors */
1438 if (!state->mirrors_mapped && c.i.imm) {
1439 imm = 0;
1440 jit_addi(tmp, rs, (s16)c.i.imm);
1441 jit_andr(tmp, tmp, reg_imm);
1442 } else {
1443 imm = (s16)c.i.imm;
1444 jit_andr(tmp, rs, reg_imm);
1445 }
1446
1447 lightrec_free_reg(reg_cache, rs);
1448 lightrec_free_reg(reg_cache, reg_imm);
1449
1450 if (state->offset_ram != state->offset_scratch) {
1451 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1452
1453 to_not_ram = jit_bmsi(tmp, BIT(28));
1454
1455 jit_movi(tmp2, state->offset_ram);
1456
1457 to_end = jit_b();
1458 jit_patch(to_not_ram);
1459
1460 jit_movi(tmp2, state->offset_scratch);
1461 jit_patch(to_end);
1462 } else if (state->offset_ram) {
1463 tmp2 = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1464 state->offset_ram);
1465 }
1466
1467 if (state->offset_ram || state->offset_scratch) {
1468 jit_addr(tmp, tmp, tmp2);
1469 lightrec_free_reg(reg_cache, tmp2);
1470 }
1471
1472 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1473
1474 if (is_big_endian() && swap_code && in_reg) {
1475 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1476
1477 jit_new_node_ww(swap_code, tmp2, rt);
1478 jit_new_node_www(code, imm, tmp, tmp2);
1479
1480 lightrec_free_reg(reg_cache, tmp2);
1481 } else {
1482 jit_new_node_www(code, imm, tmp, rt);
1483 }
1484
1485 lightrec_free_reg(reg_cache, rt);
1486 lightrec_free_reg(reg_cache, tmp);
1487}
1488
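/* Store with run-time RAM/scratchpad discrimination and code invalidation:
 * the address is converted to KUNSEG, compared against the RAM size to pick
 * the right host offset, and the matching code LUT entry is cleared before
 * the actual store is emitted. */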
1489static void rec_store_direct(struct lightrec_cstate *cstate, const struct block *block,
1490 u16 offset, jit_code_t code, jit_code_t swap_code)
1491{
1492 struct lightrec_state *state = cstate->state;
1493 u32 ram_size = state->mirrors_mapped ? RAM_SIZE * 4 : RAM_SIZE;
1494 struct regcache *reg_cache = cstate->reg_cache;
1495 union code c = block->opcode_list[offset].c;
1496 jit_state_t *_jit = block->_jit;
1497 jit_node_t *to_not_ram, *to_end;
1498 bool swc2 = c.i.op == OP_SWC2;
1499 u8 tmp, tmp2, tmp3, masked_reg, rs, rt;
1500 u8 in_reg = swc2 ? REG_TEMP : c.i.rt;
1501 u32 addr_mask = 0x1f800000 | (ram_size - 1);
1502 s32 reg_imm;
1503
1504 jit_note(__FILE__, __LINE__);
1505
1506 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1507 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
1508 tmp3 = lightrec_alloc_reg_in(reg_cache, _jit, 0, 0);
1509
1510 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit, addr_mask);
1511
1512 /* Convert to KUNSEG and avoid RAM mirrors */
1513 if (c.i.imm) {
1514 jit_addi(tmp2, rs, (s16)c.i.imm);
1515 jit_andr(tmp2, tmp2, reg_imm);
1516 } else {
1517 jit_andr(tmp2, rs, reg_imm);
1518 }
1519
1520 lightrec_free_reg(reg_cache, rs);
1521 lightrec_free_reg(reg_cache, reg_imm);
1522 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1523
1524 if (state->offset_ram != state->offset_scratch) {
1525 to_not_ram = jit_bgti(tmp2, ram_size);
1526 masked_reg = tmp2;
1527 } else {
1528 jit_lti_u(tmp, tmp2, ram_size);
1529 jit_movnr(tmp, tmp2, tmp);
1530 masked_reg = tmp;
1531 }
1532
1533 /* Compute the offset to the code LUT */
1534 if (c.i.op == OP_SW)
1535 jit_andi(tmp, masked_reg, RAM_SIZE - 1);
1536 else
1537 jit_andi(tmp, masked_reg, (RAM_SIZE - 1) & ~3);
1538
1539 if (!lut_is_32bit(state))
1540 jit_lshi(tmp, tmp, 1);
1541 jit_add_state(tmp, tmp);
1542
1543 /* Write NULL to the code LUT to invalidate any block that's there */
1544 if (lut_is_32bit(state))
1545 jit_stxi_i(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
1546 else
1547 jit_stxi(offsetof(struct lightrec_state, code_lut), tmp, tmp3);
1548
1549 if (state->offset_ram != state->offset_scratch) {
1550 jit_movi(tmp, state->offset_ram);
1551
1552 to_end = jit_b();
1553 jit_patch(to_not_ram);
1554 }
1555
1556 if (state->offset_ram || state->offset_scratch)
1557 jit_movi(tmp, state->offset_scratch);
1558
1559 if (state->offset_ram != state->offset_scratch)
1560 jit_patch(to_end);
1561
1562 if (state->offset_ram || state->offset_scratch)
1563 jit_addr(tmp2, tmp2, tmp);
1564
1565 lightrec_free_reg(reg_cache, tmp);
1566 lightrec_free_reg(reg_cache, tmp3);
1567
1568 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, 0);
1569
1570 if (is_big_endian() && swap_code && in_reg) {
1571 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1572
1573 jit_new_node_ww(swap_code, tmp, rt);
1574 jit_new_node_www(code, 0, tmp2, tmp);
1575
1576 lightrec_free_reg(reg_cache, tmp);
1577 } else {
1578 jit_new_node_www(code, 0, tmp2, rt);
1579 }
1580
1581 lightrec_free_reg(reg_cache, rt);
1582 lightrec_free_reg(reg_cache, tmp2);
1583}
1584
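/* Dispatch a store opcode to the fastest path the optimizer could prove
 * valid (RAM, scratchpad, direct, direct-HW), falling back to the generic C
 * wrapper otherwise. On those fast paths, SWC2 first copies the source CP2
 * register into REG_TEMP, which the store emitters then use as their input. */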
1585static void rec_store(struct lightrec_cstate *state,
1586 const struct block *block, u16 offset,
1587 jit_code_t code, jit_code_t swap_code)
1588{
1589 u32 flags = block->opcode_list[offset].flags;
1590 u32 mode = LIGHTREC_FLAGS_GET_IO_MODE(flags);
1591 bool no_invalidate = op_flag_no_invalidate(flags) ||
1592 (state->state->opt_flags & LIGHTREC_OPT_INV_DMA_ONLY);
1593 union code c = block->opcode_list[offset].c;
1594 bool is_swc2 = c.i.op == OP_SWC2;
1595
1596 if (is_swc2) {
1597 switch (mode) {
1598 case LIGHTREC_IO_RAM:
1599 case LIGHTREC_IO_SCRATCH:
1600 case LIGHTREC_IO_DIRECT:
1601 case LIGHTREC_IO_DIRECT_HW:
1602 rec_cp2_do_mfc2(state, block, offset, c.i.rt, REG_TEMP);
1603 break;
1604 default:
1605 break;
1606 }
1607 }
1608
1609 switch (mode) {
1610 case LIGHTREC_IO_RAM:
1611 rec_store_ram(state, block, offset, code,
1612 swap_code, !no_invalidate);
1613 break;
1614 case LIGHTREC_IO_SCRATCH:
1615 rec_store_scratch(state, block, offset, code, swap_code);
1616 break;
1617 case LIGHTREC_IO_DIRECT:
1618 if (no_invalidate) {
1619 rec_store_direct_no_invalidate(state, block, offset,
1620 code, swap_code);
1621 } else {
1622 rec_store_direct(state, block, offset, code, swap_code);
1623 }
1624 break;
1625 case LIGHTREC_IO_DIRECT_HW:
1626 rec_store_io(state, block, offset, code, swap_code);
1627 break;
1628 default:
1629 rec_io(state, block, offset, true, false);
1630 return;
1631 }
1632
1633 if (is_swc2)
1634 lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP);
1635}
1636
1637static void rec_SB(struct lightrec_cstate *state,
1638 const struct block *block, u16 offset)
1639{
1640 _jit_name(block->_jit, __func__);
1641 rec_store(state, block, offset, jit_code_stxi_c, 0);
1642}
1643
1644static void rec_SH(struct lightrec_cstate *state,
1645 const struct block *block, u16 offset)
1646{
1647 _jit_name(block->_jit, __func__);
1648 rec_store(state, block, offset,
1649 jit_code_stxi_s, jit_code_bswapr_us);
1650}
1651
1652static void rec_SW(struct lightrec_cstate *state,
1653 const struct block *block, u16 offset)
1654
1655{
1656 union code c = block->opcode_list[offset].c;
1657
1658 _jit_name(block->_jit, c.i.op == OP_SWC2 ? "rec_SWC2" : "rec_SW");
1659 rec_store(state, block, offset,
1660 jit_code_stxi_i, jit_code_bswapr_ui);
1661}
1662
1663static void rec_SWL(struct lightrec_cstate *state,
1664 const struct block *block, u16 offset)
1665{
1666 _jit_name(block->_jit, __func__);
1667 rec_io(state, block, offset, true, false);
1668}
1669
1670static void rec_SWR(struct lightrec_cstate *state,
1671 const struct block *block, u16 offset)
1672{
1673 _jit_name(block->_jit, __func__);
1674 rec_io(state, block, offset, true, false);
1675}
1676
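/* Direct load from a known memory map. The result goes to REG_TEMP when the
 * opcode sits in a load-delay slot (or is LWC2) and to rt otherwise; the
 * address is masked into the map and offset to host memory, and on
 * big-endian hosts the value is byteswapped and re-extended for LH/LW. */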
1677static void rec_load_memory(struct lightrec_cstate *cstate,
1678 const struct block *block, u16 offset,
1679 jit_code_t code, jit_code_t swap_code, bool is_unsigned,
1680 uintptr_t addr_offset, u32 addr_mask)
1681{
1682 struct regcache *reg_cache = cstate->reg_cache;
1683 struct opcode *op = &block->opcode_list[offset];
1684 bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
1685 jit_state_t *_jit = block->_jit;
1686 u8 rs, rt, out_reg, addr_reg, flags = REG_EXT;
1687 bool no_mask = op_flag_no_mask(op->flags);
1688 union code c = op->c;
1689 s8 reg_imm;
1690 s16 imm;
1691
1692 if (load_delay || c.i.op == OP_LWC2)
1693 out_reg = REG_TEMP;
1694 else if (c.i.rt)
1695 out_reg = c.i.rt;
1696 else
1697 return;
1698
1699 if (is_unsigned)
1700 flags |= REG_ZEXT;
1701
1702 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1703 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
1704
1705 if (!cstate->state->mirrors_mapped && c.i.imm && !no_mask) {
1706 jit_addi(rt, rs, (s16)c.i.imm);
1707 addr_reg = rt;
1708 imm = 0;
1709 } else {
1710 addr_reg = rs;
1711 imm = (s16)c.i.imm;
1712 }
1713
1714 if (!no_mask) {
1715 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1716 addr_mask);
1717
1718 jit_andr(rt, addr_reg, reg_imm);
1719 addr_reg = rt;
1720
1721 lightrec_free_reg(reg_cache, reg_imm);
1722 }
1723
1724 if (addr_offset) {
1725 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1726 addr_offset);
1727
1728 jit_addr(rt, addr_reg, reg_imm);
1729 addr_reg = rt;
1730
1731 lightrec_free_reg(reg_cache, reg_imm);
1732 }
1733
1734 jit_new_node_www(code, rt, addr_reg, imm);
1735
1736 if (is_big_endian() && swap_code) {
1737 jit_new_node_ww(swap_code, rt, rt);
1738
1739 if (c.i.op == OP_LH)
1740 jit_extr_s(rt, rt);
1741 else if (c.i.op == OP_LW && __WORDSIZE == 64)
1742 jit_extr_i(rt, rt);
1743 }
1744
1745 lightrec_free_reg(reg_cache, rs);
1746 lightrec_free_reg(reg_cache, rt);
1747}
1748
1749static void rec_load_ram(struct lightrec_cstate *cstate,
1750 const struct block *block, u16 offset,
1751 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1752{
1753 _jit_note(block->_jit, __FILE__, __LINE__);
1754
1755 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1756 cstate->state->offset_ram, rec_ram_mask(cstate->state));
1757}
1758
1759static void rec_load_bios(struct lightrec_cstate *cstate,
1760 const struct block *block, u16 offset,
1761 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1762{
1763 _jit_note(block->_jit, __FILE__, __LINE__);
1764
1765 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1766 cstate->state->offset_bios, 0x1fffffff);
1767}
1768
1769static void rec_load_scratch(struct lightrec_cstate *cstate,
1770 const struct block *block, u16 offset,
1771 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1772{
1773 _jit_note(block->_jit, __FILE__, __LINE__);
1774
1775 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1776 cstate->state->offset_scratch, 0x1fffffff);
1777}
1778
1779static void rec_load_io(struct lightrec_cstate *cstate,
1780 const struct block *block, u16 offset,
1781 jit_code_t code, jit_code_t swap_code, bool is_unsigned)
1782{
1783 _jit_note(block->_jit, __FILE__, __LINE__);
1784
1785 rec_load_memory(cstate, block, offset, code, swap_code, is_unsigned,
1786 cstate->state->offset_io, rec_io_mask(cstate->state));
1787}
1788
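/* Load with run-time map selection, used when the RAM, BIOS and scratchpad
 * maps do not share a single host offset: bit 28 of the address separates
 * RAM from the 0x1fxxxxxx window, and bit 22 then separates the BIOS from
 * the scratchpad, each path applying its own KUNSEG mask and host offset. */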
1789static void rec_load_direct(struct lightrec_cstate *cstate,
1790 const struct block *block, u16 offset,
1791 jit_code_t code, jit_code_t swap_code,
1792 bool is_unsigned)
1793{
1794 struct lightrec_state *state = cstate->state;
1795 struct regcache *reg_cache = cstate->reg_cache;
1796 struct opcode *op = &block->opcode_list[offset];
1797 bool load_delay = op_flag_load_delay(op->flags) && !cstate->no_load_delay;
1798 jit_state_t *_jit = block->_jit;
1799 jit_node_t *to_not_ram, *to_not_bios, *to_end, *to_end2;
1800 u8 tmp, rs, rt, out_reg, addr_reg, flags = REG_EXT;
1801 union code c = op->c;
1802 s32 addr_mask;
1803 u32 reg_imm;
1804 s8 offt_reg;
1805 s16 imm;
1806
1807 if (load_delay || c.i.op == OP_LWC2)
1808 out_reg = REG_TEMP;
1809 else if (c.i.rt)
1810 out_reg = c.i.rt;
1811 else
1812 return;
1813
1814 if (is_unsigned)
1815 flags |= REG_ZEXT;
1816
1817 jit_note(__FILE__, __LINE__);
1818 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, 0);
1819 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
1820
1821 if ((state->offset_ram == state->offset_bios &&
1822 state->offset_ram == state->offset_scratch &&
1823 state->mirrors_mapped) || !c.i.imm) {
1824 addr_reg = rs;
1825 imm = (s16)c.i.imm;
1826 } else {
1827 jit_addi(rt, rs, (s16)c.i.imm);
1828 addr_reg = rt;
1829 imm = 0;
1830
1831 if (c.i.rs != c.i.rt)
1832 lightrec_free_reg(reg_cache, rs);
1833 }
1834
1835 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
1836
1837 if (state->offset_ram == state->offset_bios &&
1838 state->offset_ram == state->offset_scratch) {
1839 if (!state->mirrors_mapped)
1840 addr_mask = 0x1f800000 | (RAM_SIZE - 1);
1841 else
1842 addr_mask = 0x1fffffff;
1843
1844 reg_imm = lightrec_alloc_reg_temp_with_value(reg_cache, _jit,
1845 addr_mask);
1846 if (!state->mirrors_mapped) {
1847 jit_andi(tmp, addr_reg, BIT(28));
1848 jit_rshi_u(tmp, tmp, 28 - 22);
1849 jit_orr(tmp, tmp, reg_imm);
1850 jit_andr(rt, addr_reg, tmp);
1851 } else {
1852 jit_andr(rt, addr_reg, reg_imm);
1853 }
1854
1855 lightrec_free_reg(reg_cache, reg_imm);
1856
1857 if (state->offset_ram) {
1858 offt_reg = lightrec_get_reg_with_value(reg_cache,
1859 state->offset_ram);
1860 if (offt_reg < 0) {
1861 jit_movi(tmp, state->offset_ram);
1862 lightrec_temp_set_value(reg_cache, tmp,
1863 state->offset_ram);
1864 } else {
1865 lightrec_free_reg(reg_cache, tmp);
1866 tmp = offt_reg;
1867 }
1868 }
1869 } else {
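		/* Bit 28 of the address is set for BIOS and scratchpad accesses
		 * (0x1fxxxxxx in KUNSEG) but clear for RAM and its mirrors. */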
1870 to_not_ram = jit_bmsi(addr_reg, BIT(28));
1871
1872 /* Convert to KUNSEG and avoid RAM mirrors */
1873 jit_andi(rt, addr_reg, RAM_SIZE - 1);
1874
1875 if (state->offset_ram)
1876 jit_movi(tmp, state->offset_ram);
1877
1878 to_end = jit_b();
1879
1880 jit_patch(to_not_ram);
1881
1882 if (state->offset_bios != state->offset_scratch)
1883 to_not_bios = jit_bmci(addr_reg, BIT(22));
1884
1885 /* Convert to KUNSEG */
1886 jit_andi(rt, addr_reg, 0x1fc00000 | (BIOS_SIZE - 1));
1887
1888 jit_movi(tmp, state->offset_bios);
1889
1890 if (state->offset_bios != state->offset_scratch) {
1891 to_end2 = jit_b();
1892
1893 jit_patch(to_not_bios);
1894
1895 /* Convert to KUNSEG */
1896 jit_andi(rt, addr_reg, 0x1f800fff);
1897
1898 if (state->offset_scratch)
1899 jit_movi(tmp, state->offset_scratch);
1900
1901 jit_patch(to_end2);
1902 }
1903
1904 jit_patch(to_end);
1905 }
1906
1907 if (state->offset_ram || state->offset_bios || state->offset_scratch)
1908 jit_addr(rt, rt, tmp);
1909
1910 jit_new_node_www(code, rt, rt, imm);
1911
1912 if (is_big_endian() && swap_code) {
1913 jit_new_node_ww(swap_code, rt, rt);
1914
1915 if (c.i.op == OP_LH)
1916 jit_extr_s(rt, rt);
1917 else if (c.i.op == OP_LW && __WORDSIZE == 64)
1918 jit_extr_i(rt, rt);
1919 }
1920
1921 lightrec_free_reg(reg_cache, addr_reg);
1922 lightrec_free_reg(reg_cache, rt);
1923 lightrec_free_reg(reg_cache, tmp);
1924}
1925
1926static void rec_load(struct lightrec_cstate *state, const struct block *block,
1927 u16 offset, jit_code_t code, jit_code_t swap_code,
1928 bool is_unsigned)
1929{
1930 const struct opcode *op = &block->opcode_list[offset];
1931 u32 flags = op->flags;
1932
1933 switch (LIGHTREC_FLAGS_GET_IO_MODE(flags)) {
1934 case LIGHTREC_IO_RAM:
1935 rec_load_ram(state, block, offset, code, swap_code, is_unsigned);
1936 break;
1937 case LIGHTREC_IO_BIOS:
1938 rec_load_bios(state, block, offset, code, swap_code, is_unsigned);
1939 break;
1940 case LIGHTREC_IO_SCRATCH:
1941 rec_load_scratch(state, block, offset, code, swap_code, is_unsigned);
1942 break;
1943 case LIGHTREC_IO_DIRECT_HW:
1944 rec_load_io(state, block, offset, code, swap_code, is_unsigned);
1945 break;
1946 case LIGHTREC_IO_DIRECT:
1947 rec_load_direct(state, block, offset, code, swap_code, is_unsigned);
1948 break;
1949 default:
1950 rec_io(state, block, offset, false, true);
1951 return;
1952 }
1953
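	/* LWC2 loads into REG_TEMP; forward the value to the GTE data
	 * register and drop the temporary. */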
1954 if (op->i.op == OP_LWC2) {
1955 rec_cp2_do_mtc2(state, block, offset, op->i.rt, REG_TEMP);
1956 lightrec_discard_reg_if_loaded(state->reg_cache, REG_TEMP);
1957 }
1958}
1959
1960static void rec_LB(struct lightrec_cstate *state, const struct block *block, u16 offset)
1961{
1962 _jit_name(block->_jit, __func__);
1963 rec_load(state, block, offset, jit_code_ldxi_c, 0, false);
1964}
1965
1966static void rec_LBU(struct lightrec_cstate *state, const struct block *block, u16 offset)
1967{
1968 _jit_name(block->_jit, __func__);
1969 rec_load(state, block, offset, jit_code_ldxi_uc, 0, true);
1970}
1971
1972static void rec_LH(struct lightrec_cstate *state, const struct block *block, u16 offset)
1973{
1974 jit_code_t code = is_big_endian() ? jit_code_ldxi_us : jit_code_ldxi_s;
1975
1976 _jit_name(block->_jit, __func__);
1977 rec_load(state, block, offset, code, jit_code_bswapr_us, false);
1978}
1979
1980static void rec_LHU(struct lightrec_cstate *state, const struct block *block, u16 offset)
1981{
1982 _jit_name(block->_jit, __func__);
1983 rec_load(state, block, offset, jit_code_ldxi_us, jit_code_bswapr_us, true);
1984}
1985
1986static void rec_LWL(struct lightrec_cstate *state, const struct block *block, u16 offset)
1987{
1988 _jit_name(block->_jit, __func__);
1989 rec_io(state, block, offset, true, true);
1990}
1991
1992static void rec_LWR(struct lightrec_cstate *state, const struct block *block, u16 offset)
1993{
1994 _jit_name(block->_jit, __func__);
1995 rec_io(state, block, offset, true, true);
1996}
1997
1998static void rec_LW(struct lightrec_cstate *state, const struct block *block, u16 offset)
1999{
2000 union code c = block->opcode_list[offset].c;
2001 jit_code_t code;
2002
2003 if (is_big_endian() && __WORDSIZE == 64)
2004 code = jit_code_ldxi_ui;
2005 else
2006 code = jit_code_ldxi_i;
2007
2008 _jit_name(block->_jit, c.i.op == OP_LWC2 ? "rec_LWC2" : "rec_LW");
2009 rec_load(state, block, offset, code, jit_code_bswapr_ui, false);
2010}
2011
2012static void rec_break_syscall(struct lightrec_cstate *state,
2013 const struct block *block, u16 offset,
2014 u32 exit_code)
2015{
2016 struct regcache *reg_cache = state->reg_cache;
2017 jit_state_t *_jit = block->_jit;
2018 u8 tmp;
2019
2020 _jit_note(block->_jit, __FILE__, __LINE__);
2021
2022 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2023
2024 jit_movi(tmp, exit_code);
2025 jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
2026 LIGHTREC_REG_STATE, tmp);
2027
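	/* Compute the current cycle count (target_cycle minus the remaining
	 * cycles), zero the remaining-cycle counter, and store the result as
	 * both target_cycle and current_cycle so that the main loop stops
	 * right after this block. */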
2028 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2029 offsetof(struct lightrec_state, target_cycle));
2030 jit_subr(tmp, tmp, LIGHTREC_REG_CYCLE);
2031 jit_movi(LIGHTREC_REG_CYCLE, 0);
2032 jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
2033 LIGHTREC_REG_STATE, tmp);
2034 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
2035 LIGHTREC_REG_STATE, tmp);
2036
2037 lightrec_free_reg(reg_cache, tmp);
2038
2039	/* TODO: the return address should be "pc - 4" if we're in a delay slot */
2040 lightrec_emit_end_of_block(state, block, offset, -1,
2041 get_ds_pc(block, offset, 0),
2042 31, 0, true);
2043}
2044
2045static void rec_special_SYSCALL(struct lightrec_cstate *state,
2046 const struct block *block, u16 offset)
2047{
2048 _jit_name(block->_jit, __func__);
2049 rec_break_syscall(state, block, offset, LIGHTREC_EXIT_SYSCALL);
2050}
2051
2052static void rec_special_BREAK(struct lightrec_cstate *state,
2053 const struct block *block, u16 offset)
2054{
2055 _jit_name(block->_jit, __func__);
2056 rec_break_syscall(state, block, offset, LIGHTREC_EXIT_BREAK);
2057}
2058
2059static void rec_mfc(struct lightrec_cstate *state, const struct block *block, u16 offset)
2060{
2061 struct regcache *reg_cache = state->reg_cache;
2062 union code c = block->opcode_list[offset].c;
2063 jit_state_t *_jit = block->_jit;
2064
2065 jit_note(__FILE__, __LINE__);
2066
2067 if (c.i.op != OP_SWC2)
2068 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, true);
2069
2070 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MFC);
2071}
2072
2073static void rec_mtc(struct lightrec_cstate *state, const struct block *block, u16 offset)
2074{
2075 struct regcache *reg_cache = state->reg_cache;
2076 union code c = block->opcode_list[offset].c;
2077 jit_state_t *_jit = block->_jit;
2078
2079 jit_note(__FILE__, __LINE__);
2080 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rs, false);
2081 lightrec_clean_reg_if_loaded(reg_cache, _jit, c.i.rt, false);
2082 lightrec_clean_reg_if_loaded(reg_cache, _jit, REG_TEMP, false);
2083
2084 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_MTC);
2085
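	/* A write to the Status or Cause register may make an interrupt
	 * pending; end the block after the delay slot so it can be serviced. */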
2086 if (c.i.op == OP_CP0 &&
2087 !op_flag_no_ds(block->opcode_list[offset].flags) &&
2088 (c.r.rd == 12 || c.r.rd == 13))
2089 lightrec_emit_end_of_block(state, block, offset, -1,
2090 get_ds_pc(block, offset, 1),
2091 0, 0, true);
2092}
2093
2094static void
2095rec_mfc0(struct lightrec_cstate *state, const struct block *block, u16 offset)
2096{
2097 struct regcache *reg_cache = state->reg_cache;
2098 union code c = block->opcode_list[offset].c;
2099 jit_state_t *_jit = block->_jit;
2100 u8 rt;
2101
2102 jit_note(__FILE__, __LINE__);
2103
2104 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.i.rt, REG_EXT);
2105
2106 jit_ldxi_i(rt, LIGHTREC_REG_STATE,
2107 offsetof(struct lightrec_state, regs.cp0[c.r.rd]));
2108
2109 lightrec_free_reg(reg_cache, rt);
2110}
2111
2112static bool block_uses_icache(const struct lightrec_cstate *state,
2113 const struct block *block)
2114{
2115 const struct lightrec_mem_map *map = &state->state->maps[PSX_MAP_KERNEL_USER_RAM];
2116 u32 pc = kunseg(block->pc);
2117
2118 if (pc < map->pc || pc >= map->pc + map->length)
2119 return false;
2120
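	/* kuseg and kseg0 (segments 0x0-0x9) go through the instruction
	 * cache; kseg1 (0xa) bypasses it. */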
2121 return (block->pc >> 28) < 0xa;
2122}
2123
2124static void
2125rec_mtc0(struct lightrec_cstate *state, const struct block *block, u16 offset)
2126{
2127 struct regcache *reg_cache = state->reg_cache;
2128 const union code c = block->opcode_list[offset].c;
2129 jit_state_t *_jit = block->_jit;
2130 u8 rt, tmp = 0, tmp2, status;
2131 jit_node_t *to_end;
2132
2133 jit_note(__FILE__, __LINE__);
2134
2135	switch (c.r.rd) {
2136 case 1:
2137 case 4:
2138 case 8:
2139 case 14:
2140 case 15:
2141 /* Those registers are read-only */
2142 return;
2143 default:
2144 break;
2145 }
2146
2147 if (!block_uses_icache(state, block) && c.r.rd == 12) {
2148		/* If we are not running code from RAM through kuseg or
2149		 * kseg0, handle writes to the Status register in C, as the
2150		 * code may toggle bit 16, which isolates the cache. Code
2151		 * running from kuseg or kseg0 in RAM cannot do that. */
2152 rec_mtc(state, block, offset);
2153 return;
2154 }
2155
2156 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rt, 0);
2157
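	/* The Cause register is not stored verbatim; its writable bits are
	 * merged below. */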
2158 if (c.r.rd != 13) {
2159 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[c.r.rd]),
2160 LIGHTREC_REG_STATE, rt);
2161 }
2162
2163 if (c.r.rd == 12 || c.r.rd == 13) {
2164 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2165 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2166 offsetof(struct lightrec_state, regs.cp0[13]));
2167
2168 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2169 }
2170
2171 if (c.r.rd == 12) {
2172 status = rt;
2173 } else if (c.r.rd == 13) {
2174 /* Cause = (Cause & ~0x0300) | (value & 0x0300) */
2175 jit_andi(tmp2, rt, 0x0300);
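		/* ori then xori with 0x0300 clears bits 8-9 of the old Cause */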
2176 jit_ori(tmp, tmp, 0x0300);
2177 jit_xori(tmp, tmp, 0x0300);
2178 jit_orr(tmp, tmp, tmp2);
2179 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
2180 offsetof(struct lightrec_state, regs.cp0[12]));
2181 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[13]),
2182 LIGHTREC_REG_STATE, tmp);
2183 status = tmp2;
2184 }
2185
2186 if (c.r.rd == 12 || c.r.rd == 13) {
2187 /* Exit dynarec in case there's a software interrupt.
2188		 * exit_flags = !!(status & cause & 0x0300) & status; */
2189 jit_andr(tmp, tmp, status);
2190 jit_andi(tmp, tmp, 0x0300);
2191 jit_nei(tmp, tmp, 0);
2192 jit_andr(tmp, tmp, status);
2193 }
2194
2195 if (c.r.rd == 12) {
2196 /* Exit dynarec in case we unmask a hardware interrupt.
2197 * exit_flags = !(~status & 0x401) */
2198
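		/* 0x401 = IEc (bit 0) | IM2 (bit 10), the PSX hardware
		 * interrupt mask */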
2199 jit_comr(tmp2, status);
2200 jit_andi(tmp2, tmp2, 0x401);
2201 jit_eqi(tmp2, tmp2, 0);
2202 jit_orr(tmp, tmp, tmp2);
2203 }
2204
2205 lightrec_free_reg(reg_cache, rt);
2206
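	/* If the write made an interrupt pending, consume the remaining cycle
	 * budget so that execution returns to the main loop once the block
	 * ends. */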
2207 if (c.r.rd == 12 || c.r.rd == 13) {
2208 to_end = jit_beqi(tmp, 0);
2209
2210 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE,
2211 offsetof(struct lightrec_state, target_cycle));
2212 jit_subr(tmp2, tmp2, LIGHTREC_REG_CYCLE);
2213 jit_movi(LIGHTREC_REG_CYCLE, 0);
2214 jit_stxi_i(offsetof(struct lightrec_state, target_cycle),
2215 LIGHTREC_REG_STATE, tmp2);
2216 jit_stxi_i(offsetof(struct lightrec_state, current_cycle),
2217 LIGHTREC_REG_STATE, tmp2);
2218
2219
2220 jit_patch(to_end);
2221 }
2222
2223 if (!op_flag_no_ds(block->opcode_list[offset].flags) &&
2224 (c.r.rd == 12 || c.r.rd == 13)) {
2225 state->cycles += lightrec_cycles_of_opcode(state->state, c);
2226 lightrec_emit_eob(state, block, offset + 1);
2227 }
2228}
2229
2230static void rec_cp0_MFC0(struct lightrec_cstate *state,
2231 const struct block *block, u16 offset)
2232{
2233 _jit_name(block->_jit, __func__);
2234 rec_mfc0(state, block, offset);
2235}
2236
2237static void rec_cp0_CFC0(struct lightrec_cstate *state,
2238 const struct block *block, u16 offset)
2239{
2240 _jit_name(block->_jit, __func__);
2241 rec_mfc0(state, block, offset);
2242}
2243
2244static void rec_cp0_MTC0(struct lightrec_cstate *state,
2245 const struct block *block, u16 offset)
2246{
2247 _jit_name(block->_jit, __func__);
2248 rec_mtc0(state, block, offset);
2249}
2250
2251static void rec_cp0_CTC0(struct lightrec_cstate *state,
2252 const struct block *block, u16 offset)
2253{
2254 _jit_name(block->_jit, __func__);
2255 rec_mtc0(state, block, offset);
2256}
2257
2258static unsigned int cp2d_i_offset(u8 reg)
2259{
2260 return offsetof(struct lightrec_state, regs.cp2d[reg]);
2261}
2262
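/* Offset of the low 16-bit half of the 32-bit register, on both little- and
 * big-endian hosts. */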
2263static unsigned int cp2d_s_offset(u8 reg)
2264{
2265 return cp2d_i_offset(reg) + is_big_endian() * 2;
2266}
2267
2268static unsigned int cp2c_i_offset(u8 reg)
2269{
2270 return offsetof(struct lightrec_state, regs.cp2c[reg]);
2271}
2272
2273static unsigned int cp2c_s_offset(u8 reg)
2274{
2275 return cp2c_i_offset(reg) + is_big_endian() * 2;
2276}
2277
2278static void rec_cp2_do_mfc2(struct lightrec_cstate *state,
2279 const struct block *block, u16 offset,
2280 u8 reg, u8 out_reg)
2281{
2282 struct regcache *reg_cache = state->reg_cache;
2283 jit_state_t *_jit = block->_jit;
2284 const u32 zext_regs = 0x300f0080;
2285 u8 rt, tmp, tmp2, tmp3, out, flags;
2286 unsigned int i;
2287
2288 _jit_name(block->_jit, __func__);
2289
2290 if (state->state->ops.cop2_notify) {
2291 /* We must call cop2_notify, handle that in C. */
2292 rec_mfc(state, block, offset);
2293 return;
2294 }
2295
2296 flags = (zext_regs & BIT(reg)) ? REG_ZEXT : REG_EXT;
2297 rt = lightrec_alloc_reg_out(reg_cache, _jit, out_reg, flags);
2298
2299 if (reg == 15)
2300 reg = 14;
2301
2302 switch (reg) {
2303 case 1:
2304 case 3:
2305 case 5:
2306 case 8:
2307 case 9:
2308 case 10:
2309 case 11:
2310 jit_ldxi_s(rt, LIGHTREC_REG_STATE, cp2d_s_offset(reg));
2311 break;
2312 case 7:
2313 case 16:
2314 case 17:
2315 case 18:
2316 case 19:
2317 jit_ldxi_us(rt, LIGHTREC_REG_STATE, cp2d_s_offset(reg));
2318 break;
2319 case 28:
2320 case 29:
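		/* ORGB: clamp IR1/IR2/IR3 >> 7 to the range [0, 0x1f] and pack
		 * the three fields at bits 0, 5 and 10. */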
2321 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2322 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2323 tmp3 = lightrec_alloc_reg_temp(reg_cache, _jit);
2324
2325 for (i = 0; i < 3; i++) {
2326 out = i == 0 ? rt : tmp;
2327
2328 jit_ldxi_s(tmp, LIGHTREC_REG_STATE, cp2d_s_offset(9 + i));
2329 jit_movi(tmp2, 0x1f);
2330 jit_rshi(out, tmp, 7);
2331
2332 jit_ltr(tmp3, tmp2, out);
2333 jit_movnr(out, tmp2, tmp3);
2334
2335 jit_gei(tmp2, out, 0);
2336 jit_movzr(out, tmp2, tmp2);
2337
2338 if (i > 0) {
2339 jit_lshi(tmp, tmp, 5 * i);
2340 jit_orr(rt, rt, tmp);
2341 }
2342 }
2343
2344
2345 lightrec_free_reg(reg_cache, tmp);
2346 lightrec_free_reg(reg_cache, tmp2);
2347 lightrec_free_reg(reg_cache, tmp3);
2348 break;
2349 default:
2350 jit_ldxi_i(rt, LIGHTREC_REG_STATE, cp2d_i_offset(reg));
2351 break;
2352 }
2353
2354 lightrec_free_reg(reg_cache, rt);
2355}
2356
2357static void rec_cp2_basic_MFC2(struct lightrec_cstate *state,
2358 const struct block *block, u16 offset)
2359{
2360 const union code c = block->opcode_list[offset].c;
2361
2362 rec_cp2_do_mfc2(state, block, offset, c.r.rd, c.r.rt);
2363}
2364
2365static void rec_cp2_basic_CFC2(struct lightrec_cstate *state,
2366 const struct block *block, u16 offset)
2367{
2368 struct regcache *reg_cache = state->reg_cache;
2369 const union code c = block->opcode_list[offset].c;
2370 jit_state_t *_jit = block->_jit;
2371 u8 rt;
2372
2373 _jit_name(block->_jit, __func__);
2374
2375 if (state->state->ops.cop2_notify) {
2376 /* We must call cop2_notify, handle that in C. */
2377 rec_mfc(state, block, offset);
2378 return;
2379 }
2380
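	/* These control registers hold sign-extended 16-bit values; all
	 * others are read as 32-bit words. */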
2381 switch (c.r.rd) {
2382 case 4:
2383 case 12:
2384 case 20:
2385 case 26:
2386 case 27:
2387 case 29:
2388 case 30:
2389 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_EXT);
2390 jit_ldxi_s(rt, LIGHTREC_REG_STATE, cp2c_s_offset(c.r.rd));
2391 break;
2392 default:
2393 rt = lightrec_alloc_reg_out(reg_cache, _jit, c.r.rt, REG_ZEXT);
2394 jit_ldxi_ui(rt, LIGHTREC_REG_STATE, cp2c_i_offset(c.r.rd));
2395 break;
2396 }
2397
2398 lightrec_free_reg(reg_cache, rt);
2399}
2400
2401static void rec_cp2_do_mtc2(struct lightrec_cstate *state,
2402 const struct block *block, u16 offset,
2403 u8 reg, u8 in_reg)
2404{
2405 struct regcache *reg_cache = state->reg_cache;
2406 jit_state_t *_jit = block->_jit;
2407 u8 rt, tmp, tmp2, flags = 0;
2408
2409 _jit_name(block->_jit, __func__);
2410
2411 if (state->state->ops.cop2_notify) {
2412 /* We must call cop2_notify, handle that in C. */
2413 rec_mtc(state, block, offset);
2414 return;
2415 }
2416
2417 if (reg == 31)
2418 return;
2419
2420 if (reg == 30)
2421 flags |= REG_EXT;
2422
2423 rt = lightrec_alloc_reg_in(reg_cache, _jit, in_reg, flags);
2424
2425 switch (reg) {
2426 case 15:
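		/* Writing SXYP pushes the screen XY FIFO:
		 * SXY0 <- SXY1, SXY1 <- SXY2, SXY2 <- new value. */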
2427 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2428 jit_ldxi_i(tmp, LIGHTREC_REG_STATE, cp2d_i_offset(13));
2429
2430 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2431 jit_ldxi_i(tmp2, LIGHTREC_REG_STATE, cp2d_i_offset(14));
2432
2433 jit_stxi_i(cp2d_i_offset(12), LIGHTREC_REG_STATE, tmp);
2434 jit_stxi_i(cp2d_i_offset(13), LIGHTREC_REG_STATE, tmp2);
2435 jit_stxi_i(cp2d_i_offset(14), LIGHTREC_REG_STATE, rt);
2436
2437 lightrec_free_reg(reg_cache, tmp);
2438 lightrec_free_reg(reg_cache, tmp2);
2439 break;
2440 case 28:
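		/* IRGB: expand the three 5-bit colour fields into IR1/IR2/IR3,
		 * i.e. IRx = ((value >> (5 * (x - 1))) & 0x1f) << 7 */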
2441 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2442
2443 jit_lshi(tmp, rt, 7);
2444 jit_andi(tmp, tmp, 0xf80);
2445 jit_stxi_s(cp2d_s_offset(9), LIGHTREC_REG_STATE, tmp);
2446
2447 jit_lshi(tmp, rt, 2);
2448 jit_andi(tmp, tmp, 0xf80);
2449 jit_stxi_s(cp2d_s_offset(10), LIGHTREC_REG_STATE, tmp);
2450
2451 jit_rshi(tmp, rt, 3);
2452 jit_andi(tmp, tmp, 0xf80);
2453 jit_stxi_s(cp2d_s_offset(11), LIGHTREC_REG_STATE, tmp);
2454
2455 lightrec_free_reg(reg_cache, tmp);
2456 break;
2457 case 30:
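		/* Writing LZCS also updates LZCR (reg 31) with the number of
		 * leading bits equal to the sign bit. */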
2458 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2459
2460 /* if (rt < 0) rt = ~rt; */
2461 jit_rshi(tmp, rt, 31);
2462 jit_xorr(tmp, rt, tmp);
2463
2464 /* Count leading zeros */
2465 jit_clzr(tmp, tmp);
2466 if (__WORDSIZE != 32)
2467 jit_subi(tmp, tmp, __WORDSIZE - 32);
2468
2469 jit_stxi_i(cp2d_i_offset(31), LIGHTREC_REG_STATE, tmp);
2470
2471 lightrec_free_reg(reg_cache, tmp);
2472 fallthrough;
2473 default:
2474 jit_stxi_i(cp2d_i_offset(reg), LIGHTREC_REG_STATE, rt);
2475 break;
2476 }
2477
2478 lightrec_free_reg(reg_cache, rt);
2479}
2480
2481static void rec_cp2_basic_MTC2(struct lightrec_cstate *state,
2482 const struct block *block, u16 offset)
2483{
2484 const union code c = block->opcode_list[offset].c;
2485
2486 rec_cp2_do_mtc2(state, block, offset, c.r.rd, c.r.rt);
2487}
2488
2489static void rec_cp2_basic_CTC2(struct lightrec_cstate *state,
2490 const struct block *block, u16 offset)
2491{
2492 struct regcache *reg_cache = state->reg_cache;
2493 const union code c = block->opcode_list[offset].c;
2494 jit_state_t *_jit = block->_jit;
2495 u8 rt, tmp, tmp2;
2496
2497 _jit_name(block->_jit, __func__);
2498
2499 if (state->state->ops.cop2_notify) {
2500 /* We must call cop2_notify, handle that in C. */
2501 rec_mtc(state, block, offset);
2502 return;
2503 }
2504
2505 rt = lightrec_alloc_reg_in(reg_cache, _jit, c.r.rt, 0);
2506
2507 switch (c.r.rd) {
2508 case 4:
2509 case 12:
2510 case 20:
2511 case 26:
2512 case 27:
2513 case 29:
2514 case 30:
2515 jit_stxi_s(cp2c_s_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
2516 break;
2517 case 31:
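		/* FLAG: bits 0-11 read as zero, and bit 31 is set if any of the
		 * error bits (mask 0x7f87e000) are set. */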
2518 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2519 tmp2 = lightrec_alloc_reg_temp(reg_cache, _jit);
2520
2521 jit_andi(tmp, rt, 0x7f87e000);
2522 jit_nei(tmp, tmp, 0);
2523 jit_lshi(tmp, tmp, 31);
2524
2525 jit_andi(tmp2, rt, 0x7ffff000);
2526 jit_orr(tmp, tmp2, tmp);
2527
2528 jit_stxi_i(cp2c_i_offset(31), LIGHTREC_REG_STATE, tmp);
2529
2530 lightrec_free_reg(reg_cache, tmp);
2531 lightrec_free_reg(reg_cache, tmp2);
2532 break;
2533
2534 default:
2535 jit_stxi_i(cp2c_i_offset(c.r.rd), LIGHTREC_REG_STATE, rt);
2536 }
2537
2538 lightrec_free_reg(reg_cache, rt);
2539}
2540
2541static void rec_cp0_RFE(struct lightrec_cstate *state,
2542 const struct block *block, u16 offset)
2543{
2544 struct regcache *reg_cache = state->reg_cache;
2545 jit_state_t *_jit = block->_jit;
2546 u8 status, tmp;
2547
2548 jit_name(__func__);
2549 jit_note(__FILE__, __LINE__);
2550
2551 status = lightrec_alloc_reg_temp(reg_cache, _jit);
2552 jit_ldxi_i(status, LIGHTREC_REG_STATE,
2553 offsetof(struct lightrec_state, regs.cp0[12]));
2554
2555 tmp = lightrec_alloc_reg_temp(reg_cache, _jit);
2556
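	/* RFE pops the KU/IE stack: the 'previous' and 'old' mode/interrupt
	 * bit pairs shift down into 'current' and 'previous'. */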
2557	/* status = ((status >> 2) & 0xf) | (status & ~0xf); */
2558 jit_rshi(tmp, status, 2);
2559 jit_andi(tmp, tmp, 0xf);
2560 jit_andi(status, status, ~0xful);
2561 jit_orr(status, status, tmp);
2562
2563 jit_ldxi_i(tmp, LIGHTREC_REG_STATE,
2564 offsetof(struct lightrec_state, regs.cp0[13]));
2565 jit_stxi_i(offsetof(struct lightrec_state, regs.cp0[12]),
2566 LIGHTREC_REG_STATE, status);
2567
2568 /* Exit dynarec in case there's a software interrupt.
2569 * exit_flags = !!(status & cause & 0x0300) & status; */
2570 jit_andr(tmp, tmp, status);
2571 jit_andi(tmp, tmp, 0x0300);
2572 jit_nei(tmp, tmp, 0);
2573 jit_andr(tmp, tmp, status);
2574 jit_stxi_i(offsetof(struct lightrec_state, exit_flags),
2575 LIGHTREC_REG_STATE, tmp);
2576
2577 lightrec_free_reg(reg_cache, status);
2578 lightrec_free_reg(reg_cache, tmp);
2579}
2580
2581static void rec_CP(struct lightrec_cstate *state,
2582 const struct block *block, u16 offset)
2583{
2584 union code c = block->opcode_list[offset].c;
2585 jit_state_t *_jit = block->_jit;
2586
2587 jit_name(__func__);
2588 jit_note(__FILE__, __LINE__);
2589
2590 call_to_c_wrapper(state, block, c.opcode, C_WRAPPER_CP);
2591}
2592
2593static void rec_meta_MOV(struct lightrec_cstate *state,
2594 const struct block *block, u16 offset)
2595{
2596 struct regcache *reg_cache = state->reg_cache;
2597 const struct opcode *op = &block->opcode_list[offset];
2598 union code c = op->c;
2599 jit_state_t *_jit = block->_jit;
2600 bool unload_rd;
2601 bool unload_rs, discard_rs;
2602 u8 rs, rd;
2603
2604 _jit_name(block->_jit, __func__);
2605 jit_note(__FILE__, __LINE__);
2606
2607 unload_rs = OPT_EARLY_UNLOAD
2608 && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_UNLOAD;
2609 discard_rs = OPT_EARLY_UNLOAD
2610 && LIGHTREC_FLAGS_GET_RS(op->flags) == LIGHTREC_REG_DISCARD;
2611
2612 if ((unload_rs || discard_rs) && c.m.rs) {
2613 /* If the source register is going to be unloaded or discarded,
2614 * then we can simply mark its host register as now pointing to
2615 * the destination register. */
2616 pr_debug("Remap %s to %s at offset 0x%x\n",
2617 lightrec_reg_name(c.m.rs), lightrec_reg_name(c.m.rd),
2618 offset << 2);
2619 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2620 lightrec_remap_reg(reg_cache, _jit, rs, c.m.rd, discard_rs);
2621 lightrec_free_reg(reg_cache, rs);
2622 return;
2623 }
2624
2625 unload_rd = OPT_EARLY_UNLOAD
2626 && LIGHTREC_FLAGS_GET_RD(op->flags) == LIGHTREC_REG_UNLOAD;
2627
2628 if (c.m.rs && !lightrec_reg_is_loaded(reg_cache, c.m.rs)) {
2629 /* The source register is not yet loaded - we can load its value
2630 * from the register cache directly into the target register. */
2631 rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT);
2632
2633 jit_ldxi_i(rd, LIGHTREC_REG_STATE,
2634 offsetof(struct lightrec_state, regs.gpr) + (c.m.rs << 2));
2635
2636 lightrec_free_reg(reg_cache, rd);
2637 } else if (unload_rd) {
2638 /* If the destination register will be unloaded right after the
2639 * MOV meta-opcode, we don't actually need to write any host
2640 * register - we can just store the source register directly to
2641 * the register cache, at the offset corresponding to the
2642 * destination register. */
2643 lightrec_discard_reg_if_loaded(reg_cache, c.m.rd);
2644
2645 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2646
2647 jit_stxi_i(offsetof(struct lightrec_state, regs.gpr)
2648 + (c.m.rd << 2), LIGHTREC_REG_STATE, rs);
2649
2650 lightrec_free_reg(reg_cache, rs);
2651 } else {
2652 if (c.m.rs)
2653 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.m.rs, 0);
2654
2655 rd = lightrec_alloc_reg_out(reg_cache, _jit, c.m.rd, REG_EXT);
2656
2657 if (c.m.rs == 0) {
2658 jit_movi(rd, 0);
2659 } else {
2660 jit_extr_i(rd, rs);
2661 lightrec_free_reg(reg_cache, rs);
2662 }
2663
2664 lightrec_free_reg(reg_cache, rd);
2665 }
2666}
2667
2668static void rec_meta_EXTC_EXTS(struct lightrec_cstate *state,
2669 const struct block *block,
2670 u16 offset)
2671{
2672 struct regcache *reg_cache = state->reg_cache;
2673 union code c = block->opcode_list[offset].c;
2674 jit_state_t *_jit = block->_jit;
2675 u8 rs, rd;
2676
2677 _jit_name(block->_jit, __func__);
2678 jit_note(__FILE__, __LINE__);
2679
2680 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
2681 c.m.rs, c.m.rd, 0, REG_EXT, &rs, &rd);
2682
2683 if (c.m.op == OP_META_EXTC)
2684 jit_extr_c(rd, rs);
2685 else
2686 jit_extr_s(rd, rs);
2687
2688 lightrec_free_reg(reg_cache, rs);
2689 lightrec_free_reg(reg_cache, rd);
2690}
2691
2692static void rec_meta_MULT2(struct lightrec_cstate *state,
2693 const struct block *block,
2694 u16 offset)
2695{
2696 struct regcache *reg_cache = state->reg_cache;
2697 union code c = block->opcode_list[offset].c;
2698 jit_state_t *_jit = block->_jit;
2699 u8 reg_lo = get_mult_div_lo(c);
2700 u8 reg_hi = get_mult_div_hi(c);
2701 u32 flags = block->opcode_list[offset].flags;
2702 bool is_signed = c.i.op == OP_META_MULT2;
2703 u8 rs, lo, hi, rflags = 0, hiflags = 0;
2704 unsigned int i;
2705
2706 if (!op_flag_no_hi(flags) && c.r.op < 32) {
2707 rflags = is_signed ? REG_EXT : REG_ZEXT;
2708 hiflags = is_signed ? REG_EXT : (REG_EXT | REG_ZEXT);
2709 }
2710
2711 _jit_name(block->_jit, __func__);
2712 jit_note(__FILE__, __LINE__);
2713
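	/* META_MULT2 multiplies rs by 2^shift (shift encoded in c.r.op):
	 * LO = rs << shift, HI = rs >> (32 - shift) (arithmetic for the signed
	 * variant, logical otherwise); for shift >= 32, LO is 0 and
	 * HI = rs << (shift - 32). */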
2714 rs = lightrec_alloc_reg_in(reg_cache, _jit, c.i.rs, rflags);
2715
2716 /*
2717 * We must handle the case where one of the output registers is our rs
2718	 * input register. Thankfully, computing LO/HI can be done in any
2719 * order. Here, we make sure that the computation that overwrites the
2720 * input register is always performed last.
2721 */
2722 for (i = 0; i < 2; i++) {
2723 if ((!i ^ (reg_lo == c.i.rs)) && !op_flag_no_lo(flags)) {
2724 lo = lightrec_alloc_reg_out(reg_cache, _jit, reg_lo, 0);
2725
2726 if (c.r.op < 32)
2727 jit_lshi(lo, rs, c.r.op);
2728 else
2729 jit_movi(lo, 0);
2730
2731 lightrec_free_reg(reg_cache, lo);
2732 continue;
2733 }
2734
2735 if ((!!i ^ (reg_lo == c.i.rs)) && !op_flag_no_hi(flags)) {
2736 hi = lightrec_alloc_reg_out(reg_cache, _jit,
2737 reg_hi, hiflags);
2738
2739 if (c.r.op >= 32)
2740 jit_lshi(hi, rs, c.r.op - 32);
2741 else if (is_signed)
2742 jit_rshi(hi, rs, 32 - c.r.op);
2743 else
2744 jit_rshi_u(hi, rs, 32 - c.r.op);
2745
2746 lightrec_free_reg(reg_cache, hi);
2747 }
2748 }
2749
2750 lightrec_free_reg(reg_cache, rs);
2754}
2755
2756static void rec_meta_COM(struct lightrec_cstate *state,
2757 const struct block *block, u16 offset)
2758{
2759 struct regcache *reg_cache = state->reg_cache;
2760 union code c = block->opcode_list[offset].c;
2761 jit_state_t *_jit = block->_jit;
2762 u8 rd, rs, flags;
2763
2764 jit_note(__FILE__, __LINE__);
2765
2766 rec_alloc_rs_rd(reg_cache, _jit, &block->opcode_list[offset],
2767 c.m.rs, c.m.rd, 0, 0, &rs, &rd);
2768
2769 flags = lightrec_get_reg_in_flags(reg_cache, rs);
2770
2771 lightrec_set_reg_out_flags(reg_cache, rd,
2772 flags & REG_EXT);
2773
2774 jit_comr(rd, rs);
2775
2776 lightrec_free_reg(reg_cache, rs);
2777 lightrec_free_reg(reg_cache, rd);
2778}
2779
2780static const lightrec_rec_func_t rec_standard[64] = {
2781 SET_DEFAULT_ELM(rec_standard, unknown_opcode),
2782 [OP_SPECIAL] = rec_SPECIAL,
2783 [OP_REGIMM] = rec_REGIMM,
2784 [OP_J] = rec_J,
2785 [OP_JAL] = rec_JAL,
2786 [OP_BEQ] = rec_BEQ,
2787 [OP_BNE] = rec_BNE,
2788 [OP_BLEZ] = rec_BLEZ,
2789 [OP_BGTZ] = rec_BGTZ,
2790 [OP_ADDI] = rec_ADDI,
2791 [OP_ADDIU] = rec_ADDIU,
2792 [OP_SLTI] = rec_SLTI,
2793 [OP_SLTIU] = rec_SLTIU,
2794 [OP_ANDI] = rec_ANDI,
2795 [OP_ORI] = rec_ORI,
2796 [OP_XORI] = rec_XORI,
2797 [OP_LUI] = rec_LUI,
2798 [OP_CP0] = rec_CP0,
2799 [OP_CP2] = rec_CP2,
2800 [OP_LB] = rec_LB,
2801 [OP_LH] = rec_LH,
2802 [OP_LWL] = rec_LWL,
2803 [OP_LW] = rec_LW,
2804 [OP_LBU] = rec_LBU,
2805 [OP_LHU] = rec_LHU,
2806 [OP_LWR] = rec_LWR,
2807 [OP_SB] = rec_SB,
2808 [OP_SH] = rec_SH,
2809 [OP_SWL] = rec_SWL,
2810 [OP_SW] = rec_SW,
2811 [OP_SWR] = rec_SWR,
2812 [OP_LWC2] = rec_LW,
2813 [OP_SWC2] = rec_SW,
2814
2815 [OP_META] = rec_META,
2816 [OP_META_MULT2] = rec_meta_MULT2,
2817 [OP_META_MULTU2] = rec_meta_MULT2,
2818};
2819
2820static const lightrec_rec_func_t rec_special[64] = {
2821 SET_DEFAULT_ELM(rec_special, unknown_opcode),
2822 [OP_SPECIAL_SLL] = rec_special_SLL,
2823 [OP_SPECIAL_SRL] = rec_special_SRL,
2824 [OP_SPECIAL_SRA] = rec_special_SRA,
2825 [OP_SPECIAL_SLLV] = rec_special_SLLV,
2826 [OP_SPECIAL_SRLV] = rec_special_SRLV,
2827 [OP_SPECIAL_SRAV] = rec_special_SRAV,
2828 [OP_SPECIAL_JR] = rec_special_JR,
2829 [OP_SPECIAL_JALR] = rec_special_JALR,
2830 [OP_SPECIAL_SYSCALL] = rec_special_SYSCALL,
2831 [OP_SPECIAL_BREAK] = rec_special_BREAK,
2832 [OP_SPECIAL_MFHI] = rec_special_MFHI,
2833 [OP_SPECIAL_MTHI] = rec_special_MTHI,
2834 [OP_SPECIAL_MFLO] = rec_special_MFLO,
2835 [OP_SPECIAL_MTLO] = rec_special_MTLO,
2836 [OP_SPECIAL_MULT] = rec_special_MULT,
2837 [OP_SPECIAL_MULTU] = rec_special_MULTU,
2838 [OP_SPECIAL_DIV] = rec_special_DIV,
2839 [OP_SPECIAL_DIVU] = rec_special_DIVU,
2840 [OP_SPECIAL_ADD] = rec_special_ADD,
2841 [OP_SPECIAL_ADDU] = rec_special_ADDU,
2842 [OP_SPECIAL_SUB] = rec_special_SUB,
2843 [OP_SPECIAL_SUBU] = rec_special_SUBU,
2844 [OP_SPECIAL_AND] = rec_special_AND,
2845 [OP_SPECIAL_OR] = rec_special_OR,
2846 [OP_SPECIAL_XOR] = rec_special_XOR,
2847 [OP_SPECIAL_NOR] = rec_special_NOR,
2848 [OP_SPECIAL_SLT] = rec_special_SLT,
2849 [OP_SPECIAL_SLTU] = rec_special_SLTU,
2850};
2851
2852static const lightrec_rec_func_t rec_regimm[64] = {
2853 SET_DEFAULT_ELM(rec_regimm, unknown_opcode),
2854 [OP_REGIMM_BLTZ] = rec_regimm_BLTZ,
2855 [OP_REGIMM_BGEZ] = rec_regimm_BGEZ,
2856 [OP_REGIMM_BLTZAL] = rec_regimm_BLTZAL,
2857 [OP_REGIMM_BGEZAL] = rec_regimm_BGEZAL,
2858};
2859
2860static const lightrec_rec_func_t rec_cp0[64] = {
2861 SET_DEFAULT_ELM(rec_cp0, rec_CP),
2862 [OP_CP0_MFC0] = rec_cp0_MFC0,
2863 [OP_CP0_CFC0] = rec_cp0_CFC0,
2864 [OP_CP0_MTC0] = rec_cp0_MTC0,
2865 [OP_CP0_CTC0] = rec_cp0_CTC0,
2866 [OP_CP0_RFE] = rec_cp0_RFE,
2867};
2868
2869static const lightrec_rec_func_t rec_cp2_basic[64] = {
2870 SET_DEFAULT_ELM(rec_cp2_basic, rec_CP),
2871 [OP_CP2_BASIC_MFC2] = rec_cp2_basic_MFC2,
2872 [OP_CP2_BASIC_CFC2] = rec_cp2_basic_CFC2,
2873 [OP_CP2_BASIC_MTC2] = rec_cp2_basic_MTC2,
2874 [OP_CP2_BASIC_CTC2] = rec_cp2_basic_CTC2,
2875};
2876
2877static const lightrec_rec_func_t rec_meta[64] = {
2878 SET_DEFAULT_ELM(rec_meta, unknown_opcode),
2879 [OP_META_MOV] = rec_meta_MOV,
2880 [OP_META_EXTC] = rec_meta_EXTC_EXTS,
2881 [OP_META_EXTS] = rec_meta_EXTC_EXTS,
2882 [OP_META_COM] = rec_meta_COM,
2883};
2884
2885static void rec_SPECIAL(struct lightrec_cstate *state,
2886 const struct block *block, u16 offset)
2887{
2888 union code c = block->opcode_list[offset].c;
2889 lightrec_rec_func_t f = rec_special[c.r.op];
2890
2891 if (!HAS_DEFAULT_ELM && unlikely(!f))
2892 unknown_opcode(state, block, offset);
2893 else
2894 (*f)(state, block, offset);
2895}
2896
2897static void rec_REGIMM(struct lightrec_cstate *state,
2898 const struct block *block, u16 offset)
2899{
2900 union code c = block->opcode_list[offset].c;
2901 lightrec_rec_func_t f = rec_regimm[c.r.rt];
2902
2903 if (!HAS_DEFAULT_ELM && unlikely(!f))
2904 unknown_opcode(state, block, offset);
2905 else
2906 (*f)(state, block, offset);
2907}
2908
2909static void rec_CP0(struct lightrec_cstate *state,
2910 const struct block *block, u16 offset)
2911{
2912 union code c = block->opcode_list[offset].c;
2913 lightrec_rec_func_t f = rec_cp0[c.r.rs];
2914
2915 if (!HAS_DEFAULT_ELM && unlikely(!f))
2916 rec_CP(state, block, offset);
2917 else
2918 (*f)(state, block, offset);
2919}
2920
2921static void rec_CP2(struct lightrec_cstate *state,
2922 const struct block *block, u16 offset)
2923{
2924 union code c = block->opcode_list[offset].c;
2925
2926 if (c.r.op == OP_CP2_BASIC) {
2927 lightrec_rec_func_t f = rec_cp2_basic[c.r.rs];
2928
2929 if (HAS_DEFAULT_ELM || likely(f)) {
2930 (*f)(state, block, offset);
2931 return;
2932 }
2933 }
2934
2935 rec_CP(state, block, offset);
2936}
2937
2938static void rec_META(struct lightrec_cstate *state,
2939 const struct block *block, u16 offset)
2940{
2941 union code c = block->opcode_list[offset].c;
2942 lightrec_rec_func_t f = rec_meta[c.m.op];
2943
2944 if (!HAS_DEFAULT_ELM && unlikely(!f))
2945 unknown_opcode(state, block, offset);
2946 else
2947 (*f)(state, block, offset);
2948}
2949
2950void lightrec_rec_opcode(struct lightrec_cstate *state,
2951 const struct block *block, u16 offset)
2952{
2953 struct regcache *reg_cache = state->reg_cache;
2954 struct lightrec_branch_target *target;
2955 const struct opcode *op = &block->opcode_list[offset];
2956 jit_state_t *_jit = block->_jit;
2957 lightrec_rec_func_t f;
2958 u16 unload_offset;
2959
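	/* A sync point is a branch target: flush the pending cycle count,
	 * write back and reset the register cache, and record a label so that
	 * jumps to this offset find a consistent state. */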
2960 if (op_flag_sync(op->flags)) {
2961 if (state->cycles)
2962 jit_subi(LIGHTREC_REG_CYCLE, LIGHTREC_REG_CYCLE, state->cycles);
2963 state->cycles = 0;
2964
2965 lightrec_storeback_regs(reg_cache, _jit);
2966 lightrec_regcache_reset(reg_cache);
2967
2968 pr_debug("Adding branch target at offset 0x%x\n", offset << 2);
2969 target = &state->targets[state->nb_targets++];
2970 target->offset = offset;
2971 target->label = jit_indirect();
2972 }
2973
2974 if (likely(op->opcode)) {
2975 f = rec_standard[op->i.op];
2976
2977 if (!HAS_DEFAULT_ELM && unlikely(!f))
2978 unknown_opcode(state, block, offset);
2979 else
2980 (*f)(state, block, offset);
2981 }
2982
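	/* If the opcode has a delay slot that is emitted with it, perform the
	 * early register unloads after the delay slot instead. */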
2983 if (OPT_EARLY_UNLOAD) {
2984 unload_offset = offset +
2985 (has_delay_slot(op->c) && !op_flag_no_ds(op->flags));
2986
2987 lightrec_do_early_unload(state, block, unload_offset);
2988 }
2989
2990 state->no_load_delay = false;
2991}