/*
 * Copyright (C) 2006 Exophase <exophase@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "arm_codegen.h"

u32 arm_update_gba_arm(u32 pc);
u32 arm_update_gba_thumb(u32 pc);
u32 arm_update_gba_idle_arm(u32 pc);
u32 arm_update_gba_idle_thumb(u32 pc);

// Although these are defined as functions, don't call them as
// such (jump to them instead).
void arm_indirect_branch_arm(u32 address);
void arm_indirect_branch_thumb(u32 address);
void arm_indirect_branch_dual_arm(u32 address);
void arm_indirect_branch_dual_thumb(u32 address);

void execute_store_cpsr(u32 new_cpsr, u32 store_mask, u32 address);
u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address);
void execute_store_spsr(u32 new_cpsr, u32 store_mask);
u32 execute_read_spsr();
u32 execute_spsr_restore(u32 address);

void execute_swi_arm(u32 pc);
void execute_swi_thumb(u32 pc);

void function_cc execute_store_u32_safe(u32 address, u32 source);

void step_debug_arm(u32 pc);

#define write32(value) \
  *((u32 *)translation_ptr) = value; \
  translation_ptr += 4 \

#define arm_relative_offset(source, offset) \
  (((((u32)offset - (u32)source) - 8) >> 2) & 0xFFFFFF) \
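
// Worked example (illustrative, not part of the emitter): an ARM branch
// encodes a signed word offset relative to the branch's own address + 8
// (the pipeline prefetch bias), which is what arm_relative_offset computes.
// If a B instruction is emitted at 0x1000 and its target is 0x1020:
//   ((0x1020 - 0x1000) - 8) >> 2 = 0x18 >> 2 = 6
// so 6 is the value placed in the low 24 bits of the opcode.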

// reg_base_offset is the number of bytes after reg_base where the GBA
// registers are stored.

#define reg_base_offset 1024

#define reg_a0 ARMREG_R0
#define reg_a1 ARMREG_R1
#define reg_a2 ARMREG_R2

#define reg_s0 ARMREG_R9
#define reg_base ARMREG_SP
#define reg_flags ARMREG_R11

#define reg_cycles ARMREG_R12

#define reg_rv ARMREG_R0

#define reg_rm ARMREG_R0
#define reg_rn ARMREG_R1
#define reg_rs ARMREG_R14
#define reg_rd ARMREG_R0

// Register allocation layout for ARM and Thumb:
// Map from a GBA register to a host ARM register. -1 means load it
// from memory into one of the temp registers.

// The following registers are chosen based on statistical analysis
// of a few games (see below), but might not be the best ones. Results
// vary tremendously between ARM and Thumb (for obvious reasons), so
// two sets are used. Take care to not call any function which can
// overwrite any of these registers from the dynarec - only call
// trusted functions in arm_stub.S which know how to save/restore
// them and know how to transfer them to the C functions they call.

#define mem_reg -1

// The following define the actual registers available for allocation.
// As registers are freed up add them to this list.

// Note that r15 is linked to the a0 temp reg - this register will
// be preloaded with a constant upon read, and used to link to
// indirect branch functions upon write.

#define reg_x0 ARMREG_R3
#define reg_x1 ARMREG_R4
#define reg_x2 ARMREG_R5
#define reg_x3 ARMREG_R6
#define reg_x4 ARMREG_R7
#define reg_x5 ARMREG_R8

/*
ARM register usage (38.775138% ARM instructions):
r00: 18.263814% (-- 18.263814%)
r12: 11.531477% (-- 29.795291%)
r09: 11.500162% (-- 41.295453%)
r14: 9.063440% (-- 50.358893%)
r06: 7.837682% (-- 58.196574%)
r01: 7.401049% (-- 65.597623%)
r07: 6.778340% (-- 72.375963%)
r05: 5.445009% (-- 77.820973%)
r02: 5.427288% (-- 83.248260%)
r03: 5.293743% (-- 88.542003%)
r04: 3.601103% (-- 92.143106%)
r11: 3.207311% (-- 95.350417%)
r10: 2.334864% (-- 97.685281%)
r08: 1.708207% (-- 99.393488%)
r15: 0.311270% (-- 99.704757%)
r13: 0.295243% (-- 100.000000%)

Thumb register usage (61.224862% Thumb instructions):
r00: 34.788858% (-- 34.788858%)
r01: 26.564083% (-- 61.352941%)
r03: 10.983500% (-- 72.336441%)
r02: 8.303127% (-- 80.639567%)
r04: 4.900381% (-- 85.539948%)
r05: 3.941292% (-- 89.481240%)
r06: 3.257582% (-- 92.738822%)
r07: 2.644851% (-- 95.383673%)
r13: 1.408824% (-- 96.792497%)
r08: 0.906433% (-- 97.698930%)
r09: 0.679693% (-- 98.378623%)
r10: 0.656446% (-- 99.035069%)
r12: 0.453668% (-- 99.488737%)
r14: 0.248909% (-- 99.737646%)
r11: 0.171066% (-- 99.908713%)
r15: 0.091287% (-- 100.000000%)
*/

s32 arm_register_allocation[] =

s32 thumb_register_allocation[] =

// A left shift by n is equivalent to a rotate right by (32 - n),
// which is the form the ARM immediate encoding wants.
#define arm_imm_lsl_to_rot(value) \
  (32 - value) \

// Decomposes a 32-bit immediate into a series of 8-bit chunks plus
// rotations, each encodable as an ARM data processing immediate.
u32 arm_disect_imm_32bit(u32 imm, u32 *stores, u32 *rotations)
{
  u32 store_count = 0;
  u32 left_shift = 0;

  // Store the zero case up front.
  // Otherwise it'll return 0 things to store because it'll never
  // find anything.
  if(imm == 0)
  {
    rotations[0] = 0;
    stores[0] = 0;
    return 1;
  }

  // Find chunks of non-zero data at 2 bit alignments.
  while(1)
  {
    for(; left_shift < 32; left_shift += 2)
    {
      if((imm >> left_shift) & 0x03)
        break;
    }

    if(left_shift == 32)
    {
      // We've hit the end of the useful data.
      return store_count;
    }

    // Hit the end, it might wrap back around to the beginning.
    if(left_shift >= 24)
    {
      // Make a mask for the residual bits. I.e., if we have
      // 5 bits of data at the end we can wrap around to 3
      // bits of data in the beginning. Thus the first
      // thing, after being shifted left, has to be less
      // than 111b, 0x7, or (1 << 3) - 1.
      u32 top_bits = 32 - left_shift;
      u32 residual_bits = 8 - top_bits;
      u32 residual_mask = (1 << residual_bits) - 1;

      if((store_count > 1) && (left_shift > 24) &&
       ((stores[0] << ((32 - rotations[0]) & 0x1F)) < residual_mask))
      {
        // Then we can throw out the last bit and tack it on
        // to the first chunk.
        u32 initial_bits = rotations[0];
        stores[0] =
         (stores[0] << ((top_bits + (32 - initial_bits)) & 0x1F)) |
         ((imm >> left_shift) & 0xFF);
        rotations[0] = top_bits;

        return store_count;
      }
      else
      {
        // There's nothing to wrap over to in the beginning
        stores[store_count] = (imm >> left_shift) & 0xFF;
        rotations[store_count] = (32 - left_shift) & 0x1F;
        return store_count + 1;
      }
    }

    stores[store_count] = (imm >> left_shift) & 0xFF;
    rotations[store_count] = (32 - left_shift) & 0x1F;

    store_count++;
    left_shift += 8;
  }
}

#define arm_load_imm_32bit(ireg, imm) \
{ \
  u32 stores[4]; \
  u32 rotations[4]; \
  u32 store_count = arm_disect_imm_32bit(imm, stores, rotations); \
  u32 i; \
 \
  ARM_MOV_REG_IMM(0, ireg, stores[0], rotations[0]); \
 \
  for(i = 1; i < store_count; i++) \
  { \
    ARM_ORR_REG_IMM(0, ireg, ireg, stores[i], rotations[i]); \
  } \
} \
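
// Worked example (illustrative): arm_disect_imm_32bit(0xFF00FF00, ...)
// finds two 8-bit chunks and yields
//   stores[0] = 0xFF, rotations[0] = 24   (0xFF ror 24 == 0x0000FF00)
//   stores[1] = 0xFF, rotations[1] = 8    (0xFF ror 8  == 0xFF000000)
// so arm_load_imm_32bit emits MOV ireg, #0x0000FF00 followed by
// ORR ireg, ireg, #0xFF000000 - two instructions instead of a literal
// pool load.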

#define generate_load_pc(ireg, new_pc) \
  arm_load_imm_32bit(ireg, new_pc) \

#define generate_load_imm(ireg, imm, imm_ror) \
  ARM_MOV_REG_IMM(0, ireg, imm, imm_ror) \

#define generate_shift_left(ireg, imm) \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_LSL, imm) \

#define generate_shift_right(ireg, imm) \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_LSR, imm) \

#define generate_shift_right_arithmetic(ireg, imm) \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_ASR, imm) \

#define generate_rotate_right(ireg, imm) \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_ROR, imm) \

#define generate_add(ireg_dest, ireg_src) \
  ARM_ADD_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \

#define generate_sub(ireg_dest, ireg_src) \
  ARM_SUB_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \

#define generate_or(ireg_dest, ireg_src) \
  ARM_ORR_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \

#define generate_xor(ireg_dest, ireg_src) \
  ARM_EOR_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \

#define generate_add_imm(ireg, imm, imm_ror) \
  ARM_ADD_REG_IMM(0, ireg, ireg, imm, imm_ror) \

#define generate_sub_imm(ireg, imm, imm_ror) \
  ARM_SUB_REG_IMM(0, ireg, ireg, imm, imm_ror) \

#define generate_xor_imm(ireg, imm, imm_ror) \
  ARM_EOR_REG_IMM(0, ireg, ireg, imm, imm_ror) \

#define generate_add_reg_reg_imm(ireg_dest, ireg_src, imm, imm_ror) \
  ARM_ADD_REG_IMM(0, ireg_dest, ireg_src, imm, imm_ror) \

#define generate_and_imm(ireg, imm, imm_ror) \
  ARM_AND_REG_IMM(0, ireg, ireg, imm, imm_ror) \

#define generate_mov(ireg_dest, ireg_src) \
  if(ireg_dest != ireg_src) \
  { \
    ARM_MOV_REG_REG(0, ireg_dest, ireg_src); \
  } \

#define generate_function_call(function_location) \
  ARM_BL(0, arm_relative_offset(translation_ptr, function_location)) \

#define generate_exit_block() \
  ARM_BX(0, ARMREG_LR) \

// The branch target is to be filled in later (thus a 0 for now)

#define generate_branch_filler(condition_code, writeback_location) \
  (writeback_location) = translation_ptr; \
  ARM_B_COND(0, condition_code, 0) \

#define generate_update_pc(new_pc) \
  generate_load_pc(reg_a0, new_pc) \

#define generate_cycle_update() \
{ \
  if(cycle_count >> 8) \
  { \
    ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count >> 8) & 0xFF, \
     arm_imm_lsl_to_rot(8)); \
  } \
  ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count & 0xFF), 0); \
  cycle_count = 0; \
} \

#define generate_cycle_update_flag_set() \
  if(cycle_count >> 8) \
  { \
    ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count >> 8) & 0xFF, \
     arm_imm_lsl_to_rot(8)); \
  } \
  generate_save_flags(); \
  ARM_ADDS_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count & 0xFF), 0); \
  cycle_count = 0 \

#define generate_branch_patch_conditional(dest, offset) \
  *((u32 *)(dest)) = (*((u32 *)dest) & 0xFF000000) | \
   arm_relative_offset(dest, offset) \

#define generate_branch_patch_unconditional(dest, offset) \
  *((u32 *)(dest)) = (*((u32 *)dest) & 0xFF000000) | \
   arm_relative_offset(dest, offset) \
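
// Typical usage (sketch): a forward branch is emitted with a zero offset
// via generate_branch_filler, which remembers where the opcode was
// written; once the target's translation address is known, the low 24
// bits are patched in place:
//
//   u8 *backpatch_address;
//   generate_branch_filler(ARMCOND_EQ, backpatch_address);
//   ...                                  // emit the not-taken path
//   generate_branch_patch_conditional(backpatch_address, translation_ptr);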

// A different function is called for idle updates because of the relative
// location of the embedded PC. The idle version could be optimized to put
// the CPU into halt mode too, however.

#define generate_branch_idle_eliminate(writeback_location, new_pc, mode) \
  generate_function_call(arm_update_gba_idle_##mode); \
  write32(new_pc); \
  generate_branch_filler(ARMCOND_AL, writeback_location) \
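
// A note on generate_branch_update below (my reading of the trick): the
// MOV copies the sign bit of reg_cycles into reg_a0, and the ADD to the
// host PC (which reads as instruction address + 8) then lands either on
// the BL to the update handler (counter >= 0, i.e. the cycle budget ran
// out) or one instruction past it, on the unconditional branch (counter
// still negative). The word emitted between them is the embedded GBA PC
// that the update handler reads.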

#define generate_branch_update(writeback_location, new_pc, mode) \
  ARM_MOV_REG_IMMSHIFT(0, reg_a0, reg_cycles, ARMSHIFT_LSR, 31); \
  ARM_ADD_REG_IMMSHIFT(0, ARMREG_PC, ARMREG_PC, reg_a0, ARMSHIFT_LSL, 2); \
  write32(new_pc); \
  generate_function_call(arm_update_gba_##mode); \
  generate_branch_filler(ARMCOND_AL, writeback_location) \

#define generate_branch_no_cycle_update(writeback_location, new_pc, mode) \
  if(pc == idle_loop_target_pc) \
  { \
    generate_branch_idle_eliminate(writeback_location, new_pc, mode); \
  } \
  else \
  { \
    generate_branch_update(writeback_location, new_pc, mode); \
  } \

#define generate_branch_cycle_update(writeback_location, new_pc, mode) \
  generate_cycle_update(); \
  generate_branch_no_cycle_update(writeback_location, new_pc, mode) \

// a0 holds the destination

#define generate_indirect_branch_no_cycle_update(type) \
  ARM_B(0, arm_relative_offset(translation_ptr, arm_indirect_branch_##type)) \

#define generate_indirect_branch_cycle_update(type) \
  generate_cycle_update(); \
  generate_indirect_branch_no_cycle_update(type) \

#define generate_block_prologue() \

#define generate_block_extra_vars_arm() \
  void generate_indirect_branch_arm() \
  { \
    if(condition == 0x0E) \
    { \
      generate_cycle_update(); \
    } \
    generate_indirect_branch_no_cycle_update(arm); \
  } \
 \
  void generate_indirect_branch_dual() \
  { \
    if(condition == 0x0E) \
    { \
      generate_cycle_update(); \
    } \
    generate_indirect_branch_no_cycle_update(dual_arm); \
  } \
 \
  u32 prepare_load_reg(u32 scratch_reg, u32 reg_index) \
  { \
    u32 reg_use = arm_register_allocation[reg_index]; \
    if(reg_use == mem_reg) \
    { \
      ARM_LDR_IMM(0, scratch_reg, reg_base, \
       (reg_base_offset + (reg_index * 4))); \
      return scratch_reg; \
    } \
 \
    return reg_use; \
  } \
 \
  u32 prepare_load_reg_pc(u32 scratch_reg, u32 reg_index, u32 pc_offset) \
  { \
    if(reg_index == 15) \
    { \
      generate_load_pc(scratch_reg, pc + pc_offset); \
      return scratch_reg; \
    } \
    return prepare_load_reg(scratch_reg, reg_index); \
  } \
 \
  u32 prepare_store_reg(u32 scratch_reg, u32 reg_index) \
  { \
    u32 reg_use = arm_register_allocation[reg_index]; \
    if(reg_use == mem_reg) \
      return scratch_reg; \
 \
    return reg_use; \
  } \
 \
  void complete_store_reg(u32 scratch_reg, u32 reg_index) \
  { \
    if(arm_register_allocation[reg_index] == mem_reg) \
    { \
      ARM_STR_IMM(0, scratch_reg, reg_base, \
       (reg_base_offset + (reg_index * 4))); \
    } \
  } \
 \
  void complete_store_reg_pc_no_flags(u32 scratch_reg, u32 reg_index) \
  { \
    if(reg_index == 15) \
    { \
      generate_indirect_branch_arm(); \
    } \
    else \
    { \
      complete_store_reg(scratch_reg, reg_index); \
    } \
  } \
 \
  void complete_store_reg_pc_flags(u32 scratch_reg, u32 reg_index) \
  { \
    if(reg_index == 15) \
    { \
      if(condition == 0x0E) \
      { \
        generate_cycle_update(); \
      } \
      generate_function_call(execute_spsr_restore); \
    } \
    else \
    { \
      complete_store_reg(scratch_reg, reg_index); \
    } \
  } \
 \
  void generate_load_reg(u32 ireg, u32 reg_index) \
  { \
    s32 load_src = arm_register_allocation[reg_index]; \
    if(load_src != mem_reg) \
    { \
      ARM_MOV_REG_REG(0, ireg, load_src); \
    } \
    else \
    { \
      ARM_LDR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
    } \
  } \
 \
  void generate_store_reg(u32 ireg, u32 reg_index) \
  { \
    s32 store_dest = arm_register_allocation[reg_index]; \
    if(store_dest != mem_reg) \
    { \
      ARM_MOV_REG_REG(0, store_dest, ireg); \
    } \
    else \
    { \
      ARM_STR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
    } \
  } \

#define generate_block_extra_vars_thumb() \
  u32 prepare_load_reg(u32 scratch_reg, u32 reg_index) \
  { \
    u32 reg_use = thumb_register_allocation[reg_index]; \
    if(reg_use == mem_reg) \
    { \
      ARM_LDR_IMM(0, scratch_reg, reg_base, \
       (reg_base_offset + (reg_index * 4))); \
      return scratch_reg; \
    } \
 \
    return reg_use; \
  } \
 \
  u32 prepare_load_reg_pc(u32 scratch_reg, u32 reg_index, u32 pc_offset) \
  { \
    if(reg_index == 15) \
    { \
      generate_load_pc(scratch_reg, pc + pc_offset); \
      return scratch_reg; \
    } \
    return prepare_load_reg(scratch_reg, reg_index); \
  } \
 \
  u32 prepare_store_reg(u32 scratch_reg, u32 reg_index) \
  { \
    u32 reg_use = thumb_register_allocation[reg_index]; \
    if(reg_use == mem_reg) \
      return scratch_reg; \
 \
    return reg_use; \
  } \
 \
  void complete_store_reg(u32 scratch_reg, u32 reg_index) \
  { \
    if(thumb_register_allocation[reg_index] == mem_reg) \
    { \
      ARM_STR_IMM(0, scratch_reg, reg_base, \
       (reg_base_offset + (reg_index * 4))); \
    } \
  } \
 \
  void generate_load_reg(u32 ireg, u32 reg_index) \
  { \
    s32 load_src = thumb_register_allocation[reg_index]; \
    if(load_src != mem_reg) \
    { \
      ARM_MOV_REG_REG(0, ireg, load_src); \
    } \
    else \
    { \
      ARM_LDR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
    } \
  } \
 \
  void generate_store_reg(u32 ireg, u32 reg_index) \
  { \
    s32 store_dest = thumb_register_allocation[reg_index]; \
    if(store_dest != mem_reg) \
    { \
      ARM_MOV_REG_REG(0, store_dest, ireg); \
    } \
    else \
    { \
      ARM_STR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
    } \
  } \

u8 *last_rom_translation_ptr = rom_translation_cache;
u8 *last_ram_translation_ptr = ram_translation_cache;
u8 *last_bios_translation_ptr = bios_translation_cache;

#define translate_invalidate_dcache_one(which) \
  if (which##_translation_ptr > last_##which##_translation_ptr) \
  { \
    warm_cache_op_range(WOP_D_CLEAN, last_##which##_translation_ptr, \
     which##_translation_ptr - last_##which##_translation_ptr); \
    warm_cache_op_range(WOP_I_INVALIDATE, last_##which##_translation_ptr, 32);\
    last_##which##_translation_ptr = which##_translation_ptr; \
  } \

#define translate_invalidate_dcache() \
{ \
  translate_invalidate_dcache_one(rom) \
  translate_invalidate_dcache_one(ram) \
  translate_invalidate_dcache_one(bios) \
} \

#define invalidate_icache_region(addr, size) \
  warm_cache_op_range(WOP_I_INVALIDATE, addr, size)

#define block_prologue_size 0

// It should be okay to still generate result flags; the spsr will overwrite
// them. This is pretty infrequent (returning from interrupt handlers, et
// al.) so it's probably not worth optimizing for.
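
// A note on the constants below: writing 0xD2 to CPSR selects IRQ mode
// (0x12) with the I and F interrupt-disable bits set, and
// reg_mode[MODE_IRQ][6] is the banked r14_irq slot, which receives the
// return address before control is redirected to the IRQ vector.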

#define check_for_interrupts() \
  if((io_registers[REG_IE] & io_registers[REG_IF]) && \
   io_registers[REG_IME] && ((reg[REG_CPSR] & 0x80) == 0)) \
  { \
    reg_mode[MODE_IRQ][6] = pc + 4; \
    spsr[MODE_IRQ] = reg[REG_CPSR]; \
    reg[REG_CPSR] = 0xD2; \
    pc = 0x00000018; \
    set_cpu_mode(MODE_IRQ); \
  } \

#define generate_load_reg_pc(ireg, reg_index, pc_offset) \
  if(reg_index == 15) \
  { \
    generate_load_pc(ireg, pc + pc_offset); \
  } \
  else \
  { \
    generate_load_reg(ireg, reg_index); \
  } \

#define generate_store_reg_pc_no_flags(ireg, reg_index) \
  generate_store_reg(ireg, reg_index); \
  if(reg_index == 15) \
  { \
    generate_indirect_branch_arm(); \
  } \

u32 function_cc execute_spsr_restore_body(u32 pc)
{
  set_cpu_mode(cpu_modes[reg[REG_CPSR] & 0x1F]);
  check_for_interrupts();

  return pc;
}

#define generate_store_reg_pc_flags(ireg, reg_index) \
  generate_store_reg(ireg, reg_index); \
  if(reg_index == 15) \
  { \
    if(condition == 0x0E) \
    { \
      generate_cycle_update(); \
    } \
    generate_function_call(execute_spsr_restore); \
  } \

#define generate_load_flags() \
/* ARM_MSR_REG(0, ARM_PSR_F, reg_flags, ARM_CPSR) */ \

#define generate_store_flags() \
/* ARM_MRS_CPSR(0, reg_flags) */ \

#define generate_save_flags() \
  ARM_MRS_CPSR(0, reg_flags) \

#define generate_restore_flags() \
  ARM_MSR_REG(0, ARM_PSR_F, reg_flags, ARM_CPSR) \

#define condition_opposite_eq ARMCOND_NE
#define condition_opposite_ne ARMCOND_EQ
#define condition_opposite_cs ARMCOND_CC
#define condition_opposite_cc ARMCOND_CS
#define condition_opposite_mi ARMCOND_PL
#define condition_opposite_pl ARMCOND_MI
#define condition_opposite_vs ARMCOND_VC
#define condition_opposite_vc ARMCOND_VS
#define condition_opposite_hi ARMCOND_LS
#define condition_opposite_ls ARMCOND_HI
#define condition_opposite_ge ARMCOND_LT
#define condition_opposite_lt ARMCOND_GE
#define condition_opposite_gt ARMCOND_LE
#define condition_opposite_le ARMCOND_GT
#define condition_opposite_al ARMCOND_NV
#define condition_opposite_nv ARMCOND_AL

#define generate_branch(mode) \
{ \
  generate_branch_cycle_update( \
   block_exits[block_exit_position].branch_source, \
   block_exits[block_exit_position].branch_target, mode); \
  block_exit_position++; \
} \

#define generate_op_and_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_AND_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_orr_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_ORR_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_eor_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_EOR_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_bic_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_BIC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_sub_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_SUB_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_rsb_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_RSB_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_sbc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_SBC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_rsc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_RSC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_add_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_ADD_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_adc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_ADC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_mov_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_MOV_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \

#define generate_op_mvn_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_MVN_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \

#define generate_op_and_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_AND_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_orr_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_ORR_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_eor_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_EOR_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_bic_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_BIC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_sub_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_SUB_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_rsb_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_RSB_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_sbc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_SBC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_rsc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_RSC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_add_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_ADD_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_adc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_ADC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_mov_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_MOV_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \

#define generate_op_mvn_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_MVN_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \

#define generate_op_and_imm(_rd, _rn) \
  ARM_AND_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_orr_imm(_rd, _rn) \
  ARM_ORR_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_eor_imm(_rd, _rn) \
  ARM_EOR_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_bic_imm(_rd, _rn) \
  ARM_BIC_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_sub_imm(_rd, _rn) \
  ARM_SUB_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_rsb_imm(_rd, _rn) \
  ARM_RSB_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_sbc_imm(_rd, _rn) \
  ARM_SBC_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_rsc_imm(_rd, _rn) \
  ARM_RSC_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_add_imm(_rd, _rn) \
  ARM_ADD_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_adc_imm(_rd, _rn) \
  ARM_ADC_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_mov_imm(_rd, _rn) \
  ARM_MOV_REG_IMM(0, _rd, imm, imm_ror) \

#define generate_op_mvn_imm(_rd, _rn) \
  ARM_MVN_REG_IMM(0, _rd, imm, imm_ror) \

#define generate_op_reg_immshift_lflags(name, _rd, _rn, _rm, st, shift) \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, shift) \

#define generate_op_reg_immshift_aflags(name, _rd, _rn, _rm, st, shift) \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, shift) \

#define generate_op_reg_immshift_aflags_load_c(name, _rd, _rn, _rm, st, sh) \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, sh) \

#define generate_op_reg_immshift_uflags(name, _rd, _rm, shift_type, shift) \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \

#define generate_op_reg_immshift_tflags(name, _rn, _rm, shift_type, shift) \
  ARM_##name##_REG_IMMSHIFT(0, _rn, _rm, shift_type, shift) \

#define generate_op_reg_regshift_lflags(name, _rd, _rn, _rm, shift_type, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_reg_regshift_aflags(name, _rd, _rn, _rm, st, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, st, _rs) \

#define generate_op_reg_regshift_aflags_load_c(name, _rd, _rn, _rm, st, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, st, _rs) \

#define generate_op_reg_regshift_uflags(name, _rd, _rm, shift_type, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \

#define generate_op_reg_regshift_tflags(name, _rn, _rm, shift_type, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rn, _rm, shift_type, _rs) \

#define generate_op_imm_lflags(name, _rd, _rn) \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_imm_aflags(name, _rd, _rn) \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_imm_aflags_load_c(name, _rd, _rn) \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_imm_uflags(name, _rd) \
  ARM_##name##_REG_IMM(0, _rd, imm, imm_ror) \

#define generate_op_imm_tflags(name, _rn) \
  ARM_##name##_REG_IMM(0, _rn, imm, imm_ror) \

#define generate_op_ands_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_lflags(ANDS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_orrs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_lflags(ORRS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_eors_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_lflags(EORS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_bics_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_lflags(BICS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_subs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_aflags(SUBS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_rsbs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_aflags(RSBS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_sbcs_reg_immshift(_rd, _rn, _rm, st, shift) \
  generate_op_reg_immshift_aflags_load_c(SBCS, _rd, _rn, _rm, st, shift) \

#define generate_op_rscs_reg_immshift(_rd, _rn, _rm, st, shift) \
  generate_op_reg_immshift_aflags_load_c(RSCS, _rd, _rn, _rm, st, shift) \

#define generate_op_adds_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_aflags(ADDS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_adcs_reg_immshift(_rd, _rn, _rm, st, shift) \
  generate_op_reg_immshift_aflags_load_c(ADCS, _rd, _rn, _rm, st, shift) \

#define generate_op_movs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_uflags(MOVS, _rd, _rm, shift_type, shift) \

#define generate_op_mvns_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_uflags(MVNS, _rd, _rm, shift_type, shift) \

// The reg operand is in reg_rm, not reg_rn as expected, so rsbs isn't
// being used here. When rsbs is fully inlined it can be used with the
// appropriate operands.

#define generate_op_neg_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
{ \
  generate_load_imm(reg_rn, 0, 0); \
  generate_op_subs_reg_immshift(_rd, reg_rn, _rm, ARMSHIFT_LSL, 0); \
} \

#define generate_op_muls_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_load_flags(); \
  ARM_MULS(0, _rd, _rn, _rm); \
  generate_store_flags() \

#define generate_op_cmp_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(CMP, _rn, _rm, shift_type, shift) \

#define generate_op_cmn_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(CMN, _rn, _rm, shift_type, shift) \

#define generate_op_tst_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(TST, _rn, _rm, shift_type, shift) \

#define generate_op_teq_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(TEQ, _rn, _rm, shift_type, shift) \

#define generate_op_ands_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(ANDS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_orrs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(ORRS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_eors_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(EORS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_bics_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(BICS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_subs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(SUBS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_rsbs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(RSBS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_sbcs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(SBCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_rscs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(RSCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_adds_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(ADDS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_adcs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(ADCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_movs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_uflags(MOVS, _rd, _rm, shift_type, _rs) \

#define generate_op_mvns_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_uflags(MVNS, _rd, _rm, shift_type, _rs) \

#define generate_op_cmp_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(CMP, _rn, _rm, shift_type, _rs) \

#define generate_op_cmn_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(CMN, _rn, _rm, shift_type, _rs) \

#define generate_op_tst_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(TST, _rn, _rm, shift_type, _rs) \

#define generate_op_teq_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(TEQ, _rn, _rm, shift_type, _rs) \

#define generate_op_ands_imm(_rd, _rn) \
  generate_op_imm_lflags(ANDS, _rd, _rn) \

#define generate_op_orrs_imm(_rd, _rn) \
  generate_op_imm_lflags(ORRS, _rd, _rn) \

#define generate_op_eors_imm(_rd, _rn) \
  generate_op_imm_lflags(EORS, _rd, _rn) \

#define generate_op_bics_imm(_rd, _rn) \
  generate_op_imm_lflags(BICS, _rd, _rn) \

#define generate_op_subs_imm(_rd, _rn) \
  generate_op_imm_aflags(SUBS, _rd, _rn) \

#define generate_op_rsbs_imm(_rd, _rn) \
  generate_op_imm_aflags(RSBS, _rd, _rn) \

#define generate_op_sbcs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(SBCS, _rd, _rn) \

#define generate_op_rscs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(RSCS, _rd, _rn) \

#define generate_op_adds_imm(_rd, _rn) \
  generate_op_imm_aflags(ADDS, _rd, _rn) \

#define generate_op_adcs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(ADCS, _rd, _rn) \

#define generate_op_movs_imm(_rd, _rn) \
  generate_op_imm_uflags(MOVS, _rd) \

#define generate_op_mvns_imm(_rd, _rn) \
  generate_op_imm_uflags(MVNS, _rd) \

#define generate_op_cmp_imm(_rd, _rn) \
  generate_op_imm_tflags(CMP, _rn) \

#define generate_op_cmn_imm(_rd, _rn) \
  generate_op_imm_tflags(CMN, _rn) \

#define generate_op_tst_imm(_rd, _rn) \
  generate_op_imm_tflags(TST, _rn) \

#define generate_op_teq_imm(_rd, _rn) \
  generate_op_imm_tflags(TEQ, _rn) \

#define prepare_load_rn_yes() \
  u32 _rn = prepare_load_reg_pc(reg_rn, rn, 8) \

#define prepare_load_rn_no() \

#define prepare_store_rd_yes() \
  u32 _rd = prepare_store_reg(reg_rd, rd) \

#define prepare_store_rd_no() \

#define complete_store_rd_yes(flags_op) \
  complete_store_reg_pc_##flags_op(_rd, rd) \

#define complete_store_rd_no(flags_op) \

#define arm_generate_op_reg(name, load_op, store_op, flags_op) \
  u32 shift_type = (opcode >> 5) & 0x03; \
  arm_decode_data_proc_reg(); \
  prepare_load_rn_##load_op(); \
  prepare_store_rd_##store_op(); \
 \
  if((opcode >> 4) & 0x01) \
  { \
    u32 rs = ((opcode >> 8) & 0x0F); \
    u32 _rs = prepare_load_reg(reg_rs, rs); \
    u32 _rm = prepare_load_reg_pc(reg_rm, rm, 12); \
    generate_op_##name##_reg_regshift(_rd, _rn, _rm, shift_type, _rs); \
  } \
  else \
  { \
    u32 shift_imm = ((opcode >> 7) & 0x1F); \
    u32 _rm = prepare_load_reg_pc(reg_rm, rm, 8); \
    generate_op_##name##_reg_immshift(_rd, _rn, _rm, shift_type, shift_imm); \
  } \
  complete_store_rd_##store_op(flags_op) \

#define arm_generate_op_reg_flags(name, load_op, store_op, flags_op) \
  arm_generate_op_reg(name, load_op, store_op, flags_op) \

// imm will be loaded by the called function if necessary.

#define arm_generate_op_imm(name, load_op, store_op, flags_op) \
  arm_decode_data_proc_imm(); \
  prepare_load_rn_##load_op(); \
  prepare_store_rd_##store_op(); \
  generate_op_##name##_imm(_rd, _rn); \
  complete_store_rd_##store_op(flags_op) \

#define arm_generate_op_imm_flags(name, load_op, store_op, flags_op) \
  arm_generate_op_imm(name, load_op, store_op, flags_op) \

#define arm_data_proc(name, type, flags_op) \
{ \
  arm_generate_op_##type(name, yes, yes, flags_op); \
} \

#define arm_data_proc_test(name, type) \
{ \
  arm_generate_op_##type(name, yes, no, no); \
} \

#define arm_data_proc_unary(name, type, flags_op) \
{ \
  arm_generate_op_##type(name, no, yes, flags_op); \
} \
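
// Illustrative expansion (nothing is emitted from this comment):
// arm_data_proc(adds, reg, flags) pastes together
// arm_generate_op_reg(adds, yes, yes, flags), which in turn selects
// prepare_load_rn_yes(), prepare_store_rd_yes() and
// complete_store_rd_yes(flags) - i.e. rn is loaded, rd is allocated for
// a store, and a write to r15 goes through the flag-restoring PC path.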

#define arm_multiply_add_no_flags_no() \
  ARM_MUL(0, _rd, _rm, _rs) \

#define arm_multiply_add_yes_flags_no() \
  u32 _rn = prepare_load_reg(reg_a2, rn); \
  ARM_MLA(0, _rd, _rm, _rs, _rn) \

#define arm_multiply_add_no_flags_yes() \
  generate_load_flags(); \
  ARM_MULS(0, reg_a0, reg_a0, reg_a1) \
  generate_store_flags() \

#define arm_multiply_add_yes_flags_yes() \
  u32 _rn = prepare_load_reg(reg_a2, rn); \
  generate_load_flags(); \
  ARM_MLAS(0, _rd, _rm, _rs, _rn); \
  generate_store_flags()

#define arm_multiply(add_op, flags) \
{ \
  arm_decode_multiply(); \
  u32 _rm = prepare_load_reg(reg_a0, rm); \
  u32 _rs = prepare_load_reg(reg_a1, rs); \
  u32 _rd = prepare_store_reg(reg_a0, rd); \
  arm_multiply_add_##add_op##_flags_##flags(); \
  complete_store_reg(_rd, rd); \
} \

#define arm_multiply_long_name_s64 SMULL
#define arm_multiply_long_name_u64 UMULL
#define arm_multiply_long_name_s64_add SMLAL
#define arm_multiply_long_name_u64_add UMLAL

#define arm_multiply_long_flags_no(name) \
  ARM_##name(0, _rdlo, _rdhi, _rm, _rs) \

#define arm_multiply_long_flags_yes(name) \
  generate_load_flags(); \
  ARM_##name##S(0, _rdlo, _rdhi, _rm, _rs); \
  generate_store_flags() \

#define arm_multiply_long_add_no(name) \

#define arm_multiply_long_add_yes(name) \
  prepare_load_reg(reg_a0, rdlo); \
  prepare_load_reg(reg_a1, rdhi) \

#define arm_multiply_long_op(flags, name) \
  arm_multiply_long_flags_##flags(name) \

#define arm_multiply_long(name, add_op, flags) \
{ \
  arm_decode_multiply_long(); \
  u32 _rm = prepare_load_reg(reg_a2, rm); \
  u32 _rs = prepare_load_reg(reg_rs, rs); \
  u32 _rdlo = prepare_store_reg(reg_a0, rdlo); \
  u32 _rdhi = prepare_store_reg(reg_a1, rdhi); \
  arm_multiply_long_add_##add_op(name); \
  arm_multiply_long_op(flags, arm_multiply_long_name_##name); \
  complete_store_reg(_rdlo, rdlo); \
  complete_store_reg(_rdhi, rdhi); \
} \

#define arm_psr_read_cpsr() \
  u32 _rd = prepare_store_reg(reg_a0, rd); \
  generate_load_reg(_rd, REG_CPSR); \
  ARM_BIC_REG_IMM(0, _rd, _rd, 0xF0, arm_imm_lsl_to_rot(24)); \
  ARM_AND_REG_IMM(0, reg_flags, reg_flags, 0xF0, arm_imm_lsl_to_rot(24)); \
  ARM_ORR_REG_REG(0, _rd, _rd, reg_flags); \
  complete_store_reg(_rd, rd) \

#define arm_psr_read_spsr() \
  generate_function_call(execute_read_spsr) \
  generate_store_reg(reg_a0, rd) \

#define arm_psr_read(op_type, psr_reg) \
  arm_psr_read_##psr_reg() \

// This function's okay because it's called from an ASM function that can
// wrap it correctly.
u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address)
{
  reg[REG_CPSR] = _cpsr;
  if(store_mask & 0xFF)
  {
    set_cpu_mode(cpu_modes[_cpsr & 0x1F]);
    if((io_registers[REG_IE] & io_registers[REG_IF]) &&
     io_registers[REG_IME] && ((_cpsr & 0x80) == 0))
    {
      reg_mode[MODE_IRQ][6] = address + 4;
      spsr[MODE_IRQ] = _cpsr;
      reg[REG_CPSR] = 0xD2;
      set_cpu_mode(MODE_IRQ);
      return 0x00000018;
    }
  }

  return 0;
}

#define arm_psr_load_new_reg() \
  generate_load_reg(reg_a0, rm) \

#define arm_psr_load_new_imm() \
  generate_load_imm(reg_a0, imm, imm_ror) \

#define arm_psr_store_cpsr() \
  arm_load_imm_32bit(reg_a1, psr_masks[psr_field]); \
  generate_function_call(execute_store_cpsr); \
  write32(pc) \

#define arm_psr_store_spsr() \
  generate_function_call(execute_store_spsr) \

#define arm_psr_store(op_type, psr_reg) \
  arm_psr_load_new_##op_type(); \
  arm_psr_store_##psr_reg() \

#define arm_psr(op_type, transfer_type, psr_reg) \
{ \
  arm_decode_psr_##op_type(); \
  arm_psr_##transfer_type(op_type, psr_reg); \
} \

// TODO: Loads will need the PC passed as well for open addresses, but this
// can eventually be rectified with a hash table on the memory accesses
// (same with the stores).

#define arm_access_memory_load(mem_type) \
  cycle_count += 2; \
  generate_function_call(execute_load_##mem_type); \
  write32((pc + 8)); \
  generate_store_reg_pc_no_flags(reg_rv, rd) \

#define arm_access_memory_store(mem_type) \
  cycle_count++; \
  generate_load_reg_pc(reg_a1, rd, 12); \
  generate_function_call(execute_store_##mem_type); \
  write32((pc + 4)) \

// Calculate the address into a0 from _rn, _rm

#define arm_access_memory_adjust_reg_sh_up(ireg) \
  ARM_ADD_REG_IMMSHIFT(0, ireg, _rn, _rm, ((opcode >> 5) & 0x03), \
   ((opcode >> 7) & 0x1F)) \

#define arm_access_memory_adjust_reg_sh_down(ireg) \
  ARM_SUB_REG_IMMSHIFT(0, ireg, _rn, _rm, ((opcode >> 5) & 0x03), \
   ((opcode >> 7) & 0x1F)) \

#define arm_access_memory_adjust_reg_up(ireg) \
  ARM_ADD_REG_REG(0, ireg, _rn, _rm) \

#define arm_access_memory_adjust_reg_down(ireg) \
  ARM_SUB_REG_REG(0, ireg, _rn, _rm) \

#define arm_access_memory_adjust_imm(op, ireg) \
{ \
  u32 stores[4]; \
  u32 rotations[4]; \
  u32 store_count = arm_disect_imm_32bit(offset, stores, rotations); \
 \
  if(store_count > 1) \
  { \
    ARM_##op##_REG_IMM(0, ireg, _rn, stores[0], rotations[0]); \
    ARM_##op##_REG_IMM(0, ireg, ireg, stores[1], rotations[1]); \
  } \
  else \
  { \
    ARM_##op##_REG_IMM(0, ireg, _rn, stores[0], rotations[0]); \
  } \
} \

#define arm_access_memory_adjust_imm_up(ireg) \
  arm_access_memory_adjust_imm(ADD, ireg) \

#define arm_access_memory_adjust_imm_down(ireg) \
  arm_access_memory_adjust_imm(SUB, ireg) \

#define arm_access_memory_pre(type, direction) \
  arm_access_memory_adjust_##type##_##direction(reg_a0) \

#define arm_access_memory_pre_wb(type, direction) \
  arm_access_memory_adjust_##type##_##direction(reg_a0); \
  generate_store_reg(reg_a0, rn) \

#define arm_access_memory_post(type, direction) \
  u32 _rn_dest = prepare_store_reg(reg_a1, rn); \
  if(_rn != reg_a0) \
  { \
    generate_load_reg(reg_a0, rn); \
  } \
  arm_access_memory_adjust_##type##_##direction(_rn_dest); \
  complete_store_reg(_rn_dest, rn) \

#define arm_data_trans_reg(adjust_op, direction) \
  arm_decode_data_trans_reg(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  u32 _rm = prepare_load_reg(reg_a1, rm); \
  arm_access_memory_##adjust_op(reg_sh, direction) \

#define arm_data_trans_imm(adjust_op, direction) \
  arm_decode_data_trans_imm(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  arm_access_memory_##adjust_op(imm, direction) \

#define arm_data_trans_half_reg(adjust_op, direction) \
  arm_decode_half_trans_r(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  u32 _rm = prepare_load_reg(reg_a1, rm); \
  arm_access_memory_##adjust_op(reg, direction) \

#define arm_data_trans_half_imm(adjust_op, direction) \
  arm_decode_half_trans_of(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  arm_access_memory_##adjust_op(imm, direction) \

#define arm_access_memory(access_type, direction, adjust_op, mem_type, \
 offset_type) \
{ \
  arm_data_trans_##offset_type(adjust_op, direction); \
  arm_access_memory_##access_type(mem_type); \
} \

#define word_bit_count(word) \
  (bit_count[word >> 8] + bit_count[word & 0xFF]) \
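
// bit_count (defined elsewhere in the emulator) is assumed here to be a
// 256-entry popcount table, so word_bit_count(reg_list) is the number of
// set bits in a 16-bit register list - e.g. word_bit_count(0x8010) == 2.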

#define sprint_no(access_type, pre_op, post_op, wb) \

#define sprint_yes(access_type, pre_op, post_op, wb) \
  printf("sbit on %s %s %s %s\n", #access_type, #pre_op, #post_op, #wb) \

// TODO: Make these use cached registers. Implement iwram_stack_optimize.

#define arm_block_memory_load() \
  generate_function_call(execute_load_u32); \
  write32((pc + 8)); \
  generate_store_reg(reg_rv, i) \

#define arm_block_memory_store() \
  generate_load_reg_pc(reg_a1, i, 8); \
  generate_function_call(execute_store_u32_safe) \

#define arm_block_memory_final_load() \
  arm_block_memory_load() \

#define arm_block_memory_final_store() \
  generate_load_reg_pc(reg_a1, i, 12); \
  generate_function_call(execute_store_u32); \
  write32((pc + 4)) \

#define arm_block_memory_adjust_pc_store() \

#define arm_block_memory_adjust_pc_load() \
  if(reg_list & 0x8000) \
  { \
    generate_mov(reg_a0, reg_rv); \
    generate_indirect_branch_arm(); \
  } \

#define arm_block_memory_offset_down_a() \
  generate_sub_imm(reg_s0, ((word_bit_count(reg_list) * 4) - 4), 0) \

#define arm_block_memory_offset_down_b() \
  generate_sub_imm(reg_s0, (word_bit_count(reg_list) * 4), 0) \

#define arm_block_memory_offset_no() \

#define arm_block_memory_offset_up() \
  generate_add_imm(reg_s0, 4, 0) \

#define arm_block_memory_writeback_down() \
  generate_load_reg(reg_a0, rn); \
  generate_sub_imm(reg_a0, (word_bit_count(reg_list) * 4), 0); \
  generate_store_reg(reg_a0, rn) \

#define arm_block_memory_writeback_up() \
  generate_load_reg(reg_a0, rn); \
  generate_add_imm(reg_a0, (word_bit_count(reg_list) * 4), 0); \
  generate_store_reg(reg_a0, rn) \

#define arm_block_memory_writeback_no()

// Only emit writeback if the register is not in the list

#define arm_block_memory_writeback_load(writeback_type) \
  if(!((reg_list >> rn) & 0x01)) \
  { \
    arm_block_memory_writeback_##writeback_type(); \
  } \

#define arm_block_memory_writeback_store(writeback_type) \
  arm_block_memory_writeback_##writeback_type() \

#define arm_block_memory(access_type, offset_type, writeback_type, s_bit) \
{ \
  arm_decode_block_trans(); \
  u32 offset = 0; \
  u32 i; \
 \
  generate_load_reg(reg_s0, rn); \
  arm_block_memory_offset_##offset_type(); \
  arm_block_memory_writeback_##access_type(writeback_type); \
  ARM_BIC_REG_IMM(0, reg_s0, reg_s0, 0x03, 0); \
 \
  for(i = 0; i < 16; i++) \
  { \
    if((reg_list >> i) & 0x01) \
    { \
      cycle_count++; \
      generate_add_reg_reg_imm(reg_a0, reg_s0, offset, 0); \
      if(reg_list & ~((2 << i) - 1)) \
      { \
        arm_block_memory_##access_type(); \
        offset += 4; \
      } \
      else \
      { \
        arm_block_memory_final_##access_type(); \
        break; \
      } \
    } \
  } \
 \
  arm_block_memory_adjust_pc_##access_type(); \
} \

#define arm_swap(type) \
{ \
  arm_decode_swap(); \
 \
  generate_load_reg(reg_a0, rn); \
  generate_function_call(execute_load_##type); \
  write32((pc + 8)); \
  generate_mov(reg_s0, reg_rv); \
  generate_load_reg(reg_a0, rn); \
  generate_load_reg(reg_a1, rm); \
  generate_function_call(execute_store_##type); \
  write32((pc + 4)); \
  generate_store_reg(reg_s0, rd); \
} \

#define thumb_generate_op_reg(name, _rd, _rs, _rn) \
  u32 __rm = prepare_load_reg(reg_rm, _rn); \
  generate_op_##name##_reg_immshift(__rd, __rn, __rm, ARMSHIFT_LSL, 0) \

#define thumb_generate_op_imm(name, _rd, _rs, imm_) \
{ \
  u32 imm_ror = 0; \
  generate_op_##name##_imm(__rd, __rn); \
} \

#define thumb_data_proc(type, name, op_type, _rd, _rs, _rn) \
{ \
  thumb_decode_##type(); \
  u32 __rn = prepare_load_reg(reg_rn, _rs); \
  u32 __rd = prepare_store_reg(reg_rd, _rd); \
  generate_load_reg(reg_rn, _rs); \
  thumb_generate_op_##op_type(name, _rd, _rs, _rn); \
  complete_store_reg(__rd, _rd); \
} \

#define thumb_data_proc_test(type, name, op_type, _rd, _rs) \
{ \
  thumb_decode_##type(); \
  u32 __rn = prepare_load_reg(reg_rn, _rd); \
  thumb_generate_op_##op_type(name, 0, _rd, _rs); \
} \

#define thumb_data_proc_unary(type, name, op_type, _rd, _rs) \
{ \
  thumb_decode_##type(); \
  u32 __rd = prepare_store_reg(reg_rd, _rd); \
  thumb_generate_op_##op_type(name, _rd, 0, _rs); \
  complete_store_reg(__rd, _rd); \
} \

#define complete_store_reg_pc_thumb() \
  if(rd == 15) \
  { \
    generate_indirect_branch_cycle_update(thumb); \
  } \
  else \
  { \
    complete_store_reg(_rd, rd); \
  } \

#define thumb_data_proc_hi(name) \
{ \
  thumb_decode_hireg_op(); \
  u32 _rd = prepare_load_reg_pc(reg_rd, rd, 4); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  generate_op_##name##_reg_immshift(_rd, _rd, _rs, ARMSHIFT_LSL, 0); \
  complete_store_reg_pc_thumb(); \
} \

#define thumb_data_proc_test_hi(name) \
{ \
  thumb_decode_hireg_op(); \
  u32 _rd = prepare_load_reg_pc(reg_rd, rd, 4); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  generate_op_##name##_reg_immshift(0, _rd, _rs, ARMSHIFT_LSL, 0); \
} \

#define thumb_data_proc_mov_hi() \
{ \
  thumb_decode_hireg_op(); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  u32 _rd = prepare_store_reg(reg_rd, rd); \
  ARM_MOV_REG_REG(0, _rd, _rs); \
  complete_store_reg_pc_thumb(); \
} \
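
// For the PC-relative load below, note that the ARM7TDMI computes the
// address as (instruction address & ~2) + 4, i.e. the pipelined Thumb PC
// rounded down to a word boundary, with imm scaled to a word offset.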

#define thumb_load_pc(_rd) \
{ \
  thumb_decode_imm(); \
  u32 __rd = prepare_store_reg(reg_rd, _rd); \
  generate_load_pc(__rd, (((pc & ~2) + 4) + (imm * 4))); \
  complete_store_reg(__rd, _rd); \
} \

#define thumb_load_sp(_rd) \
{ \
  thumb_decode_imm(); \
  u32 __sp = prepare_load_reg(reg_a0, REG_SP); \
  u32 __rd = prepare_store_reg(reg_a0, _rd); \
  ARM_ADD_REG_IMM(0, __rd, __sp, imm, arm_imm_lsl_to_rot(2)); \
  complete_store_reg(__rd, _rd); \
} \

#define thumb_adjust_sp_up() \
  ARM_ADD_REG_IMM(0, _sp, _sp, imm, arm_imm_lsl_to_rot(2)) \

#define thumb_adjust_sp_down() \
  ARM_SUB_REG_IMM(0, _sp, _sp, imm, arm_imm_lsl_to_rot(2)) \

#define thumb_adjust_sp(direction) \
{ \
  thumb_decode_add_sp(); \
  u32 _sp = prepare_load_reg(reg_a0, REG_SP); \
  thumb_adjust_sp_##direction(); \
  complete_store_reg(_sp, REG_SP); \
} \

#define generate_op_lsl_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_LSL, _rs) \

#define generate_op_lsr_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_LSR, _rs) \

#define generate_op_asr_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_ASR, _rs) \

#define generate_op_ror_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_ROR, _rs) \

#define generate_op_lsl_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_LSL, imm) \

#define generate_op_lsr_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_LSR, imm) \

#define generate_op_asr_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_ASR, imm) \

#define generate_op_ror_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_ROR, imm) \

#define generate_shift_reg(op_type) \
  u32 __rm = prepare_load_reg(reg_rd, rd); \
  u32 __rs = prepare_load_reg(reg_rs, rs); \
  generate_op_##op_type##_reg(__rd, __rm, __rs) \

#define generate_shift_imm(op_type) \
  u32 __rs = prepare_load_reg(reg_rs, rs); \
  generate_op_##op_type##_imm(__rd, __rs) \

#define thumb_shift(decode_type, op_type, value_type) \
{ \
  thumb_decode_##decode_type(); \
  u32 __rd = prepare_store_reg(reg_rd, rd); \
  generate_shift_##value_type(op_type); \
  complete_store_reg(__rd, rd); \
} \

// Operation types: imm, mem_reg, mem_imm

#define thumb_access_memory_load(mem_type, _rd) \
  cycle_count += 2; \
  generate_function_call(execute_load_##mem_type); \
  write32((pc + 4)); \
  generate_store_reg(reg_rv, _rd) \

#define thumb_access_memory_store(mem_type, _rd) \
  cycle_count++; \
  generate_load_reg(reg_a1, _rd); \
  generate_function_call(execute_store_##mem_type); \
  write32((pc + 2)) \

#define thumb_access_memory_generate_address_pc_relative(offset, _rb, _ro) \
  generate_load_pc(reg_a0, (offset)) \

#define thumb_access_memory_generate_address_reg_imm(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  ARM_ADD_REG_IMM(0, reg_a0, __rb, offset, 0) \

#define thumb_access_memory_generate_address_reg_imm_sp(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  ARM_ADD_REG_IMM(0, reg_a0, __rb, offset, arm_imm_lsl_to_rot(2)) \

#define thumb_access_memory_generate_address_reg_reg(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  u32 __ro = prepare_load_reg(reg_a1, _ro); \
  ARM_ADD_REG_REG(0, reg_a0, __rb, __ro) \

#define thumb_access_memory(access_type, op_type, _rd, _rb, _ro, \
 address_type, offset, mem_type) \
{ \
  thumb_decode_##op_type(); \
  thumb_access_memory_generate_address_##address_type(offset, _rb, _ro); \
  thumb_access_memory_##access_type(mem_type, _rd); \
} \

// TODO: Make these use cached registers. Implement iwram_stack_optimize.

#define thumb_block_address_preadjust_up() \
  generate_add_imm(reg_s0, (bit_count[reg_list] * 4), 0) \

#define thumb_block_address_preadjust_down() \
  generate_sub_imm(reg_s0, (bit_count[reg_list] * 4), 0) \

#define thumb_block_address_preadjust_push_lr() \
  generate_sub_imm(reg_s0, ((bit_count[reg_list] + 1) * 4), 0) \

#define thumb_block_address_preadjust_no() \

#define thumb_block_address_postadjust_no(base_reg) \
  generate_store_reg(reg_s0, base_reg) \

#define thumb_block_address_postadjust_up(base_reg) \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_down(base_reg) \
  generate_mov(reg_a0, reg_s0); \
  generate_sub_imm(reg_a0, (bit_count[reg_list] * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_pop_pc(base_reg) \
  generate_add_reg_reg_imm(reg_a0, reg_s0, \
   ((bit_count[reg_list] + 1) * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_push_lr(base_reg) \
  generate_store_reg(reg_s0, base_reg) \

#define thumb_block_memory_extra_no() \

#define thumb_block_memory_extra_up() \

#define thumb_block_memory_extra_down() \

#define thumb_block_memory_extra_pop_pc() \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
  generate_function_call(execute_load_u32); \
  write32((pc + 4)); \
  generate_mov(reg_a0, reg_rv); \
  generate_indirect_branch_cycle_update(thumb) \

#define thumb_block_memory_extra_push_lr(base_reg) \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
  generate_load_reg(reg_a1, REG_LR); \
  generate_function_call(execute_store_u32_safe) \

#define thumb_block_memory_load() \
  generate_function_call(execute_load_u32); \
  write32((pc + 4)); \
  generate_store_reg(reg_rv, i) \

#define thumb_block_memory_store() \
  generate_load_reg(reg_a1, i); \
  generate_function_call(execute_store_u32_safe) \

#define thumb_block_memory_final_load() \
  thumb_block_memory_load() \

#define thumb_block_memory_final_store() \
  generate_load_reg(reg_a1, i); \
  generate_function_call(execute_store_u32); \
  write32((pc + 2)) \

#define thumb_block_memory_final_no(access_type) \
  thumb_block_memory_final_##access_type() \

#define thumb_block_memory_final_up(access_type) \
  thumb_block_memory_final_##access_type() \

#define thumb_block_memory_final_down(access_type) \
  thumb_block_memory_final_##access_type() \

#define thumb_block_memory_final_push_lr(access_type) \
  thumb_block_memory_##access_type() \

#define thumb_block_memory_final_pop_pc(access_type) \
  thumb_block_memory_##access_type() \

#define thumb_block_memory(access_type, pre_op, post_op, base_reg) \
{ \
  thumb_decode_rlist(); \
  u32 offset = 0; \
  u32 i; \
 \
  generate_load_reg(reg_s0, base_reg); \
  ARM_BIC_REG_IMM(0, reg_s0, reg_s0, 0x03, 0); \
  thumb_block_address_preadjust_##pre_op(); \
  thumb_block_address_postadjust_##post_op(base_reg); \
 \
  for(i = 0; i < 8; i++) \
  { \
    if((reg_list >> i) & 0x01) \
    { \
      cycle_count++; \
      generate_add_reg_reg_imm(reg_a0, reg_s0, offset, 0); \
      if(reg_list & ~((2 << i) - 1)) \
      { \
        thumb_block_memory_##access_type(); \
        offset += 4; \
      } \
      else \
      { \
        thumb_block_memory_final_##post_op(access_type); \
        break; \
      } \
    } \
  } \
 \
  thumb_block_memory_extra_##post_op(); \
} \

#define thumb_conditional_branch(condition) \
{ \
  generate_cycle_update(); \
  generate_load_flags(); \
  generate_branch_filler(condition_opposite_##condition, backpatch_address); \
  generate_branch_no_cycle_update( \
   block_exits[block_exit_position].branch_source, \
   block_exits[block_exit_position].branch_target, thumb); \
  generate_branch_patch_conditional(backpatch_address, translation_ptr); \
  block_exit_position++; \
} \

#define arm_conditional_block_header() \
  generate_cycle_update(); \
  generate_load_flags(); \
  /* This will choose the opposite condition */ \
  condition ^= 0x01; \
  generate_branch_filler(condition, backpatch_address) \

#define arm_b() \
  generate_branch(arm) \

#define arm_bl() \
  generate_update_pc((pc + 4)); \
  generate_store_reg(reg_a0, REG_LR); \
  generate_branch(arm) \

#define arm_bx() \
  arm_decode_branchx(); \
  generate_load_reg(reg_a0, rn); \
  generate_indirect_branch_dual(); \

#define arm_swi() \
  generate_swi_hle_handler((opcode >> 16) & 0xFF, arm); \
  generate_function_call(execute_swi_arm); \
  write32((pc + 4)); \
  generate_branch(arm) \

#define thumb_b() \
  generate_branch(thumb) \

#define thumb_bl() \
  generate_update_pc(((pc + 2) | 0x01)); \
  generate_store_reg(reg_a0, REG_LR); \
  generate_branch(thumb) \
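
// A Thumb BL is two halfword instructions: the first stashes the
// pipelined PC plus the high half of the offset in LR, and the second
// (handled by thumb_blh below) adds the low half and branches, leaving
// the return address (with the Thumb bit set) in LR.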

#define thumb_blh() \
{ \
  thumb_decode_branch(); \
  generate_update_pc(((pc + 2) | 0x01)); \
  generate_load_reg(reg_a1, REG_LR); \
  generate_store_reg(reg_a0, REG_LR); \
  generate_mov(reg_a0, reg_a1); \
  generate_add_imm(reg_a0, (offset * 2), 0); \
  generate_indirect_branch_cycle_update(thumb); \
} \

#define thumb_bx() \
{ \
  thumb_decode_hireg_op(); \
  generate_load_reg_pc(reg_a0, rs, 4); \
  generate_indirect_branch_cycle_update(dual_thumb); \
} \

#define thumb_swi() \
  generate_swi_hle_handler(opcode & 0xFF, thumb); \
  generate_function_call(execute_swi_thumb); \
  write32((pc + 2)); \
  /* We're in ARM mode now */ \
  generate_branch(arm) \

u8 swi_hle_handle[256] =
{
  0x0, // SWI 0: SoftReset
  0x0, // SWI 1: RegisterRAMReset
  0x0, // SWI 2: Halt
  0x0, // SWI 3: Stop/Sleep
  0x0, // SWI 4: IntrWait
  0x0, // SWI 5: VBlankIntrWait
  0x1, // SWI 6: Div
  0x0, // SWI 7: DivArm
  0x0, // SWI 8: Sqrt
  0x0, // SWI 9: ArcTan
  0x0, // SWI A: ArcTan2
  0x0, // SWI B: CpuSet
  0x0, // SWI C: CpuFastSet
  0x0, // SWI D: GetBIOSCheckSum
  0x0, // SWI E: BgAffineSet
  0x0, // SWI F: ObjAffineSet
  0x0, // SWI 10: BitUnpack
  0x0, // SWI 11: LZ77UnCompWram
  0x0, // SWI 12: LZ77UnCompVram
  0x0, // SWI 13: HuffUnComp
  0x0, // SWI 14: RLUnCompWram
  0x0, // SWI 15: RLUnCompVram
  0x0, // SWI 16: Diff8bitUnFilterWram
  0x0, // SWI 17: Diff8bitUnFilterVram
  0x0, // SWI 18: Diff16bitUnFilter
  0x0, // SWI 19: SoundBias
  0x0, // SWI 1A: SoundDriverInit
  0x0, // SWI 1B: SoundDriverMode
  0x0, // SWI 1C: SoundDriverMain
  0x0, // SWI 1D: SoundDriverVSync
  0x0, // SWI 1E: SoundChannelClear
  0x0, // SWI 1F: MidiKey2Freq
  0x0, // SWI 20: SoundWhatever0
  0x0, // SWI 21: SoundWhatever1
  0x0, // SWI 22: SoundWhatever2
  0x0, // SWI 23: SoundWhatever3
  0x0, // SWI 24: SoundWhatever4
  0x0, // SWI 25: MultiBoot
  0x0, // SWI 26: HardReset
  0x0, // SWI 27: CustomHalt
  0x0, // SWI 28: SoundDriverVSyncOff
  0x0, // SWI 29: SoundDriverVSyncOn
  0x0  // SWI 2A: SoundGetJumpList
};

void execute_swi_hle_div_arm();
void execute_swi_hle_div_thumb();

void execute_swi_hle_div_c()
{
  s32 result = (s32)reg[0] / (s32)reg[1];
  reg[1] = (s32)reg[0] % (s32)reg[1];
  reg[0] = result;

  reg[3] = (result ^ (result >> 31)) - (result >> 31);
}
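
// The last line is a branchless absolute value: for two's complement x,
// (x ^ (x >> 31)) - (x >> 31) negates x when the sign bit is set, e.g.
// result = -7 -> (-7 ^ -1) - (-1) = 6 + 1 = 7, matching the BIOS Div
// behavior of returning abs(quotient) in r3.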

#define generate_swi_hle_handler(_swi_number, mode) \
{ \
  u32 swi_number = _swi_number; \
  if(swi_hle_handle[swi_number]) \
  { \
    /* Div */ \
    if(swi_number == 0x06) \
    { \
      generate_function_call(execute_swi_hle_div_##mode); \
    } \
    break; \
  } \
} \

#define generate_translation_gate(type) \
  generate_update_pc(pc); \
  generate_indirect_branch_no_cycle_update(type) \

#define generate_step_debug() \
  generate_function_call(step_debug_arm); \