1 | /* gameplaySP |
2 | * |
3 | * Copyright (C) 2006 Exophase <exophase@gmail.com> |
4 | * |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as |
7 | * published by the Free Software Foundation; either version 2 of |
8 | * the License, or (at your option) any later version. |
9 | * |
10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
18 | */ |
19 | |
20 | #ifndef ARM_EMIT_H |
21 | #define ARM_EMIT_H |
22 | |
23 | #include "arm_codegen.h" |
24 | |
25 | u32 arm_update_gba_arm(u32 pc); |
26 | u32 arm_update_gba_thumb(u32 pc); |
27 | u32 arm_update_gba_idle_arm(u32 pc); |
28 | u32 arm_update_gba_idle_thumb(u32 pc); |
29 | |
30 | // Although these are defined as a function, don't call them as |
31 | // such (jump to it instead) |
32 | void arm_indirect_branch_arm(u32 address); |
33 | void arm_indirect_branch_thumb(u32 address); |
34 | void arm_indirect_branch_dual_arm(u32 address); |
35 | void arm_indirect_branch_dual_thumb(u32 address); |
36 | |
37 | void execute_store_cpsr(u32 new_cpsr, u32 store_mask, u32 address); |
38 | u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address); |
39 | void execute_store_spsr(u32 new_cpsr, u32 store_mask); |
40 | u32 execute_read_spsr(); |
41 | u32 execute_spsr_restore(u32 address); |
42 | |
43 | void execute_swi_arm(u32 pc); |
44 | void execute_swi_thumb(u32 pc); |
45 | |
46 | void function_cc execute_store_u32_safe(u32 address, u32 source); |
47 | |
48 | void step_debug_arm(u32 pc); |
49 | |
50 | |
// Emit a 32-bit literal into the translation stream and advance the
// emit pointer. Wrapped in do/while(0) so both statements stay attached
// to the macro invocation even as the body of an unbraced if/else.
#define write32(value)                                                        \
  do                                                                          \
  {                                                                           \
    *((u32 *)translation_ptr) = (value);                                      \
    translation_ptr += 4;                                                     \
  } while(0)

// 24-bit word offset from 'source' to 'offset' as encoded in an ARM
// branch instruction: (target - source - 8) >> 2, where -8 accounts for
// the ARM pipeline (PC reads as instruction address + 8). Arguments are
// parenthesized so expression operands expand correctly.
#define arm_relative_offset(source, offset)                                   \
  ((((u32)(offset) - (u32)(source) - 8) >> 2) & 0xFFFFFF)
57 | |
58 | |
// reg_base_offset is the amount of bytes after reg_base where the registers
// actually begin.

#define reg_base_offset 1024


// Host (native ARM) register roles used by emitted code.
// reg_a0..reg_a2: argument/temporary registers.
#define reg_a0 ARMREG_R0
#define reg_a1 ARMREG_R1
#define reg_a2 ARMREG_R2

// reg_s0: extra scratch register (exact role defined by arm_stub.S).
// reg_base: base pointer to the emulated register file (the host SP is
// repurposed inside translated code - see LDR/STR with reg_base_offset).
// reg_flags: holds a saved copy of the host CPSR (see generate_save_flags).
#define reg_s0 ARMREG_R9
#define reg_base ARMREG_SP
#define reg_flags ARMREG_R11

// Running cycle counter for the current block (see generate_cycle_update).
#define reg_cycles ARMREG_R12

// Register in which helper calls return their value.
#define reg_rv ARMREG_R0

// Default host registers used when an emulated operand is not statically
// allocated and must be staged through a temporary.
#define reg_rm ARMREG_R0
#define reg_rn ARMREG_R1
#define reg_rs ARMREG_R14
#define reg_rd ARMREG_R0
81 | |
82 | |
83 | // Register allocation layout for ARM and Thumb: |
84 | // Map from a GBA register to a host ARM register. -1 means load it |
85 | // from memory into one of the temp registers. |
86 | |
87 | // The following registers are chosen based on statistical analysis |
88 | // of a few games (see below), but might not be the best ones. Results |
89 | // vary tremendously between ARM and Thumb (for obvious reasons), so |
90 | // two sets are used. Take care to not call any function which can |
91 | // overwrite any of these registers from the dynarec - only call |
92 | // trusted functions in arm_stub.S which know how to save/restore |
93 | // them and know how to transfer them to the C functions it calls |
94 | // if necessary. |
95 | |
96 | // The following define the actual registers available for allocation. |
97 | // As registers are freed up add them to this list. |
98 | |
99 | // Note that r15 is linked to the a0 temp reg - this register will |
100 | // be preloaded with a constant upon read, and used to link to |
101 | // indirect branch functions upon write. |
102 | |
// Host registers available for static GBA-register allocation
// (referenced by the allocation tables below).
#define reg_x0 ARMREG_R3
#define reg_x1 ARMREG_R4
#define reg_x2 ARMREG_R5
#define reg_x3 ARMREG_R6
#define reg_x4 ARMREG_R7
#define reg_x5 ARMREG_R8

// Sentinel: this GBA register lives in memory and is accessed through
// reg_base + reg_base_offset rather than a host register.
#define mem_reg -1
111 | |
112 | /* |
113 | |
114 | ARM register usage (38.775138% ARM instructions): |
115 | r00: 18.263814% (-- 18.263814%) |
116 | r12: 11.531477% (-- 29.795291%) |
117 | r09: 11.500162% (-- 41.295453%) |
118 | r14: 9.063440% (-- 50.358893%) |
119 | r06: 7.837682% (-- 58.196574%) |
120 | r01: 7.401049% (-- 65.597623%) |
121 | r07: 6.778340% (-- 72.375963%) |
122 | r05: 5.445009% (-- 77.820973%) |
123 | r02: 5.427288% (-- 83.248260%) |
124 | r03: 5.293743% (-- 88.542003%) |
125 | r04: 3.601103% (-- 92.143106%) |
126 | r11: 3.207311% (-- 95.350417%) |
127 | r10: 2.334864% (-- 97.685281%) |
128 | r08: 1.708207% (-- 99.393488%) |
129 | r15: 0.311270% (-- 99.704757%) |
130 | r13: 0.295243% (-- 100.000000%) |
131 | |
132 | Thumb register usage (61.224862% Thumb instructions): |
133 | r00: 34.788858% (-- 34.788858%) |
134 | r01: 26.564083% (-- 61.352941%) |
135 | r03: 10.983500% (-- 72.336441%) |
136 | r02: 8.303127% (-- 80.639567%) |
137 | r04: 4.900381% (-- 85.539948%) |
138 | r05: 3.941292% (-- 89.481240%) |
139 | r06: 3.257582% (-- 92.738822%) |
140 | r07: 2.644851% (-- 95.383673%) |
141 | r13: 1.408824% (-- 96.792497%) |
142 | r08: 0.906433% (-- 97.698930%) |
143 | r09: 0.679693% (-- 98.378623%) |
144 | r10: 0.656446% (-- 99.035069%) |
145 | r12: 0.453668% (-- 99.488737%) |
146 | r14: 0.248909% (-- 99.737646%) |
147 | r11: 0.171066% (-- 99.908713%) |
148 | r15: 0.091287% (-- 100.000000%) |
149 | |
150 | */ |
151 | |
152 | s32 arm_register_allocation[] = |
153 | { |
154 | reg_x0, // GBA r0 |
155 | reg_x1, // GBA r1 |
156 | mem_reg, // GBA r2 |
157 | mem_reg, // GBA r3 |
158 | mem_reg, // GBA r4 |
159 | mem_reg, // GBA r5 |
160 | reg_x2, // GBA r6 |
161 | mem_reg, // GBA r7 |
162 | mem_reg, // GBA r8 |
163 | reg_x3, // GBA r9 |
164 | mem_reg, // GBA r10 |
165 | mem_reg, // GBA r11 |
166 | reg_x4, // GBA r12 |
167 | mem_reg, // GBA r13 |
168 | reg_x5, // GBA r14 |
169 | reg_a0 // GBA r15 |
170 | |
171 | mem_reg, |
172 | mem_reg, |
173 | mem_reg, |
174 | mem_reg, |
175 | mem_reg, |
176 | mem_reg, |
177 | mem_reg, |
178 | mem_reg, |
179 | mem_reg, |
180 | mem_reg, |
181 | mem_reg, |
182 | mem_reg, |
183 | mem_reg, |
184 | mem_reg, |
185 | mem_reg, |
186 | mem_reg, |
187 | }; |
188 | |
189 | s32 thumb_register_allocation[] = |
190 | { |
191 | reg_x0, // GBA r0 |
192 | reg_x1, // GBA r1 |
193 | reg_x2, // GBA r2 |
194 | reg_x3, // GBA r3 |
195 | reg_x4, // GBA r4 |
196 | reg_x5, // GBA r5 |
197 | mem_reg, // GBA r6 |
198 | mem_reg, // GBA r7 |
199 | mem_reg, // GBA r8 |
200 | mem_reg, // GBA r9 |
201 | mem_reg, // GBA r10 |
202 | mem_reg, // GBA r11 |
203 | mem_reg, // GBA r12 |
204 | mem_reg, // GBA r13 |
205 | mem_reg, // GBA r14 |
206 | reg_a0 // GBA r15 |
207 | |
208 | mem_reg, |
209 | mem_reg, |
210 | mem_reg, |
211 | mem_reg, |
212 | mem_reg, |
213 | mem_reg, |
214 | mem_reg, |
215 | mem_reg, |
216 | mem_reg, |
217 | mem_reg, |
218 | mem_reg, |
219 | mem_reg, |
220 | mem_reg, |
221 | mem_reg, |
222 | mem_reg, |
223 | mem_reg, |
224 | }; |
225 | |
226 | |
227 | |
// Convert a left-shift amount into the equivalent ARM immediate
// rotate-right amount (an 8-bit value shifted left by n equals that
// value rotated right by 32 - n). Argument parenthesized so expression
// operands expand correctly.
#define arm_imm_lsl_to_rot(value)                                             \
  (32 - (value))
230 | |
231 | |
232 | u32 arm_disect_imm_32bit(u32 imm, u32 *stores, u32 *rotations) |
233 | { |
234 | u32 store_count = 0; |
235 | u32 left_shift = 0; |
236 | u32 i; |
237 | |
238 | // Otherwise it'll return 0 things to store because it'll never |
239 | // find anything. |
240 | if(imm == 0) |
241 | { |
242 | rotations[0] = 0; |
243 | stores[0] = 0; |
244 | return 1; |
245 | } |
246 | |
247 | // Find chunks of non-zero data at 2 bit alignments. |
248 | while(1) |
249 | { |
250 | for(; left_shift < 32; left_shift += 2) |
251 | { |
252 | if((imm >> left_shift) & 0x03) |
253 | break; |
254 | } |
255 | |
256 | if(left_shift == 32) |
257 | { |
258 | // We've hit the end of the useful data. |
259 | return store_count; |
260 | } |
261 | |
262 | // Hit the end, it might wrap back around to the beginning. |
263 | if(left_shift >= 24) |
264 | { |
265 | // Make a mask for the residual bits. IE, if we have |
266 | // 5 bits of data at the end we can wrap around to 3 |
267 | // bits of data in the beginning. Thus the first |
268 | // thing, after being shifted left, has to be less |
269 | // than 111b, 0x7, or (1 << 3) - 1. |
270 | u32 top_bits = 32 - left_shift; |
271 | u32 residual_bits = 8 - top_bits; |
272 | u32 residual_mask = (1 << residual_bits) - 1; |
273 | |
274 | if((store_count > 1) && (left_shift > 24) && |
275 | ((stores[0] << ((32 - rotations[0]) & 0x1F)) < residual_mask)) |
276 | { |
277 | // Then we can throw out the last bit and tack it on |
278 | // to the first bit. |
279 | u32 initial_bits = rotations[0]; |
280 | stores[0] = |
281 | (stores[0] << ((top_bits + (32 - rotations[0])) & 0x1F)) | |
282 | ((imm >> left_shift) & 0xFF); |
283 | rotations[0] = top_bits; |
284 | |
285 | return store_count; |
286 | } |
287 | else |
288 | { |
289 | // There's nothing to wrap over to in the beginning |
290 | stores[store_count] = (imm >> left_shift) & 0xFF; |
291 | rotations[store_count] = (32 - left_shift) & 0x1F; |
292 | return store_count + 1; |
293 | } |
294 | break; |
295 | } |
296 | |
297 | stores[store_count] = (imm >> left_shift) & 0xFF; |
298 | rotations[store_count] = (32 - left_shift) & 0x1F; |
299 | |
300 | store_count++; |
301 | left_shift += 8; |
302 | } |
303 | } |
304 | |
// Load an arbitrary 32-bit constant into host register ireg: one MOV
// with the first (value, rotation) pair from arm_disect_imm_32bit(),
// then one ORR per additional pair.
#define arm_load_imm_32bit(ireg, imm)                                         \
{                                                                             \
  u32 stores[4];                                                              \
  u32 rotations[4];                                                           \
  u32 store_count = arm_disect_imm_32bit(imm, stores, rotations);             \
  u32 i;                                                                      \
                                                                              \
  ARM_MOV_REG_IMM(0, ireg, stores[0], rotations[0]);                         \
                                                                              \
  for(i = 1; i < store_count; i++)                                            \
  {                                                                           \
    ARM_ORR_REG_IMM(0, ireg, ireg, stores[i], rotations[i]);                  \
  }                                                                           \
}                                                                             \


// Materialize a (known-constant) emulated PC value into ireg.
#define generate_load_pc(ireg, new_pc)                                        \
  arm_load_imm_32bit(ireg, new_pc)                                            \

// Load an immediate already in ARM (value, rotate) form.
#define generate_load_imm(ireg, imm, imm_ror)                                 \
  ARM_MOV_REG_IMM(0, ireg, imm, imm_ror)                                      \
326 | |
327 | |
328 | |
// Simple one-instruction emit helpers operating on host registers.
// Immediates are in ARM (value, rotate-right) form where noted.

#define generate_shift_left(ireg, imm)                                        \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_LSL, imm)                      \

#define generate_shift_right(ireg, imm)                                       \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_LSR, imm)                      \

#define generate_shift_right_arithmetic(ireg, imm)                            \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_ASR, imm)                      \

#define generate_rotate_right(ireg, imm)                                      \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_ROR, imm)                      \

#define generate_add(ireg_dest, ireg_src)                                     \
  ARM_ADD_REG_REG(0, ireg_dest, ireg_dest, ireg_src)                          \

#define generate_sub(ireg_dest, ireg_src)                                     \
  ARM_SUB_REG_REG(0, ireg_dest, ireg_dest, ireg_src)                          \

#define generate_or(ireg_dest, ireg_src)                                      \
  ARM_ORR_REG_REG(0, ireg_dest, ireg_dest, ireg_src)                          \

#define generate_xor(ireg_dest, ireg_src)                                     \
  ARM_EOR_REG_REG(0, ireg_dest, ireg_dest, ireg_src)                          \

#define generate_add_imm(ireg, imm, imm_ror)                                  \
  ARM_ADD_REG_IMM(0, ireg, ireg, imm, imm_ror)                                \

#define generate_sub_imm(ireg, imm, imm_ror)                                  \
  ARM_SUB_REG_IMM(0, ireg, ireg, imm, imm_ror)                                \

#define generate_xor_imm(ireg, imm, imm_ror)                                  \
  ARM_EOR_REG_IMM(0, ireg, ireg, imm, imm_ror)                                \

#define generate_add_reg_reg_imm(ireg_dest, ireg_src, imm, imm_ror)           \
  ARM_ADD_REG_IMM(0, ireg_dest, ireg_src, imm, imm_ror)                       \

#define generate_and_imm(ireg, imm, imm_ror)                                  \
  ARM_AND_REG_IMM(0, ireg, ireg, imm, imm_ror)                                \

// Register-to-register move; skipped entirely when source and
// destination are already the same host register.
#define generate_mov(ireg_dest, ireg_src)                                     \
  if(ireg_dest != ireg_src)                                                   \
  {                                                                           \
    ARM_MOV_REG_REG(0, ireg_dest, ireg_src);                                  \
  }                                                                           \

// Emit a BL to a helper. Only call the trusted stubs from arm_stub.S
// this way - they preserve the dynarec's register allocation.
#define generate_function_call(function_location)                             \
  ARM_BL(0, arm_relative_offset(translation_ptr, function_location))          \

// Return from translated code back to the dispatcher.
#define generate_exit_block()                                                 \
  ARM_BX(0, ARMREG_LR)                                                        \

// The branch target is to be filled in later (thus a 0 for now)

#define generate_branch_filler(condition_code, writeback_location)            \
  (writeback_location) = translation_ptr;                                     \
  ARM_B_COND(0, condition_code, 0)                                            \

// Load the new emulated PC constant into the a0 temp register.
#define generate_update_pc(new_pc)                                            \
  generate_load_pc(reg_a0, new_pc)                                            \

// Add the pending (compile-time) cycle_count to reg_cycles and reset it.
// Counts above 255 need two ADDs: high byte as a rotated immediate,
// then the low byte.
#define generate_cycle_update()                                               \
  if(cycle_count)                                                             \
  {                                                                           \
    if(cycle_count >> 8)                                                      \
    {                                                                         \
      ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count >> 8) & 0xFF,   \
       arm_imm_lsl_to_rot(8));                                                \
    }                                                                         \
    ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count & 0xFF), 0);      \
    cycle_count = 0;                                                          \
  }                                                                           \

// As above, but the emulated flags are saved first and the final add is
// ADDS so the host flags reflect the updated cycle counter.
#define generate_cycle_update_flag_set()                                      \
  if(cycle_count >> 8)                                                        \
  {                                                                           \
    ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count >> 8) & 0xFF,     \
     arm_imm_lsl_to_rot(8));                                                  \
  }                                                                           \
  generate_save_flags();                                                      \
  ARM_ADDS_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count & 0xFF), 0);       \
  cycle_count = 0                                                             \

// Patch the 24-bit offset field of the branch previously emitted at
// 'dest', preserving the top 8 bits (condition + opcode).
#define generate_branch_patch_conditional(dest, offset)                       \
  *((u32 *)(dest)) = (*((u32 *)dest) & 0xFF000000) |                          \
   arm_relative_offset(dest, offset)                                          \

#define generate_branch_patch_unconditional(dest, offset)                     \
  *((u32 *)(dest)) = (*((u32 *)dest) & 0xFF000000) |                          \
   arm_relative_offset(dest, offset)                                          \

418 | |
// A different function is called for idle updates because of the relative
// location of the embedded PC. The idle version could be optimized to put
// the CPU into halt mode too, however.

// Idle-loop exit: call the idle update stub; the target PC literal is
// embedded in the instruction stream right after the call.
#define generate_branch_idle_eliminate(writeback_location, new_pc, mode)      \
  generate_function_call(arm_update_gba_idle_##mode);                         \
  write32(new_pc);                                                            \
  generate_branch_filler(ARMCOND_AL, writeback_location)                      \

// Normal block exit: if reg_cycles has gone negative (sign bit set),
// skip over the embedded PC literal into the update stub call;
// otherwise fall through to the (to-be-patched) direct branch.
#define generate_branch_update(writeback_location, new_pc, mode)              \
  ARM_MOV_REG_IMMSHIFT(0, reg_a0, reg_cycles, ARMSHIFT_LSR, 31);              \
  ARM_ADD_REG_IMMSHIFT(0, ARMREG_PC, ARMREG_PC, reg_a0, ARMSHIFT_LSL, 2);     \
  write32(new_pc);                                                            \
  generate_function_call(arm_update_gba_##mode);                              \
  generate_branch_filler(ARMCOND_AL, writeback_location)                      \


// Choose the idle-eliminating exit when this pc is the detected
// idle-loop target, the normal exit otherwise.
#define generate_branch_no_cycle_update(writeback_location, new_pc, mode)     \
  if(pc == idle_loop_target_pc)                                               \
  {                                                                           \
    generate_branch_idle_eliminate(writeback_location, new_pc, mode);         \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    generate_branch_update(writeback_location, new_pc, mode);                 \
  }                                                                           \

// Same, but flush the pending cycle count first.
#define generate_branch_cycle_update(writeback_location, new_pc, mode)        \
  generate_cycle_update();                                                    \
  generate_branch_no_cycle_update(writeback_location, new_pc, mode)           \

449 | |
// a0 holds the destination

// Tail-jump (plain B, not BL) into the indirect branch stub for the
// given mode (arm/thumb/dual_*).
#define generate_indirect_branch_no_cycle_update(type)                        \
  ARM_B(0, arm_relative_offset(translation_ptr, arm_indirect_branch_##type))  \

#define generate_indirect_branch_cycle_update(type)                           \
  generate_cycle_update();                                                    \
  generate_indirect_branch_no_cycle_update(type)                              \

// No per-block prologue is emitted on this target.
#define generate_block_prologue()                                             \

460 | |
// Defines the per-block helper functions used while translating
// ARM-mode code. Expanded inside the block-translation function so the
// helpers (GCC nested functions) can see its locals: pc, condition,
// translation_ptr, cycle_count. The _thumb variant below is identical
// except for the register-allocation table it consults.
//
// prepare_load_reg:  returns the host register holding GBA reg_index,
//                    loading it into scratch_reg first if memory-resident.
// prepare_load_reg_pc: same, but r15 materializes pc + pc_offset.
// prepare_store_reg: returns the host register to write the new value to
//                    (scratch_reg when memory-resident).
// complete_store_reg: spills scratch_reg back to memory if needed.
// complete_store_reg_pc_*: r15 writes become indirect branches (no_flags)
//                    or an SPSR-restoring return (flags).
// generate_load_reg / generate_store_reg: unconditional copy between a
//                    given host register and a GBA register.
#define generate_block_extra_vars_arm()                                       \
  void generate_indirect_branch_arm()                                         \
  {                                                                           \
    if(condition == 0x0E)                                                     \
    {                                                                         \
      generate_cycle_update();                                                \
    }                                                                         \
    generate_indirect_branch_no_cycle_update(arm);                            \
  }                                                                           \
                                                                              \
  void generate_indirect_branch_dual()                                        \
  {                                                                           \
    if(condition == 0x0E)                                                     \
    {                                                                         \
      generate_cycle_update();                                                \
    }                                                                         \
    generate_indirect_branch_no_cycle_update(dual_arm);                       \
  }                                                                           \
                                                                              \
  u32 prepare_load_reg(u32 scratch_reg, u32 reg_index)                        \
  {                                                                           \
    u32 reg_use = arm_register_allocation[reg_index];                         \
    if(reg_use == mem_reg)                                                    \
    {                                                                         \
      ARM_LDR_IMM(0, scratch_reg, reg_base,                                   \
       (reg_base_offset + (reg_index * 4)));                                  \
      return scratch_reg;                                                     \
    }                                                                         \
                                                                              \
    return reg_use;                                                           \
  }                                                                           \
                                                                              \
  u32 prepare_load_reg_pc(u32 scratch_reg, u32 reg_index, u32 pc_offset)      \
  {                                                                           \
    if(reg_index == 15)                                                       \
    {                                                                         \
      generate_load_pc(scratch_reg, pc + pc_offset);                          \
      return scratch_reg;                                                     \
    }                                                                         \
    return prepare_load_reg(scratch_reg, reg_index);                          \
  }                                                                           \
                                                                              \
  u32 prepare_store_reg(u32 scratch_reg, u32 reg_index)                       \
  {                                                                           \
    u32 reg_use = arm_register_allocation[reg_index];                         \
    if(reg_use == mem_reg)                                                    \
      return scratch_reg;                                                     \
                                                                              \
    return reg_use;                                                           \
  }                                                                           \
                                                                              \
  void complete_store_reg(u32 scratch_reg, u32 reg_index)                     \
  {                                                                           \
    if(arm_register_allocation[reg_index] == mem_reg)                         \
    {                                                                         \
      ARM_STR_IMM(0, scratch_reg, reg_base,                                   \
       (reg_base_offset + (reg_index * 4)));                                  \
    }                                                                         \
  }                                                                           \
                                                                              \
  void complete_store_reg_pc_no_flags(u32 scratch_reg, u32 reg_index)         \
  {                                                                           \
    if(reg_index == 15)                                                       \
    {                                                                         \
      generate_indirect_branch_arm();                                         \
    }                                                                         \
    else                                                                      \
    {                                                                         \
      complete_store_reg(scratch_reg, reg_index);                             \
    }                                                                         \
  }                                                                           \
                                                                              \
  void complete_store_reg_pc_flags(u32 scratch_reg, u32 reg_index)            \
  {                                                                           \
    if(reg_index == 15)                                                       \
    {                                                                         \
      if(condition == 0x0E)                                                   \
      {                                                                       \
        generate_cycle_update();                                              \
      }                                                                       \
      generate_function_call(execute_spsr_restore);                           \
    }                                                                         \
    else                                                                      \
    {                                                                         \
      complete_store_reg(scratch_reg, reg_index);                             \
    }                                                                         \
  }                                                                           \
                                                                              \
  void generate_load_reg(u32 ireg, u32 reg_index)                             \
  {                                                                           \
    s32 load_src = arm_register_allocation[reg_index];                        \
    if(load_src != mem_reg)                                                   \
    {                                                                         \
      ARM_MOV_REG_REG(0, ireg, load_src);                                     \
    }                                                                         \
    else                                                                      \
    {                                                                         \
      ARM_LDR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4)));    \
    }                                                                         \
  }                                                                           \
                                                                              \
  void generate_store_reg(u32 ireg, u32 reg_index)                            \
  {                                                                           \
    s32 store_dest = arm_register_allocation[reg_index];                      \
    if(store_dest != mem_reg)                                                 \
    {                                                                         \
      ARM_MOV_REG_REG(0, store_dest, ireg);                                   \
    }                                                                         \
    else                                                                      \
    {                                                                         \
      ARM_STR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4)));    \
    }                                                                         \
  }                                                                           \

575 | |
// Thumb-mode counterpart of generate_block_extra_vars_arm(): same
// nested helper functions, but consulting thumb_register_allocation[]
// (and without the ARM-only indirect-branch helpers). Expanded inside
// the block-translation function so the helpers can see pc,
// translation_ptr, etc.
#define generate_block_extra_vars_thumb()                                     \
  u32 prepare_load_reg(u32 scratch_reg, u32 reg_index)                        \
  {                                                                           \
    u32 reg_use = thumb_register_allocation[reg_index];                       \
    if(reg_use == mem_reg)                                                    \
    {                                                                         \
      ARM_LDR_IMM(0, scratch_reg, reg_base,                                   \
       (reg_base_offset + (reg_index * 4)));                                  \
      return scratch_reg;                                                     \
    }                                                                         \
                                                                              \
    return reg_use;                                                           \
  }                                                                           \
                                                                              \
  u32 prepare_load_reg_pc(u32 scratch_reg, u32 reg_index, u32 pc_offset)      \
  {                                                                           \
    if(reg_index == 15)                                                       \
    {                                                                         \
      generate_load_pc(scratch_reg, pc + pc_offset);                          \
      return scratch_reg;                                                     \
    }                                                                         \
    return prepare_load_reg(scratch_reg, reg_index);                          \
  }                                                                           \
                                                                              \
  u32 prepare_store_reg(u32 scratch_reg, u32 reg_index)                       \
  {                                                                           \
    u32 reg_use = thumb_register_allocation[reg_index];                       \
    if(reg_use == mem_reg)                                                    \
      return scratch_reg;                                                     \
                                                                              \
    return reg_use;                                                           \
  }                                                                           \
                                                                              \
  void complete_store_reg(u32 scratch_reg, u32 reg_index)                     \
  {                                                                           \
    if(thumb_register_allocation[reg_index] == mem_reg)                       \
    {                                                                         \
      ARM_STR_IMM(0, scratch_reg, reg_base,                                   \
       (reg_base_offset + (reg_index * 4)));                                  \
    }                                                                         \
  }                                                                           \
                                                                              \
  void generate_load_reg(u32 ireg, u32 reg_index)                             \
  {                                                                           \
    s32 load_src = thumb_register_allocation[reg_index];                      \
    if(load_src != mem_reg)                                                   \
    {                                                                         \
      ARM_MOV_REG_REG(0, ireg, load_src);                                     \
    }                                                                         \
    else                                                                      \
    {                                                                         \
      ARM_LDR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4)));    \
    }                                                                         \
  }                                                                           \
                                                                              \
  void generate_store_reg(u32 ireg, u32 reg_index)                            \
  {                                                                           \
    s32 store_dest = thumb_register_allocation[reg_index];                    \
    if(store_dest != mem_reg)                                                 \
    {                                                                         \
      ARM_MOV_REG_REG(0, store_dest, ireg);                                   \
    }                                                                         \
    else                                                                      \
    {                                                                         \
      ARM_STR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4)));    \
    }                                                                         \
  }                                                                           \

643 | |
// High-water marks per translation cache: code below each mark has
// already had its instruction cache synced.
u8 *last_rom_translation_ptr = rom_translation_cache;
u8 *last_ram_translation_ptr = ram_translation_cache;
u8 *last_bios_translation_ptr = bios_translation_cache;

// Sync caches for newly emitted code in one translation cache: if the
// cache was flushed (emit pointer rewound), reset the mark; otherwise
// invalidate the icache from the old mark and advance it.
// NOTE(review): the 32-byte invalidate size looks intentional but
// undersized for large blocks - relies on warm_cache_op_range semantics.
#define translate_invalidate_dcache_one(which)                                \
  if (which##_translation_ptr < last_##which##_translation_ptr)               \
    last_##which##_translation_ptr = which##_translation_cache;               \
  if (which##_translation_ptr > last_##which##_translation_ptr)               \
  {                                                                           \
    /*warm_cache_op_range(WOP_D_CLEAN, last_##which##_translation_ptr,        \
     which##_translation_ptr - last_##which##_translation_ptr);*/             \
    warm_cache_op_range(WOP_I_INVALIDATE, last_##which##_translation_ptr, 32);\
    last_##which##_translation_ptr = which##_translation_ptr;                 \
  }
658 | |
// Make freshly generated code in all three translation caches visible
// to the instruction stream, then clean the entire data cache.
#define translate_invalidate_dcache()                                         \
{                                                                             \
  translate_invalidate_dcache_one(rom)                                        \
  translate_invalidate_dcache_one(ram)                                        \
  translate_invalidate_dcache_one(bios)                                       \
  /* notaz: tried cleaning dcache ranges, but it doesn't work for every game, \
   * don't know why */                                                        \
  warm_cache_op_all(WOP_D_CLEAN);                                             \
}

// Invalidate the instruction cache over [addr, addr + size).
#define invalidate_icache_region(addr, size)                                  \
  warm_cache_op_range(WOP_I_INVALIDATE, addr, size)
671 | |
672 | |
#define block_prologue_size 0


// It should be okay to still generate result flags, spsr will overwrite them.
// This is pretty infrequent (returning from interrupt handlers, et al) so
// probably not worth optimizing for.

// Take a pending IRQ if one is both raised (IE & IF), enabled (IME) and
// not masked (CPSR I bit clear): save the return address into the IRQ
// banked r14 and CPSR into the IRQ SPSR, enter IRQ mode with IRQs/FIQs
// disabled (0xD2) and redirect pc to the IRQ vector at 0x18.
// Note: this is a macro, so it rewrites the caller's local 'pc'.
#define check_for_interrupts()                                                \
  if((io_registers[REG_IE] & io_registers[REG_IF]) &&                         \
   io_registers[REG_IME] && ((reg[REG_CPSR] & 0x80) == 0))                    \
  {                                                                           \
    reg_mode[MODE_IRQ][6] = pc + 4;                                           \
    spsr[MODE_IRQ] = reg[REG_CPSR];                                           \
    reg[REG_CPSR] = 0xD2;                                                     \
    pc = 0x00000018;                                                          \
    set_cpu_mode(MODE_IRQ);                                                   \
  }                                                                           \

// Load a GBA register into ireg; reads of r15 produce the constant
// pc + pc_offset instead.
#define generate_load_reg_pc(ireg, reg_index, pc_offset)                      \
  if(reg_index == 15)                                                         \
  {                                                                           \
    generate_load_pc(ireg, pc + pc_offset);                                   \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    generate_load_reg(ireg, reg_index);                                       \
  }                                                                           \

// Store ireg to a GBA register; writes to r15 additionally become an
// indirect branch (no flag restore).
#define generate_store_reg_pc_no_flags(ireg, reg_index)                       \
  generate_store_reg(ireg, reg_index);                                        \
  if(reg_index == 15)                                                         \
  {                                                                           \
    generate_indirect_branch_arm();                                           \
  }                                                                           \

708 | |
// C-side completion of an SPSR->CPSR restoring PC write (called through
// the execute_spsr_restore stub): switches the emulated CPU mode to
// match the low 5 bits of the (already restored) CPSR, then services any
// pending interrupt. Returns the pc to resume at - check_for_interrupts()
// may rewrite it to the IRQ vector (0x18).
u32 function_cc execute_spsr_restore_body(u32 pc)
{
  set_cpu_mode(cpu_modes[reg[REG_CPSR] & 0x1F]);
  check_for_interrupts();

  return pc;
}
716 | |
717 | |
// Store ireg to a GBA register; a write to r15 restores CPSR from SPSR
// and branches (used for mode-returning instructions).
#define generate_store_reg_pc_flags(ireg, reg_index)                          \
  generate_store_reg(ireg, reg_index);                                        \
  if(reg_index == 15)                                                         \
  {                                                                           \
    if(condition == 0x0E)                                                     \
    {                                                                         \
      generate_cycle_update();                                                \
    }                                                                         \
    generate_function_call(execute_spsr_restore);                             \
  }                                                                           \


// No-ops on this target: emulated flags live in the host CPSR, so there
// is nothing to load/store around most instructions.
#define generate_load_flags()                                                 \
/* ARM_MSR_REG(0, ARM_PSR_F, reg_flags, ARM_CPSR) */                          \

#define generate_store_flags()                                                \
/* ARM_MRS_CPSR(0, reg_flags) */                                              \

// Capture the host CPSR into reg_flags / write it back (condition-flag
// field only on restore).
#define generate_save_flags()                                                 \
  ARM_MRS_CPSR(0, reg_flags)                                                  \

#define generate_restore_flags()                                              \
  ARM_MSR_REG(0, ARM_PSR_F, reg_flags, ARM_CPSR)                              \

741 | |
742 | |
// Logical inverse of each ARM condition code, used to emit a skip
// branch over conditionally executed code. Note al's "opposite" is NV
// (never), which is deprecated on later ARM architectures.
#define condition_opposite_eq ARMCOND_NE
#define condition_opposite_ne ARMCOND_EQ
#define condition_opposite_cs ARMCOND_CC
#define condition_opposite_cc ARMCOND_CS
#define condition_opposite_mi ARMCOND_PL
#define condition_opposite_pl ARMCOND_MI
#define condition_opposite_vs ARMCOND_VC
#define condition_opposite_vc ARMCOND_VS
#define condition_opposite_hi ARMCOND_LS
#define condition_opposite_ls ARMCOND_HI
#define condition_opposite_ge ARMCOND_LT
#define condition_opposite_lt ARMCOND_GE
#define condition_opposite_gt ARMCOND_LE
#define condition_opposite_le ARMCOND_GT
#define condition_opposite_al ARMCOND_NV
#define condition_opposite_nv ARMCOND_AL
759 | |
// Emit a direct branch for the current block exit (flushing pending
// cycles first) and consume the next entry in block_exits[].
#define generate_branch(mode)                                                 \
{                                                                             \
  generate_branch_cycle_update(                                               \
   block_exits[block_exit_position].branch_source,                            \
   block_exits[block_exit_position].branch_target, mode);                     \
  block_exit_position++;                                                      \
}                                                                             \

767 | |
768 | |
// Non-flag-setting data-processing emitters, one per ARM ALU opcode,
// for the register-operand addressing modes:
//   *_reg_immshift: second operand is _rm shifted by an immediate.
//   *_reg_regshift: second operand is _rm shifted by register _rs.
// MOV/MVN take no first source operand, so _rn is accepted but unused.

#define generate_op_and_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_AND_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_orr_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_ORR_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_eor_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_EOR_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_bic_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_BIC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_sub_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_SUB_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_rsb_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_RSB_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_sbc_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_SBC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_rsc_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_RSC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_add_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_ADD_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_adc_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_ADC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift)                   \

#define generate_op_mov_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_MOV_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift)                        \

#define generate_op_mvn_reg_immshift(_rd, _rn, _rm, shift_type, shift)        \
  ARM_MVN_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift)                        \


#define generate_op_and_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_AND_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_orr_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_ORR_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_eor_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_EOR_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_bic_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_BIC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_sub_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_SUB_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_rsb_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_RSB_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_sbc_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_SBC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_rsc_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_RSC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_add_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_ADD_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_adc_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_ADC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                     \

#define generate_op_mov_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_MOV_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs)                          \

#define generate_op_mvn_reg_regshift(_rd, _rn, _rm, shift_type, _rs)          \
  ARM_MVN_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs)                          \

841 | |
842 | |
// Non-flag-setting data-processing emitters with an immediate second
// operand. 'imm' and 'imm_ror' (ARM 8-bit value + rotate form) are
// expected to be in scope at the expansion site.

#define generate_op_and_imm(_rd, _rn)                                         \
  ARM_AND_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_orr_imm(_rd, _rn)                                         \
  ARM_ORR_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_eor_imm(_rd, _rn)                                         \
  ARM_EOR_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_bic_imm(_rd, _rn)                                         \
  ARM_BIC_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_sub_imm(_rd, _rn)                                         \
  ARM_SUB_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_rsb_imm(_rd, _rn)                                         \
  ARM_RSB_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_sbc_imm(_rd, _rn)                                         \
  ARM_SBC_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_rsc_imm(_rd, _rn)                                         \
  ARM_RSC_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_add_imm(_rd, _rn)                                         \
  ARM_ADD_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

#define generate_op_adc_imm(_rd, _rn)                                         \
  ARM_ADC_REG_IMM(0, _rd, _rn, imm, imm_ror)                                  \

// MOV/MVN have no first source operand; _rn is accepted but unused.
#define generate_op_mov_imm(_rd, _rn)                                         \
  ARM_MOV_REG_IMM(0, _rd, imm, imm_ror)                                       \

#define generate_op_mvn_imm(_rd, _rn)                                         \
  ARM_MVN_REG_IMM(0, _rd, imm, imm_ror)                                       \

878 | |
879 | |
// Flag-setting emit helpers, parameterized by the (already S-suffixed)
// opcode name. The suffix describes which emulated flag class the
// instruction produces:
//   lflags: logical ops (N/Z, shifter carry)
//   aflags: arithmetic ops (N/Z/C/V); _load_c variants also consume C
//   uflags: unary ops (MOVS/MVNS - no _rn operand)
//   tflags: test ops (TST/TEQ/CMP/CMN - no destination)

#define generate_op_reg_immshift_lflags(name, _rd, _rn, _rm, st, shift)       \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, shift)                      \

#define generate_op_reg_immshift_aflags(name, _rd, _rn, _rm, st, shift)       \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, shift)                      \

#define generate_op_reg_immshift_aflags_load_c(name, _rd, _rn, _rm, st, sh)   \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, sh)                         \

#define generate_op_reg_immshift_uflags(name, _rd, _rm, shift_type, shift)    \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift)                   \

#define generate_op_reg_immshift_tflags(name, _rn, _rm, shift_type, shift)    \
  ARM_##name##_REG_IMMSHIFT(0, _rn, _rm, shift_type, shift)                   \


#define generate_op_reg_regshift_lflags(name, _rd, _rn, _rm, shift_type, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs)                \

#define generate_op_reg_regshift_aflags(name, _rd, _rn, _rm, st, _rs)         \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, st, _rs)                        \

#define generate_op_reg_regshift_aflags_load_c(name, _rd, _rn, _rm, st, _rs)  \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, st, _rs)                        \

#define generate_op_reg_regshift_uflags(name, _rd, _rm, shift_type, _rs)      \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs)                     \

#define generate_op_reg_regshift_tflags(name, _rn, _rm, shift_type, _rs)      \
  ARM_##name##_REG_REGSHIFT(0, _rn, _rm, shift_type, _rs)                     \


#define generate_op_imm_lflags(name, _rd, _rn)                                \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror)                             \

#define generate_op_imm_aflags(name, _rd, _rn)                                \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror)                             \

#define generate_op_imm_aflags_load_c(name, _rd, _rn)                         \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror)                             \

#define generate_op_imm_uflags(name, _rd)                                     \
  ARM_##name##_REG_IMM(0, _rd, imm, imm_ror)                                  \

#define generate_op_imm_tflags(name, _rn)                                     \
  ARM_##name##_REG_IMM(0, _rn, imm, imm_ror)                                  \

926 | |
927 | |
928 | #define generate_op_ands_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
929 | generate_op_reg_immshift_lflags(ANDS, _rd, _rn, _rm, shift_type, shift) \ |
930 | |
931 | #define generate_op_orrs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
932 | generate_op_reg_immshift_lflags(ORRS, _rd, _rn, _rm, shift_type, shift) \ |
933 | |
934 | #define generate_op_eors_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
935 | generate_op_reg_immshift_lflags(EORS, _rd, _rn, _rm, shift_type, shift) \ |
936 | |
937 | #define generate_op_bics_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
938 | generate_op_reg_immshift_lflags(BICS, _rd, _rn, _rm, shift_type, shift) \ |
939 | |
940 | #define generate_op_subs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
941 | generate_op_reg_immshift_aflags(SUBS, _rd, _rn, _rm, shift_type, shift) \ |
942 | |
943 | #define generate_op_rsbs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
944 | generate_op_reg_immshift_aflags(RSBS, _rd, _rn, _rm, shift_type, shift) \ |
945 | |
946 | #define generate_op_sbcs_reg_immshift(_rd, _rn, _rm, st, shift) \ |
947 | generate_op_reg_immshift_aflags_load_c(SBCS, _rd, _rn, _rm, st, shift) \ |
948 | |
949 | #define generate_op_rscs_reg_immshift(_rd, _rn, _rm, st, shift) \ |
950 | generate_op_reg_immshift_aflags_load_c(RSCS, _rd, _rn, _rm, st, shift) \ |
951 | |
952 | #define generate_op_adds_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
953 | generate_op_reg_immshift_aflags(ADDS, _rd, _rn, _rm, shift_type, shift) \ |
954 | |
955 | #define generate_op_adcs_reg_immshift(_rd, _rn, _rm, st, shift) \ |
956 | generate_op_reg_immshift_aflags_load_c(ADCS, _rd, _rn, _rm, st, shift) \ |
957 | |
958 | #define generate_op_movs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
959 | generate_op_reg_immshift_uflags(MOVS, _rd, _rm, shift_type, shift) \ |
960 | |
961 | #define generate_op_mvns_reg_immshift(_rd, _rn, _rm, shift_type, shift) \ |
962 | generate_op_reg_immshift_uflags(MVNS, _rd, _rm, shift_type, shift) \ |
963 | |
// The reg operand is in reg_rm, not reg_rn like expected, so rsbs isn't
// being used here. When rsbs is fully inlined it can be used with the
// appropriate operands.
967 | |
/* NEG (Thumb): synthesized as 0 - rm via SUBS, loading a literal zero into
   reg_rn first (see comment above about why RSBS is not used). */
#define generate_op_neg_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  { \
    generate_load_imm(reg_rn, 0, 0); \
    generate_op_subs_reg_immshift(_rd, reg_rn, _rm, ARMSHIFT_LSL, 0); \
  } \

/* Flag-setting multiply: the guest flags must be live in the host flags
   around the native MULS, hence the load/store bracket.  NOTE(review):
   multi-statement macro without a do/while(0) wrapper - relies on call
   sites never placing it in an unbraced if/else. */
#define generate_op_muls_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_load_flags(); \
  ARM_MULS(0, _rd, _rn, _rm); \
  generate_store_flags() \

/* Compare/test ops: flags only, no destination register is written. */
#define generate_op_cmp_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(CMP, _rn, _rm, shift_type, shift) \

#define generate_op_cmn_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(CMN, _rn, _rm, shift_type, shift) \

#define generate_op_tst_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(TST, _rn, _rm, shift_type, shift) \

#define generate_op_teq_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(TEQ, _rn, _rm, shift_type, shift) \

990 | |
991 | |
/* Register-specified-shift counterparts of the wrappers above; identical
   mapping, with the shift amount taken from register _rs. */
#define generate_op_ands_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(ANDS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_orrs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(ORRS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_eors_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(EORS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_bics_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(BICS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_subs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(SUBS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_rsbs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(RSBS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_sbcs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(SBCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_rscs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(RSCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_adds_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(ADDS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_adcs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(ADCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_movs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_uflags(MOVS, _rd, _rm, shift_type, _rs) \

#define generate_op_mvns_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_uflags(MVNS, _rd, _rm, shift_type, _rs) \

#define generate_op_cmp_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(CMP, _rn, _rm, shift_type, _rs) \

#define generate_op_cmn_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(CMN, _rn, _rm, shift_type, _rs) \

#define generate_op_tst_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(TST, _rn, _rm, shift_type, _rs) \

#define generate_op_teq_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(TEQ, _rn, _rm, shift_type, _rs) \

1039 | |
1040 | |
/* Rotated-immediate counterparts of the per-instruction wrappers; the
   immediate pair (imm, imm_ror) comes from the decode macro in scope. */
#define generate_op_ands_imm(_rd, _rn) \
  generate_op_imm_lflags(ANDS, _rd, _rn) \

#define generate_op_orrs_imm(_rd, _rn) \
  generate_op_imm_lflags(ORRS, _rd, _rn) \

#define generate_op_eors_imm(_rd, _rn) \
  generate_op_imm_lflags(EORS, _rd, _rn) \

#define generate_op_bics_imm(_rd, _rn) \
  generate_op_imm_lflags(BICS, _rd, _rn) \

#define generate_op_subs_imm(_rd, _rn) \
  generate_op_imm_aflags(SUBS, _rd, _rn) \

#define generate_op_rsbs_imm(_rd, _rn) \
  generate_op_imm_aflags(RSBS, _rd, _rn) \

#define generate_op_sbcs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(SBCS, _rd, _rn) \

#define generate_op_rscs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(RSCS, _rd, _rn) \

#define generate_op_adds_imm(_rd, _rn) \
  generate_op_imm_aflags(ADDS, _rd, _rn) \

#define generate_op_adcs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(ADCS, _rd, _rn) \

/* MOV/MVN ignore _rn; CMP/CMN/TST/TEQ ignore _rd. */
#define generate_op_movs_imm(_rd, _rn) \
  generate_op_imm_uflags(MOVS, _rd) \

#define generate_op_mvns_imm(_rd, _rn) \
  generate_op_imm_uflags(MVNS, _rd) \

#define generate_op_cmp_imm(_rd, _rn) \
  generate_op_imm_tflags(CMP, _rn) \

#define generate_op_cmn_imm(_rd, _rn) \
  generate_op_imm_tflags(CMN, _rn) \

#define generate_op_tst_imm(_rd, _rn) \
  generate_op_imm_tflags(TST, _rn) \

#define generate_op_teq_imm(_rd, _rn) \
  generate_op_imm_tflags(TEQ, _rn) \

1088 | |
1089 | |
/* yes/no-suffixed helpers, selected by token pasting from the load_op /
   store_op arguments of arm_generate_op_*.  The 'yes' forms declare the
   local host-register handles (_rn/_rd) the op emitters expect; the 'no'
   forms expand to nothing so the names are simply never referenced. */
#define prepare_load_rn_yes() \
  u32 _rn = prepare_load_reg_pc(reg_rn, rn, 8) \

#define prepare_load_rn_no() \

#define prepare_store_rd_yes() \
  u32 _rd = prepare_store_reg(reg_rd, rd) \

#define prepare_store_rd_no() \

/* Completes the rd store, handling a possible write to PC according to
   flags_op (whether the op was flag-setting, for SPSR restore). */
#define complete_store_rd_yes(flags_op) \
  complete_store_reg_pc_##flags_op(_rd, rd) \

#define complete_store_rd_no(flags_op) \

1104 | |
/* Emit one ARM data-processing instruction with a register operand.
   Bit 4 of the guest opcode selects a register-specified shift (shift
   amount in rs) versus an immediate shift amount (opcode bits 7..11).
   The PC-relative loads pass the instruction-relative PC offset (8 or 12)
   so a guest read of r15 sees the architecturally correct value. */
#define arm_generate_op_reg(name, load_op, store_op, flags_op) \
  u32 shift_type = (opcode >> 5) & 0x03; \
  arm_decode_data_proc_reg(); \
  prepare_load_rn_##load_op(); \
  prepare_store_rd_##store_op(); \
  \
  if((opcode >> 4) & 0x01) \
  { \
    u32 rs = ((opcode >> 8) & 0x0F); \
    u32 _rs = prepare_load_reg(reg_rs, rs); \
    u32 _rm = prepare_load_reg_pc(reg_rm, rm, 12); \
    generate_op_##name##_reg_regshift(_rd, _rn, _rm, shift_type, _rs); \
  } \
  else \
  { \
    u32 shift_imm = ((opcode >> 7) & 0x1F); \
    u32 _rm = prepare_load_reg_pc(reg_rm, rm, 8); \
    generate_op_##name##_reg_immshift(_rd, _rn, _rm, shift_type, shift_imm); \
  } \
  complete_store_rd_##store_op(flags_op) \

#define arm_generate_op_reg_flags(name, load_op, store_op, flags_op) \
  arm_generate_op_reg(name, load_op, store_op, flags_op) \

// imm will be loaded by the called function if necessary.

/* Immediate-operand form: the decode macro extracts (imm, imm_ror) which
   the generate_op_*_imm emitters pick up implicitly. */
#define arm_generate_op_imm(name, load_op, store_op, flags_op) \
  arm_decode_data_proc_imm(); \
  prepare_load_rn_##load_op(); \
  prepare_store_rd_##store_op(); \
  generate_op_##name##_imm(_rd, _rn); \
  complete_store_rd_##store_op(flags_op) \

#define arm_generate_op_imm_flags(name, load_op, store_op, flags_op) \
  arm_generate_op_imm(name, load_op, store_op, flags_op) \

1140 | |
/* Top-level data-processing emitters, distinguished by operand usage:
     arm_data_proc       - reads rn, writes rd (ADD, AND, ...)
     arm_data_proc_test  - reads rn only, flags-only result (CMP, TST, ...)
     arm_data_proc_unary - writes rd only, no rn (MOV, MVN) */
#define arm_data_proc(name, type, flags_op) \
{ \
  arm_generate_op_##type(name, yes, yes, flags_op); \
} \

#define arm_data_proc_test(name, type) \
{ \
  arm_generate_op_##type(name, yes, no, no); \
} \

#define arm_data_proc_unary(name, type, flags_op) \
{ \
  arm_generate_op_##type(name, no, yes, flags_op); \
} \

1155 | |
1156 | |
/* MUL/MLA variants, selected by accumulate (add) and flag-setting (flags):
   the accumulate forms load rn into reg_a2 first; the flag-setting forms
   bracket the native MULS/MLAS with guest-flag load/store.
   NOTE(review): the no-accumulate flags form operates on reg_a0/reg_a1
   directly rather than _rd/_rm/_rs like its siblings - presumably relying
   on the register assignments made in arm_multiply; verify. */
#define arm_multiply_add_no_flags_no() \
  ARM_MUL(0, _rd, _rm, _rs) \

#define arm_multiply_add_yes_flags_no() \
  u32 _rn = prepare_load_reg(reg_a2, rn); \
  ARM_MLA(0, _rd, _rm, _rs, _rn) \

#define arm_multiply_add_no_flags_yes() \
  generate_load_flags(); \
  ARM_MULS(0, reg_a0, reg_a0, reg_a1) \
  generate_store_flags() \

#define arm_multiply_add_yes_flags_yes() \
  u32 _rn = prepare_load_reg(reg_a2, rn); \
  generate_load_flags(); \
  ARM_MLAS(0, _rd, _rm, _rs, _rn); \
  generate_store_flags()


/* Emit a 32-bit multiply: load rm/rs, pick the add/flags variant, store rd.
   Note rd shares host register reg_a0 with rm. */
#define arm_multiply(add_op, flags) \
{ \
  arm_decode_multiply(); \
  u32 _rm = prepare_load_reg(reg_a0, rm); \
  u32 _rs = prepare_load_reg(reg_a1, rs); \
  u32 _rd = prepare_store_reg(reg_a0, rd); \
  arm_multiply_add_##add_op##_flags_##flags(); \
  complete_store_reg(_rd, rd); \
} \


/* Map the long-multiply type tokens to native mnemonics. */
#define arm_multiply_long_name_s64 SMULL
#define arm_multiply_long_name_u64 UMULL
#define arm_multiply_long_name_s64_add SMLAL
#define arm_multiply_long_name_u64_add UMLAL

1191 | |
1192 | |
/* Long multiply body: plain, or S-suffixed with the guest flags made live
   around the native instruction. */
#define arm_multiply_long_flags_no(name) \
  ARM_##name(0, _rdlo, _rdhi, _rm, _rs) \

#define arm_multiply_long_flags_yes(name) \
  generate_load_flags(); \
  ARM_##name##S(0, _rdlo, _rdhi, _rm, _rs); \
  generate_store_flags() \


/* Accumulating forms (SMLAL/UMLAL) must preload the existing 64-bit value
   from rdlo:rdhi; the non-accumulating forms need nothing extra. */
#define arm_multiply_long_add_no(name) \

#define arm_multiply_long_add_yes(name) \
  prepare_load_reg(reg_a0, rdlo); \
  prepare_load_reg(reg_a1, rdhi) \


#define arm_multiply_long_op(flags, name) \
  arm_multiply_long_flags_##flags(name) \

/* Emit a 64-bit multiply (name is s64/u64[_add]); result goes to the
   rdlo:rdhi pair.  Note rdlo/rdhi share host regs with the preloads above. */
#define arm_multiply_long(name, add_op, flags) \
{ \
  arm_decode_multiply_long(); \
  u32 _rm = prepare_load_reg(reg_a2, rm); \
  u32 _rs = prepare_load_reg(reg_rs, rs); \
  u32 _rdlo = prepare_store_reg(reg_a0, rdlo); \
  u32 _rdhi = prepare_store_reg(reg_a1, rdhi); \
  arm_multiply_long_add_##add_op(name); \
  arm_multiply_long_op(flags, arm_multiply_long_name_##name); \
  complete_store_reg(_rdlo, rdlo); \
  complete_store_reg(_rdhi, rdhi); \
} \

1224 | |
/* MRS rd, CPSR: the emulated NZCV live in the top nibble of reg_flags, so
   clear bits 31..28 of the stored CPSR (BIC of 0xF0 rotated into place),
   mask reg_flags down to that same nibble, and OR the two together. */
#define arm_psr_read_cpsr() \
  u32 _rd = prepare_store_reg(reg_a0, rd); \
  generate_load_reg(_rd, REG_CPSR); \
  ARM_BIC_REG_IMM(0, _rd, _rd, 0xF0, arm_imm_lsl_to_rot(24)); \
  ARM_AND_REG_IMM(0, reg_flags, reg_flags, 0xF0, arm_imm_lsl_to_rot(24)); \
  ARM_ORR_REG_REG(0, _rd, _rd, reg_flags); \
  complete_store_reg(_rd, rd) \

/* MRS rd, SPSR: mode-dependent, so defer to the helper function. */
#define arm_psr_read_spsr() \
  generate_function_call(execute_read_spsr) \
  generate_store_reg(reg_a0, rd) \

#define arm_psr_read(op_type, psr_reg) \
  arm_psr_read_##psr_reg() \

1239 | |
1240 | // This function's okay because it's called from an ASM function that can |
1241 | // wrap it correctly. |
1242 | |
1243 | u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address) |
1244 | { |
1245 | reg[REG_CPSR] = _cpsr; |
1246 | if(store_mask & 0xFF) |
1247 | { |
1248 | set_cpu_mode(cpu_modes[_cpsr & 0x1F]); |
1249 | if((io_registers[REG_IE] & io_registers[REG_IF]) && |
1250 | io_registers[REG_IME] && ((_cpsr & 0x80) == 0)) |
1251 | { |
1252 | reg_mode[MODE_IRQ][6] = address + 4; |
1253 | spsr[MODE_IRQ] = _cpsr; |
1254 | reg[REG_CPSR] = 0xD2; |
1255 | set_cpu_mode(MODE_IRQ); |
1256 | return 0x00000018; |
1257 | } |
1258 | } |
1259 | |
1260 | return 0; |
1261 | } |
1262 | |
/* MSR source operand: either a register or a rotated immediate, placed in
   reg_a0 for the store helper. */
#define arm_psr_load_new_reg() \
  generate_load_reg(reg_a0, rm) \

#define arm_psr_load_new_imm() \
  generate_load_imm(reg_a0, imm, imm_ror) \

/* MSR CPSR: pass the field write-mask in a1, call the helper, and embed the
   current guest PC after the call (read back by the helper's asm wrapper). */
#define arm_psr_store_cpsr() \
  arm_load_imm_32bit(reg_a1, psr_masks[psr_field]); \
  generate_function_call(execute_store_cpsr); \
  write32(pc) \

#define arm_psr_store_spsr() \
  generate_function_call(execute_store_spsr) \

#define arm_psr_store(op_type, psr_reg) \
  arm_psr_load_new_##op_type(); \
  arm_psr_store_##psr_reg() \


/* Top-level MRS/MSR dispatch: op_type is reg/imm, transfer_type read/store,
   psr_reg cpsr/spsr. */
#define arm_psr(op_type, transfer_type, psr_reg) \
{ \
  arm_decode_psr_##op_type(); \
  arm_psr_##transfer_type(op_type, psr_reg); \
} \

1287 | |
1288 | // TODO: loads will need the PC passed as well for open address, however can |
1289 | // eventually be rectified with a hash table on the memory accesses |
1290 | // (same with the stores) |
1291 | |
/* Single data transfer tail: call the typed memory handler with the address
   already in a0; the PC literal written after the call site tells the
   handler where to resume.  Loads write the result through the PC-aware
   store (rd may be r15); stores fetch rd into a1 first. */
#define arm_access_memory_load(mem_type) \
  cycle_count += 2; \
  generate_function_call(execute_load_##mem_type); \
  write32((pc + 8)); \
  generate_store_reg_pc_no_flags(reg_rv, rd) \

#define arm_access_memory_store(mem_type) \
  cycle_count++; \
  generate_load_reg_pc(reg_a1, rd, 12); \
  generate_function_call(execute_store_##mem_type); \
  write32((pc + 4)) \

1303 | |
1304 | // Calculate the address into a0 from _rn, _rm |
1305 | |
/* Compute the effective address into ireg from base _rn and offset _rm
   (optionally shifted), in the up (add) or down (subtract) direction.
   The shift type/amount are re-extracted from the opcode in place. */
#define arm_access_memory_adjust_reg_sh_up(ireg) \
  ARM_ADD_REG_IMMSHIFT(0, ireg, _rn, _rm, ((opcode >> 5) & 0x03), \
   ((opcode >> 7) & 0x1F)) \

#define arm_access_memory_adjust_reg_sh_down(ireg) \
  ARM_SUB_REG_IMMSHIFT(0, ireg, _rn, _rm, ((opcode >> 5) & 0x03), \
   ((opcode >> 7) & 0x1F)) \

#define arm_access_memory_adjust_reg_up(ireg) \
  ARM_ADD_REG_REG(0, ireg, _rn, _rm) \

#define arm_access_memory_adjust_reg_down(ireg) \
  ARM_SUB_REG_REG(0, ireg, _rn, _rm) \

/* Immediate offset: split the 32-bit constant into at most two ARM rotated
   immediates and emit one or two ADD/SUB accordingly. */
#define arm_access_memory_adjust_imm(op, ireg) \
{ \
  u32 stores[4]; \
  u32 rotations[4]; \
  u32 store_count = arm_disect_imm_32bit(offset, stores, rotations); \
  \
  if(store_count > 1) \
  { \
    ARM_##op##_REG_IMM(0, ireg, _rn, stores[0], rotations[0]); \
    ARM_##op##_REG_IMM(0, ireg, ireg, stores[1], rotations[1]); \
  } \
  else \
  { \
    ARM_##op##_REG_IMM(0, ireg, _rn, stores[0], rotations[0]); \
  } \
} \

#define arm_access_memory_adjust_imm_up(ireg) \
  arm_access_memory_adjust_imm(ADD, ireg) \

#define arm_access_memory_adjust_imm_down(ireg) \
  arm_access_memory_adjust_imm(SUB, ireg) \

1342 | |
1343 | |
/* Indexing modes: pre-indexed (address = base +/- offset, no writeback),
   pre-indexed with writeback, and post-indexed (access at base, then update
   the base register with the adjusted value). */
#define arm_access_memory_pre(type, direction) \
  arm_access_memory_adjust_##type##_##direction(reg_a0) \

#define arm_access_memory_pre_wb(type, direction) \
  arm_access_memory_adjust_##type##_##direction(reg_a0); \
  generate_store_reg(reg_a0, rn) \

#define arm_access_memory_post(type, direction) \
  u32 _rn_dest = prepare_store_reg(reg_a1, rn); \
  if(_rn != reg_a0) \
  { \
    generate_load_reg(reg_a0, rn); \
  } \
  arm_access_memory_adjust_##type##_##direction(_rn_dest); \
  complete_store_reg(_rn_dest, rn) \

1359 | |
1360 | |
/* Decode + address generation for the four single-transfer encodings:
   word/byte with register or immediate offset, and halfword/signed with
   register or immediate offset.  Each loads the base (PC-aware, offset 8)
   and applies the chosen indexing mode. */
#define arm_data_trans_reg(adjust_op, direction) \
  arm_decode_data_trans_reg(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  u32 _rm = prepare_load_reg(reg_a1, rm); \
  arm_access_memory_##adjust_op(reg_sh, direction) \

#define arm_data_trans_imm(adjust_op, direction) \
  arm_decode_data_trans_imm(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  arm_access_memory_##adjust_op(imm, direction) \


#define arm_data_trans_half_reg(adjust_op, direction) \
  arm_decode_half_trans_r(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  u32 _rm = prepare_load_reg(reg_a1, rm); \
  arm_access_memory_##adjust_op(reg, direction) \

#define arm_data_trans_half_imm(adjust_op, direction) \
  arm_decode_half_trans_of(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  arm_access_memory_##adjust_op(imm, direction) \


/* Entry point for a single LDR/STR-class instruction: generate the address,
   then the typed load or store. */
#define arm_access_memory(access_type, direction, adjust_op, mem_type, \
 offset_type) \
{ \
  arm_data_trans_##offset_type(adjust_op, direction); \
  arm_access_memory_##access_type(mem_type); \
} \

1391 | |
1392 | |
/* Popcount of a 16-bit register list via the 256-entry bit_count table. */
#define word_bit_count(word) \
  (bit_count[word >> 8] + bit_count[word & 0xFF]) \

/* Debug aid: optionally report when the S bit is set on a block transfer. */
#define sprint_no(access_type, pre_op, post_op, wb) \

#define sprint_yes(access_type, pre_op, post_op, wb) \
  printf("sbit on %s %s %s %s\n", #access_type, #pre_op, #post_op, #wb) \

1400 | |
1401 | |
1402 | // TODO: Make these use cached registers. Implement iwram_stack_optimize. |
1403 | |
/* LDM/STM per-register transfer: address is already in a0; 'i' is the
   current register number in the emitting loop.  The 'final' store uses the
   unsafe (PC-embedding) store since it may be interrupted mid-block. */
#define arm_block_memory_load() \
  generate_function_call(execute_load_u32); \
  write32((pc + 8)); \
  generate_store_reg(reg_rv, i) \

#define arm_block_memory_store() \
  generate_load_reg_pc(reg_a1, i, 8); \
  generate_function_call(execute_store_u32_safe) \

#define arm_block_memory_final_load() \
  arm_block_memory_load() \

#define arm_block_memory_final_store() \
  generate_load_reg_pc(reg_a1, i, 12); \
  generate_function_call(execute_store_u32); \
  write32((pc + 4)) \

/* After an LDM that included r15, branch indirectly to the loaded PC. */
#define arm_block_memory_adjust_pc_store() \

#define arm_block_memory_adjust_pc_load() \
  if(reg_list & 0x8000) \
  { \
    generate_mov(reg_a0, reg_rv); \
    generate_indirect_branch_arm(); \
  } \

/* Pre-adjust the running base (reg_s0) so transfers always walk upward:
   down_a = DA (base - 4n + 4), down_b = DB (base - 4n), up = IB (+4). */
#define arm_block_memory_offset_down_a() \
  generate_sub_imm(reg_s0, ((word_bit_count(reg_list) * 4) - 4), 0) \

#define arm_block_memory_offset_down_b() \
  generate_sub_imm(reg_s0, (word_bit_count(reg_list) * 4), 0) \

#define arm_block_memory_offset_no() \

#define arm_block_memory_offset_up() \
  generate_add_imm(reg_s0, 4, 0) \

/* Base writeback: recompute rn from its original value (not reg_s0, which
   was pre-adjusted above) by +/- 4 * number of registers transferred. */
#define arm_block_memory_writeback_down() \
  generate_load_reg(reg_a0, rn); \
  generate_sub_imm(reg_a0, (word_bit_count(reg_list) * 4), 0); \
  generate_store_reg(reg_a0, rn) \

#define arm_block_memory_writeback_up() \
  generate_load_reg(reg_a0, rn); \
  generate_add_imm(reg_a0, (word_bit_count(reg_list) * 4), 0); \
  generate_store_reg(reg_a0, rn) \

#define arm_block_memory_writeback_no()

// Only emit writeback if the register is not in the list

#define arm_block_memory_writeback_load(writeback_type) \
  if(!((reg_list >> rn) & 0x01)) \
  { \
    arm_block_memory_writeback_##writeback_type(); \
  } \

#define arm_block_memory_writeback_store(writeback_type) \
  arm_block_memory_writeback_##writeback_type() \

/* Emit a full LDM/STM: load the base, pre-adjust it for the addressing
   mode, do writeback, force word alignment (BIC low 2 bits), then emit one
   transfer per set bit walking the list from r0 upward.  The last set bit
   (no higher bits remain in reg_list) uses the 'final' variant; loads of
   r15 finish with an indirect branch. */
#define arm_block_memory(access_type, offset_type, writeback_type, s_bit) \
{ \
  arm_decode_block_trans(); \
  u32 offset = 0; \
  u32 i; \
  \
  generate_load_reg(reg_s0, rn); \
  arm_block_memory_offset_##offset_type(); \
  arm_block_memory_writeback_##access_type(writeback_type); \
  ARM_BIC_REG_IMM(0, reg_s0, reg_s0, 0x03, 0); \
  \
  for(i = 0; i < 16; i++) \
  { \
    if((reg_list >> i) & 0x01) \
    { \
      cycle_count++; \
      generate_add_reg_reg_imm(reg_a0, reg_s0, offset, 0); \
      if(reg_list & ~((2 << i) - 1)) \
      { \
        arm_block_memory_##access_type(); \
        offset += 4; \
      } \
      else \
      { \
        arm_block_memory_final_##access_type(); \
        break; \
      } \
    } \
  } \
  \
  arm_block_memory_adjust_pc_##access_type(); \
} \

1496 | |
/* SWP/SWPB: load old value from [rn] into a scratch (reg_s0), store rm to
   [rn], then write the old value to rd.  reg_s0 is used because the store
   call clobbers the argument registers. */
#define arm_swap(type) \
{ \
  arm_decode_swap(); \
  cycle_count += 3; \
  generate_load_reg(reg_a0, rn); \
  generate_function_call(execute_load_##type); \
  write32((pc + 8)); \
  generate_mov(reg_s0, reg_rv); \
  generate_load_reg(reg_a0, rn); \
  generate_load_reg(reg_a1, rm); \
  generate_function_call(execute_store_##type); \
  write32((pc + 4)); \
  generate_store_reg(reg_s0, rd); \
} \

1511 | |
1512 | |
/* Thumb op body: __rd/__rn are declared by the enclosing thumb_data_proc*
   macro; the reg form loads the third operand into reg_rm, the imm form
   supplies a zero rotation so the ARM generate_op_*_imm emitters work. */
#define thumb_generate_op_reg(name, _rd, _rs, _rn) \
  u32 __rm = prepare_load_reg(reg_rm, _rn); \
  generate_op_##name##_reg_immshift(__rd, __rn, __rm, ARMSHIFT_LSL, 0) \

#define thumb_generate_op_imm(name, _rd, _rs, imm_) \
{ \
  u32 imm_ror = 0; \
  generate_op_##name##_imm(__rd, __rn); \
} \


/* Generic two/three-operand Thumb data-processing op.
   NOTE(review): generate_load_reg(reg_rn, _rs) directly after
   prepare_load_reg(reg_rn, _rs) looks like a redundant second load of the
   same register - confirm whether prepare_load_reg already emits it. */
#define thumb_data_proc(type, name, op_type, _rd, _rs, _rn) \
{ \
  thumb_decode_##type(); \
  u32 __rn = prepare_load_reg(reg_rn, _rs); \
  u32 __rd = prepare_store_reg(reg_rd, _rd); \
  generate_load_reg(reg_rn, _rs); \
  thumb_generate_op_##op_type(name, _rd, _rs, _rn); \
  complete_store_reg(__rd, _rd); \
} \

/* Test form: flags only, no destination (CMP/TST); __rd is never expanded
   by the tflags emitters so it can be omitted. */
#define thumb_data_proc_test(type, name, op_type, _rd, _rs) \
{ \
  thumb_decode_##type(); \
  u32 __rn = prepare_load_reg(reg_rn, _rd); \
  thumb_generate_op_##op_type(name, 0, _rd, _rs); \
} \

/* Unary form: writes _rd from _rs, no separate rn read (MOV/MVN/NEG). */
#define thumb_data_proc_unary(type, name, op_type, _rd, _rs) \
{ \
  thumb_decode_##type(); \
  u32 __rd = prepare_store_reg(reg_rd, _rd); \
  thumb_generate_op_##op_type(name, _rd, 0, _rs); \
  complete_store_reg(__rd, _rd); \
} \

1548 | |
1549 | |
/* Finish a hi-register op: a write to r15 becomes an indirect Thumb branch
   (with cycle accounting); anything else is a normal store. */
#define complete_store_reg_pc_thumb() \
  if(rd == 15) \
  { \
    generate_indirect_branch_cycle_update(thumb); \
  } \
  else \
  { \
    complete_store_reg(_rd, rd); \
  } \

/* Hi-register ADD/MOV-class op: either operand may be r15, so both loads
   are PC-aware with Thumb pipeline offset 4. */
#define thumb_data_proc_hi(name) \
{ \
  thumb_decode_hireg_op(); \
  u32 _rd = prepare_load_reg_pc(reg_rd, rd, 4); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  generate_op_##name##_reg_immshift(_rd, _rd, _rs, ARMSHIFT_LSL, 0); \
  complete_store_reg_pc_thumb(); \
} \

/* Hi-register CMP: flags only, nothing written back. */
#define thumb_data_proc_test_hi(name) \
{ \
  thumb_decode_hireg_op(); \
  u32 _rd = prepare_load_reg_pc(reg_rd, rd, 4); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  generate_op_##name##_reg_immshift(0, _rd, _rs, ARMSHIFT_LSL, 0); \
} \

/* Hi-register MOV: plain register copy, then the PC-aware completion. */
#define thumb_data_proc_mov_hi() \
{ \
  thumb_decode_hireg_op(); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  u32 _rd = prepare_store_reg(reg_rd, rd); \
  ARM_MOV_REG_REG(0, _rd, _rs); \
  complete_store_reg_pc_thumb(); \
} \

1585 | |
1586 | |
1587 | |
/* LDR rd, [pc, #imm]: the address is a word-aligned PC (bit 1 cleared)
   plus 4 for the pipeline plus the scaled immediate - computable at
   translation time, so a constant load suffices. */
#define thumb_load_pc(_rd) \
{ \
  thumb_decode_imm(); \
  u32 __rd = prepare_store_reg(reg_rd, _rd); \
  generate_load_pc(__rd, (((pc & ~2) + 4) + (imm * 4))); \
  complete_store_reg(__rd, _rd); \
} \

/* ADD rd, sp, #imm*4 (immediate rotated into a *4 scale). */
#define thumb_load_sp(_rd) \
{ \
  thumb_decode_imm(); \
  u32 __sp = prepare_load_reg(reg_a0, REG_SP); \
  u32 __rd = prepare_store_reg(reg_a0, _rd); \
  ARM_ADD_REG_IMM(0, __rd, __sp, imm, arm_imm_lsl_to_rot(2)); \
  complete_store_reg(__rd, _rd); \
} \

/* ADD/SUB sp, #imm*4. */
#define thumb_adjust_sp_up() \
  ARM_ADD_REG_IMM(0, _sp, _sp, imm, arm_imm_lsl_to_rot(2)) \

#define thumb_adjust_sp_down() \
  ARM_SUB_REG_IMM(0, _sp, _sp, imm, arm_imm_lsl_to_rot(2)) \

#define thumb_adjust_sp(direction) \
{ \
  thumb_decode_add_sp(); \
  u32 _sp = prepare_load_reg(reg_a0, REG_SP); \
  thumb_adjust_sp_##direction(); \
  complete_store_reg(_sp, REG_SP); \
} \

1618 | |
/* Thumb shifts are flag-setting MOVs with a shifted operand; by register
   amount (LSL/LSR/ASR/ROR rd, rs)... */
#define generate_op_lsl_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_LSL, _rs) \

#define generate_op_lsr_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_LSR, _rs) \

#define generate_op_asr_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_ASR, _rs) \

#define generate_op_ror_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_ROR, _rs) \


/* ...or by immediate amount (LSL/LSR/ASR rd, rs, #imm). */
#define generate_op_lsl_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_LSL, imm) \

#define generate_op_lsr_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_LSR, imm) \

#define generate_op_asr_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_ASR, imm) \

#define generate_op_ror_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_ROR, imm) \


/* Operand setup for the two shift forms; __rd comes from thumb_shift.
   Note the register form shifts rd by rs, the immediate form shifts rs. */
#define generate_shift_reg(op_type) \
  u32 __rm = prepare_load_reg(reg_rd, rd); \
  u32 __rs = prepare_load_reg(reg_rs, rs); \
  generate_op_##op_type##_reg(__rd, __rm, __rs) \

#define generate_shift_imm(op_type) \
  u32 __rs = prepare_load_reg(reg_rs, rs); \
  generate_op_##op_type##_imm(__rd, __rs) \


/* Emit one Thumb shift instruction. */
#define thumb_shift(decode_type, op_type, value_type) \
{ \
  thumb_decode_##decode_type(); \
  u32 __rd = prepare_store_reg(reg_rd, rd); \
  generate_shift_##value_type(op_type); \
  complete_store_reg(__rd, rd); \
} \

1662 | |
1663 | // Operation types: imm, mem_reg, mem_imm |
1664 | |
/* Thumb single transfer tail: same pattern as the ARM version but with the
   Thumb return-PC offsets (+4 for loads, +2 for stores). */
#define thumb_access_memory_load(mem_type, _rd) \
  cycle_count += 2; \
  generate_function_call(execute_load_##mem_type); \
  write32((pc + 4)); \
  generate_store_reg(reg_rv, _rd) \

#define thumb_access_memory_store(mem_type, _rd) \
  cycle_count++; \
  generate_load_reg(reg_a1, _rd); \
  generate_function_call(execute_store_##mem_type); \
  write32((pc + 2)) \

/* Address generation variants: PC-relative literal (constant at translate
   time), base + byte immediate, base + word-scaled immediate (SP-relative
   forms), and base + offset register. */
#define thumb_access_memory_generate_address_pc_relative(offset, _rb, _ro) \
  generate_load_pc(reg_a0, (offset)) \

#define thumb_access_memory_generate_address_reg_imm(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  ARM_ADD_REG_IMM(0, reg_a0, __rb, offset, 0) \

#define thumb_access_memory_generate_address_reg_imm_sp(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  ARM_ADD_REG_IMM(0, reg_a0, __rb, offset, arm_imm_lsl_to_rot(2)) \

#define thumb_access_memory_generate_address_reg_reg(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  u32 __ro = prepare_load_reg(reg_a1, _ro); \
  ARM_ADD_REG_REG(0, reg_a0, __rb, __ro) \

/* Entry point for one Thumb load/store. */
#define thumb_access_memory(access_type, op_type, _rd, _rb, _ro, \
 address_type, offset, mem_type) \
{ \
  thumb_decode_##op_type(); \
  thumb_access_memory_generate_address_##address_type(offset, _rb, _ro); \
  thumb_access_memory_##access_type(mem_type, _rd); \
} \

1700 | |
1701 | // TODO: Make these use cached registers. Implement iwram_stack_optimize. |
1702 | |
/* Pre-adjust the running base (reg_s0) before the transfer loop: PUSH and
   descending forms move it down by the list size first (push_lr reserves
   one extra slot for LR). */
#define thumb_block_address_preadjust_up() \
  generate_add_imm(reg_s0, (bit_count[reg_list] * 4), 0) \

#define thumb_block_address_preadjust_down() \
  generate_sub_imm(reg_s0, (bit_count[reg_list] * 4), 0) \

#define thumb_block_address_preadjust_push_lr() \
  generate_sub_imm(reg_s0, ((bit_count[reg_list] + 1) * 4), 0) \

#define thumb_block_address_preadjust_no() \

/* Post-adjust: write the final base back to base_reg.  'no' / 'push_lr'
   store the pre-adjusted base directly; the others recompute from the
   pre-loop value.  pop_pc accounts for the extra PC slot. */
#define thumb_block_address_postadjust_no(base_reg) \
  generate_store_reg(reg_s0, base_reg) \

#define thumb_block_address_postadjust_up(base_reg) \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_down(base_reg) \
  generate_mov(reg_a0, reg_s0); \
  generate_sub_imm(reg_a0, (bit_count[reg_list] * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_pop_pc(base_reg) \
  generate_add_reg_reg_imm(reg_a0, reg_s0, \
   ((bit_count[reg_list] + 1) * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_push_lr(base_reg) \
  generate_store_reg(reg_s0, base_reg) \

1733 | |
/* Post-transfer fixups emitted after the per-register loop of
 * thumb_block_memory. Plain writeback forms need nothing extra. */

#define thumb_block_memory_extra_no()                                         \

#define thumb_block_memory_extra_up()                                         \

#define thumb_block_memory_extra_down()                                       \

/* POP {...,pc}: load the new PC from the word just past the register block
 * (reg_s0 + 4 * n) and take an indirect Thumb branch to it. The write32
 * embeds (pc + 4) as inline data immediately after the call instruction —
 * NOTE(review): presumably consumed by the memory handler as the return /
 * current-PC value; confirm against execute_load_u32's implementation. */
#define thumb_block_memory_extra_pop_pc()                                     \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0);     \
  generate_function_call(execute_load_u32);                                   \
  write32((pc + 4));                                                          \
  generate_mov(reg_a0, reg_rv);                                               \
  generate_indirect_branch_cycle_update(thumb)                                \

/* PUSH {...,lr}: store LR at the top of the pushed block
 * (reg_s0 + 4 * n). The _safe store variant takes no inline PC literal,
 * unlike execute_store_u32 (see thumb_block_memory_final_store). */
#define thumb_block_memory_extra_push_lr(base_reg)                            \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0);     \
  generate_load_reg(reg_a1, REG_LR);                                          \
  generate_function_call(execute_store_u32_safe)                              \
1751 | |
/* Per-register transfer bodies used inside the thumb_block_memory loop.
 * On entry the word address is already in reg_a0 and the GBA register
 * index is the loop variable i. */

/* Load one word: call the memory handler, with (pc + 4) embedded as inline
 * data after the call, then store the result into GBA register i. */
#define thumb_block_memory_load()                                             \
  generate_function_call(execute_load_u32);                                   \
  write32((pc + 4));                                                          \
  generate_store_reg(reg_rv, i)                                               \

/* Store one word: fetch GBA register i into reg_a1 and call the _safe
 * store handler (no inline PC literal required). */
#define thumb_block_memory_store()                                            \
  generate_load_reg(reg_a1, i);                                               \
  generate_function_call(execute_store_u32_safe)                              \
1760 | |
/* Variants used for the LAST register transferred in a block operation. */

/* The last load is identical to a normal one. */
#define thumb_block_memory_final_load()                                       \
  thumb_block_memory_load()                                                   \

/* The last store uses the plain (non-_safe) handler with the return
 * address (pc + 2) embedded after the call. NOTE(review): presumably the
 * final store is the point where self-modifying-code handling can fire and
 * needs the PC — confirm against execute_store_u32. */
#define thumb_block_memory_final_store()                                      \
  generate_load_reg(reg_a1, i);                                               \
  generate_function_call(execute_store_u32);                                  \
  write32((pc + 2))                                                           \

/* Plain writeback forms dispatch to the final variants above... */
#define thumb_block_memory_final_no(access_type)                              \
  thumb_block_memory_final_##access_type()                                    \

#define thumb_block_memory_final_up(access_type)                              \
  thumb_block_memory_final_##access_type()                                    \

#define thumb_block_memory_final_down(access_type)                            \
  thumb_block_memory_final_##access_type()                                    \

/* ...while push_lr/pop_pc use the NORMAL variant for the last listed
 * register, because the true final access (the LR store / PC load) is
 * emitted afterwards by the matching thumb_block_memory_extra_* macro. */
#define thumb_block_memory_final_push_lr(access_type)                         \
  thumb_block_memory_##access_type()                                          \

#define thumb_block_memory_final_pop_pc(access_type)                          \
  thumb_block_memory_##access_type()                                          \
1783 | |
/* Emit code for a Thumb block memory transfer (LDMIA/STMIA/PUSH/POP).
 * access_type:     load or store
 * pre_op/post_op:  base pre-adjust / writeback strategy
 *                  (no, up, down, push_lr, pop_pc)
 * base_reg:        GBA register number holding the base address
 * The base is loaded into reg_s0, word-aligned, pre-adjusted to the start
 * of the block, and the writeback value is stored to base_reg BEFORE the
 * individual register transfers are emitted. */
#define thumb_block_memory(access_type, pre_op, post_op, base_reg)            \
{                                                                             \
  thumb_decode_rlist();                                                       \
  u32 i;                                                                      \
  u32 offset = 0;                                                             \
                                                                              \
  generate_load_reg(reg_s0, base_reg);                                        \
  /* Force word alignment of the base address. */                             \
  ARM_BIC_REG_IMM(0, reg_s0, reg_s0, 0x03, 0);                                \
  thumb_block_address_preadjust_##pre_op();                                   \
  thumb_block_address_postadjust_##post_op(base_reg);                         \
                                                                              \
  for(i = 0; i < 8; i++)                                                      \
  {                                                                           \
    if((reg_list >> i) & 0x01)                                                \
    {                                                                         \
      cycle_count++;                                                          \
      generate_add_reg_reg_imm(reg_a0, reg_s0, offset, 0);                    \
      /* Any registers listed above bit i? If so emit a normal transfer, */   \
      /* otherwise emit the "final" variant for this last register.      */   \
      if(reg_list & ~((2 << i) - 1))                                          \
      {                                                                       \
        thumb_block_memory_##access_type();                                   \
        offset += 4;                                                          \
      }                                                                       \
      else                                                                    \
      {                                                                       \
        thumb_block_memory_final_##post_op(access_type);                      \
        break;                                                                \
      }                                                                       \
    }                                                                         \
  }                                                                           \
                                                                              \
  thumb_block_memory_extra_##post_op();                                       \
}                                                                             \
1816 | |
/* Emit a conditional Thumb branch. A branch filler taken on the OPPOSITE
 * condition is emitted first so the not-taken case jumps over the
 * unconditional branch to the target; the filler is then patched to land
 * on the fall-through path (current translation_ptr). Source and target
 * addresses come from the pending entry in block_exits[]. */
#define thumb_conditional_branch(condition)                                   \
{                                                                             \
  generate_cycle_update();                                                    \
  generate_load_flags();                                                      \
  generate_branch_filler(condition_opposite_##condition, backpatch_address);  \
  generate_branch_no_cycle_update(                                            \
   block_exits[block_exit_position].branch_source,                            \
   block_exits[block_exit_position].branch_target, thumb);                    \
  generate_branch_patch_conditional(backpatch_address, translation_ptr);      \
  block_exit_position++;                                                      \
}                                                                             \
1828 | |
1829 | |
/* Open an ARM conditional block: emit a forward branch filler, taken on
 * the opposite condition, that will later be patched to skip the block's
 * body when the condition fails. */
#define arm_conditional_block_header()                                        \
  generate_cycle_update();                                                    \
  generate_load_flags();                                                      \
  /* This will choose the opposite condition */                               \
  condition ^= 0x01;                                                          \
  generate_branch_filler(condition, backpatch_address)                        \
1836 | |
/* ARM B: direct branch to the decoded target. */
#define arm_b()                                                               \
  generate_branch(arm)                                                        \

/* ARM BL: set LR to the return address (pc + 4), then branch. */
#define arm_bl()                                                              \
  generate_update_pc((pc + 4));                                               \
  generate_store_reg(reg_a0, REG_LR);                                         \
  generate_branch(arm)                                                        \

/* ARM BX: indirect branch to rn; the "dual" variant handles the ARM/Thumb
 * state switch selected by bit 0 of the target address. */
#define arm_bx()                                                              \
  arm_decode_branchx();                                                       \
  generate_load_reg(reg_a0, rn);                                              \
  generate_indirect_branch_dual();                                            \

/* ARM SWI: try the HLE handler first (which break;s out of the enclosing
 * translate switch when it applies), otherwise call the BIOS SWI entry
 * with the return address (pc + 4) embedded after the call, then branch. */
#define arm_swi()                                                             \
  generate_swi_hle_handler((opcode >> 16) & 0xFF, arm);                       \
  generate_function_call(execute_swi_arm);                                    \
  write32((pc + 4));                                                          \
  generate_branch(arm)                                                        \
1855 | |
/* Thumb B: direct branch to the decoded target. */
#define thumb_b()                                                             \
  generate_branch(thumb)                                                      \

/* Thumb BL: LR = (pc + 2) | 1, the low bit marking a Thumb-mode return
 * address, then branch. */
#define thumb_bl()                                                            \
  generate_update_pc(((pc + 2) | 0x01));                                      \
  generate_store_reg(reg_a0, REG_LR);                                         \
  generate_branch(thumb)                                                      \

/* Thumb BL, second (H=3) half: branch to old LR + offset * 2 while
 * simultaneously setting LR to the return address ((pc + 2) | 1). The old
 * LR is saved in reg_a1 before being overwritten. */
#define thumb_blh()                                                           \
{                                                                             \
  thumb_decode_branch();                                                      \
  generate_update_pc(((pc + 2) | 0x01));                                      \
  generate_load_reg(reg_a1, REG_LR);                                          \
  generate_store_reg(reg_a0, REG_LR);                                         \
  generate_mov(reg_a0, reg_a1);                                               \
  generate_add_imm(reg_a0, (offset * 2), 0);                                  \
  generate_indirect_branch_cycle_update(thumb);                               \
}                                                                             \

/* Thumb BX: indirect branch to rs (a PC operand reads as pc + 4 here);
 * dual_thumb handles the ARM/Thumb switch on bit 0 of the target. */
#define thumb_bx()                                                            \
{                                                                             \
  thumb_decode_hireg_op();                                                    \
  generate_load_reg_pc(reg_a0, rs, 4);                                        \
  generate_indirect_branch_cycle_update(dual_thumb);                          \
}                                                                             \

/* Thumb SWI: as arm_swi, but the SWI number is in the low opcode byte and
 * the return address is pc + 2. The BIOS handler executes in ARM mode. */
#define thumb_swi()                                                           \
  generate_swi_hle_handler(opcode & 0xFF, thumb);                             \
  generate_function_call(execute_swi_thumb);                                  \
  write32((pc + 2));                                                          \
  /* We're in ARM mode now */                                                 \
  generate_branch(arm)                                                        \
1888 | |
1889 | u8 swi_hle_handle[256] = |
1890 | { |
1891 | 0x0, // SWI 0: SoftReset |
1892 | 0x0, // SWI 1: RegisterRAMReset |
1893 | 0x0, // SWI 2: Halt |
1894 | 0x0, // SWI 3: Stop/Sleep |
1895 | 0x0, // SWI 4: IntrWait |
1896 | 0x0, // SWI 5: VBlankIntrWait |
1897 | 0x1, // SWI 6: Div |
1898 | 0x0, // SWI 7: DivArm |
1899 | 0x0, // SWI 8: Sqrt |
1900 | 0x0, // SWI 9: ArcTan |
1901 | 0x0, // SWI A: ArcTan2 |
1902 | 0x0, // SWI B: CpuSet |
1903 | 0x0, // SWI C: CpuFastSet |
1904 | 0x0, // SWI D: GetBIOSCheckSum |
1905 | 0x0, // SWI E: BgAffineSet |
1906 | 0x0, // SWI F: ObjAffineSet |
1907 | 0x0, // SWI 10: BitUnpack |
1908 | 0x0, // SWI 11: LZ77UnCompWram |
1909 | 0x0, // SWI 12: LZ77UnCompVram |
1910 | 0x0, // SWI 13: HuffUnComp |
1911 | 0x0, // SWI 14: RLUnCompWram |
1912 | 0x0, // SWI 15: RLUnCompVram |
1913 | 0x0, // SWI 16: Diff8bitUnFilterWram |
1914 | 0x0, // SWI 17: Diff8bitUnFilterVram |
1915 | 0x0, // SWI 18: Diff16bitUnFilter |
1916 | 0x0, // SWI 19: SoundBias |
1917 | 0x0, // SWI 1A: SoundDriverInit |
1918 | 0x0, // SWI 1B: SoundDriverMode |
1919 | 0x0, // SWI 1C: SoundDriverMain |
1920 | 0x0, // SWI 1D: SoundDriverVSync |
1921 | 0x0, // SWI 1E: SoundChannelClear |
1922 | 0x0, // SWI 1F: MidiKey2Freq |
1923 | 0x0, // SWI 20: SoundWhatever0 |
1924 | 0x0, // SWI 21: SoundWhatever1 |
1925 | 0x0, // SWI 22: SoundWhatever2 |
1926 | 0x0, // SWI 23: SoundWhatever3 |
1927 | 0x0, // SWI 24: SoundWhatever4 |
1928 | 0x0, // SWI 25: MultiBoot |
1929 | 0x0, // SWI 26: HardReset |
1930 | 0x0, // SWI 27: CustomHalt |
1931 | 0x0, // SWI 28: SoundDriverVSyncOff |
1932 | 0x0, // SWI 29: SoundDriverVSyncOn |
1933 | 0x0 // SWI 2A: SoundGetJumpList |
1934 | }; |
1935 | |
1936 | void execute_swi_hle_div_arm(); |
1937 | void execute_swi_hle_div_thumb(); |
1938 | |
1939 | void execute_swi_hle_div_c() |
1940 | { |
1941 | s32 result = (s32)reg[0] / (s32)reg[1]; |
1942 | reg[1] = (s32)reg[0] % (s32)reg[1]; |
1943 | reg[0] = result; |
1944 | |
1945 | reg[3] = (result ^ (result >> 31)) - (result >> 31); |
1946 | } |
1947 | |
/* Emit an HLE call for the given SWI number if a native handler exists
 * (see swi_hle_handle[]). NOTE: the naked `break;` means this macro must
 * expand inside the translator's opcode switch/loop — when the HLE path is
 * taken it aborts the rest of the normal SWI emission sequence. */
#define generate_swi_hle_handler(_swi_number, mode)                           \
{                                                                             \
  u32 swi_number = _swi_number;                                               \
  if(swi_hle_handle[swi_number])                                              \
  {                                                                           \
    /* Div */                                                                 \
    if(swi_number == 0x06)                                                    \
    {                                                                         \
      generate_function_call(execute_swi_hle_div_##mode);                     \
    }                                                                         \
    break;                                                                    \
  }                                                                           \
}                                                                             \
1961 | |
/* Emit a translation gate: write the current PC back and take an indirect
 * branch (without a cycle update) so execution re-enters through the
 * dispatcher for the given mode (arm/thumb). */
#define generate_translation_gate(type)                                       \
  generate_update_pc(pc);                                                     \
  generate_indirect_branch_no_cycle_update(type)                              \

/* Emit a call to the per-instruction debug hook, with the current PC
 * embedded as inline data after the call. */
#define generate_step_debug()                                                 \
  generate_function_call(step_debug_arm);                                     \
  write32(pc)                                                                 \
1969 | |
1970 | #endif |
1971 | |