/* gameplaySP
 *
 * Copyright (C) 2006 Exophase <exophase@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef ARM_EMIT_H
#define ARM_EMIT_H

#include "arm_codegen.h"

u32 arm_update_gba_arm(u32 pc);
u32 arm_update_gba_thumb(u32 pc);
u32 arm_update_gba_idle_arm(u32 pc);
u32 arm_update_gba_idle_thumb(u32 pc);

// Although these are declared as functions, don't call them as such
// (jump to them instead).
void arm_indirect_branch_arm(u32 address);
void arm_indirect_branch_thumb(u32 address);
void arm_indirect_branch_dual_arm(u32 address);
void arm_indirect_branch_dual_thumb(u32 address);

void execute_store_cpsr(u32 new_cpsr, u32 store_mask, u32 address);
u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address);
void execute_store_spsr(u32 new_cpsr, u32 store_mask);
u32 execute_read_spsr();
u32 execute_spsr_restore(u32 address);

void execute_swi_arm(u32 pc);
void execute_swi_thumb(u32 pc);

void function_cc execute_store_u32_safe(u32 address, u32 source);

void step_debug_arm(u32 pc);


#define write32(value) \
  *((u32 *)translation_ptr) = value; \
  translation_ptr += 4 \

#define arm_relative_offset(source, offset) \
  (((((u32)offset - (u32)source) - 8) >> 2) & 0xFFFFFF) \

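// Example (hypothetical addresses): for a branch emitted at 0x1000 that
// targets 0x2000, arm_relative_offset yields ((0x2000 - 0x1000) - 8) >> 2
// = 0x3FE words; the -8 accounts for the ARM pipeline reading PC 8 bytes
// ahead of the branch instruction.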

// reg_base_offset is the number of bytes after reg_base where the
// registers actually begin.

#define reg_base_offset 1024


#define reg_a0 ARMREG_R0
#define reg_a1 ARMREG_R1
#define reg_a2 ARMREG_R2

#define reg_s0 ARMREG_R9
#define reg_base ARMREG_SP
#define reg_flags ARMREG_R11

#define reg_cycles ARMREG_R12

#define reg_rv ARMREG_R0

#define reg_rm ARMREG_R0
#define reg_rn ARMREG_R1
#define reg_rs ARMREG_R14
#define reg_rd ARMREG_R0


// Register allocation layout for ARM and Thumb:
// Map from a GBA register to a host ARM register. -1 means load it
// from memory into one of the temp registers.

// The following registers are chosen based on statistical analysis
// of a few games (see below), but might not be the best ones. Results
// vary tremendously between ARM and Thumb (for obvious reasons), so
// two sets are used. Take care not to call any function which can
// overwrite any of these registers from the dynarec - only call
// trusted functions in arm_stub.S which know how to save/restore
// them and know how to transfer them to the C functions they call
// if necessary.

// The following define the actual registers available for allocation.
// As registers are freed up, add them to this list.

// Note that r15 is linked to the a0 temp reg - this register will
// be preloaded with a constant upon read, and used to link to
// indirect branch functions upon write.

#define reg_x0 ARMREG_R3
#define reg_x1 ARMREG_R4
#define reg_x2 ARMREG_R5
#define reg_x3 ARMREG_R6
#define reg_x4 ARMREG_R7
#define reg_x5 ARMREG_R8

#define mem_reg -1

/*

ARM register usage (38.775138% ARM instructions):
 r00: 18.263814% (-- 18.263814%)
 r12: 11.531477% (-- 29.795291%)
 r09: 11.500162% (-- 41.295453%)
 r14:  9.063440% (-- 50.358893%)
 r06:  7.837682% (-- 58.196574%)
 r01:  7.401049% (-- 65.597623%)
 r07:  6.778340% (-- 72.375963%)
 r05:  5.445009% (-- 77.820973%)
 r02:  5.427288% (-- 83.248260%)
 r03:  5.293743% (-- 88.542003%)
 r04:  3.601103% (-- 92.143106%)
 r11:  3.207311% (-- 95.350417%)
 r10:  2.334864% (-- 97.685281%)
 r08:  1.708207% (-- 99.393488%)
 r15:  0.311270% (-- 99.704757%)
 r13:  0.295243% (-- 100.000000%)

Thumb register usage (61.224862% Thumb instructions):
 r00: 34.788858% (-- 34.788858%)
 r01: 26.564083% (-- 61.352941%)
 r03: 10.983500% (-- 72.336441%)
 r02:  8.303127% (-- 80.639567%)
 r04:  4.900381% (-- 85.539948%)
 r05:  3.941292% (-- 89.481240%)
 r06:  3.257582% (-- 92.738822%)
 r07:  2.644851% (-- 95.383673%)
 r13:  1.408824% (-- 96.792497%)
 r08:  0.906433% (-- 97.698930%)
 r09:  0.679693% (-- 98.378623%)
 r10:  0.656446% (-- 99.035069%)
 r12:  0.453668% (-- 99.488737%)
 r14:  0.248909% (-- 99.737646%)
 r11:  0.171066% (-- 99.908713%)
 r15:  0.091287% (-- 100.000000%)

*/

s32 arm_register_allocation[] =
{
  reg_x0,   // GBA r0
  reg_x1,   // GBA r1
  mem_reg,  // GBA r2
  mem_reg,  // GBA r3
  mem_reg,  // GBA r4
  mem_reg,  // GBA r5
  reg_x2,   // GBA r6
  mem_reg,  // GBA r7
  mem_reg,  // GBA r8
  reg_x3,   // GBA r9
  mem_reg,  // GBA r10
  mem_reg,  // GBA r11
  reg_x4,   // GBA r12
  mem_reg,  // GBA r13
  reg_x5,   // GBA r14
  reg_a0,   // GBA r15

  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
};

s32 thumb_register_allocation[] =
{
  reg_x0,   // GBA r0
  reg_x1,   // GBA r1
  reg_x2,   // GBA r2
  reg_x3,   // GBA r3
  reg_x4,   // GBA r4
  reg_x5,   // GBA r5
  mem_reg,  // GBA r6
  mem_reg,  // GBA r7
  mem_reg,  // GBA r8
  mem_reg,  // GBA r9
  mem_reg,  // GBA r10
  mem_reg,  // GBA r11
  mem_reg,  // GBA r12
  mem_reg,  // GBA r13
  mem_reg,  // GBA r14
  reg_a0,   // GBA r15

  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
  mem_reg,
};



#define arm_imm_lsl_to_rot(value) \
  (32 - value) \

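// For reference: ARM data-processing immediates are an 8-bit value
// rotated right by an even amount, so a left shift by n is encoded as a
// rotate right by (32 - n). E.g. arm_imm_lsl_to_rot(8) = 24, and
// 0xFF ROR 24 == 0xFF << 8.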

u32 arm_disect_imm_32bit(u32 imm, u32 *stores, u32 *rotations)
{
  u32 store_count = 0;
  u32 left_shift = 0;
  u32 i;

  // Zero is a special case: the scan below would never find a non-zero
  // chunk and would report zero things to store.
  if(imm == 0)
  {
    rotations[0] = 0;
    stores[0] = 0;
    return 1;
  }

  // Find chunks of non-zero data at 2 bit alignments.
  while(1)
  {
    for(; left_shift < 32; left_shift += 2)
    {
      if((imm >> left_shift) & 0x03)
        break;
    }

    if(left_shift == 32)
    {
      // We've hit the end of the useful data.
      return store_count;
    }

    // Near the end the chunk might wrap back around to the beginning.
    if(left_shift >= 24)
    {
      // Make a mask for the residual bits. I.e., if we have
      // 5 bits of data at the end we can wrap around to 3
      // bits of data in the beginning. Thus the first
      // chunk, after being shifted left, has to be less
      // than 111b, 0x7, or (1 << 3) - 1.
      u32 top_bits = 32 - left_shift;
      u32 residual_bits = 8 - top_bits;
      u32 residual_mask = (1 << residual_bits) - 1;

      if((store_count > 1) && (left_shift > 24) &&
       ((stores[0] << ((32 - rotations[0]) & 0x1F)) < residual_mask))
      {
        // Then we can throw out the last chunk and tack it on
        // to the first one.
        u32 initial_bits = rotations[0];
        stores[0] =
         (stores[0] << ((top_bits + (32 - rotations[0])) & 0x1F)) |
         ((imm >> left_shift) & 0xFF);
        rotations[0] = top_bits;

        return store_count;
      }
      else
      {
        // There's nothing to wrap over to in the beginning.
        stores[store_count] = (imm >> left_shift) & 0xFF;
        rotations[store_count] = (32 - left_shift) & 0x1F;
        return store_count + 1;
      }
      break;
    }

    stores[store_count] = (imm >> left_shift) & 0xFF;
    rotations[store_count] = (32 - left_shift) & 0x1F;

    store_count++;
    left_shift += 8;
  }
}

#define arm_load_imm_32bit(ireg, imm) \
{ \
  u32 stores[4]; \
  u32 rotations[4]; \
  u32 store_count = arm_disect_imm_32bit(imm, stores, rotations); \
  u32 i; \
 \
  ARM_MOV_REG_IMM(0, ireg, stores[0], rotations[0]); \
 \
  for(i = 1; i < store_count; i++) \
  { \
    ARM_ORR_REG_IMM(0, ireg, ireg, stores[i], rotations[i]); \
  } \
} \

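// Worked example: arm_disect_imm_32bit(0xFF0000FF, ...) finds the chunk
// 0xFF at shift 0 and the chunk 0xFF at shift 24, so arm_load_imm_32bit
// emits MOV ireg, #0xFF followed by ORR ireg, ireg, #0xFF000000
// (0xFF rotated right by 8).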

#define generate_load_pc(ireg, new_pc) \
  arm_load_imm_32bit(ireg, new_pc) \

#define generate_load_imm(ireg, imm, imm_ror) \
  ARM_MOV_REG_IMM(0, ireg, imm, imm_ror) \



#define generate_shift_left(ireg, imm) \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_LSL, imm) \

#define generate_shift_right(ireg, imm) \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_LSR, imm) \

#define generate_shift_right_arithmetic(ireg, imm) \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_ASR, imm) \

#define generate_rotate_right(ireg, imm) \
  ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_ROR, imm) \

#define generate_add(ireg_dest, ireg_src) \
  ARM_ADD_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \

#define generate_sub(ireg_dest, ireg_src) \
  ARM_SUB_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \

#define generate_or(ireg_dest, ireg_src) \
  ARM_ORR_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \

#define generate_xor(ireg_dest, ireg_src) \
  ARM_EOR_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \

#define generate_add_imm(ireg, imm, imm_ror) \
  ARM_ADD_REG_IMM(0, ireg, ireg, imm, imm_ror) \

#define generate_sub_imm(ireg, imm, imm_ror) \
  ARM_SUB_REG_IMM(0, ireg, ireg, imm, imm_ror) \

#define generate_xor_imm(ireg, imm, imm_ror) \
  ARM_EOR_REG_IMM(0, ireg, ireg, imm, imm_ror) \

#define generate_add_reg_reg_imm(ireg_dest, ireg_src, imm, imm_ror) \
  ARM_ADD_REG_IMM(0, ireg_dest, ireg_src, imm, imm_ror) \

#define generate_and_imm(ireg, imm, imm_ror) \
  ARM_AND_REG_IMM(0, ireg, ireg, imm, imm_ror) \

#define generate_mov(ireg_dest, ireg_src) \
  if(ireg_dest != ireg_src) \
  { \
    ARM_MOV_REG_REG(0, ireg_dest, ireg_src); \
  } \

#define generate_function_call(function_location) \
  ARM_BL(0, arm_relative_offset(translation_ptr, function_location)) \

#define generate_exit_block() \
  ARM_BX(0, ARMREG_LR) \

// The branch target is to be filled in later (thus a 0 for now).

#define generate_branch_filler(condition_code, writeback_location) \
  (writeback_location) = translation_ptr; \
  ARM_B_COND(0, condition_code, 0) \

#define generate_update_pc(new_pc) \
  generate_load_pc(reg_a0, new_pc) \

#define generate_cycle_update() \
  if(cycle_count) \
  { \
    if(cycle_count >> 8) \
    { \
      ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count >> 8) & 0xFF, \
       arm_imm_lsl_to_rot(8)); \
    } \
    ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count & 0xFF), 0); \
    cycle_count = 0; \
  } \

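// Example: with cycle_count == 0x134 the macro above emits two adds,
// ADD reg_cycles, reg_cycles, #0x100 (0x01 rotated into the high byte)
// and ADD reg_cycles, reg_cycles, #0x34, since an ARM immediate can only
// hold 8 significant bits at a time.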
#define generate_cycle_update_flag_set() \
  if(cycle_count >> 8) \
  { \
    ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count >> 8) & 0xFF, \
     arm_imm_lsl_to_rot(8)); \
  } \
  generate_save_flags(); \
  ARM_ADDS_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count & 0xFF), 0); \
  cycle_count = 0 \

#define generate_branch_patch_conditional(dest, offset) \
  *((u32 *)(dest)) = (*((u32 *)dest) & 0xFF000000) | \
   arm_relative_offset(dest, offset) \

#define generate_branch_patch_unconditional(dest, offset) \
  *((u32 *)(dest)) = (*((u32 *)dest) & 0xFF000000) | \
   arm_relative_offset(dest, offset) \

// A different function is called for idle updates because of the relative
// location of the embedded PC. The idle version could also be optimized
// to put the CPU into halt mode.

#define generate_branch_idle_eliminate(writeback_location, new_pc, mode) \
  generate_function_call(arm_update_gba_idle_##mode); \
  write32(new_pc); \
  generate_branch_filler(ARMCOND_AL, writeback_location) \

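// In the next macro the MOV/ADD pair acts as a conditional skip: a0
// receives the sign bit of reg_cycles, and adding (a0 << 2) to the ARM
// PC (which reads 8 bytes ahead) lands either on the BL or one
// instruction past it, so the update call only runs for one of the two
// sign states. The callee finds the embedded new_pc word relative to
// its return address.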
#define generate_branch_update(writeback_location, new_pc, mode) \
  ARM_MOV_REG_IMMSHIFT(0, reg_a0, reg_cycles, ARMSHIFT_LSR, 31); \
  ARM_ADD_REG_IMMSHIFT(0, ARMREG_PC, ARMREG_PC, reg_a0, ARMSHIFT_LSL, 2); \
  write32(new_pc); \
  generate_function_call(arm_update_gba_##mode); \
  generate_branch_filler(ARMCOND_AL, writeback_location) \


#define generate_branch_no_cycle_update(writeback_location, new_pc, mode) \
  if(pc == idle_loop_target_pc) \
  { \
    generate_branch_idle_eliminate(writeback_location, new_pc, mode); \
  } \
  else \
  { \
    generate_branch_update(writeback_location, new_pc, mode); \
  } \

#define generate_branch_cycle_update(writeback_location, new_pc, mode) \
  generate_cycle_update(); \
  generate_branch_no_cycle_update(writeback_location, new_pc, mode) \

// a0 holds the destination.

#define generate_indirect_branch_no_cycle_update(type) \
  ARM_B(0, arm_relative_offset(translation_ptr, arm_indirect_branch_##type)) \

#define generate_indirect_branch_cycle_update(type) \
  generate_cycle_update(); \
  generate_indirect_branch_no_cycle_update(type) \

#define generate_block_prologue() \

#define generate_block_extra_vars_arm() \
  void generate_indirect_branch_arm() \
  { \
    if(condition == 0x0E) \
    { \
      generate_cycle_update(); \
    } \
    generate_indirect_branch_no_cycle_update(arm); \
  } \
 \
  void generate_indirect_branch_dual() \
  { \
    if(condition == 0x0E) \
    { \
      generate_cycle_update(); \
    } \
    generate_indirect_branch_no_cycle_update(dual_arm); \
  } \
 \
  u32 prepare_load_reg(u32 scratch_reg, u32 reg_index) \
  { \
    u32 reg_use = arm_register_allocation[reg_index]; \
    if(reg_use == mem_reg) \
    { \
      ARM_LDR_IMM(0, scratch_reg, reg_base, \
       (reg_base_offset + (reg_index * 4))); \
      return scratch_reg; \
    } \
 \
    return reg_use; \
  } \
 \
  u32 prepare_load_reg_pc(u32 scratch_reg, u32 reg_index, u32 pc_offset) \
  { \
    if(reg_index == 15) \
    { \
      generate_load_pc(scratch_reg, pc + pc_offset); \
      return scratch_reg; \
    } \
    return prepare_load_reg(scratch_reg, reg_index); \
  } \
 \
  u32 prepare_store_reg(u32 scratch_reg, u32 reg_index) \
  { \
    u32 reg_use = arm_register_allocation[reg_index]; \
    if(reg_use == mem_reg) \
      return scratch_reg; \
 \
    return reg_use; \
  } \
 \
  void complete_store_reg(u32 scratch_reg, u32 reg_index) \
  { \
    if(arm_register_allocation[reg_index] == mem_reg) \
    { \
      ARM_STR_IMM(0, scratch_reg, reg_base, \
       (reg_base_offset + (reg_index * 4))); \
    } \
  } \
 \
  void complete_store_reg_pc_no_flags(u32 scratch_reg, u32 reg_index) \
  { \
    if(reg_index == 15) \
    { \
      generate_indirect_branch_arm(); \
    } \
    else \
    { \
      complete_store_reg(scratch_reg, reg_index); \
    } \
  } \
 \
  void complete_store_reg_pc_flags(u32 scratch_reg, u32 reg_index) \
  { \
    if(reg_index == 15) \
    { \
      if(condition == 0x0E) \
      { \
        generate_cycle_update(); \
      } \
      generate_function_call(execute_spsr_restore); \
    } \
    else \
    { \
      complete_store_reg(scratch_reg, reg_index); \
    } \
  } \
 \
  void generate_load_reg(u32 ireg, u32 reg_index) \
  { \
    s32 load_src = arm_register_allocation[reg_index]; \
    if(load_src != mem_reg) \
    { \
      ARM_MOV_REG_REG(0, ireg, load_src); \
    } \
    else \
    { \
      ARM_LDR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
    } \
  } \
 \
  void generate_store_reg(u32 ireg, u32 reg_index) \
  { \
    s32 store_dest = arm_register_allocation[reg_index]; \
    if(store_dest != mem_reg) \
    { \
      ARM_MOV_REG_REG(0, store_dest, ireg); \
    } \
    else \
    { \
      ARM_STR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
    } \
  } \


#define generate_block_extra_vars_thumb() \
  u32 prepare_load_reg(u32 scratch_reg, u32 reg_index) \
  { \
    u32 reg_use = thumb_register_allocation[reg_index]; \
    if(reg_use == mem_reg) \
    { \
      ARM_LDR_IMM(0, scratch_reg, reg_base, \
       (reg_base_offset + (reg_index * 4))); \
      return scratch_reg; \
    } \
 \
    return reg_use; \
  } \
 \
  u32 prepare_load_reg_pc(u32 scratch_reg, u32 reg_index, u32 pc_offset) \
  { \
    if(reg_index == 15) \
    { \
      generate_load_pc(scratch_reg, pc + pc_offset); \
      return scratch_reg; \
    } \
    return prepare_load_reg(scratch_reg, reg_index); \
  } \
 \
  u32 prepare_store_reg(u32 scratch_reg, u32 reg_index) \
  { \
    u32 reg_use = thumb_register_allocation[reg_index]; \
    if(reg_use == mem_reg) \
      return scratch_reg; \
 \
    return reg_use; \
  } \
 \
  void complete_store_reg(u32 scratch_reg, u32 reg_index) \
  { \
    if(thumb_register_allocation[reg_index] == mem_reg) \
    { \
      ARM_STR_IMM(0, scratch_reg, reg_base, \
       (reg_base_offset + (reg_index * 4))); \
    } \
  } \
 \
  void generate_load_reg(u32 ireg, u32 reg_index) \
  { \
    s32 load_src = thumb_register_allocation[reg_index]; \
    if(load_src != mem_reg) \
    { \
      ARM_MOV_REG_REG(0, ireg, load_src); \
    } \
    else \
    { \
      ARM_LDR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
    } \
  } \
 \
  void generate_store_reg(u32 ireg, u32 reg_index) \
  { \
    s32 store_dest = thumb_register_allocation[reg_index]; \
    if(store_dest != mem_reg) \
    { \
      ARM_MOV_REG_REG(0, store_dest, ireg); \
    } \
    else \
    { \
      ARM_STR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
    } \
  } \

#define translate_invalidate_dcache() \
{ \
  invalidate_cache_region(rom_translation_cache, \
   rom_translation_cache + ROM_TRANSLATION_CACHE_SIZE); \
  invalidate_cache_region(ram_translation_cache, \
   ram_translation_cache + RAM_TRANSLATION_CACHE_SIZE); \
  invalidate_cache_region(bios_translation_cache, \
   bios_translation_cache + BIOS_TRANSLATION_CACHE_SIZE); \
} \

#define block_prologue_size 0


// It should be okay to still generate result flags; the SPSR restore will
// overwrite them. This is pretty infrequent (returning from interrupt
// handlers, et al.) so it's probably not worth optimizing for.

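// A note on the constants below: 0xD2 is IRQ mode (0x12) with the IRQ
// and FIQ disable bits (0x80 | 0x40) set, 0x00000018 is the ARM IRQ
// vector, and reg_mode[MODE_IRQ][6] holds the banked r14_irq return
// address.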
#define check_for_interrupts() \
  if((io_registers[REG_IE] & io_registers[REG_IF]) && \
   io_registers[REG_IME] && ((reg[REG_CPSR] & 0x80) == 0)) \
  { \
    reg_mode[MODE_IRQ][6] = pc + 4; \
    spsr[MODE_IRQ] = reg[REG_CPSR]; \
    reg[REG_CPSR] = 0xD2; \
    pc = 0x00000018; \
    set_cpu_mode(MODE_IRQ); \
  } \

#define generate_load_reg_pc(ireg, reg_index, pc_offset) \
  if(reg_index == 15) \
  { \
    generate_load_pc(ireg, pc + pc_offset); \
  } \
  else \
  { \
    generate_load_reg(ireg, reg_index); \
  } \

#define generate_store_reg_pc_no_flags(ireg, reg_index) \
  generate_store_reg(ireg, reg_index); \
  if(reg_index == 15) \
  { \
    generate_indirect_branch_arm(); \
  } \


u32 function_cc execute_spsr_restore_body(u32 pc)
{
  set_cpu_mode(cpu_modes[reg[REG_CPSR] & 0x1F]);
  check_for_interrupts();

  return pc;
}


#define generate_store_reg_pc_flags(ireg, reg_index) \
  generate_store_reg(ireg, reg_index); \
  if(reg_index == 15) \
  { \
    if(condition == 0x0E) \
    { \
      generate_cycle_update(); \
    } \
    generate_function_call(execute_spsr_restore); \
  } \


#define generate_load_flags() \
  /* ARM_MSR_REG(0, ARM_PSR_F, reg_flags, ARM_CPSR) */ \

#define generate_store_flags() \
  /* ARM_MRS_CPSR(0, reg_flags) */ \

#define generate_save_flags() \
  ARM_MRS_CPSR(0, reg_flags) \

#define generate_restore_flags() \
  ARM_MSR_REG(0, ARM_PSR_F, reg_flags, ARM_CPSR) \


#define condition_opposite_eq ARMCOND_NE
#define condition_opposite_ne ARMCOND_EQ
#define condition_opposite_cs ARMCOND_CC
#define condition_opposite_cc ARMCOND_CS
#define condition_opposite_mi ARMCOND_PL
#define condition_opposite_pl ARMCOND_MI
#define condition_opposite_vs ARMCOND_VC
#define condition_opposite_vc ARMCOND_VS
#define condition_opposite_hi ARMCOND_LS
#define condition_opposite_ls ARMCOND_HI
#define condition_opposite_ge ARMCOND_LT
#define condition_opposite_lt ARMCOND_GE
#define condition_opposite_gt ARMCOND_LE
#define condition_opposite_le ARMCOND_GT
#define condition_opposite_al ARMCOND_NV
#define condition_opposite_nv ARMCOND_AL

#define generate_branch(mode) \
{ \
  generate_branch_cycle_update( \
   block_exits[block_exit_position].branch_source, \
   block_exits[block_exit_position].branch_target, mode); \
  block_exit_position++; \
} \


#define generate_op_and_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_AND_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_orr_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_ORR_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_eor_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_EOR_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_bic_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_BIC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_sub_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_SUB_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_rsb_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_RSB_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_sbc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_SBC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_rsc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_RSC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_add_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_ADD_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_adc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_ADC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_mov_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_MOV_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \

#define generate_op_mvn_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  ARM_MVN_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \


#define generate_op_and_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_AND_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_orr_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_ORR_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_eor_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_EOR_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_bic_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_BIC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_sub_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_SUB_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_rsb_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_RSB_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_sbc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_SBC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_rsc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_RSC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_add_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_ADD_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_adc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_ADC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_mov_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_MOV_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \

#define generate_op_mvn_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  ARM_MVN_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \


#define generate_op_and_imm(_rd, _rn) \
  ARM_AND_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_orr_imm(_rd, _rn) \
  ARM_ORR_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_eor_imm(_rd, _rn) \
  ARM_EOR_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_bic_imm(_rd, _rn) \
  ARM_BIC_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_sub_imm(_rd, _rn) \
  ARM_SUB_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_rsb_imm(_rd, _rn) \
  ARM_RSB_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_sbc_imm(_rd, _rn) \
  ARM_SBC_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_rsc_imm(_rd, _rn) \
  ARM_RSC_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_add_imm(_rd, _rn) \
  ARM_ADD_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_adc_imm(_rd, _rn) \
  ARM_ADC_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_mov_imm(_rd, _rn) \
  ARM_MOV_REG_IMM(0, _rd, imm, imm_ror) \

#define generate_op_mvn_imm(_rd, _rn) \
  ARM_MVN_REG_IMM(0, _rd, imm, imm_ror) \


#define generate_op_reg_immshift_lflags(name, _rd, _rn, _rm, st, shift) \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, shift) \

#define generate_op_reg_immshift_aflags(name, _rd, _rn, _rm, st, shift) \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, shift) \

#define generate_op_reg_immshift_aflags_load_c(name, _rd, _rn, _rm, st, sh) \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, sh) \

#define generate_op_reg_immshift_uflags(name, _rd, _rm, shift_type, shift) \
  ARM_##name##_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \

#define generate_op_reg_immshift_tflags(name, _rn, _rm, shift_type, shift) \
  ARM_##name##_REG_IMMSHIFT(0, _rn, _rm, shift_type, shift) \


#define generate_op_reg_regshift_lflags(name, _rd, _rn, _rm, shift_type, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_reg_regshift_aflags(name, _rd, _rn, _rm, st, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, st, _rs) \

#define generate_op_reg_regshift_aflags_load_c(name, _rd, _rn, _rm, st, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, st, _rs) \

#define generate_op_reg_regshift_uflags(name, _rd, _rm, shift_type, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \

#define generate_op_reg_regshift_tflags(name, _rn, _rm, shift_type, _rs) \
  ARM_##name##_REG_REGSHIFT(0, _rn, _rm, shift_type, _rs) \


#define generate_op_imm_lflags(name, _rd, _rn) \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_imm_aflags(name, _rd, _rn) \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_imm_aflags_load_c(name, _rd, _rn) \
  ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \

#define generate_op_imm_uflags(name, _rd) \
  ARM_##name##_REG_IMM(0, _rd, imm, imm_ror) \

#define generate_op_imm_tflags(name, _rn) \
  ARM_##name##_REG_IMM(0, _rn, imm, imm_ror) \


#define generate_op_ands_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_lflags(ANDS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_orrs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_lflags(ORRS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_eors_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_lflags(EORS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_bics_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_lflags(BICS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_subs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_aflags(SUBS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_rsbs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_aflags(RSBS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_sbcs_reg_immshift(_rd, _rn, _rm, st, shift) \
  generate_op_reg_immshift_aflags_load_c(SBCS, _rd, _rn, _rm, st, shift) \

#define generate_op_rscs_reg_immshift(_rd, _rn, _rm, st, shift) \
  generate_op_reg_immshift_aflags_load_c(RSCS, _rd, _rn, _rm, st, shift) \

#define generate_op_adds_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_aflags(ADDS, _rd, _rn, _rm, shift_type, shift) \

#define generate_op_adcs_reg_immshift(_rd, _rn, _rm, st, shift) \
  generate_op_reg_immshift_aflags_load_c(ADCS, _rd, _rn, _rm, st, shift) \

#define generate_op_movs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_uflags(MOVS, _rd, _rm, shift_type, shift) \

#define generate_op_mvns_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_uflags(MVNS, _rd, _rm, shift_type, shift) \

// The reg operand is in reg_rm, not reg_rn as expected, so rsbs isn't
// being used here. When rsbs is fully inlined it can be used with the
// appropriate operands.

#define generate_op_neg_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
{ \
  generate_load_imm(reg_rn, 0, 0); \
  generate_op_subs_reg_immshift(_rd, reg_rn, _rm, ARMSHIFT_LSL, 0); \
} \

#define generate_op_muls_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_load_flags(); \
  ARM_MULS(0, _rd, _rn, _rm); \
  generate_store_flags() \

#define generate_op_cmp_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(CMP, _rn, _rm, shift_type, shift) \

#define generate_op_cmn_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(CMN, _rn, _rm, shift_type, shift) \

#define generate_op_tst_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(TST, _rn, _rm, shift_type, shift) \

#define generate_op_teq_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
  generate_op_reg_immshift_tflags(TEQ, _rn, _rm, shift_type, shift) \


#define generate_op_ands_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(ANDS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_orrs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(ORRS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_eors_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(EORS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_bics_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_lflags(BICS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_subs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(SUBS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_rsbs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(RSBS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_sbcs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(SBCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_rscs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(RSCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_adds_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_aflags(ADDS, _rd, _rn, _rm, shift_type, _rs) \

#define generate_op_adcs_reg_regshift(_rd, _rn, _rm, st, _rs) \
  generate_op_reg_regshift_aflags_load_c(ADCS, _rd, _rn, _rm, st, _rs) \

#define generate_op_movs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_uflags(MOVS, _rd, _rm, shift_type, _rs) \

#define generate_op_mvns_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_uflags(MVNS, _rd, _rm, shift_type, _rs) \

#define generate_op_cmp_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(CMP, _rn, _rm, shift_type, _rs) \

#define generate_op_cmn_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(CMN, _rn, _rm, shift_type, _rs) \

#define generate_op_tst_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(TST, _rn, _rm, shift_type, _rs) \

#define generate_op_teq_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
  generate_op_reg_regshift_tflags(TEQ, _rn, _rm, shift_type, _rs) \


#define generate_op_ands_imm(_rd, _rn) \
  generate_op_imm_lflags(ANDS, _rd, _rn) \

#define generate_op_orrs_imm(_rd, _rn) \
  generate_op_imm_lflags(ORRS, _rd, _rn) \

#define generate_op_eors_imm(_rd, _rn) \
  generate_op_imm_lflags(EORS, _rd, _rn) \

#define generate_op_bics_imm(_rd, _rn) \
  generate_op_imm_lflags(BICS, _rd, _rn) \

#define generate_op_subs_imm(_rd, _rn) \
  generate_op_imm_aflags(SUBS, _rd, _rn) \

#define generate_op_rsbs_imm(_rd, _rn) \
  generate_op_imm_aflags(RSBS, _rd, _rn) \

#define generate_op_sbcs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(SBCS, _rd, _rn) \

#define generate_op_rscs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(RSCS, _rd, _rn) \

#define generate_op_adds_imm(_rd, _rn) \
  generate_op_imm_aflags(ADDS, _rd, _rn) \

#define generate_op_adcs_imm(_rd, _rn) \
  generate_op_imm_aflags_load_c(ADCS, _rd, _rn) \

#define generate_op_movs_imm(_rd, _rn) \
  generate_op_imm_uflags(MOVS, _rd) \

#define generate_op_mvns_imm(_rd, _rn) \
  generate_op_imm_uflags(MVNS, _rd) \

#define generate_op_cmp_imm(_rd, _rn) \
  generate_op_imm_tflags(CMP, _rn) \

#define generate_op_cmn_imm(_rd, _rn) \
  generate_op_imm_tflags(CMN, _rn) \

#define generate_op_tst_imm(_rd, _rn) \
  generate_op_imm_tflags(TST, _rn) \

#define generate_op_teq_imm(_rd, _rn) \
  generate_op_imm_tflags(TEQ, _rn) \


#define prepare_load_rn_yes() \
  u32 _rn = prepare_load_reg_pc(reg_rn, rn, 8) \

#define prepare_load_rn_no() \

#define prepare_store_rd_yes() \
  u32 _rd = prepare_store_reg(reg_rd, rd) \

#define prepare_store_rd_no() \

#define complete_store_rd_yes(flags_op) \
  complete_store_reg_pc_##flags_op(_rd, rd) \

#define complete_store_rd_no(flags_op) \

#define arm_generate_op_reg(name, load_op, store_op, flags_op) \
  u32 shift_type = (opcode >> 5) & 0x03; \
  arm_decode_data_proc_reg(); \
  prepare_load_rn_##load_op(); \
  prepare_store_rd_##store_op(); \
 \
  if((opcode >> 4) & 0x01) \
  { \
    u32 rs = ((opcode >> 8) & 0x0F); \
    u32 _rs = prepare_load_reg(reg_rs, rs); \
    u32 _rm = prepare_load_reg_pc(reg_rm, rm, 12); \
    generate_op_##name##_reg_regshift(_rd, _rn, _rm, shift_type, _rs); \
  } \
  else \
  { \
    u32 shift_imm = ((opcode >> 7) & 0x1F); \
    u32 _rm = prepare_load_reg_pc(reg_rm, rm, 8); \
    generate_op_##name##_reg_immshift(_rd, _rn, _rm, shift_type, shift_imm); \
  } \
  complete_store_rd_##store_op(flags_op) \

#define arm_generate_op_reg_flags(name, load_op, store_op, flags_op) \
  arm_generate_op_reg(name, load_op, store_op, flags_op) \

// imm will be loaded by the called function if necessary.

#define arm_generate_op_imm(name, load_op, store_op, flags_op) \
  arm_decode_data_proc_imm(); \
  prepare_load_rn_##load_op(); \
  prepare_store_rd_##store_op(); \
  generate_op_##name##_imm(_rd, _rn); \
  complete_store_rd_##store_op(flags_op) \

#define arm_generate_op_imm_flags(name, load_op, store_op, flags_op) \
  arm_generate_op_imm(name, load_op, store_op, flags_op) \

#define arm_data_proc(name, type, flags_op) \
{ \
  arm_generate_op_##type(name, yes, yes, flags_op); \
} \

#define arm_data_proc_test(name, type) \
{ \
  arm_generate_op_##type(name, yes, no, no); \
} \

#define arm_data_proc_unary(name, type, flags_op) \
{ \
  arm_generate_op_##type(name, no, yes, flags_op); \
} \


#define arm_multiply_add_no_flags_no() \
  ARM_MUL(0, _rd, _rm, _rs) \

#define arm_multiply_add_yes_flags_no() \
  u32 _rn = prepare_load_reg(reg_a2, rn); \
  ARM_MLA(0, _rd, _rm, _rs, _rn) \

#define arm_multiply_add_no_flags_yes() \
  generate_load_flags(); \
  ARM_MULS(0, reg_a0, reg_a0, reg_a1) \
  generate_store_flags() \

#define arm_multiply_add_yes_flags_yes() \
  u32 _rn = prepare_load_reg(reg_a2, rn); \
  generate_load_flags(); \
  ARM_MLAS(0, _rd, _rm, _rs, _rn); \
  generate_store_flags()


#define arm_multiply(add_op, flags) \
{ \
  arm_decode_multiply(); \
  u32 _rm = prepare_load_reg(reg_a0, rm); \
  u32 _rs = prepare_load_reg(reg_a1, rs); \
  u32 _rd = prepare_store_reg(reg_a0, rd); \
  arm_multiply_add_##add_op##_flags_##flags(); \
  complete_store_reg(_rd, rd); \
} \


#define arm_multiply_long_name_s64 SMULL
#define arm_multiply_long_name_u64 UMULL
#define arm_multiply_long_name_s64_add SMLAL
#define arm_multiply_long_name_u64_add UMLAL


#define arm_multiply_long_flags_no(name) \
  ARM_##name(0, _rdlo, _rdhi, _rm, _rs) \

#define arm_multiply_long_flags_yes(name) \
  generate_load_flags(); \
  ARM_##name##S(0, _rdlo, _rdhi, _rm, _rs); \
  generate_store_flags() \


#define arm_multiply_long_add_no(name) \

#define arm_multiply_long_add_yes(name) \
  prepare_load_reg(reg_a0, rdlo); \
  prepare_load_reg(reg_a1, rdhi) \


#define arm_multiply_long_op(flags, name) \
  arm_multiply_long_flags_##flags(name) \

#define arm_multiply_long(name, add_op, flags) \
{ \
  arm_decode_multiply_long(); \
  u32 _rm = prepare_load_reg(reg_a2, rm); \
  u32 _rs = prepare_load_reg(reg_rs, rs); \
  u32 _rdlo = prepare_store_reg(reg_a0, rdlo); \
  u32 _rdhi = prepare_store_reg(reg_a1, rdhi); \
  arm_multiply_long_add_##add_op(name); \
  arm_multiply_long_op(flags, arm_multiply_long_name_##name); \
  complete_store_reg(_rdlo, rdlo); \
  complete_store_reg(_rdhi, rdhi); \
} \

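// In arm_psr_read_cpsr below, the stored CPSR and the live host flags
// are merged: BIC clears the NZCV nibble (0xF0 shifted left 24 bits =
// 0xF0000000) from the saved copy, AND keeps only that nibble of
// reg_flags, and ORR combines the two.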
#define arm_psr_read_cpsr() \
  u32 _rd = prepare_store_reg(reg_a0, rd); \
  generate_load_reg(_rd, REG_CPSR); \
  ARM_BIC_REG_IMM(0, _rd, _rd, 0xF0, arm_imm_lsl_to_rot(24)); \
  ARM_AND_REG_IMM(0, reg_flags, reg_flags, 0xF0, arm_imm_lsl_to_rot(24)); \
  ARM_ORR_REG_REG(0, _rd, _rd, reg_flags); \
  complete_store_reg(_rd, rd) \

#define arm_psr_read_spsr() \
  generate_function_call(execute_read_spsr) \
  generate_store_reg(reg_a0, rd) \

#define arm_psr_read(op_type, psr_reg) \
  arm_psr_read_##psr_reg() \

// This function's okay because it's called from an ASM function that can
// wrap it correctly.

u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address)
{
  reg[REG_CPSR] = _cpsr;
  if(store_mask & 0xFF)
  {
    set_cpu_mode(cpu_modes[_cpsr & 0x1F]);
    if((io_registers[REG_IE] & io_registers[REG_IF]) &&
     io_registers[REG_IME] && ((_cpsr & 0x80) == 0))
    {
      reg_mode[MODE_IRQ][6] = address + 4;
      spsr[MODE_IRQ] = _cpsr;
      reg[REG_CPSR] = 0xD2;
      set_cpu_mode(MODE_IRQ);
      return 0x00000018;
    }
  }

  return 0;
}

#define arm_psr_load_new_reg() \
  generate_load_reg(reg_a0, rm) \

#define arm_psr_load_new_imm() \
  generate_load_imm(reg_a0, imm, imm_ror) \

#define arm_psr_store_cpsr() \
  arm_load_imm_32bit(reg_a1, psr_masks[psr_field]); \
  generate_function_call(execute_store_cpsr); \
  write32(pc) \

#define arm_psr_store_spsr() \
  generate_function_call(execute_store_spsr) \

#define arm_psr_store(op_type, psr_reg) \
  arm_psr_load_new_##op_type(); \
  arm_psr_store_##psr_reg() \


#define arm_psr(op_type, transfer_type, psr_reg) \
{ \
  arm_decode_psr_##op_type(); \
  arm_psr_##transfer_type(op_type, psr_reg); \
} \

// TODO: loads will need the PC passed as well for open addresses; this
// can eventually be rectified with a hash table on the memory accesses
// (the same applies to the stores).

#define arm_access_memory_load(mem_type) \
  cycle_count += 2; \
  generate_function_call(execute_load_##mem_type); \
  write32((pc + 8)); \
  generate_store_reg_pc_no_flags(reg_rv, rd) \

#define arm_access_memory_store(mem_type) \
  cycle_count++; \
  generate_load_reg_pc(reg_a1, rd, 12); \
  generate_function_call(execute_store_##mem_type); \
  write32((pc + 4)) \

// Calculate the address into a0 from _rn, _rm.

#define arm_access_memory_adjust_reg_sh_up(ireg) \
  ARM_ADD_REG_IMMSHIFT(0, ireg, _rn, _rm, ((opcode >> 5) & 0x03), \
   ((opcode >> 7) & 0x1F)) \

#define arm_access_memory_adjust_reg_sh_down(ireg) \
  ARM_SUB_REG_IMMSHIFT(0, ireg, _rn, _rm, ((opcode >> 5) & 0x03), \
   ((opcode >> 7) & 0x1F)) \

#define arm_access_memory_adjust_reg_up(ireg) \
  ARM_ADD_REG_REG(0, ireg, _rn, _rm) \

#define arm_access_memory_adjust_reg_down(ireg) \
  ARM_SUB_REG_REG(0, ireg, _rn, _rm) \

#define arm_access_memory_adjust_imm(op, ireg) \
{ \
  u32 stores[4]; \
  u32 rotations[4]; \
  u32 store_count = arm_disect_imm_32bit(offset, stores, rotations); \
 \
  if(store_count > 1) \
  { \
    ARM_##op##_REG_IMM(0, ireg, _rn, stores[0], rotations[0]); \
    ARM_##op##_REG_IMM(0, ireg, ireg, stores[1], rotations[1]); \
  } \
  else \
  { \
    ARM_##op##_REG_IMM(0, ireg, _rn, stores[0], rotations[0]); \
  } \
} \

#define arm_access_memory_adjust_imm_up(ireg) \
  arm_access_memory_adjust_imm(ADD, ireg) \

#define arm_access_memory_adjust_imm_down(ireg) \
  arm_access_memory_adjust_imm(SUB, ireg) \


#define arm_access_memory_pre(type, direction) \
  arm_access_memory_adjust_##type##_##direction(reg_a0) \

#define arm_access_memory_pre_wb(type, direction) \
  arm_access_memory_adjust_##type##_##direction(reg_a0); \
  generate_store_reg(reg_a0, rn) \

#define arm_access_memory_post(type, direction) \
  u32 _rn_dest = prepare_store_reg(reg_a1, rn); \
  if(_rn != reg_a0) \
  { \
    generate_load_reg(reg_a0, rn); \
  } \
  arm_access_memory_adjust_##type##_##direction(_rn_dest); \
  complete_store_reg(_rn_dest, rn) \


#define arm_data_trans_reg(adjust_op, direction) \
  arm_decode_data_trans_reg(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  u32 _rm = prepare_load_reg(reg_a1, rm); \
  arm_access_memory_##adjust_op(reg_sh, direction) \

#define arm_data_trans_imm(adjust_op, direction) \
  arm_decode_data_trans_imm(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  arm_access_memory_##adjust_op(imm, direction) \


#define arm_data_trans_half_reg(adjust_op, direction) \
  arm_decode_half_trans_r(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  u32 _rm = prepare_load_reg(reg_a1, rm); \
  arm_access_memory_##adjust_op(reg, direction) \

#define arm_data_trans_half_imm(adjust_op, direction) \
  arm_decode_half_trans_of(); \
  u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
  arm_access_memory_##adjust_op(imm, direction) \


#define arm_access_memory(access_type, direction, adjust_op, mem_type, \
 offset_type) \
{ \
  arm_data_trans_##offset_type(adjust_op, direction); \
  arm_access_memory_##access_type(mem_type); \
} \


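// reg_list is a 16-bit register mask and bit_count[] is (presumably) a
// 256-entry popcount table, so the count is taken one byte at a time.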
#define word_bit_count(word) \
  (bit_count[word >> 8] + bit_count[word & 0xFF]) \

#define sprint_no(access_type, pre_op, post_op, wb) \

#define sprint_yes(access_type, pre_op, post_op, wb) \
  printf("sbit on %s %s %s %s\n", #access_type, #pre_op, #post_op, #wb) \


// TODO: Make these use cached registers. Implement iwram_stack_optimize.

#define arm_block_memory_load() \
  generate_function_call(execute_load_u32); \
  write32((pc + 8)); \
  generate_store_reg(reg_rv, i) \

#define arm_block_memory_store() \
  generate_load_reg_pc(reg_a1, i, 8); \
  generate_function_call(execute_store_u32_safe) \

#define arm_block_memory_final_load() \
  arm_block_memory_load() \

#define arm_block_memory_final_store() \
  generate_load_reg_pc(reg_a1, i, 12); \
  generate_function_call(execute_store_u32); \
  write32((pc + 4)) \

#define arm_block_memory_adjust_pc_store() \

#define arm_block_memory_adjust_pc_load() \
  if(reg_list & 0x8000) \
  { \
    generate_mov(reg_a0, reg_rv); \
    generate_indirect_branch_arm(); \
  } \

#define arm_block_memory_offset_down_a() \
  generate_sub_imm(reg_s0, ((word_bit_count(reg_list) * 4) - 4), 0) \

#define arm_block_memory_offset_down_b() \
  generate_sub_imm(reg_s0, (word_bit_count(reg_list) * 4), 0) \

#define arm_block_memory_offset_no() \

#define arm_block_memory_offset_up() \
  generate_add_imm(reg_s0, 4, 0) \

#define arm_block_memory_writeback_down() \
  generate_load_reg(reg_a0, rn); \
  generate_sub_imm(reg_a0, (word_bit_count(reg_list) * 4), 0); \
  generate_store_reg(reg_a0, rn) \

#define arm_block_memory_writeback_up() \
  generate_load_reg(reg_a0, rn); \
  generate_add_imm(reg_a0, (word_bit_count(reg_list) * 4), 0); \
  generate_store_reg(reg_a0, rn) \

#define arm_block_memory_writeback_no()

// Only emit writeback if the register is not in the list.

#define arm_block_memory_writeback_load(writeback_type) \
  if(!((reg_list >> rn) & 0x01)) \
  { \
    arm_block_memory_writeback_##writeback_type(); \
  } \

#define arm_block_memory_writeback_store(writeback_type) \
  arm_block_memory_writeback_##writeback_type() \

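// The loop below walks the register list from r0 upward, transferring
// each register at reg_s0 + offset. The test reg_list & ~((2 << i) - 1)
// checks whether any higher bit is still set; the last set bit takes the
// final_* path instead, which embeds the return PC differently.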
#define arm_block_memory(access_type, offset_type, writeback_type, s_bit) \
{ \
  arm_decode_block_trans(); \
  u32 offset = 0; \
  u32 i; \
 \
  generate_load_reg(reg_s0, rn); \
  arm_block_memory_offset_##offset_type(); \
  arm_block_memory_writeback_##access_type(writeback_type); \
  ARM_BIC_REG_IMM(0, reg_s0, reg_s0, 0x03, 0); \
 \
  for(i = 0; i < 16; i++) \
  { \
    if((reg_list >> i) & 0x01) \
    { \
      cycle_count++; \
      generate_add_reg_reg_imm(reg_a0, reg_s0, offset, 0); \
      if(reg_list & ~((2 << i) - 1)) \
      { \
        arm_block_memory_##access_type(); \
        offset += 4; \
      } \
      else \
      { \
        arm_block_memory_final_##access_type(); \
        break; \
      } \
    } \
  } \
 \
  arm_block_memory_adjust_pc_##access_type(); \
} \

#define arm_swap(type) \
{ \
  arm_decode_swap(); \
  cycle_count += 3; \
  generate_load_reg(reg_a0, rn); \
  generate_function_call(execute_load_##type); \
  write32((pc + 8)); \
  generate_mov(reg_s0, reg_rv); \
  generate_load_reg(reg_a0, rn); \
  generate_load_reg(reg_a1, rm); \
  generate_function_call(execute_store_##type); \
  write32((pc + 4)); \
  generate_store_reg(reg_s0, rd); \
} \


#define thumb_generate_op_reg(name, _rd, _rs, _rn) \
  u32 __rm = prepare_load_reg(reg_rm, _rn); \
  generate_op_##name##_reg_immshift(__rd, __rn, __rm, ARMSHIFT_LSL, 0) \

#define thumb_generate_op_imm(name, _rd, _rs, imm_) \
{ \
  u32 imm_ror = 0; \
  generate_op_##name##_imm(__rd, __rn); \
} \


#define thumb_data_proc(type, name, op_type, _rd, _rs, _rn) \
{ \
  thumb_decode_##type(); \
  u32 __rn = prepare_load_reg(reg_rn, _rs); \
  u32 __rd = prepare_store_reg(reg_rd, _rd); \
  generate_load_reg(reg_rn, _rs); \
  thumb_generate_op_##op_type(name, _rd, _rs, _rn); \
  complete_store_reg(__rd, _rd); \
} \

#define thumb_data_proc_test(type, name, op_type, _rd, _rs) \
{ \
  thumb_decode_##type(); \
  u32 __rn = prepare_load_reg(reg_rn, _rd); \
  thumb_generate_op_##op_type(name, 0, _rd, _rs); \
} \

#define thumb_data_proc_unary(type, name, op_type, _rd, _rs) \
{ \
  thumb_decode_##type(); \
  u32 __rd = prepare_store_reg(reg_rd, _rd); \
  thumb_generate_op_##op_type(name, _rd, 0, _rs); \
  complete_store_reg(__rd, _rd); \
} \


#define complete_store_reg_pc_thumb() \
  if(rd == 15) \
  { \
    generate_indirect_branch_cycle_update(thumb); \
  } \
  else \
  { \
    complete_store_reg(_rd, rd); \
  } \

#define thumb_data_proc_hi(name) \
{ \
  thumb_decode_hireg_op(); \
  u32 _rd = prepare_load_reg_pc(reg_rd, rd, 4); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  generate_op_##name##_reg_immshift(_rd, _rd, _rs, ARMSHIFT_LSL, 0); \
  complete_store_reg_pc_thumb(); \
} \

#define thumb_data_proc_test_hi(name) \
{ \
  thumb_decode_hireg_op(); \
  u32 _rd = prepare_load_reg_pc(reg_rd, rd, 4); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  generate_op_##name##_reg_immshift(0, _rd, _rs, ARMSHIFT_LSL, 0); \
} \

#define thumb_data_proc_mov_hi() \
{ \
  thumb_decode_hireg_op(); \
  u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
  u32 _rd = prepare_store_reg(reg_rd, rd); \
  ARM_MOV_REG_REG(0, _rd, _rs); \
  complete_store_reg_pc_thumb(); \
} \



1569 | #define thumb_load_pc(_rd) \ |
1570 | { \ |
1571 | thumb_decode_imm(); \ |
1572 | u32 __rd = prepare_store_reg(reg_rd, _rd); \ |
1573 | generate_load_pc(__rd, (((pc & ~2) + 4) + (imm * 4))); \ |
1574 | complete_store_reg(__rd, _rd); \ |
1575 | } \ |
1576 | |
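/* The address computation above follows the ARM7TDMI rule for Thumb
   LDR rd, [PC, #imm]: the pipeline PC (pc + 4) is word-aligned before the
   scaled offset is added. E.g. with pc = 0x0800012A and imm = 3, the
   loaded address is ((0x0800012A & ~2) + 4) + 12 = 0x08000138. */
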
#define thumb_load_sp(_rd) \
{ \
  thumb_decode_imm(); \
  u32 __sp = prepare_load_reg(reg_a0, REG_SP); \
  u32 __rd = prepare_store_reg(reg_a0, _rd); \
  ARM_ADD_REG_IMM(0, __rd, __sp, imm, arm_imm_lsl_to_rot(2)); \
  complete_store_reg(__rd, _rd); \
} \

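/* arm_imm_lsl_to_rot(2) presumably converts a left-shift amount into the
   rotate-right field of an ARM immediate operand, so the 8-bit imm is
   applied pre-scaled by 4 - Thumb SP-relative offsets are word offsets,
   and this encodes the multiply for free in the host ADD/SUB immediate. */
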
#define thumb_adjust_sp_up() \
  ARM_ADD_REG_IMM(0, _sp, _sp, imm, arm_imm_lsl_to_rot(2)) \

#define thumb_adjust_sp_down() \
  ARM_SUB_REG_IMM(0, _sp, _sp, imm, arm_imm_lsl_to_rot(2)) \

#define thumb_adjust_sp(direction) \
{ \
  thumb_decode_add_sp(); \
  u32 _sp = prepare_load_reg(reg_a0, REG_SP); \
  thumb_adjust_sp_##direction(); \
  complete_store_reg(_sp, REG_SP); \
} \

#define generate_op_lsl_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_LSL, _rs) \

#define generate_op_lsr_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_LSR, _rs) \

#define generate_op_asr_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_ASR, _rs) \

#define generate_op_ror_reg(_rd, _rm, _rs) \
  generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_ROR, _rs) \


#define generate_op_lsl_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_LSL, imm) \

#define generate_op_lsr_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_LSR, imm) \

#define generate_op_asr_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_ASR, imm) \

#define generate_op_ror_imm(_rd, _rm) \
  generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_ROR, imm) \

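/* All eight shift forms map a Thumb shift directly onto a host MOVS
   through the barrel shifter, so the host N/Z flags and shifter carry
   come out matching the GBA's for free; the second argument (0) fills
   the unused _rn slot of the underlying data-processing template. */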

#define generate_shift_reg(op_type) \
  u32 __rm = prepare_load_reg(reg_rd, rd); \
  u32 __rs = prepare_load_reg(reg_rs, rs); \
  generate_op_##op_type##_reg(__rd, __rm, __rs) \

#define generate_shift_imm(op_type) \
  u32 __rs = prepare_load_reg(reg_rs, rs); \
  generate_op_##op_type##_imm(__rd, __rs) \


#define thumb_shift(decode_type, op_type, value_type) \
{ \
  thumb_decode_##decode_type(); \
  u32 __rd = prepare_store_reg(reg_rd, rd); \
  generate_shift_##value_type(op_type); \
  complete_store_reg(__rd, rd); \
} \

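/* Hypothetical call sites for illustration: the translator would emit
   "LSL rd, rs, #imm" through something like thumb_shift(shift, lsl, imm)
   (a single MOVS with an immediate shift), and "LSR rd, rs" through
   thumb_shift(alu_op, lsr, reg), where the shift amount comes from a
   second GBA register. */
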
// Operation types: imm, mem_reg, mem_imm

#define thumb_access_memory_load(mem_type, _rd) \
  cycle_count += 2; \
  generate_function_call(execute_load_##mem_type); \
  write32((pc + 4)); \
  generate_store_reg(reg_rv, _rd) \

#define thumb_access_memory_store(mem_type, _rd) \
  cycle_count++; \
  generate_load_reg(reg_a1, _rd); \
  generate_function_call(execute_store_##mem_type); \
  write32((pc + 2)) \

#define thumb_access_memory_generate_address_pc_relative(offset, _rb, _ro) \
  generate_load_pc(reg_a0, (offset)) \

#define thumb_access_memory_generate_address_reg_imm(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  ARM_ADD_REG_IMM(0, reg_a0, __rb, offset, 0) \

#define thumb_access_memory_generate_address_reg_imm_sp(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  ARM_ADD_REG_IMM(0, reg_a0, __rb, offset, arm_imm_lsl_to_rot(2)) \

#define thumb_access_memory_generate_address_reg_reg(offset, _rb, _ro) \
  u32 __rb = prepare_load_reg(reg_a0, _rb); \
  u32 __ro = prepare_load_reg(reg_a1, _ro); \
  ARM_ADD_REG_REG(0, reg_a0, __rb, __ro) \

#define thumb_access_memory(access_type, op_type, _rd, _rb, _ro, \
 address_type, offset, mem_type) \
{ \
  thumb_decode_##op_type(); \
  thumb_access_memory_generate_address_##address_type(offset, _rb, _ro); \
  thumb_access_memory_##access_type(mem_type, _rd); \
} \

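/* Shape of a generated Thumb load, e.g. "LDR rd, [rb, #imm]" (hypothetical
   dispatch thumb_access_memory(load, mem_imm, rd, rb, 0, reg_imm,
   (imm * 4), u32)): the address lands in reg_a0, execute_load_u32 is
   called with the current PC written as an inline literal after the call
   for the memory handler to consult, and the result in reg_rv is stored
   back to the GBA register file. */
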
// TODO: Make these use cached registers. Implement iwram_stack_optimize.

#define thumb_block_address_preadjust_up() \
  generate_add_imm(reg_s0, (bit_count[reg_list] * 4), 0) \

#define thumb_block_address_preadjust_down() \
  generate_sub_imm(reg_s0, (bit_count[reg_list] * 4), 0) \

#define thumb_block_address_preadjust_push_lr() \
  generate_sub_imm(reg_s0, ((bit_count[reg_list] + 1) * 4), 0) \

#define thumb_block_address_preadjust_no() \

#define thumb_block_address_postadjust_no(base_reg) \
  generate_store_reg(reg_s0, base_reg) \

#define thumb_block_address_postadjust_up(base_reg) \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_down(base_reg) \
  generate_mov(reg_a0, reg_s0); \
  generate_sub_imm(reg_a0, (bit_count[reg_list] * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_pop_pc(base_reg) \
  generate_add_reg_reg_imm(reg_a0, reg_s0, \
   ((bit_count[reg_list] + 1) * 4), 0); \
  generate_store_reg(reg_a0, base_reg) \

#define thumb_block_address_postadjust_push_lr(base_reg) \
  generate_store_reg(reg_s0, base_reg) \

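/* The pre/post pairs implement the Thumb block-transfer modes: LDMIA/STMIA
   walk upward from the unchanged base and write it back above the
   transferred words, while PUSH drops the base below the stored words
   first (preadjust_down, or preadjust_push_lr when LR joins the list) and
   POP reads upward, then writes the base back past the popped words.
   bit_count[] is a lookup table giving the population count of the 8-bit
   register list. */
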
#define thumb_block_memory_extra_no() \

#define thumb_block_memory_extra_up() \

#define thumb_block_memory_extra_down() \

#define thumb_block_memory_extra_pop_pc() \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
  generate_function_call(execute_load_u32); \
  write32((pc + 4)); \
  generate_mov(reg_a0, reg_rv); \
  generate_indirect_branch_cycle_update(thumb) \

#define thumb_block_memory_extra_push_lr(base_reg) \
  generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
  generate_load_reg(reg_a1, REG_LR); \
  generate_function_call(execute_store_u32_safe) \

#define thumb_block_memory_load() \
  generate_function_call(execute_load_u32); \
  write32((pc + 4)); \
  generate_store_reg(reg_rv, i) \

#define thumb_block_memory_store() \
  generate_load_reg(reg_a1, i); \
  generate_function_call(execute_store_u32_safe) \

#define thumb_block_memory_final_load() \
  thumb_block_memory_load() \

#define thumb_block_memory_final_store() \
  generate_load_reg(reg_a1, i); \
  generate_function_call(execute_store_u32); \
  write32((pc + 2)) \

#define thumb_block_memory_final_no(access_type) \
  thumb_block_memory_final_##access_type() \

#define thumb_block_memory_final_up(access_type) \
  thumb_block_memory_final_##access_type() \

#define thumb_block_memory_final_down(access_type) \
  thumb_block_memory_final_##access_type() \

#define thumb_block_memory_final_push_lr(access_type) \
  thumb_block_memory_##access_type() \

#define thumb_block_memory_final_pop_pc(access_type) \
  thumb_block_memory_##access_type() \

#define thumb_block_memory(access_type, pre_op, post_op, base_reg) \
{ \
  thumb_decode_rlist(); \
  u32 i; \
  u32 offset = 0; \
  \
  generate_load_reg(reg_s0, base_reg); \
  ARM_BIC_REG_IMM(0, reg_s0, reg_s0, 0x03, 0); \
  thumb_block_address_preadjust_##pre_op(); \
  thumb_block_address_postadjust_##post_op(base_reg); \
  \
  for(i = 0; i < 8; i++) \
  { \
    if((reg_list >> i) & 0x01) \
    { \
      cycle_count++; \
      generate_add_reg_reg_imm(reg_a0, reg_s0, offset, 0); \
      if(reg_list & ~((2 << i) - 1)) \
      { \
        thumb_block_memory_##access_type(); \
        offset += 4; \
      } \
      else \
      { \
        thumb_block_memory_final_##post_op(access_type); \
        break; \
      } \
    } \
  } \
  \
  thumb_block_memory_extra_##post_op(); \
} \

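/* Worked example (hypothetical dispatch): PUSH {r0, lr} would arrive as
   thumb_block_memory(store, push_lr, push_lr, REG_SP). reg_s0 becomes the
   word-aligned SP - 8, the lowered SP is written back, and r0 is stored
   at offset 0. Note that final_push_lr deliberately uses the non-final
   store: the true last transfer is LR at offset 4, emitted afterwards by
   the extra_push_lr step via execute_store_u32_safe. */
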
#define thumb_conditional_branch(condition) \
{ \
  generate_cycle_update(); \
  generate_load_flags(); \
  generate_branch_filler(condition_opposite_##condition, backpatch_address); \
  generate_branch_no_cycle_update( \
   block_exits[block_exit_position].branch_source, \
   block_exits[block_exit_position].branch_target, thumb); \
  generate_branch_patch_conditional(backpatch_address, translation_ptr); \
  block_exit_position++; \
} \

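/* Conditional branches are emitted inverted: a host branch on the
   opposite condition skips over the unconditional exit stub, and once the
   stub's length is known, generate_branch_patch_conditional() back-patches
   the skip target to the current translation_ptr. */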

#define arm_conditional_block_header() \
  generate_cycle_update(); \
  generate_load_flags(); \
  /* This will choose the opposite condition */ \
  condition ^= 0x01; \
  generate_branch_filler(condition, backpatch_address) \

#define arm_b() \
  generate_branch(arm) \

#define arm_bl() \
  generate_update_pc((pc + 4)); \
  generate_store_reg(reg_a0, REG_LR); \
  generate_branch(arm) \

#define arm_bx() \
  arm_decode_branchx(); \
  generate_load_reg(reg_a0, rn); \
  generate_indirect_branch_dual(); \

#define arm_swi() \
  generate_swi_hle_handler((opcode >> 16) & 0xFF, arm); \
  generate_function_call(execute_swi_arm); \
  write32((pc + 4)); \
  generate_branch(arm) \

#define thumb_b() \
  generate_branch(thumb) \

#define thumb_bl() \
  generate_update_pc(((pc + 2) | 0x01)); \
  generate_store_reg(reg_a0, REG_LR); \
  generate_branch(thumb) \

#define thumb_blh() \
{ \
  thumb_decode_branch(); \
  generate_update_pc(((pc + 2) | 0x01)); \
  generate_load_reg(reg_a1, REG_LR); \
  generate_store_reg(reg_a0, REG_LR); \
  generate_mov(reg_a0, reg_a1); \
  generate_add_imm(reg_a0, (offset * 2), 0); \
  generate_indirect_branch_cycle_update(thumb); \
} \

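/* thumb_blh() handles the second half of the two-instruction Thumb BL
   pair: the first instruction has already deposited the upper part of the
   target in LR, so the branch destination is old LR + (offset * 2), while
   the new LR becomes the return address (pc + 2) with bit 0 set to stay
   in Thumb state. */
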
#define thumb_bx() \
{ \
  thumb_decode_hireg_op(); \
  generate_load_reg_pc(reg_a0, rs, 4); \
  generate_indirect_branch_cycle_update(dual_thumb); \
} \

#define thumb_swi() \
  generate_swi_hle_handler(opcode & 0xFF, thumb); \
  generate_function_call(execute_swi_thumb); \
  write32((pc + 2)); \
  /* We're in ARM mode now */ \
  generate_branch(arm) \

u8 swi_hle_handle[256] =
{
  0x0, // SWI 0:  SoftReset
  0x0, // SWI 1:  RegisterRAMReset
  0x0, // SWI 2:  Halt
  0x0, // SWI 3:  Stop/Sleep
  0x0, // SWI 4:  IntrWait
  0x0, // SWI 5:  VBlankIntrWait
  0x1, // SWI 6:  Div
  0x0, // SWI 7:  DivArm
  0x0, // SWI 8:  Sqrt
  0x0, // SWI 9:  ArcTan
  0x0, // SWI A:  ArcTan2
  0x0, // SWI B:  CpuSet
  0x0, // SWI C:  CpuFastSet
  0x0, // SWI D:  GetBIOSCheckSum
  0x0, // SWI E:  BgAffineSet
  0x0, // SWI F:  ObjAffineSet
  0x0, // SWI 10: BitUnpack
  0x0, // SWI 11: LZ77UnCompWram
  0x0, // SWI 12: LZ77UnCompVram
  0x0, // SWI 13: HuffUnComp
  0x0, // SWI 14: RLUnCompWram
  0x0, // SWI 15: RLUnCompVram
  0x0, // SWI 16: Diff8bitUnFilterWram
  0x0, // SWI 17: Diff8bitUnFilterVram
  0x0, // SWI 18: Diff16bitUnFilter
  0x0, // SWI 19: SoundBias
  0x0, // SWI 1A: SoundDriverInit
  0x0, // SWI 1B: SoundDriverMode
  0x0, // SWI 1C: SoundDriverMain
  0x0, // SWI 1D: SoundDriverVSync
  0x0, // SWI 1E: SoundChannelClear
  0x0, // SWI 1F: MidiKey2Freq
  0x0, // SWI 20: SoundWhatever0
  0x0, // SWI 21: SoundWhatever1
  0x0, // SWI 22: SoundWhatever2
  0x0, // SWI 23: SoundWhatever3
  0x0, // SWI 24: SoundWhatever4
  0x0, // SWI 25: MultiBoot
  0x0, // SWI 26: HardReset
  0x0, // SWI 27: CustomHalt
  0x0, // SWI 28: SoundDriverVSyncOff
  0x0, // SWI 29: SoundDriverVSyncOn
  0x0  // SWI 2A: SoundGetJumpList
};

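/* Entries are indexed by BIOS SWI number; a nonzero value marks calls the
   dynarec replaces with a native (HLE) handler instead of entering the
   BIOS. Only SWI 6 (Div) is currently handled, matching the check in
   generate_swi_hle_handler() below. */
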
void execute_swi_hle_div_arm();
void execute_swi_hle_div_thumb();

void execute_swi_hle_div_c()
{
  s32 result = (s32)reg[0] / (s32)reg[1];
  reg[1] = (s32)reg[0] % (s32)reg[1];
  reg[0] = result;

  reg[3] = (result ^ (result >> 31)) - (result >> 31);
}

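/* BIOS Div ABI: r0 = numerator, r1 = denominator on entry; r0 = quotient,
   r1 = remainder, r3 = |quotient| on exit. The last line is a branch-free
   absolute value: XORing with the sign-extended sign bit and then
   subtracting it negates negative results. A zero denominator is not
   guarded against here. */
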
#define generate_swi_hle_handler(_swi_number, mode) \
{ \
  u32 swi_number = _swi_number; \
  if(swi_hle_handle[swi_number]) \
  { \
    /* Div */ \
    if(swi_number == 0x06) \
    { \
      generate_function_call(execute_swi_hle_div_##mode); \
    } \
    break; \
  } \
} \

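/* The bare break is intentional: this macro only expands inside the
   translator's per-opcode switch, so a HLE-handled SWI emits its native
   call and then breaks out of the case before the code that enters the
   real SWI handler (execute_swi_arm/thumb) can be generated. */
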
#define generate_translation_gate(type) \
  generate_update_pc(pc); \
  generate_indirect_branch_no_cycle_update(type) \

#define generate_step_debug() \
  generate_function_call(step_debug_arm); \
  write32(pc) \

#endif
