/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *   linkage_arm.s for PCSX                                                *
 *   Copyright (C) 2009-2011 Ari64                                         *
 *   Copyright (C) 2010-2013 Gražvydas "notaz" Ignotas                     *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include "arm_features.h"
#include "new_dynarec_config.h"
#include "linkage_offsets.h"


#ifdef __MACH__
#define dynarec_local ESYM(dynarec_local)
#define add_jump_out ESYM(add_jump_out)
#define new_recompile_block ESYM(new_recompile_block)
#define get_addr ESYM(get_addr)
#define get_addr_ht ESYM(get_addr_ht)
#define clean_blocks ESYM(clean_blocks)
#define gen_interupt ESYM(gen_interupt)
#define invalidate_addr ESYM(invalidate_addr)
#define gteCheckStallRaw ESYM(gteCheckStallRaw)
#endif

    .bss
    .align 4
    .global dynarec_local
    .type dynarec_local, %object
    .size dynarec_local, LO_dynarec_local_size
dynarec_local:
    .space LO_dynarec_local_size

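/* Each DRC_VAR gives C code a named, sized view into dynarec_local at the
 * LO_* offset generated into linkage_offsets.h. */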
#define DRC_VAR_(name, vname, size_) \
    vname = dynarec_local + LO_##name; \
    .global vname; \
    .type vname, %object; \
    .size vname, size_

#define DRC_VAR(name, size_) \
    DRC_VAR_(name, ESYM(name), size_)

DRC_VAR(next_interupt, 4)
DRC_VAR(cycle_count, 4)
DRC_VAR(last_count, 4)
DRC_VAR(pending_exception, 4)
DRC_VAR(stop, 4)
DRC_VAR(branch_target, 4)
DRC_VAR(address, 4)
@DRC_VAR(align0, 4) /* unused/alignment */
DRC_VAR(psxRegs, LO_psxRegs_end - LO_psxRegs)

/* psxRegs */
@DRC_VAR(reg, 128)
DRC_VAR(lo, 4)
DRC_VAR(hi, 4)
DRC_VAR(reg_cop0, 128)
DRC_VAR(reg_cop2d, 128)
DRC_VAR(reg_cop2c, 128)
DRC_VAR(pcaddr, 4)
@DRC_VAR(code, 4)
@DRC_VAR(cycle, 4)
@DRC_VAR(interrupt, 4)
@DRC_VAR(intCycle, 256)

DRC_VAR(rcnts, 7*4*4)
DRC_VAR(inv_code_start, 4)
DRC_VAR(inv_code_end, 4)
DRC_VAR(mem_rtab, 4)
DRC_VAR(mem_wtab, 4)
DRC_VAR(psxH_ptr, 4)
DRC_VAR(zeromem_ptr, 4)
DRC_VAR(invc_ptr, 4)
DRC_VAR(scratch_buf_ptr, 4)
DRC_VAR(ram_offset, 4)
DRC_VAR(mini_ht, 256)
DRC_VAR(restore_candidate, 512)


#ifdef TEXRELS_FORBIDDEN
    .data
    .align 2
ptr_jump_in:
    .word ESYM(jump_in)
ptr_jump_dirty:
    .word ESYM(jump_dirty)
ptr_hash_table:
    .word ESYM(hash_table)
#endif


    .syntax unified
    .text
    .align 2
#ifndef HAVE_ARMV5
.macro blx rd
    mov lr, pc
    bx \rd
.endm
#endif

.macro load_varadr reg var
#if defined(HAVE_ARMV7) && defined(TEXRELS_FORBIDDEN)
    movw \reg, #:lower16:(\var-(1678f+8))
    movt \reg, #:upper16:(\var-(1678f+8))
1678:
    add \reg, pc
#elif defined(HAVE_ARMV7) && !defined(__PIC__)
    movw \reg, #:lower16:\var
    movt \reg, #:upper16:\var
#else
    ldr \reg, =\var
#endif
.endm

.macro load_varadr_ext reg var
#if defined(HAVE_ARMV7) && defined(TEXRELS_FORBIDDEN)
    movw \reg, #:lower16:(ptr_\var-(1678f+8))
    movt \reg, #:upper16:(ptr_\var-(1678f+8))
1678:
    ldr \reg, [pc, \reg]
#else
    load_varadr \reg \var
#endif
.endm

.macro mov_16 reg imm
#ifdef HAVE_ARMV7
    movw \reg, #\imm
#else
    mov \reg, #(\imm & 0x00ff)
    orr \reg, #(\imm & 0xff00)
#endif
.endm

.macro mov_24 reg imm
#ifdef HAVE_ARMV7
    movw \reg, #(\imm & 0xffff)
    movt \reg, #(\imm >> 16)
#else
    mov \reg, #(\imm & 0x0000ff)
    orr \reg, #(\imm & 0x00ff00)
    orr \reg, #(\imm & 0xff0000)
#endif
.endm
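
/* Note: the pre-v7 fallbacks above build the constant from byte-sized
 * chunks; each masked chunk is a valid rotated 8-bit immediate, so any
 * 16-/24-bit value can be passed. */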

/* r0 = virtual target address */
/* r1 = instruction to patch */
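/* Overview: look the target up in this page's jump_in list; on a hit, patch
 * the calling branch (at r1) to jump there directly and record the link via
 * add_jump_out.  On a miss, try the hash table, then jump_dirty, and as a
 * last resort fall through to the recompiler. */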
.macro dyna_linker_main
#ifndef NO_WRITE_EXEC
    load_varadr_ext r3, jump_in
    /* get_page */
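    /* r2 = page = (vaddr >> 12) with the 0xe0000000 mirror bits cleared and
     * RAM mirrors folded; pages >= 2048 alias into the 2048..4095 range.
     * The r7/r12/r6 loads below belong to the old-target decode and are
     * interleaved here for scheduling. */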
    lsr r2, r0, #12
    mov r6, #4096
    bic r2, r2, #0xe0000
    sub r6, r6, #1
    cmp r2, #0x1000
    ldr r7, [r1]
    biclt r2, #0x0e00
    and r6, r6, r2
    cmp r2, #2048
    add r12, r7, #2
    orrcs r2, r6, #2048
    ldr r5, [r3, r2, lsl #2]
    lsl r12, r12, #8
    add r6, r1, r12, asr #6 /* old target */
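    /* r6 = r1 + 8 + sign_extend(imm24) * 4, the target the B/BL in r7
     * currently points at (the +2 above folds in the pc+8 fetch bias) */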
    mov r8, #0
    /* jump_in lookup */
1:
    movs r4, r5
    beq 2f
    ldr r3, [r5]          /* ll_entry .vaddr */
    ldrd r4, r5, [r4, #8] /* ll_entry .addr, .next */
    teq r3, r0
    bne 1b
    teq r4, r6
    moveq pc, r4 /* Stale i-cache */
    mov r8, r4
    b 1b /* jump_in may have dupes, continue search */
2:
    tst r8, r8
    beq 3f /* r0 not in jump_in */

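    /* patch the branch: keep the cond/opcode byte of the old instruction
     * and insert the new signed 24-bit offset, (target - insn - 8) / 4 */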
    mov r5, r1
    mov r1, r6
    bl add_jump_out
    sub r2, r8, r5
    and r1, r7, #0xff000000
    lsl r2, r2, #6
    sub r1, r1, #2
    add r1, r1, r2, lsr #8
    str r1, [r5]
    mov pc, r8
3:
    /* hash_table lookup */
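    /* index = ((vaddr ^ (vaddr << 16)) >> 12) & ~15; each 16-byte bucket
     * holds two ways laid out as { vaddr_a, vaddr_b, tcaddr_a, tcaddr_b } */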
    cmp r2, #2048
    load_varadr_ext r3, jump_dirty
    eor r4, r0, r0, lsl #16
    lslcc r2, r0, #9
    load_varadr_ext r6, hash_table
    lsr r4, r4, #12
    lsrcc r2, r2, #21
    bic r4, r4, #15
    ldr r5, [r3, r2, lsl #2]
    ldr r7, [r6, r4]!
    teq r7, r0
    ldreq pc, [r6, #8]
    ldr r7, [r6, #4]
    teq r7, r0
    ldreq pc, [r6, #12]
    /* jump_dirty lookup */
6:
    movs r4, r5
    beq 8f
    ldr r3, [r5]
    ldr r5, [r4, #12]
    teq r3, r0
    bne 6b
7:
    ldr r1, [r4, #8]
    /* hash_table insert */
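    /* the new entry takes way A; the old way-A pair is demoted to way B */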
    ldr r2, [r6]
    ldr r3, [r6, #8]
    str r0, [r6]
    str r1, [r6, #8]
    str r2, [r6, #4]
    str r3, [r6, #12]
    mov pc, r1
8:
#else
    /* XXX: should be able to do better than this... */
    bl get_addr_ht
    mov pc, r0
#endif
.endm


FUNCTION(dyna_linker):
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    dyna_linker_main

    mov r4, r0
    mov r5, r1
    bl new_recompile_block
    tst r0, r0
    mov r0, r4
    mov r1, r5
    beq dyna_linker
    /* pagefault */
    mov r1, r0
    mov r2, #8
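    /* r1 = fault address, r2 = Cause value 8 (ExcCode 2, TLB load);
     * falls through into exec_pagefault */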
    .size dyna_linker, .-dyna_linker

FUNCTION(exec_pagefault):
    /* r0 = instruction pointer */
    /* r1 = fault address */
    /* r2 = cause */
    ldr r3, [fp, #LO_reg_cop0+48] /* Status */
    mvn r6, #0xF000000F
    ldr r4, [fp, #LO_reg_cop0+16] /* Context */
    bic r6, r6, #0x0F800000
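    /* r6 = 0x007ffff0, used as the Context BadVPN2 field mask (and,
     * shifted left 9, as the VPN mask written to EntryHi below) */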
    str r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr r3, r3, #2
    str r1, [fp, #LO_reg_cop0+32] /* BadVAddr */
    bic r4, r4, r6
    str r3, [fp, #LO_reg_cop0+48] /* Status */
    and r5, r6, r1, lsr #9
    str r2, [fp, #LO_reg_cop0+52] /* Cause */
    and r1, r1, r6, lsl #9
    str r1, [fp, #LO_reg_cop0+40] /* EntryHi */
    orr r4, r4, r5
    str r4, [fp, #LO_reg_cop0+16] /* Context */
    mov r0, #0x80000000
    bl get_addr_ht
    mov pc, r0
    .size exec_pagefault, .-exec_pagefault

/* Special dynamic linker for the case where a page fault
   may occur in a branch delay slot */
FUNCTION(dyna_linker_ds):
    /* r0 = virtual target address */
    /* r1 = instruction to patch */
    dyna_linker_main

    mov r4, r0
    bic r0, r0, #7
    mov r5, r1
    orr r0, r0, #1
    bl new_recompile_block
    tst r0, r0
    mov r0, r4
    mov r1, r5
    beq dyna_linker_ds
    /* pagefault */
    bic r1, r0, #7
    mov r2, #0x80000008 /* High bit set indicates pagefault in delay slot */
    sub r0, r1, #4
    b exec_pagefault
    .size dyna_linker_ds, .-dyna_linker_ds

    .align 2

FUNCTION(jump_vaddr_r0):
    eor r2, r0, r0, lsl #16
    b jump_vaddr
    .size jump_vaddr_r0, .-jump_vaddr_r0
FUNCTION(jump_vaddr_r1):
    eor r2, r1, r1, lsl #16
    mov r0, r1
    b jump_vaddr
    .size jump_vaddr_r1, .-jump_vaddr_r1
FUNCTION(jump_vaddr_r2):
    mov r0, r2
    eor r2, r2, r2, lsl #16
    b jump_vaddr
    .size jump_vaddr_r2, .-jump_vaddr_r2
FUNCTION(jump_vaddr_r3):
    eor r2, r3, r3, lsl #16
    mov r0, r3
    b jump_vaddr
    .size jump_vaddr_r3, .-jump_vaddr_r3
FUNCTION(jump_vaddr_r4):
    eor r2, r4, r4, lsl #16
    mov r0, r4
    b jump_vaddr
    .size jump_vaddr_r4, .-jump_vaddr_r4
FUNCTION(jump_vaddr_r5):
    eor r2, r5, r5, lsl #16
    mov r0, r5
    b jump_vaddr
    .size jump_vaddr_r5, .-jump_vaddr_r5
FUNCTION(jump_vaddr_r6):
    eor r2, r6, r6, lsl #16
    mov r0, r6
    b jump_vaddr
    .size jump_vaddr_r6, .-jump_vaddr_r6
FUNCTION(jump_vaddr_r8):
    eor r2, r8, r8, lsl #16
    mov r0, r8
    b jump_vaddr
    .size jump_vaddr_r8, .-jump_vaddr_r8
FUNCTION(jump_vaddr_r9):
    eor r2, r9, r9, lsl #16
    mov r0, r9
    b jump_vaddr
    .size jump_vaddr_r9, .-jump_vaddr_r9
FUNCTION(jump_vaddr_r10):
    eor r2, r10, r10, lsl #16
    mov r0, r10
    b jump_vaddr
    .size jump_vaddr_r10, .-jump_vaddr_r10
FUNCTION(jump_vaddr_r12):
    eor r2, r12, r12, lsl #16
    mov r0, r12
    b jump_vaddr
    .size jump_vaddr_r12, .-jump_vaddr_r12
FUNCTION(jump_vaddr_r7):
    eor r2, r7, r7, lsl #16
    add r0, r7, #0
    .size jump_vaddr_r7, .-jump_vaddr_r7
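/* common tail (jump_vaddr_r7 falls through to here):
 * r0 = target vaddr, r2 = r0 ^ (r0 << 16), precomputed by the stubs above */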
FUNCTION(jump_vaddr):
    load_varadr_ext r1, hash_table
    mvn r3, #15
    and r2, r3, r2, lsr #12
    ldr r2, [r1, r2]!
    teq r2, r0
    ldreq pc, [r1, #8]
    ldr r2, [r1, #4]
    teq r2, r0
    ldreq pc, [r1, #12]
    str r10, [fp, #LO_cycle_count]
    bl get_addr
    ldr r10, [fp, #LO_cycle_count]
    mov pc, r0
    .size jump_vaddr, .-jump_vaddr

    .align 2

FUNCTION(verify_code_ds):
    str r8, [fp, #LO_branch_target] @ preserve HOST_BTREG?
FUNCTION(verify_code):
    /* r1 = source */
    /* r2 = target */
    /* r3 = length */
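    /* compare the original PSX code at r1 against the copy saved at r2,
     * two words per iteration; return if they match, else redo get_addr
     * (the source was modified, so the block may need recompiling) */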
    tst r3, #4
    mov r4, #0
    add r3, r1, r3
    mov r5, #0
    ldrne r4, [r1], #4
    mov r12, #0
    ldrne r5, [r2], #4
    teq r1, r3
    beq .D3
.D2:
    ldr r7, [r1], #4
    eor r9, r4, r5
    ldr r8, [r2], #4
    orrs r9, r9, r12
    bne .D4
    ldr r4, [r1], #4
    eor r12, r7, r8
    ldr r5, [r2], #4
    cmp r1, r3
    bcc .D2
    teq r7, r8
.D3:
    teqeq r4, r5
.D4:
    ldr r8, [fp, #LO_branch_target]
    moveq pc, lr
.D5:
    bl get_addr
    mov pc, r0
    .size verify_code, .-verify_code
    .size verify_code_ds, .-verify_code_ds

    .align 2
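/* Called when the cycle downcount in r10 runs out: write cycles back, run
 * scheduled events via gen_interupt, then return to translated code, exit
 * the dynarec on stop, or rebase at pcaddr on a pending exception. */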
FUNCTION(cc_interrupt):
    ldr r0, [fp, #LO_last_count]
    mov r1, #0
    mov r2, #0x1fc
    add r10, r0, r10
    str r1, [fp, #LO_pending_exception]
    and r2, r2, r10, lsr #17
    add r3, fp, #LO_restore_candidate
    str r10, [fp, #LO_cycle] /* PCSX cycles */
@@  str r10, [fp, #LO_reg_cop0+36] /* Count */
    ldr r4, [r2, r3]
    mov r10, lr
    tst r4, r4
    bne .E4
.E1:
    bl gen_interupt
    mov lr, r10
    ldr r10, [fp, #LO_cycle]
    ldr r0, [fp, #LO_next_interupt]
    ldr r1, [fp, #LO_pending_exception]
    ldr r2, [fp, #LO_stop]
    str r0, [fp, #LO_last_count]
    sub r10, r10, r0
    tst r2, r2
    ldmfdne sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
    tst r1, r1
    moveq pc, lr
.E2:
    ldr r0, [fp, #LO_pcaddr]
    bl get_addr_ht
    mov pc, r0
.E4:
    /* Move 'dirty' blocks to the 'clean' list */
    lsl r5, r2, #3
    str r1, [r2, r3]
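    /* r4 = 32-bit bitmap of restore candidates (its word was just cleared
     * above), r5 = index of the first block it covers; shift one bit out
     * per iteration and call clean_blocks for every set bit */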
.E5:
    lsrs r4, r4, #1
    mov r0, r5
    add r5, r5, #1
    blcs clean_blocks
    tst r5, #31
    bne .E5
    b .E1
    .size cc_interrupt, .-cc_interrupt

    .align 2
FUNCTION(fp_exception):
    mov r2, #0x10000000
.E7:
    ldr r1, [fp, #LO_reg_cop0+48] /* Status */
    mov r3, #0x80000000
    str r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr r1, #2
    add r2, r2, #0x2c
    str r1, [fp, #LO_reg_cop0+48] /* Status */
    str r2, [fp, #LO_reg_cop0+52] /* Cause */
    add r0, r3, #0x80
    bl get_addr_ht
    mov pc, r0
    .size fp_exception, .-fp_exception
    .align 2
FUNCTION(fp_exception_ds):
    mov r2, #0x90000000 /* Set high bit if delay slot */
    b .E7
    .size fp_exception_ds, .-fp_exception_ds

    .align 2
FUNCTION(jump_syscall):
    ldr r1, [fp, #LO_reg_cop0+48] /* Status */
    mov r3, #0x80000000
    str r0, [fp, #LO_reg_cop0+56] /* EPC */
    orr r1, #2
    mov r2, #0x20
    str r1, [fp, #LO_reg_cop0+48] /* Status */
    str r2, [fp, #LO_reg_cop0+52] /* Cause */
    add r0, r3, #0x80
    bl get_addr_ht
    mov pc, r0
    .size jump_syscall, .-jump_syscall
    .align 2

/* note: psxException may make a recursive call back into the recompiler
 * from its HLE code, so be prepared for that */
FUNCTION(jump_to_new_pc):
    ldr r1, [fp, #LO_next_interupt]
    ldr r10, [fp, #LO_cycle]
    ldr r0, [fp, #LO_pcaddr]
    sub r10, r10, r1
    str r1, [fp, #LO_last_count]
    bl get_addr_ht
    mov pc, r0
    .size jump_to_new_pc, .-jump_to_new_pc

    .align 2
FUNCTION(new_dyna_leave):
    ldr r0, [fp, #LO_last_count]
    add r12, fp, #28
    add r10, r0, r10
    str r10, [fp, #LO_cycle]
    ldmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc}
    .size new_dyna_leave, .-new_dyna_leave

    .align 2
FUNCTION(invalidate_addr_r0):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    b invalidate_addr_call
    .size invalidate_addr_r0, .-invalidate_addr_r0
    .align 2
FUNCTION(invalidate_addr_r1):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r1
    b invalidate_addr_call
    .size invalidate_addr_r1, .-invalidate_addr_r1
    .align 2
FUNCTION(invalidate_addr_r2):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r2
    b invalidate_addr_call
    .size invalidate_addr_r2, .-invalidate_addr_r2
    .align 2
FUNCTION(invalidate_addr_r3):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r3
    b invalidate_addr_call
    .size invalidate_addr_r3, .-invalidate_addr_r3
    .align 2
FUNCTION(invalidate_addr_r4):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r4
    b invalidate_addr_call
    .size invalidate_addr_r4, .-invalidate_addr_r4
    .align 2
FUNCTION(invalidate_addr_r5):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r5
    b invalidate_addr_call
    .size invalidate_addr_r5, .-invalidate_addr_r5
    .align 2
FUNCTION(invalidate_addr_r6):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r6
    b invalidate_addr_call
    .size invalidate_addr_r6, .-invalidate_addr_r6
    .align 2
FUNCTION(invalidate_addr_r7):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r7
    b invalidate_addr_call
    .size invalidate_addr_r7, .-invalidate_addr_r7
    .align 2
FUNCTION(invalidate_addr_r8):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r8
    b invalidate_addr_call
    .size invalidate_addr_r8, .-invalidate_addr_r8
    .align 2
FUNCTION(invalidate_addr_r9):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r9
    b invalidate_addr_call
    .size invalidate_addr_r9, .-invalidate_addr_r9
    .align 2
FUNCTION(invalidate_addr_r10):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r10
    b invalidate_addr_call
    .size invalidate_addr_r10, .-invalidate_addr_r10
    .align 2
FUNCTION(invalidate_addr_r12):
    stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr}
    mov r0, r12
    .size invalidate_addr_r12, .-invalidate_addr_r12
    .align 2
invalidate_addr_call:
    ldr r12, [fp, #LO_inv_code_start]
    ldr lr, [fp, #LO_inv_code_end]
    cmp r0, r12
    cmpcs lr, r0
    blcc invalidate_addr
    ldmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, pc}
    .size invalidate_addr_call, .-invalidate_addr_call

    .align 2
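/* C entry point: r0 = &dynarec_local.  fp keeps pointing at dynarec_local
 * and r10 holds the cycle count relative to last_count for as long as we
 * are inside translated code. */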
FUNCTION(new_dyna_start):
    /* ip is saved only to keep the stack 8-byte aligned, as the EABI requires */
    stmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr}
    mov fp, r0 /* dynarec_local */
    ldr r0, [fp, #LO_pcaddr]
    bl get_addr_ht
    ldr r1, [fp, #LO_next_interupt]
    ldr r10, [fp, #LO_cycle]
    str r1, [fp, #LO_last_count]
    sub r10, r10, r1
    mov pc, r0
    .size new_dyna_start, .-new_dyna_start

/* --------------------------------------- */

    .align 2

.macro pcsx_read_mem readop tab_shift
    /* r0 = address, r1 = handler_tab, r2 = cycles */
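    /* handler_tab entries hold (pointer >> 1): lsls #1 restores the pointer
     * and shifts the handler flag (bit 31) into carry -- cc means a direct
     * memory read, cs means call the handler after saving the cycle count */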
    lsl r3, r0, #20
    lsr r3, #(20+\tab_shift)
    ldr r12, [fp, #LO_last_count]
    ldr r1, [r1, r3, lsl #2]
    add r2, r2, r12
    lsls r1, #1
.if \tab_shift == 1
    lsl r3, #1
    \readop r0, [r1, r3]
.else
    \readop r0, [r1, r3, lsl #\tab_shift]
.endif
    movcc pc, lr
    str r2, [fp, #LO_cycle]
    bx r1
.endm

FUNCTION(jump_handler_read8):
    add r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
    pcsx_read_mem ldrbcc, 0

FUNCTION(jump_handler_read16):
    add r1, #0x1000/4*4 @ shift to r16 part
    pcsx_read_mem ldrhcc, 1

FUNCTION(jump_handler_read32):
    pcsx_read_mem ldrcc, 2


.macro memhandler_post
    ldr r0, [fp, #LO_next_interupt]
    ldr r2, [fp, #LO_cycle] @ memhandlers can modify cc, like dma
    str r0, [fp, #LO_last_count]
    sub r0, r2, r0
.endm

.macro pcsx_write_mem wrtop tab_shift
    /* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */
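    /* same (pointer >> 1, handler flag in bit 31) table encoding as the
     * pcsx_read_mem macro above */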
    lsl r12, r0, #20
    lsr r12, #(20+\tab_shift)
    ldr r3, [r3, r12, lsl #2]
    str r0, [fp, #LO_address] @ some handlers still need it..
    lsls r3, #1
    mov r0, r2 @ cycle return in case of direct store
.if \tab_shift == 1
    lsl r12, #1
    \wrtop r1, [r3, r12]
.else
    \wrtop r1, [r3, r12, lsl #\tab_shift]
.endif
    movcc pc, lr
    ldr r12, [fp, #LO_last_count]
    mov r0, r1
    add r2, r2, r12
    str r2, [fp, #LO_cycle]

    str lr, [fp, #LO_saved_lr]
    blx r3
    ldr lr, [fp, #LO_saved_lr]

    memhandler_post
    bx lr
.endm

FUNCTION(jump_handler_write8):
    add r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part
    pcsx_write_mem strbcc, 0

FUNCTION(jump_handler_write16):
    add r3, #0x1000/4*4 @ shift to r16 part
    pcsx_write_mem strhcc, 1

FUNCTION(jump_handler_write32):
    pcsx_write_mem strcc, 2

FUNCTION(jump_handler_write_h):
    /* r0 = address, r1 = data, r2 = cycles, r3 = handler */
    ldr r12, [fp, #LO_last_count]
    str r0, [fp, #LO_address] @ some handlers still need it..
    add r2, r2, r12
    mov r0, r1
    str r2, [fp, #LO_cycle]

    str lr, [fp, #LO_saved_lr]
    blx r3
    ldr lr, [fp, #LO_saved_lr]

    memhandler_post
    bx lr

FUNCTION(jump_handle_swl):
    /* r0 = address, r1 = data, r2 = cycles */
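    /* emulates little-endian MIPS SWL: store the high-order 1, 2, 3 or 4
     * bytes of r1 down to the word boundary, selected by address bits [1:0] */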
    ldr r3, [fp, #LO_mem_wtab]
    mov r12, r0, lsr #12
    ldr r3, [r3, r12, lsl #2]
    lsls r3, #1
    bcs 4f
    add r3, r0, r3
    mov r0, r2
    tst r3, #2
    beq 101f
    tst r3, #1
    beq 2f
3:
    str r1, [r3, #-3]
    bx lr
2:
    lsr r2, r1, #8
    lsr r1, #24
    strh r2, [r3, #-2]
    strb r1, [r3]
    bx lr
101:
    tst r3, #1
    lsrne r1, #16      @ 1
    lsreq r12, r1, #24 @ 0
    strhne r1, [r3, #-1]
    strbeq r12, [r3]
    bx lr
4:
    mov r0, r2
    @ b abort
    bx lr @ TODO?


FUNCTION(jump_handle_swr):
    /* r0 = address, r1 = data, r2 = cycles */
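    /* emulates little-endian MIPS SWR: store the low-order 4, 3, 2 or 1
     * bytes of r1 from the address up to the next word boundary */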
    ldr r3, [fp, #LO_mem_wtab]
    mov r12, r0, lsr #12
    ldr r3, [r3, r12, lsl #2]
    lsls r3, #1
    bcs 4f
    add r3, r0, r3
    and r12, r3, #3
    mov r0, r2
    cmp r12, #2
    strbgt r1, [r3] @ 3
    strheq r1, [r3] @ 2
    cmp r12, #1
    strlt r1, [r3] @ 0
    bxne lr
    lsr r2, r1, #8 @ 1
    strb r1, [r3]
    strh r2, [r3, #1]
    bx lr
4:
    mov r0, r2
    @ b abort
    bx lr @ TODO?


.macro rcntx_read_mode0 num
    /* r0 = address, r2 = cycles */
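    /* count = (cycles - cycleStart) & 0xffff; the lsl/lsr #16 pair below
     * performs the 16-bit truncation */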
    ldr r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart
    mov r0, r2, lsl #16
    sub r0, r0, r3, lsl #16
    lsr r0, #16
    bx lr
.endm

FUNCTION(rcnt0_read_count_m0):
    rcntx_read_mode0 0

FUNCTION(rcnt1_read_count_m0):
    rcntx_read_mode0 1

FUNCTION(rcnt2_read_count_m0):
    rcntx_read_mode0 2

FUNCTION(rcnt0_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr r3, [fp, #LO_rcnts+6*4+7*4*0] @ cycleStart
    mov_16 r1, 0x3334
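    @ 0x3334 = 0x10000/5 rounded up, so (delta * 0x3334) >> 16 ~= delta / 5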
    sub r2, r2, r3
    mul r0, r1, r2 @ /= 5
    lsr r0, #16
    bx lr

FUNCTION(rcnt1_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr r3, [fp, #LO_rcnts+6*4+7*4*1]
    mov_24 r1, 0x1e6cde
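    @ 0x1e6cde ~= 2^32 / hsync_cycles; umull's high word gives the quotient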
    sub r2, r2, r3
    umull r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd
    bx lr

FUNCTION(rcnt2_read_count_m1):
    /* r0 = address, r2 = cycles */
    ldr r3, [fp, #LO_rcnts+6*4+7*4*2]
    mov r0, r2, lsl #16-3
    sub r0, r0, r3, lsl #16-3
    lsr r0, #16 @ /= 8
    bx lr

FUNCTION(call_gteStall):
    /* r0 = op_cycles, r1 = cycles */
    ldr r2, [fp, #LO_last_count]
    str lr, [fp, #LO_saved_lr]
    add r1, r1, r2
    str r1, [fp, #LO_cycle]
    add r1, fp, #LO_psxRegs
    bl gteCheckStallRaw
    ldr lr, [fp, #LO_saved_lr]
    add r10, r10, r0
    bx lr

@ vim:filetype=armasm