@ --- SSP1601 (SVP) dynarec: exported symbols, translation cache, and ---
@ --- context-structure offsets. Context base pointer is kept in r7.  ---
9 .global flush_inval_caches
12 .global ssp_drc_next_patch
17 @ translation cache buffer
20 .size tcache, TCACHE_SIZE
30 mov r2, #0x0 @ must be 0
@ SSP1601 general-register index order inside the context (used with
@ SSP_OFFS_GR below, each slot 4 bytes):
35 @ SSP_GR0, SSP_X, SSP_Y, SSP_A,
36 @ SSP_ST, SSP_STACK, SSP_PC, SSP_P,
37 @ SSP_PM0, SSP_PM1, SSP_PM2, SSP_XST,
38 @ SSP_PM4, SSP_gr13, SSP_PMC, SSP_AL
43 @ r6: STACK and emu flags: sss0 * .uu. .lll NZCV (NZCV is PSR bits from ARM)
@ Byte offsets into the ssp context structure (relative to r7):
51 #define SSP_OFFS_GR 0x400
56 #define SSP_OFFS_PM_WRITE 0x46c // pmac_write[]
57 #define SSP_OFFS_EMUSTAT 0x484 // emu_status
58 #define SSP_OFFS_IRAM_ROM 0x48c // ptr_iram_rom
59 #define SSP_OFFS_IRAM_DIRTY 0x494
60 #define SSP_OFFS_IRAM_CTX 0x498 // iram_context
61 #define SSP_OFFS_BLTAB 0x49c // block_table
62 #define SSP_OFFS_BLTAB_IRAM 0x4a0
63 #define SSP_OFFS_TMP0 0x4a4 // for entry PC
64 #define SSP_OFFS_TMP1 0x4a8
65 #define SSP_OFFS_TMP2 0x4ac
@ emu_status bit: set while the SSP is busy-polling PM0 (see handler below)
66 #define SSP_WAIT_PM0 0x2000
@ ssp_drc_do_next: dispatch to the translated ARM code for the current
@ SSP PC. ROM code is looked up in block_table; IRAM code in
@ block_table_iram[iram_context]. On a lookup miss ssp_translate_block
@ is called and the resulting handler pointer is cached back.
@ In:  r0 = SSP PC, r7 = context base, lr = call-site address + 4
@ The patch_jump argument selects whether the call site gets patched
@ later to branch directly to the translated block (lr is saved in TMP2
@ for that purpose; see the patch code further down).
@ NOTE(review): this chunk is a sparse sample — several instructions of
@ the macro body are not visible here.
69 .macro ssp_drc_do_next patch_jump=0
71 str lr, [r7, #SSP_OFFS_TMP2] @ jump instr. (actually call) address + 4
75 str r0, [r7, #SSP_OFFS_TMP0]
@ ROM path: index block_table[] by PC (4 bytes per entry)
79 ldr r2, [r7, #SSP_OFFS_BLTAB]
80 ldr r2, [r2, r0, lsl #2]
@ miss: translate this block, then cache the handler in block_table[PC]
87 bl ssp_translate_block
89 ldr r0, [r7, #SSP_OFFS_TMP0] @ entry PC
90 ldr r1, [r7, #SSP_OFFS_BLTAB]
91 str r2, [r1, r0, lsl #2]
@ IRAM path: re-derive iram_context only when IRAM was modified (dirty);
@ the flag test itself is on a line not visible in this sample
99 ldr r1, [r7, #SSP_OFFS_IRAM_DIRTY]
101 ldreq r1, [r7, #SSP_OFFS_IRAM_CTX] @ not dirty: reuse cached context
102 beq 1f @ ssp_de_iram_ctx
104 bl ssp_get_iram_context
@ NOTE(review): r1 is written to both IRAM_DIRTY and IRAM_CTX here —
@ presumably clearing dirty / caching the new context; confirm against
@ the full source
106 str r1, [r7, #SSP_OFFS_IRAM_DIRTY]
108 str r1, [r7, #SSP_OFFS_IRAM_CTX]
109 ldr r0, [r7, #SSP_OFFS_TMP0] @ entry PC
111 1: @ ssp_de_iram_ctx:
112 ldr r2, [r7, #SSP_OFFS_BLTAB_IRAM]
113 add r2, r2, r1, lsl #12 @ block_tab_iram + iram_context * 0x800/2*4
114 add r1, r2, r0, lsl #2
@ keep the table-slot address across the translate call, then cache result
122 str r1, [r7, #SSP_OFFS_TMP1]
123 bl ssp_translate_block
125 ldr r0, [r7, #SSP_OFFS_TMP0] @ entry PC
126 ldr r1, [r7, #SSP_OFFS_TMP1] @ &block_table_iram[iram_context][rPC]
133 .endm @ ssp_drc_do_next
@ --- dynarec entry fragment (enclosing label not visible in this view):
@ save ARM callee-saved registers, then load the SSP1601 context into
@ the ARM register allocation described in the header comment above ---
137 stmfd sp!, {r4-r11, lr}
144 ldmia r2, {r3,r4,r5,r6,r8}
@ pack X and Y 16-bit halves into a single reg (per the XXYY comment);
@ the preceding shifts feeding r3/r4 are on lines not visible here
147 orr r4, r3, r4, lsr #16 @ XXYY
@ assemble the r6 emu-status word: sss0 * .uu. .lll NZ.. (see header)
149 and r8, r8, #0x0f0000
150 mov r8, r8, lsl #13 @ sss0 *
151 and r9, r6, #0x670000
155 orrne r8, r8, #0x4 @ sss0 * NZ..
156 orr r6, r8, r9, lsr #12 @ sss0 * .uu. .lll NZ..
@ load the cached pointer-register groups, P, and the SSP entry PC
158 ldr r8, [r7, #0x440] @ r0-r2
159 ldr r9, [r7, #0x444] @ r4-r6
160 ldr r10,[r7, #(0x400+SSP_P*4)] @ P
162 ldr r0, [r7, #(SSP_OFFS_GR+SSP_PC*4)]
@ --- call-site patching fragment: rewrite the branch that entered the
@ dispatcher so it jumps directly to the translated block next time ---
@ r1 = call-site address + 4 (saved into TMP2 by ssp_drc_do_next)
174 ldr r1, [r7, #SSP_OFFS_TMP2] @ jump instr. (actually call) address + 4
@ 0xe1000000 | 0x00a00000 = 0xe1a00000 = "mov r0, r0", the ARM nop
176 moveq r3, #0xe1000000
177 orreq r3, r3, #0x00a00000 @ nop
184 streq r3, [r1, #-4] @ move the other cond up
185 moveq r3, #0xe1000000
186 orreq r3, r3, #0x00a00000
187 streq r3, [r1] @ fill its place with nop
@ rebuild the branch instruction: clear the link (L) bit so BL becomes B,
@ splice in the new offset from r12, then rotate into final encoding.
@ NOTE(review): r3 appears to hold the old instruction pre-rotated (the
@ load is not visible here) — confirm against the full source
193 bic r3, r3, #1 @ L bit
194 orr r3, r3, r12,lsl #6
195 mov r3, r3, ror #8 @ patched branch instruction
@ after writing code, caches must be flushed; TMP0/TMP1 preserve
@ r0 (entry PC) and r2 (handler) across the call
199 str r2, [r7, #SSP_OFFS_TMP1]
202 bl flush_inval_caches
203 ldr r2, [r7, #SSP_OFFS_TMP1]
204 ldr r0, [r7, #SSP_OFFS_TMP0]
@ --- dynarec exit fragment: write the ARM-held SSP state back into the
@ context structure, then restore callee-saved registers and return ---
210 str r0, [r7, #(SSP_OFFS_GR+SSP_PC*4)]
213 str r10,[r7, #(0x400+SSP_P*4)] @ P
214 str r8, [r7, #0x440] @ r0-r2
215 str r9, [r7, #0x444] @ r4-r6
@ unpack the r6 emu-status word: STACK depth field and saved N/Z flags
218 and r9, r9, #(7<<16) @ STACK
220 msr cpsr_flg, r3 @ to ARM PSR
@ ARM PSR N/Z (set just above) are folded back into bits 31/30
223 orrmi r6, r6, #0x80000000 @ N
224 orreq r6, r6, #0x20000000 @ Z
@ split the packed XXYY register back into X and Y halves
226 mov r3, r4, lsl #16 @ Y
228 mov r2, r2, lsl #16 @ X
231 stmia r8, {r2,r3,r5,r6,r9}
234 ldmfd sp!, {r4-r11, lr}
@ --- hardware / status handler fragments (labels not visible in this
@ sampled view; each group below is part of a separate handler) ---
@ PM0 poll detection: when the read value is unchanged (eq set by a
@ compare not visible here), flag SSP_WAIT_PM0 in emu_status so the main
@ loop can skip the busy-wait
243 ldr r0, [r7, #(SSP_OFFS_GR+SSP_PM0*4)]
244 ldr r1, [r7, #SSP_OFFS_EMUSTAT]
246 orreq r1, r1, #SSP_WAIT_PM0
248 streq r1, [r7, #SSP_OFFS_EMUSTAT]
@ IRAM transfer: compute source/dest inside iram_rom (halfword-indexed,
@ hence lsl #1), fetch the halfword length, then copy (loop not visible)
261 ldr r3, [r7, #SSP_OFFS_IRAM_ROM]
262 add r2, r3, r0, lsl #1 @ (r7|00)
267 add r3, r3, r0, lsl #1 @ IRAM dest
268 ldrh r12,[r2], #2 @ length
269 bic r3, r3, #3 @ always seen aligned
270 @ orr r5, r5, #0x08000000
271 @ orr r5, r5, #0x00880000
272 @ sub r5, r5, r12, lsl #16
@ mark IRAM dirty (forces iram_context re-check in ssp_drc_do_next) and
@ charge the copy to the cycle counter in r11: r11 -= length*3
276 str r0, [r7, #SSP_OFFS_IRAM_DIRTY]
277 sub r11,r11,r12,lsl #1
278 sub r11,r11,r12 @ -= length*3
@ misc write handler: combine halfwords, store through iram_rom pointer
284 orr r0, r0, r1, lsl #16
292 ldr r0, [r7, #SSP_OFFS_IRAM_ROM]
296 strh r2, [r1] @ (r7|00)
@ preset PMC and pmac_write[4] with the 0x081c8000 mode word
300 orr r0, r0, #0x08000000
301 orr r0, r0, #0x001c8000
302 str r0, [r7, #(SSP_OFFS_GR+SSP_PMC*4)]
303 str r0, [r7, #(SSP_OFFS_PM_WRITE+4*4)]
@ SSP stack push: depth counter lives in the top bits of r6 (sss field,
@ see header); stack storage is at context+0x048
305 sub r6, r6, #0x20000000
307 add r1, r1, #0x048 @ stack
308 add r1, r1, r6, lsr #28
310 subs r11,r11,#16 @ timeslice is likely to end