| 1 | // Basic macros to emit ARM instructions and some utils |
| 2 | |
| 3 | // (c) Copyright 2008-2009, Grazvydas "notaz" Ignotas |
| 4 | // Free for non-commercial use. |
| 5 | |
| 6 | #define CONTEXT_REG 11 |
| 7 | |
// XXX: the tcache_ptr type differs between the SVP and SH2 compilers
| 9 | #define EMIT_PTR(ptr, x) \ |
| 10 | do { \ |
| 11 | *(u32 *)ptr = x; \ |
| 12 | ptr = (void *)((u8 *)ptr + sizeof(u32)); \ |
| 13 | COUNT_OP; \ |
| 14 | } while (0) |
| 15 | |
| 16 | #define EMIT(x) EMIT_PTR(tcache_ptr, x) |
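/* EMIT() appends one 32-bit opcode at tcache_ptr and advances it by a word;
 * e.g. EMIT(0xe1a00000) drops in a "mov r0, r0" (nop) at the current position */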
| 17 | |
| 18 | #define A_R4M (1 << 4) |
| 19 | #define A_R5M (1 << 5) |
| 20 | #define A_R6M (1 << 6) |
| 21 | #define A_R7M (1 << 7) |
| 22 | #define A_R8M (1 << 8) |
| 23 | #define A_R9M (1 << 9) |
| 24 | #define A_R10M (1 << 10) |
| 25 | #define A_R11M (1 << 11) |
| 26 | #define A_R14M (1 << 14) |
| 27 | #define A_R15M (1 << 15) |
| 28 | |
| 29 | #define A_COND_AL 0xe |
| 30 | #define A_COND_EQ 0x0 |
| 31 | #define A_COND_NE 0x1 |
| 32 | #define A_COND_HS 0x2 |
| 33 | #define A_COND_LO 0x3 |
| 34 | #define A_COND_MI 0x4 |
| 35 | #define A_COND_PL 0x5 |
| 36 | #define A_COND_VS 0x6 |
| 37 | #define A_COND_VC 0x7 |
| 38 | #define A_COND_HI 0x8 |
| 39 | #define A_COND_LS 0x9 |
| 40 | #define A_COND_GE 0xa |
| 41 | #define A_COND_LT 0xb |
| 42 | #define A_COND_GT 0xc |
| 43 | #define A_COND_LE 0xd |
| 44 | #define A_COND_CS A_COND_HS |
| 45 | #define A_COND_CC A_COND_LO |
| 46 | |
| 47 | /* unified conditions */ |
| 48 | #define DCOND_EQ A_COND_EQ |
| 49 | #define DCOND_NE A_COND_NE |
| 50 | #define DCOND_MI A_COND_MI |
| 51 | #define DCOND_PL A_COND_PL |
| 52 | #define DCOND_HI A_COND_HI |
| 53 | #define DCOND_HS A_COND_HS |
| 54 | #define DCOND_LO A_COND_LO |
| 55 | #define DCOND_GE A_COND_GE |
| 56 | #define DCOND_GT A_COND_GT |
| 57 | #define DCOND_LT A_COND_LT |
| 58 | #define DCOND_LS A_COND_LS |
| 59 | #define DCOND_LE A_COND_LE |
| 60 | #define DCOND_VS A_COND_VS |
| 61 | #define DCOND_VC A_COND_VC |
| 62 | |
| 63 | /* addressing mode 1 */ |
| 64 | #define A_AM1_LSL 0 |
| 65 | #define A_AM1_LSR 1 |
| 66 | #define A_AM1_ASR 2 |
| 67 | #define A_AM1_ROR 3 |
| 68 | |
| 69 | #define A_AM1_IMM(ror2,imm8) (((ror2)<<8) | (imm8) | 0x02000000) |
| 70 | #define A_AM1_REG_XIMM(shift_imm,shift_op,rm) (((shift_imm)<<7) | ((shift_op)<<5) | (rm)) |
| 71 | #define A_AM1_REG_XREG(rs,shift_op,rm) (((rs)<<8) | ((shift_op)<<5) | 0x10 | (rm)) |
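/* the imm8 of A_AM1_IMM is rotated right by 2*ror2 to form the operand:
 * e.g. A_AM1_IMM(8/2,0x1f) encodes the constant 0x1f000000, and
 * A_AM1_IMM(15,1) encodes 1 ror 30 = 4 */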
| 72 | |
| 73 | /* data processing op */ |
| 74 | #define A_OP_AND 0x0 |
| 75 | #define A_OP_EOR 0x1 |
| 76 | #define A_OP_SUB 0x2 |
| 77 | #define A_OP_RSB 0x3 |
| 78 | #define A_OP_ADD 0x4 |
| 79 | #define A_OP_ADC 0x5 |
| 80 | #define A_OP_SBC 0x6 |
| 81 | #define A_OP_RSC 0x7 |
| 82 | #define A_OP_TST 0x8 |
| 83 | #define A_OP_TEQ 0x9 |
| 84 | #define A_OP_CMP 0xa |
#define A_OP_CMN 0xb
| 86 | #define A_OP_ORR 0xc |
| 87 | #define A_OP_MOV 0xd |
| 88 | #define A_OP_BIC 0xe |
| 89 | #define A_OP_MVN 0xf |
| 90 | |
| 91 | #define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \ |
| 92 | EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op)) |
| 93 | |
| 94 | #define EOP_C_DOP_IMM( cond,op,s,rn,rd,ror2,imm8) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_IMM(ror2,imm8)) |
| 95 | #define EOP_C_DOP_REG_XIMM(cond,op,s,rn,rd,shift_imm,shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XIMM(shift_imm,shift_op,rm)) |
| 96 | #define EOP_C_DOP_REG_XREG(cond,op,s,rn,rd,rs, shift_op,rm) EOP_C_DOP_X(cond,op,s,rn,rd,A_AM1_REG_XREG(rs, shift_op,rm)) |
| 97 | |
| 98 | #define EOP_MOV_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8) |
| 99 | #define EOP_MVN_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MVN,0, 0,rd,ror2,imm8) |
| 100 | #define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8) |
| 101 | #define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8) |
| 102 | #define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8) |
| 103 | #define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8) |
| 104 | #define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8) |
| 105 | #define EOP_SUB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_SUB,0,rn,rd,ror2,imm8) |
| 106 | #define EOP_TST_IMM( rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_TST,1,rn, 0,ror2,imm8) |
| 107 | #define EOP_CMP_IMM( rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_CMP,1,rn, 0,ror2,imm8) |
| 108 | #define EOP_RSB_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_RSB,0,rn,rd,ror2,imm8) |
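/* worked example: EOP_ADD_IMM(0,1,0,4) emits 0xe2810004, i.e. "add r0, r1, #4" */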
| 109 | |
| 110 | #define EOP_MOV_IMM_C(cond,rd, ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_MOV,0, 0,rd,ror2,imm8) |
| 111 | #define EOP_ORR_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_ORR,0,rn,rd,ror2,imm8) |
| 112 | #define EOP_RSB_IMM_C(cond,rd,rn,ror2,imm8) EOP_C_DOP_IMM(cond,A_OP_RSB,0,rn,rd,ror2,imm8) |
| 113 | |
| 114 | #define EOP_MOV_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm) |
| 115 | #define EOP_MVN_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MVN,s, 0,rd,shift_imm,shift_op,rm) |
| 116 | #define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm) |
| 117 | #define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm) |
| 118 | #define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm) |
| 119 | #define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm) |
| 120 | #define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm) |
| 121 | #define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm) |
| 122 | #define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm) |
| 123 | #define EOP_CMP_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm) |
| 124 | #define EOP_TST_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm) |
| 125 | #define EOP_TEQ_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm) |
| 126 | |
| 127 | #define EOP_MOV_REG2(s,rd, rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_MOV,s, 0,rd,rs,shift_op,rm) |
| 128 | #define EOP_ADD_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_ADD,s,rn,rd,rs,shift_op,rm) |
| 129 | #define EOP_SUB_REG2(s,rd,rn,rm,shift_op,rs) EOP_C_DOP_REG_XREG(A_COND_AL,A_OP_SUB,s,rn,rd,rs,shift_op,rm) |
| 130 | |
| 131 | #define EOP_MOV_REG_SIMPLE(rd,rm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,0) |
| 132 | #define EOP_MOV_REG_LSL(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSL,shift_imm) |
| 133 | #define EOP_MOV_REG_LSR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_LSR,shift_imm) |
| 134 | #define EOP_MOV_REG_ASR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ASR,shift_imm) |
| 135 | #define EOP_MOV_REG_ROR(rd, rm,shift_imm) EOP_MOV_REG(A_COND_AL,0,rd,rm,A_AM1_ROR,shift_imm) |
| 136 | |
| 137 | #define EOP_ORR_REG_SIMPLE(rd,rm) EOP_ORR_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0) |
| 138 | #define EOP_ORR_REG_LSL(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm) |
| 139 | #define EOP_ORR_REG_LSR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm) |
| 140 | #define EOP_ORR_REG_ASR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ASR,shift_imm) |
| 141 | #define EOP_ORR_REG_ROR(rd,rn,rm,shift_imm) EOP_ORR_REG(A_COND_AL,0,rd,rn,rm,A_AM1_ROR,shift_imm) |
| 142 | |
| 143 | #define EOP_ADD_REG_SIMPLE(rd,rm) EOP_ADD_REG(A_COND_AL,0,rd,rd,rm,A_AM1_LSL,0) |
| 144 | #define EOP_ADD_REG_LSL(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSL,shift_imm) |
| 145 | #define EOP_ADD_REG_LSR(rd,rn,rm,shift_imm) EOP_ADD_REG(A_COND_AL,0,rd,rn,rm,A_AM1_LSR,shift_imm) |
| 146 | |
#define EOP_TST_REG_SIMPLE(rn,rm) EOP_TST_REG(A_COND_AL, rn,rm,A_AM1_LSL,0)
| 148 | |
| 149 | #define EOP_MOV_REG2_LSL(rd, rm,rs) EOP_MOV_REG2(0,rd, rm,A_AM1_LSL,rs) |
| 150 | #define EOP_MOV_REG2_ROR(rd, rm,rs) EOP_MOV_REG2(0,rd, rm,A_AM1_ROR,rs) |
| 151 | #define EOP_ADD_REG2_LSL(rd,rn,rm,rs) EOP_ADD_REG2(0,rd,rn,rm,A_AM1_LSL,rs) |
| 152 | #define EOP_SUB_REG2_LSL(rd,rn,rm,rs) EOP_SUB_REG2(0,rd,rn,rm,A_AM1_LSL,rs) |
| 153 | |
| 154 | /* addressing mode 2 */ |
| 155 | #define EOP_C_AM2_IMM(cond,u,b,l,rn,rd,offset_12) \ |
| 156 | EMIT(((cond)<<28) | 0x05000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | (offset_12)) |
| 157 | |
| 158 | #define EOP_C_AM2_REG(cond,u,b,l,rn,rd,shift_imm,shift_op,rm) \ |
| 159 | EMIT(((cond)<<28) | 0x07000000 | ((u)<<23) | ((b)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \ |
| 160 | ((shift_imm)<<7) | ((shift_op)<<5) | (rm)) |
| 161 | |
| 162 | /* addressing mode 3 */ |
| 163 | #define EOP_C_AM3(cond,u,r,l,rn,rd,s,h,immed_reg) \ |
| 164 | EMIT(((cond)<<28) | 0x01000090 | ((u)<<23) | ((r)<<22) | ((l)<<20) | ((rn)<<16) | ((rd)<<12) | \ |
| 165 | ((s)<<6) | ((h)<<5) | (immed_reg)) |
| 166 | |
| 167 | #define EOP_C_AM3_IMM(cond,u,l,rn,rd,s,h,offset_8) EOP_C_AM3(cond,u,1,l,rn,rd,s,h,(((offset_8)&0xf0)<<4)|((offset_8)&0xf)) |
| 168 | |
| 169 | #define EOP_C_AM3_REG(cond,u,l,rn,rd,s,h,rm) EOP_C_AM3(cond,u,0,l,rn,rd,s,h,rm) |
| 170 | |
| 171 | /* ldr and str */ |
| 172 | #define EOP_LDR_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,1,0,1,rn,rd,offset_12) |
| 173 | #define EOP_LDRB_IMM2(cond,rd,rn,offset_12) EOP_C_AM2_IMM(cond,1,1,1,rn,rd,offset_12) |
| 174 | |
| 175 | #define EOP_LDR_IMM( rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,offset_12) |
| 176 | #define EOP_LDR_NEGIMM(rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,0,0,1,rn,rd,offset_12) |
| 177 | #define EOP_LDR_SIMPLE(rd,rn) EOP_C_AM2_IMM(A_COND_AL,1,0,1,rn,rd,0) |
| 178 | #define EOP_STR_IMM( rd,rn,offset_12) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,offset_12) |
| 179 | #define EOP_STR_SIMPLE(rd,rn) EOP_C_AM2_IMM(A_COND_AL,1,0,0,rn,rd,0) |
| 180 | |
| 181 | #define EOP_LDR_REG_LSL(cond,rd,rn,rm,shift_imm) EOP_C_AM2_REG(cond,1,0,1,rn,rd,shift_imm,A_AM1_LSL,rm) |
| 182 | |
| 183 | #define EOP_LDRH_IMM2(cond,rd,rn,offset_8) EOP_C_AM3_IMM(cond,1,1,rn,rd,0,1,offset_8) |
| 184 | |
| 185 | #define EOP_LDRH_IMM( rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,offset_8) |
| 186 | #define EOP_LDRH_SIMPLE(rd,rn) EOP_C_AM3_IMM(A_COND_AL,1,1,rn,rd,0,1,0) |
| 187 | #define EOP_LDRH_REG( rd,rn,rm) EOP_C_AM3_REG(A_COND_AL,1,1,rn,rd,0,1,rm) |
| 188 | #define EOP_STRH_IMM( rd,rn,offset_8) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,offset_8) |
| 189 | #define EOP_STRH_SIMPLE(rd,rn) EOP_C_AM3_IMM(A_COND_AL,1,0,rn,rd,0,1,0) |
| 190 | #define EOP_STRH_REG( rd,rn,rm) EOP_C_AM3_REG(A_COND_AL,1,0,rn,rd,0,1,rm) |
| 191 | |
| 192 | /* ldm and stm */ |
| 193 | #define EOP_XXM(cond,p,u,s,w,l,rn,list) \ |
| 194 | EMIT(((cond)<<28) | (1<<27) | ((p)<<24) | ((u)<<23) | ((s)<<22) | ((w)<<21) | ((l)<<20) | ((rn)<<16) | (list)) |
| 195 | |
| 196 | #define EOP_STMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,0,rb,list) |
| 197 | #define EOP_LDMIA(rb,list) EOP_XXM(A_COND_AL,0,1,0,0,1,rb,list) |
| 198 | |
| 199 | #define EOP_STMFD_SP(list) EOP_XXM(A_COND_AL,1,0,0,1,0,13,list) |
| 200 | #define EOP_LDMFD_SP(list) EOP_XXM(A_COND_AL,0,1,0,1,1,13,list) |
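/* e.g. EOP_STMFD_SP(A_R14M) emits "stmfd sp!, {lr}" (push lr) and
 * EOP_LDMFD_SP(A_R15M) emits "ldmfd sp!, {pc}" (pop the return address into pc) */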
| 201 | |
| 202 | /* branches */ |
| 203 | #define EOP_C_BX(cond,rm) \ |
| 204 | EMIT(((cond)<<28) | 0x012fff10 | (rm)) |
| 205 | |
| 206 | #define EOP_C_B_PTR(ptr,cond,l,signed_immed_24) \ |
| 207 | EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | ((l)<<24) | (signed_immed_24)) |
| 208 | |
| 209 | #define EOP_C_B(cond,l,signed_immed_24) \ |
| 210 | EOP_C_B_PTR(tcache_ptr,cond,l,signed_immed_24) |
| 211 | |
| 212 | #define EOP_B( signed_immed_24) EOP_C_B(A_COND_AL,0,signed_immed_24) |
| 213 | #define EOP_BL(signed_immed_24) EOP_C_B(A_COND_AL,1,signed_immed_24) |
| 214 | |
| 215 | /* misc */ |
| 216 | #define EOP_C_MUL(cond,s,rd,rs,rm) \ |
| 217 | EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm)) |
| 218 | |
| 219 | #define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \ |
| 220 | EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm)) |
| 221 | |
| 222 | #define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \ |
| 223 | EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm)) |
| 224 | |
| 225 | #define EOP_C_SMLAL(cond,s,rdhi,rdlo,rs,rm) \ |
| 226 | EMIT(((cond)<<28) | 0x00e00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm)) |
| 227 | |
| 228 | #define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm |
| 229 | |
| 230 | #define EOP_C_MRS(cond,rd) \ |
| 231 | EMIT(((cond)<<28) | 0x010f0000 | ((rd)<<12)) |
| 232 | |
| 233 | #define EOP_C_MSR_IMM(cond,ror2,imm) \ |
| 234 | EMIT(((cond)<<28) | 0x0328f000 | ((ror2)<<8) | (imm)) // cpsr_f |
| 235 | |
| 236 | #define EOP_C_MSR_REG(cond,rm) \ |
| 237 | EMIT(((cond)<<28) | 0x0128f000 | (rm)) // cpsr_f |
| 238 | |
| 239 | #define EOP_MRS(rd) EOP_C_MRS(A_COND_AL,rd) |
| 240 | #define EOP_MSR_IMM(ror2,imm) EOP_C_MSR_IMM(A_COND_AL,ror2,imm) |
| 241 | #define EOP_MSR_REG(rm) EOP_C_MSR_REG(A_COND_AL,rm) |
| 242 | |
| 243 | |
// XXX: AND, RSB and the *C ops will break if one insn is not enough
| 245 | static void emith_op_imm2(int cond, int s, int op, int rd, int rn, unsigned int imm) |
| 246 | { |
| 247 | int ror2; |
| 248 | u32 v; |
| 249 | |
| 250 | switch (op) { |
| 251 | case A_OP_MOV: |
| 252 | rn = 0; |
| 253 | if (~imm < 0x10000) { |
| 254 | imm = ~imm; |
| 255 | op = A_OP_MVN; |
| 256 | } |
| 257 | break; |
| 258 | |
| 259 | case A_OP_EOR: |
| 260 | case A_OP_SUB: |
| 261 | case A_OP_ADD: |
| 262 | case A_OP_ORR: |
| 263 | case A_OP_BIC: |
| 264 | if (s == 0 && imm == 0) |
| 265 | return; |
| 266 | break; |
| 267 | } |
| 268 | |
| 269 | for (v = imm, ror2 = 0; ; ror2 -= 8/2) { |
| 270 | /* shift down to get 'best' rot2 */ |
| 271 | for (; v && !(v & 3); v >>= 2) |
| 272 | ror2--; |
| 273 | |
| 274 | EOP_C_DOP_IMM(cond, op, s, rn, rd, ror2 & 0x0f, v & 0xff); |
| 275 | |
| 276 | v >>= 8; |
| 277 | if (v == 0) |
| 278 | break; |
| 279 | if (op == A_OP_MOV) |
| 280 | op = A_OP_ORR; |
| 281 | if (op == A_OP_MVN) |
| 282 | op = A_OP_BIC; |
| 283 | rn = rd; |
| 284 | } |
| 285 | } |
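/* example of the splitting above: 0x1f001 has no single rotated-imm8 encoding,
 * so emith_op_imm2(A_COND_AL, 0, A_OP_MOV, r, r, 0x1f001) emits
 *   mov  r, #0x01
 *   orr  r, r, #0x1f000
 * (a MVN start degrades to BIC for the remaining bytes in the same way) */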
| 286 | |
| 287 | #define emith_op_imm(cond, s, op, r, imm) \ |
| 288 | emith_op_imm2(cond, s, op, r, r, imm) |
| 289 | |
// test/compare ops: emits a single insn, so imm must fit one rotated imm8
| 291 | #define emith_top_imm(cond, op, r, imm) do { \ |
| 292 | u32 ror2, v; \ |
| 293 | for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \ |
| 294 | ror2--; \ |
| 295 | EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \ |
| 296 | } while (0) |
| 297 | |
| 298 | #define is_offset_24(val) \ |
| 299 | ((val) >= (int)0xff000000 && (val) <= 0x00ffffff) |
| 300 | |
| 301 | static int emith_xbranch(int cond, void *target, int is_call) |
| 302 | { |
| 303 | int val = (u32 *)target - (u32 *)tcache_ptr - 2; |
| 304 | int direct = is_offset_24(val); |
| 305 | u32 *start_ptr = (u32 *)tcache_ptr; |
| 306 | |
| 307 | if (direct) |
| 308 | { |
| 309 | EOP_C_B(cond,is_call,val & 0xffffff); // b, bl target |
| 310 | } |
| 311 | else |
| 312 | { |
| 313 | #ifdef __EPOC32__ |
| 314 | // elprintf(EL_SVP, "emitting indirect jmp %08x->%08x", tcache_ptr, target); |
| 315 | if (is_call) |
| 316 | EOP_ADD_IMM(14,15,0,8); // add lr,pc,#8 |
| 317 | EOP_C_AM2_IMM(cond,1,0,1,15,15,0); // ldrcc pc,[pc] |
| 318 | EOP_MOV_REG_SIMPLE(15,15); // mov pc, pc |
| 319 | EMIT((u32)target); |
| 320 | #else |
| 321 | // should never happen |
| 322 | elprintf(EL_STATUS|EL_SVP|EL_ANOMALY, "indirect jmp %08x->%08x", target, tcache_ptr); |
| 323 | exit(1); |
| 324 | #endif |
| 325 | } |
| 326 | |
| 327 | return (u32 *)tcache_ptr - start_ptr; |
| 328 | } |
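// the "- 2" in the offset math above (and in the jump macros below) accounts
// for the ARM pipeline: a branch's offset is relative to PC = insn address + 8,
// i.e. two words ahead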
| 329 | |
| 330 | #define JMP_POS(ptr) \ |
| 331 | ptr = tcache_ptr; \ |
| 332 | tcache_ptr += sizeof(u32) |
| 333 | |
| 334 | #define JMP_EMIT(cond, ptr) { \ |
| 335 | u32 val_ = (u32 *)tcache_ptr - (u32 *)(ptr) - 2; \ |
| 336 | EOP_C_B_PTR(ptr, cond, 0, val_ & 0xffffff); \ |
| 337 | } |
| 338 | |
| 339 | #define EMITH_JMP_START(cond) { \ |
| 340 | void *cond_ptr; \ |
| 341 | JMP_POS(cond_ptr) |
| 342 | |
| 343 | #define EMITH_JMP_END(cond) \ |
| 344 | JMP_EMIT(cond, cond_ptr); \ |
| 345 | } |
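/* usage sketch (typical caller pattern): the code emitted between START and
 * END runs only when `cond` does NOT hold; END patches the reserved slot with
 * a "b<cond>" over the block:
 *   EMITH_JMP_START(DCOND_EQ);
 *   ... code for the not-equal case ...
 *   EMITH_JMP_END(DCOND_EQ);
 */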
| 346 | |
| 347 | // fake "simple" or "short" jump - using cond insns instead |
| 348 | #define EMITH_NOTHING1(cond) \ |
| 349 | (void)(cond) |
| 350 | |
| 351 | #define EMITH_SJMP_START(cond) EMITH_NOTHING1(cond) |
| 352 | #define EMITH_SJMP_END(cond) EMITH_NOTHING1(cond) |
| 353 | #define EMITH_SJMP3_START(cond) EMITH_NOTHING1(cond) |
| 354 | #define EMITH_SJMP3_MID(cond) EMITH_NOTHING1(cond) |
| 355 | #define EMITH_SJMP3_END() |
| 356 | |
| 357 | #define emith_move_r_r(d, s) \ |
| 358 | EOP_MOV_REG_SIMPLE(d, s) |
| 359 | |
| 360 | #define emith_mvn_r_r(d, s) \ |
| 361 | EOP_MVN_REG(A_COND_AL,0,d,s,A_AM1_LSL,0) |
| 362 | |
| 363 | #define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \ |
| 364 | EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm) |
| 365 | |
| 366 | #define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \ |
| 367 | EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm) |
| 368 | |
| 369 | #define emith_eor_r_r_r_lsr(d, s1, s2, lsrimm) \ |
| 370 | EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSR,lsrimm) |
| 371 | |
| 372 | #define emith_or_r_r_lsl(d, s, lslimm) \ |
| 373 | emith_or_r_r_r_lsl(d, d, s, lslimm) |
| 374 | |
| 375 | #define emith_eor_r_r_lsr(d, s, lsrimm) \ |
| 376 | emith_eor_r_r_r_lsr(d, d, s, lsrimm) |
| 377 | |
| 378 | #define emith_or_r_r_r(d, s1, s2) \ |
| 379 | emith_or_r_r_r_lsl(d, s1, s2, 0) |
| 380 | |
| 381 | #define emith_eor_r_r_r(d, s1, s2) \ |
| 382 | emith_eor_r_r_r_lsl(d, s1, s2, 0) |
| 383 | |
| 384 | #define emith_add_r_r(d, s) \ |
| 385 | EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0) |
| 386 | |
| 387 | #define emith_sub_r_r(d, s) \ |
| 388 | EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0) |
| 389 | |
| 390 | #define emith_adc_r_r(d, s) \ |
| 391 | EOP_ADC_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0) |
| 392 | |
| 393 | #define emith_and_r_r(d, s) \ |
| 394 | EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0) |
| 395 | |
| 396 | #define emith_or_r_r(d, s) \ |
| 397 | emith_or_r_r_r(d, d, s) |
| 398 | |
| 399 | #define emith_eor_r_r(d, s) \ |
| 400 | emith_eor_r_r_r(d, d, s) |
| 401 | |
| 402 | #define emith_tst_r_r(d, s) \ |
| 403 | EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0) |
| 404 | |
| 405 | #define emith_teq_r_r(d, s) \ |
| 406 | EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0) |
| 407 | |
| 408 | #define emith_cmp_r_r(d, s) \ |
| 409 | EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0) |
| 410 | |
| 411 | #define emith_addf_r_r(d, s) \ |
| 412 | EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0) |
| 413 | |
| 414 | #define emith_subf_r_r(d, s) \ |
| 415 | EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0) |
| 416 | |
| 417 | #define emith_adcf_r_r(d, s) \ |
| 418 | EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0) |
| 419 | |
| 420 | #define emith_sbcf_r_r(d, s) \ |
| 421 | EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0) |
| 422 | |
| 423 | #define emith_eorf_r_r(d, s) \ |
| 424 | EOP_EOR_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0) |
| 425 | |
| 426 | #define emith_move_r_imm(r, imm) \ |
| 427 | emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm) |
| 428 | |
| 429 | #define emith_add_r_imm(r, imm) \ |
| 430 | emith_op_imm(A_COND_AL, 0, A_OP_ADD, r, imm) |
| 431 | |
| 432 | #define emith_adc_r_imm(r, imm) \ |
| 433 | emith_op_imm(A_COND_AL, 0, A_OP_ADC, r, imm) |
| 434 | |
| 435 | #define emith_sub_r_imm(r, imm) \ |
| 436 | emith_op_imm(A_COND_AL, 0, A_OP_SUB, r, imm) |
| 437 | |
| 438 | #define emith_bic_r_imm(r, imm) \ |
| 439 | emith_op_imm(A_COND_AL, 0, A_OP_BIC, r, imm) |
| 440 | |
| 441 | #define emith_and_r_imm(r, imm) \ |
| 442 | emith_op_imm(A_COND_AL, 0, A_OP_AND, r, imm) |
| 443 | |
| 444 | #define emith_or_r_imm(r, imm) \ |
| 445 | emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm) |
| 446 | |
| 447 | #define emith_eor_r_imm(r, imm) \ |
| 448 | emith_op_imm(A_COND_AL, 0, A_OP_EOR, r, imm) |
| 449 | |
| 450 | // note: only use 8bit imm for these |
| 451 | #define emith_tst_r_imm(r, imm) \ |
| 452 | emith_top_imm(A_COND_AL, A_OP_TST, r, imm) |
| 453 | |
#define emith_cmp_r_imm(r, imm) { \
	u32 op = A_OP_CMP, imm_ = imm; \
	if (imm_ != 0 && -imm_ < 0x100) { \
		/* for small negatives, CMN r,#-imm sets the same flags as CMP r,#imm */ \
		imm_ = -imm_; \
		op = A_OP_CMN; \
	} \
	emith_top_imm(A_COND_AL, op, r, imm_); \
}
| 462 | |
| 463 | #define emith_subf_r_imm(r, imm) \ |
| 464 | emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm) |
| 465 | |
| 466 | #define emith_move_r_imm_c(cond, r, imm) \ |
| 467 | emith_op_imm(cond, 0, A_OP_MOV, r, imm) |
| 468 | |
| 469 | #define emith_add_r_imm_c(cond, r, imm) \ |
| 470 | emith_op_imm(cond, 0, A_OP_ADD, r, imm) |
| 471 | |
| 472 | #define emith_sub_r_imm_c(cond, r, imm) \ |
| 473 | emith_op_imm(cond, 0, A_OP_SUB, r, imm) |
| 474 | |
| 475 | #define emith_or_r_imm_c(cond, r, imm) \ |
| 476 | emith_op_imm(cond, 0, A_OP_ORR, r, imm) |
| 477 | |
| 478 | #define emith_eor_r_imm_c(cond, r, imm) \ |
| 479 | emith_op_imm(cond, 0, A_OP_EOR, r, imm) |
| 480 | |
| 481 | #define emith_bic_r_imm_c(cond, r, imm) \ |
| 482 | emith_op_imm(cond, 0, A_OP_BIC, r, imm) |
| 483 | |
| 484 | #define emith_move_r_imm_s8(r, imm) { \ |
| 485 | if ((imm) & 0x80) \ |
| 486 | EOP_MVN_IMM(r, 0, ((imm) ^ 0xff)); \ |
| 487 | else \ |
| 488 | EOP_MOV_IMM(r, 0, imm); \ |
| 489 | } |
| 490 | |
| 491 | #define emith_and_r_r_imm(d, s, imm) \ |
| 492 | emith_op_imm2(A_COND_AL, 0, A_OP_AND, d, s, imm) |
| 493 | |
| 494 | #define emith_add_r_r_imm(d, s, imm) \ |
| 495 | emith_op_imm2(A_COND_AL, 0, A_OP_ADD, d, s, imm) |
| 496 | |
| 497 | #define emith_sub_r_r_imm(d, s, imm) \ |
| 498 | emith_op_imm2(A_COND_AL, 0, A_OP_SUB, d, s, imm) |
| 499 | |
| 500 | #define emith_neg_r_r(d, s) \ |
| 501 | EOP_RSB_IMM(d, s, 0, 0) |
| 502 | |
| 503 | #define emith_lsl(d, s, cnt) \ |
| 504 | EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt) |
| 505 | |
| 506 | #define emith_lsr(d, s, cnt) \ |
| 507 | EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt) |
| 508 | |
| 509 | #define emith_asr(d, s, cnt) \ |
| 510 | EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ASR,cnt) |
| 511 | |
| 512 | #define emith_ror_c(cond, d, s, cnt) \ |
| 513 | EOP_MOV_REG(cond,0,d,s,A_AM1_ROR,cnt) |
| 514 | |
| 515 | #define emith_ror(d, s, cnt) \ |
| 516 | emith_ror_c(A_COND_AL, d, s, cnt) |
| 517 | |
#define emith_rol(d, s, cnt) \
	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,32-(cnt))
| 520 | |
| 521 | #define emith_lslf(d, s, cnt) \ |
| 522 | EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt) |
| 523 | |
| 524 | #define emith_lsrf(d, s, cnt) \ |
| 525 | EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt) |
| 526 | |
| 527 | #define emith_asrf(d, s, cnt) \ |
| 528 | EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt) |
| 529 | |
| 530 | // note: only C flag updated correctly |
| 531 | #define emith_rolf(d, s, cnt) { \ |
| 532 | EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \ |
| 533 | /* we don't have ROL so we shift to get the right carry */ \ |
| 534 | EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \ |
| 535 | } |
| 536 | |
| 537 | #define emith_rorf(d, s, cnt) \ |
| 538 | EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt) |
| 539 | |
| 540 | #define emith_rolcf(d) \ |
| 541 | emith_adcf_r_r(d, d) |
| 542 | |
| 543 | #define emith_rorcf(d) \ |
| 544 | EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */ |
| 545 | |
| 546 | #define emith_negcf_r_r(d, s) \ |
| 547 | EOP_C_DOP_IMM(A_COND_AL,A_OP_RSC,1,s,d,0,0) |
| 548 | |
| 549 | #define emith_mul(d, s1, s2) { \ |
| 550 | if ((d) != (s1)) /* rd != rm limitation */ \ |
| 551 | EOP_MUL(d, s1, s2); \ |
| 552 | else \ |
| 553 | EOP_MUL(d, s2, s1); \ |
| 554 | } |
| 555 | |
| 556 | #define emith_mul_u64(dlo, dhi, s1, s2) \ |
| 557 | EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2) |
| 558 | |
| 559 | #define emith_mul_s64(dlo, dhi, s1, s2) \ |
| 560 | EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2) |
| 561 | |
| 562 | #define emith_mula_s64(dlo, dhi, s1, s2) \ |
| 563 | EOP_C_SMLAL(A_COND_AL,0,dhi,dlo,s1,s2) |
| 564 | |
| 565 | // misc |
| 566 | #define emith_read_r_r_offs_c(cond, r, rs, offs) \ |
| 567 | EOP_LDR_IMM2(cond, r, rs, offs) |
| 568 | |
| 569 | #define emith_read8_r_r_offs_c(cond, r, rs, offs) \ |
| 570 | EOP_LDRB_IMM2(cond, r, rs, offs) |
| 571 | |
| 572 | #define emith_read16_r_r_offs_c(cond, r, rs, offs) \ |
| 573 | EOP_LDRH_IMM2(cond, r, rs, offs) |
| 574 | |
| 575 | #define emith_read_r_r_offs(r, rs, offs) \ |
| 576 | emith_read_r_r_offs_c(A_COND_AL, r, rs, offs) |
| 577 | |
| 578 | #define emith_read8_r_r_offs(r, rs, offs) \ |
| 579 | emith_read8_r_r_offs_c(A_COND_AL, r, rs, offs) |
| 580 | |
| 581 | #define emith_read16_r_r_offs(r, rs, offs) \ |
| 582 | emith_read16_r_r_offs_c(A_COND_AL, r, rs, offs) |
| 583 | |
| 584 | #define emith_ctx_read(r, offs) \ |
| 585 | emith_read_r_r_offs(r, CONTEXT_REG, offs) |
| 586 | |
| 587 | #define emith_ctx_write(r, offs) \ |
| 588 | EOP_STR_IMM(r, CONTEXT_REG, offs) |
| 589 | |
| 590 | #define emith_ctx_do_multiple(op, r, offs, count, tmpr) do { \ |
| 591 | int v_, r_ = r, c_ = count, b_ = CONTEXT_REG; \ |
| 592 | for (v_ = 0; c_; c_--, r_++) \ |
| 593 | v_ |= 1 << r_; \ |
| 594 | if ((offs) != 0) { \ |
| 595 | EOP_ADD_IMM(tmpr,CONTEXT_REG,30/2,(offs)>>2);\ |
| 596 | b_ = tmpr; \ |
| 597 | } \ |
| 598 | op(b_,v_); \ |
| 599 | } while(0) |
| 600 | |
| 601 | #define emith_ctx_read_multiple(r, offs, count, tmpr) \ |
| 602 | emith_ctx_do_multiple(EOP_LDMIA, r, offs, count, tmpr) |
| 603 | |
| 604 | #define emith_ctx_write_multiple(r, offs, count, tmpr) \ |
| 605 | emith_ctx_do_multiple(EOP_STMIA, r, offs, count, tmpr) |
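/* e.g. emith_ctx_read_multiple(4, 0x10, 3, 12) builds the register mask r4-r6,
 * emits "add r12, r11, #0x10" (CONTEXT_REG is r11; tmpr is only clobbered when
 * offs != 0) and then "ldmia r12, {r4-r6}" */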
| 606 | |
| 607 | #define emith_clear_msb_c(cond, d, s, count) { \ |
| 608 | u32 t; \ |
	if ((count) <= 8) { \
		t = (0xff << (8 - (count))) & 0xff; /* top 'count' bits of the high byte */ \
		EOP_C_DOP_IMM(cond,A_OP_BIC,0,s,d,8/2,t); \
	} else if ((count) >= 24) { \
		t = 0xff >> ((count) - 24); \
		EOP_C_DOP_IMM(cond,A_OP_AND,0,s,d,0,t); \
| 619 | } else { \ |
| 620 | EOP_MOV_REG(cond,0,d,s,A_AM1_LSL,count); \ |
| 621 | EOP_MOV_REG(cond,0,d,d,A_AM1_LSR,count); \ |
| 622 | } \ |
| 623 | } |
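/* e.g. emith_clear_msb(d, s, 16) takes the shift path and emits
 * "mov d, s, lsl #16" + "mov d, d, lsr #16", zeroing the upper 16 bits */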
| 624 | |
| 625 | #define emith_clear_msb(d, s, count) \ |
| 626 | emith_clear_msb_c(A_COND_AL, d, s, count) |
| 627 | |
| 628 | #define emith_sext(d, s, bits) { \ |
| 629 | EOP_MOV_REG_LSL(d,s,32 - (bits)); \ |
| 630 | EOP_MOV_REG_ASR(d,d,32 - (bits)); \ |
| 631 | } |
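/* e.g. emith_sext(d, s, 16) emits "mov d, s, lsl #16" + "mov d, d, asr #16",
 * sign-extending the low 16 bits of s into d */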
| 632 | |
// up to 4 args
| 634 | #define emith_pass_arg_r(arg, reg) \ |
| 635 | EOP_MOV_REG_SIMPLE(arg, reg) |
| 636 | |
| 637 | #define emith_pass_arg_imm(arg, imm) \ |
| 638 | emith_move_r_imm(arg, imm) |
| 639 | |
| 640 | #define emith_jump(target) \ |
| 641 | emith_jump_cond(A_COND_AL, target) |
| 642 | |
| 643 | #define emith_jump_patchable(target) \ |
| 644 | emith_jump(target) |
| 645 | |
| 646 | #define emith_jump_cond(cond, target) \ |
| 647 | emith_xbranch(cond, target, 0) |
| 648 | |
| 649 | #define emith_jump_cond_patchable(cond, target) \ |
| 650 | emith_jump_cond(cond, target) |
| 651 | |
| 652 | #define emith_jump_patch(ptr, target) do { \ |
| 653 | u32 *ptr_ = ptr; \ |
| 654 | u32 val_ = (u32 *)(target) - ptr_ - 2; \ |
| 655 | *ptr_ = (*ptr_ & 0xff000000) | (val_ & 0x00ffffff); \ |
| 656 | } while (0) |
| 657 | |
| 658 | #define emith_jump_at(ptr, target) { \ |
| 659 | u32 val_ = (u32 *)(target) - (u32 *)(ptr) - 2; \ |
| 660 | EOP_C_B_PTR(ptr, A_COND_AL, 0, val_ & 0xffffff); \ |
| 661 | } |
| 662 | |
| 663 | #define emith_jump_reg_c(cond, r) \ |
| 664 | EOP_C_BX(cond, r) |
| 665 | |
| 666 | #define emith_jump_reg(r) \ |
| 667 | emith_jump_reg_c(A_COND_AL, r) |
| 668 | |
| 669 | #define emith_jump_ctx_c(cond, offs) \ |
| 670 | EOP_LDR_IMM2(cond,15,CONTEXT_REG,offs) |
| 671 | |
| 672 | #define emith_jump_ctx(offs) \ |
| 673 | emith_jump_ctx_c(A_COND_AL, offs) |
| 674 | |
| 675 | #define emith_call_cond(cond, target) \ |
| 676 | emith_xbranch(cond, target, 1) |
| 677 | |
| 678 | #define emith_call(target) \ |
| 679 | emith_call_cond(A_COND_AL, target) |
| 680 | |
| 681 | #define emith_call_ctx(offs) { \ |
| 682 | emith_move_r_r(14, 15); \ |
| 683 | emith_jump_ctx(offs); \ |
| 684 | } |
| 685 | |
| 686 | #define emith_ret_c(cond) \ |
| 687 | emith_jump_reg_c(cond, 14) |
| 688 | |
| 689 | #define emith_ret() \ |
| 690 | emith_ret_c(A_COND_AL) |
| 691 | |
| 692 | #define emith_ret_to_ctx(offs) \ |
| 693 | emith_ctx_write(14, offs) |
| 694 | |
| 695 | #define emith_push_ret() \ |
| 696 | EOP_STMFD_SP(A_R14M) |
| 697 | |
| 698 | #define emith_pop_and_ret() \ |
| 699 | EOP_LDMFD_SP(A_R15M) |
| 700 | |
| 701 | #define host_instructions_updated(base, end) \ |
| 702 | cache_flush_d_inval_i(base, end) |
| 703 | |
| 704 | #define host_arg2reg(rd, arg) \ |
| 705 | rd = arg |
| 706 | |
| 707 | /* SH2 drc specific */ |
| 708 | #define emith_sh2_drc_entry() \ |
| 709 | EOP_STMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R14M) |
| 710 | |
| 711 | #define emith_sh2_drc_exit() \ |
| 712 | EOP_LDMFD_SP(A_R4M|A_R5M|A_R6M|A_R7M|A_R8M|A_R9M|A_R10M|A_R11M|A_R15M) |
| 713 | |
| 714 | #define emith_sh2_wcall(a, tab, ret_ptr) { \ |
| 715 | int val_ = (char *)(ret_ptr) - (char *)tcache_ptr - 2*4; \ |
| 716 | if (val_ >= 0) \ |
| 717 | emith_add_r_r_imm(14, 15, val_); \ |
	else \
| 719 | emith_sub_r_r_imm(14, 15, -val_); \ |
| 720 | emith_lsr(12, a, SH2_WRITE_SHIFT); \ |
| 721 | EOP_LDR_REG_LSL(A_COND_AL,12,tab,12,2); \ |
| 722 | emith_ctx_read(2, offsetof(SH2, is_slave)); \ |
| 723 | emith_jump_reg(12); \ |
| 724 | } |
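/* the sequence above sets lr to ret_ptr (computed pc-relatively; pc reads as
 * tcache_ptr + 8, hence the 2*4), loads the write handler from
 * tab[a >> SH2_WRITE_SHIFT] into r12, reads sh2->is_slave into r2 and
 * tail-jumps to the handler, which is expected to return to ret_ptr */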
| 725 | |
| 726 | #define emith_sh2_dtbf_loop() { \ |
| 727 | int cr, rn; \ |
| 728 | int tmp_ = rcache_get_tmp(); \ |
| 729 | cr = rcache_get_reg(SHR_SR, RC_GR_RMW); \ |
| 730 | rn = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW); \ |
| 731 | emith_sub_r_imm(rn, 1); /* sub rn, #1 */ \ |
| 732 | emith_bic_r_imm(cr, 1); /* bic cr, #1 */ \ |
| 733 | emith_sub_r_imm(cr, (cycles+1) << 12); /* sub cr, #(cycles+1)<<12 */ \ |
| 734 | cycles = 0; \ |
| 735 | emith_asrf(tmp_, cr, 2+12); /* movs tmp_, cr, asr #2+12 */\ |
| 736 | EOP_MOV_IMM_C(A_COND_MI,tmp_,0,0); /* movmi tmp_, #0 */ \ |
| 737 | emith_lsl(cr, cr, 20); /* mov cr, cr, lsl #20 */ \ |
| 738 | emith_lsr(cr, cr, 20); /* mov cr, cr, lsr #20 */ \ |
| 739 | emith_subf_r_r(rn, tmp_); /* subs rn, tmp_ */ \ |
| 740 | EOP_RSB_IMM_C(A_COND_LS,tmp_,rn,0,0); /* rsbls tmp_, rn, #0 */ \ |
	EOP_ORR_REG(A_COND_LS,0,cr,cr,tmp_,A_AM1_LSL,12+2); /* orrls cr, cr, tmp_, lsl #12+2 */ \
| 742 | EOP_ORR_IMM_C(A_COND_LS,cr,cr,0,1); /* orrls cr, #1 */ \ |
| 743 | EOP_MOV_IMM_C(A_COND_LS,rn,0,0); /* movls rn, #0 */ \ |
| 744 | rcache_free_tmp(tmp_); \ |
| 745 | } |
| 746 | |
| 747 | #define emith_write_sr(sr, srcr) { \ |
| 748 | emith_lsr(sr, sr, 10); \ |
| 749 | emith_or_r_r_r_lsl(sr, sr, srcr, 22); \ |
| 750 | emith_ror(sr, sr, 22); \ |
| 751 | } |
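/* net effect: bits 31..10 of sr are kept, bits 9..0 are taken from srcr */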
| 752 | |
| 753 | #define emith_carry_to_t(srr, is_sub) { \ |
| 754 | if (is_sub) { /* has inverted C on ARM */ \ |
| 755 | emith_or_r_imm_c(A_COND_CC, srr, 1); \ |
| 756 | emith_bic_r_imm_c(A_COND_CS, srr, 1); \ |
| 757 | } else { \ |
| 758 | emith_or_r_imm_c(A_COND_CS, srr, 1); \ |
| 759 | emith_bic_r_imm_c(A_COND_CC, srr, 1); \ |
| 760 | } \ |
| 761 | } |
| 762 | |
| 763 | #define emith_tpop_carry(sr, is_sub) { \ |
| 764 | if (is_sub) \ |
| 765 | emith_eor_r_imm(sr, 1); \ |
| 766 | emith_lsrf(sr, sr, 1); \ |
| 767 | } |
| 768 | |
| 769 | #define emith_tpush_carry(sr, is_sub) { \ |
| 770 | emith_adc_r_r(sr, sr); \ |
| 771 | if (is_sub) \ |
| 772 | emith_eor_r_imm(sr, 1); \ |
| 773 | } |
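/* emith_tpop_carry shifts sr right by one, moving the T bit (bit 0) into the
 * host C flag; emith_tpush_carry shifts it back left, inserting C as the new
 * T bit; the eor accounts for ARM's inverted carry on subtractions */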
| 774 | |
| 775 | /* |
| 776 | * if Q |
| 777 | * t = carry(Rn += Rm) |
| 778 | * else |
| 779 | * t = carry(Rn -= Rm) |
| 780 | * T ^= t |
| 781 | */ |
| 782 | #define emith_sh2_div1_step(rn, rm, sr) { \ |
| 783 | void *jmp0, *jmp1; \ |
| 784 | emith_tst_r_imm(sr, Q); /* if (Q ^ M) */ \ |
| 785 | JMP_POS(jmp0); /* beq do_sub */ \ |
| 786 | emith_addf_r_r(rn, rm); \ |
| 787 | emith_eor_r_imm_c(A_COND_CS, sr, T); \ |
| 788 | JMP_POS(jmp1); /* b done */ \ |
| 789 | JMP_EMIT(A_COND_EQ, jmp0); /* do_sub: */ \ |
| 790 | emith_subf_r_r(rn, rm); \ |
| 791 | emith_eor_r_imm_c(A_COND_CC, sr, T); \ |
| 792 | JMP_EMIT(A_COND_AL, jmp1); /* done: */ \ |
| 793 | } |
| 794 | |