| 1 | /* SPDX-License-Identifier: LGPL-2.1-or-later */ |
| 2 | /* |
| 3 | * Copyright (C) 2016-2021 Paul Cercueil <paul@crapouillou.net> |
| 4 | */ |
| 5 | |
| 6 | #ifndef __LIGHTREC_PRIVATE_H__ |
| 7 | #define __LIGHTREC_PRIVATE_H__ |
| 8 | |
| 9 | #include "lightning-wrapper.h" |
| 10 | #include "lightrec-config.h" |
| 11 | #include "disassembler.h" |
| 12 | #include "lightrec.h" |
| 13 | #include "regcache.h" |
| 14 | |
| 15 | #if ENABLE_THREADED_COMPILER |
| 16 | #include <stdatomic.h> |
| 17 | #endif |
| 18 | |
| 19 | #ifdef _MSC_BUILD |
| 20 | #include <immintrin.h> |
| 21 | #endif |
| 22 | |
/* Element count of a statically-sized array; the sizeof(x) test makes the
 * macro yield 0 instead of dividing by zero for zero-sized arrays. */
#define ARRAY_SIZE(x) (sizeof(x) ? sizeof(x) / sizeof((x)[0]) : 0)

/* Contiguous bitmask covering bits [l, h] inclusive, in a native-word type. */
#define GENMASK(h, l) \
	(((uintptr_t)-1 << (l)) & ((uintptr_t)-1 >> (__WORDSIZE - 1 - (h))))

/* Branch-prediction hints; plain pass-through on non-GNU compilers. */
#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x),1)
# define unlikely(x) __builtin_expect(!!(x),0)
#else
# define likely(x) (x)
# define unlikely(x) (x)
#endif
| 35 | |
/* Little-endian <-> host conversions: byte-swap on big-endian hosts,
 * identity on little-endian hosts. */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define LE32TOH(x) __builtin_bswap32(x)
# define HTOLE32(x) __builtin_bswap32(x)
# define LE16TOH(x) __builtin_bswap16(x)
# define HTOLE16(x) __builtin_bswap16(x)
#else
# define LE32TOH(x) (x)
# define HTOLE32(x) (x)
# define LE16TOH(x) (x)
# define HTOLE16(x) (x)
#endif
| 47 | |
/* Default-initialize every element of a designated-initializer table to
 * 'value', using the GNU range-initializer extension ([0 ... N]) when
 * available; otherwise only element 0 is set (to NULL). */
#if HAS_DEFAULT_ELM
#define SET_DEFAULT_ELM(table, value) [0 ... ARRAY_SIZE(table) - 1] = value
#else
#define SET_DEFAULT_ELM(table, value) [0] = NULL
#endif

/* Marker for intentional switch-case fall-through (expands to a no-op). */
#define fallthrough do {} while (0) /* fall-through */
| 55 | |
/* Get the address of the structure of type 'type' that contains the
 * member 'member' pointed to by 'ptr'. */
#define container_of(ptr, type, member) \
	((type *)((void *)(ptr) - offsetof(type, member)))

/* 32-bit popcount / count-leading-zeros / count-trailing-zeros, mapped
 * onto the MSVC or GCC/Clang intrinsics. */
#ifdef _MSC_BUILD
# define popcount32(x) __popcnt(x)
# define clz32(x) _lzcnt_u32(x)
# define ctz32(x) _tzcnt_u32(x)
#else
# define popcount32(x) __builtin_popcount(x)
# define clz32(x) __builtin_clz(x)
# define ctz32(x) __builtin_ctz(x)
#endif
| 68 | |
/* Flags for (struct block *)->flags */
#define BLOCK_NEVER_COMPILE BIT(0)
#define BLOCK_SHOULD_RECOMPILE BIT(1)
#define BLOCK_FULLY_TAGGED BIT(2)
#define BLOCK_IS_DEAD BIT(3)
#define BLOCK_IS_MEMSET BIT(4)
#define BLOCK_NO_OPCODE_LIST BIT(5)

/* Sizes of the emulated RAM and BIOS regions, in bytes */
#define RAM_SIZE 0x200000
#define BIOS_SIZE 0x80000

/* One code-LUT entry per 32-bit-aligned address in RAM + BIOS */
#define CODE_LUT_SIZE ((RAM_SIZE + BIOS_SIZE) >> 2)

/* Pseudo-register indices following the 32 MIPS GPRs */
#define REG_LO 32
#define REG_HI 33
/* REG_TEMP aliases lightrec_state.temp_reg (index into the state's u32s) */
#define REG_TEMP (offsetof(struct lightrec_state, temp_reg) / sizeof(u32))
| 85 | |
| 86 | /* Definition of jit_state_t (avoids inclusion of <lightning.h>) */ |
| 87 | struct jit_node; |
| 88 | struct jit_state; |
| 89 | typedef struct jit_state jit_state_t; |
| 90 | |
| 91 | struct blockcache; |
| 92 | struct recompiler; |
| 93 | struct regcache; |
| 94 | struct opcode; |
| 95 | struct reaper; |
| 96 | |
/* Pair of 16-bit halves, ordered so that 'h' and 'l' line up with the
 * high/low halfwords of a native u32 in memory on either endianness. */
struct u16x2 {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 h, l;
#else
	u16 l, h;
#endif
};
| 104 | |
/* One unit of translated code. */
struct block {
	jit_state_t *_jit;		/* GNU Lightning state for this block */
	struct opcode *opcode_list;	/* decoded opcodes (nb_ops entries) */
	void (*function)(void);		/* entry point of the emitted native code */
	const u32 *code;		/* source opcodes in emulated memory */
	struct block *next;		/* intrusive list link (block cache chaining) */
	u32 pc;				/* emulated address of the first opcode */
	u32 hash;			/* hash of the source code — presumably for
					 * invalidation checks; computed elsewhere */
	u32 precompile_date;
	unsigned int code_size;
	u16 nb_ops;			/* number of entries in opcode_list */
#if ENABLE_THREADED_COMPILER
	_Atomic u8 flags;		/* BLOCK_* flags; atomic: shared with the
					 * compilation thread */
#else
	u8 flags;			/* BLOCK_* flags */
#endif
};
| 122 | |
/* A recorded branch: the Lightning jump node and the emulated address
 * it targets (resolved against lightrec_branch_target entries). */
struct lightrec_branch {
	struct jit_node *branch;
	u32 target;
};
| 127 | |
/* A recorded branch target: the Lightning label and its offset. */
struct lightrec_branch_target {
	struct jit_node *label;
	u32 offset;
};
| 132 | |
/* Indices into lightrec_state.c_wrappers[] / wrappers_eps[]. */
enum c_wrappers {
	C_WRAPPER_RW,
	C_WRAPPER_RW_GENERIC,
	C_WRAPPER_MFC,
	C_WRAPPER_MTC,
	C_WRAPPER_CP,
	C_WRAPPERS_COUNT,	/* array size, not a wrapper */
};
| 141 | |
/* State used while compiling a single block. */
struct lightrec_cstate {
	struct lightrec_state *state;

	/* Branches and label targets recorded during code emission;
	 * fixed capacity of 512 entries each. */
	struct lightrec_branch local_branches[512];
	struct lightrec_branch_target targets[512];
	unsigned int nb_local_branches;	/* used entries in local_branches[] */
	unsigned int nb_targets;	/* used entries in targets[] */
	unsigned int cycles;

	struct regcache *reg_cache;

	_Bool no_load_delay;
};
| 155 | |
/* Global state of the recompiler.  Ends with a flexible array member
 * (code_lut[]), so it must be heap-allocated with extra room. */
struct lightrec_state {
	struct lightrec_registers regs;
	u32 temp_reg;		/* backing storage for the REG_TEMP pseudo-reg */
	u32 next_pc;
	uintptr_t wrapper_regs[NUM_TEMPS];
	u8 in_delay_slot_n;
	u32 current_cycle;
	u32 target_cycle;
	u32 exit_flags;
	u32 old_cycle_counter;
	struct block *dispatcher, *c_wrapper_block;
	void *c_wrappers[C_WRAPPERS_COUNT];	/* indexed by enum c_wrappers */
	void *wrappers_eps[C_WRAPPERS_COUNT];	/* indexed by enum c_wrappers */
	struct blockcache *block_cache;
	struct recompiler *rec;		/* threaded compiler state, if enabled */
	struct lightrec_cstate *cstate;
	struct reaper *reaper;
	void *tlsf;			/* TLSF allocator handle — see code buffer */
	/* Entry points into emitted helper code (set up elsewhere) */
	void (*eob_wrapper_func)(void);
	void (*interpreter_func)(void);
	void (*ds_check_func)(void);
	void (*memset_func)(void);
	void (*get_next_block)(void);
	struct lightrec_ops ops;	/* client-provided callbacks (public API) */
	unsigned int nb_precompile;
	unsigned int nb_compile;
	unsigned int nb_maps;		/* number of entries in maps[] */
	const struct lightrec_mem_map *maps;
	uintptr_t offset_ram, offset_bios, offset_scratch, offset_io;
	_Bool with_32bit_lut;		/* LUT stores u32 entries (see lut_is_32bit) */
	_Bool mirrors_mapped;
	_Bool invalidate_from_dma_only;
	/* Code LUT: CODE_LUT_SIZE entries of lut_elm_size() bytes each */
	void *code_lut[];
};
| 190 | |
/* Perform a memory access on behalf of generated code.
 * NOTE(review): the roles of 'flags' and 'offset' are defined by the
 * implementation, which is not visible in this header — check lightrec.c. */
u32 lightrec_rw(struct lightrec_state *state, union code op, u32 addr,
		u32 data, u32 *flags, struct block *block, u16 offset);

/* Release a block and the resources it owns. */
void lightrec_free_block(struct lightrec_state *state, struct block *block);

/* Remove the block's entry from the code LUT. */
void remove_from_code_lut(struct blockcache *cache, struct block *block);

/* Look up the memory map that covers the kunseg'd address 'kaddr';
 * presumably fills '*host' with the matching host pointer — see caller. */
const struct lightrec_mem_map *
lightrec_get_map(struct lightrec_state *state, void **host, u32 kaddr);
| 200 | |
| 201 | static inline u32 kunseg(u32 addr) |
| 202 | { |
| 203 | if (unlikely(addr >= 0xa0000000)) |
| 204 | return addr - 0xa0000000; |
| 205 | else |
| 206 | return addr &~ 0x80000000; |
| 207 | } |
| 208 | |
| 209 | static inline u32 lut_offset(u32 pc) |
| 210 | { |
| 211 | if (pc & BIT(28)) |
| 212 | return ((pc & (BIOS_SIZE - 1)) + RAM_SIZE) >> 2; // BIOS |
| 213 | else |
| 214 | return (pc & (RAM_SIZE - 1)) >> 2; // RAM |
| 215 | } |
| 216 | |
/* Compile-time constant; lets callers use a plain 'if' instead of #ifdef. */
static inline _Bool is_big_endian(void)
{
	return (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) ? 1 : 0;
}
| 221 | |
| 222 | static inline _Bool lut_is_32bit(const struct lightrec_state *state) |
| 223 | { |
| 224 | return __WORDSIZE == 32 || |
| 225 | (ENABLE_CODE_BUFFER && state->with_32bit_lut); |
| 226 | } |
| 227 | |
/* Size in bytes of one code LUT entry. */
static inline size_t lut_elm_size(const struct lightrec_state *state)
{
	if (lut_is_32bit(state))
		return 4;

	return sizeof(void *);
}
| 232 | |
| 233 | static inline void ** lut_address(struct lightrec_state *state, u32 offset) |
| 234 | { |
| 235 | if (lut_is_32bit(state)) |
| 236 | return (void **) ((uintptr_t) state->code_lut + offset * 4); |
| 237 | else |
| 238 | return &state->code_lut[offset]; |
| 239 | } |
| 240 | |
| 241 | static inline void * lut_read(struct lightrec_state *state, u32 offset) |
| 242 | { |
| 243 | void **lut_entry = lut_address(state, offset); |
| 244 | |
| 245 | if (lut_is_32bit(state)) |
| 246 | return (void *)(uintptr_t) *(u32 *) lut_entry; |
| 247 | else |
| 248 | return *lut_entry; |
| 249 | } |
| 250 | |
| 251 | static inline void lut_write(struct lightrec_state *state, u32 offset, void *ptr) |
| 252 | { |
| 253 | void **lut_entry = lut_address(state, offset); |
| 254 | |
| 255 | if (lut_is_32bit(state)) |
| 256 | *(u32 *) lut_entry = (u32)(uintptr_t) ptr; |
| 257 | else |
| 258 | *lut_entry = ptr; |
| 259 | } |
| 260 | |
| 261 | static inline u32 get_ds_pc(const struct block *block, u16 offset, s16 imm) |
| 262 | { |
| 263 | u16 flags = block->opcode_list[offset].flags; |
| 264 | |
| 265 | offset += op_flag_no_ds(flags); |
| 266 | |
| 267 | return block->pc + (offset + imm << 2); |
| 268 | } |
| 269 | |
| 270 | static inline u32 get_branch_pc(const struct block *block, u16 offset, s16 imm) |
| 271 | { |
| 272 | u16 flags = block->opcode_list[offset].flags; |
| 273 | |
| 274 | offset -= op_flag_no_ds(flags); |
| 275 | |
| 276 | return block->pc + (offset + imm << 2); |
| 277 | } |
| 278 | |
/* Coprocessor helpers (implemented outside this header):
 * move-to / move-from coprocessor, return-from-exception, and generic
 * coprocessor command. */
void lightrec_mtc(struct lightrec_state *state, union code op, u8 reg, u32 data);
u32 lightrec_mfc(struct lightrec_state *state, union code op);
void lightrec_rfe(struct lightrec_state *state);
void lightrec_cp(struct lightrec_state *state, union code op);

/* Create / destroy a per-compilation state object. */
struct lightrec_cstate * lightrec_create_cstate(struct lightrec_state *state);
void lightrec_free_cstate(struct lightrec_cstate *cstate);

/* Fetch the opcode at 'pc' from emulated memory. */
union code lightrec_read_opcode(struct lightrec_state *state, u32 pc);

/* Compile 'block' to native code.
 * NOTE(review): returns an int status — error convention defined by the
 * implementation, check callers. */
int lightrec_compile_block(struct lightrec_cstate *cstate, struct block *block);
void lightrec_free_opcode_list(struct lightrec_state *state,
			       struct opcode *list);

/* Emulated cycle cost of one opcode; __cnst marks it side-effect free. */
__cnst unsigned int lightrec_cycles_of_opcode(union code code);
| 294 | |
| 295 | static inline u8 get_mult_div_lo(union code c) |
| 296 | { |
| 297 | return (OPT_FLAG_MULT_DIV && c.r.rd) ? c.r.rd : REG_LO; |
| 298 | } |
| 299 | |
| 300 | static inline u8 get_mult_div_hi(union code c) |
| 301 | { |
| 302 | return (OPT_FLAG_MULT_DIV && c.r.imm) ? c.r.imm : REG_HI; |
| 303 | } |
| 304 | |
| 305 | static inline s16 s16_max(s16 a, s16 b) |
| 306 | { |
| 307 | return a > b ? a : b; |
| 308 | } |
| 309 | |
| 310 | static inline _Bool block_has_flag(struct block *block, u8 flag) |
| 311 | { |
| 312 | #if ENABLE_THREADED_COMPILER |
| 313 | return atomic_load_explicit(&block->flags, memory_order_relaxed) & flag; |
| 314 | #else |
| 315 | return block->flags & flag; |
| 316 | #endif |
| 317 | } |
| 318 | |
| 319 | static inline u8 block_set_flags(struct block *block, u8 mask) |
| 320 | { |
| 321 | #if ENABLE_THREADED_COMPILER |
| 322 | return atomic_fetch_or_explicit(&block->flags, mask, |
| 323 | memory_order_relaxed); |
| 324 | #else |
| 325 | u8 flags = block->flags; |
| 326 | |
| 327 | block->flags |= mask; |
| 328 | |
| 329 | return flags; |
| 330 | #endif |
| 331 | } |
| 332 | |
| 333 | static inline u8 block_clear_flags(struct block *block, u8 mask) |
| 334 | { |
| 335 | #if ENABLE_THREADED_COMPILER |
| 336 | return atomic_fetch_and_explicit(&block->flags, ~mask, |
| 337 | memory_order_relaxed); |
| 338 | #else |
| 339 | u8 flags = block->flags; |
| 340 | |
| 341 | block->flags &= ~mask; |
| 342 | |
| 343 | return flags; |
| 344 | #endif |
| 345 | } |
| 346 | |
| 347 | static inline _Bool can_sign_extend(s32 value, u8 order) |
| 348 | { |
| 349 | return (u32)(value >> order - 1) + 1 < 2; |
| 350 | } |
| 351 | |
| 352 | static inline _Bool can_zero_extend(u32 value, u8 order) |
| 353 | { |
| 354 | return (value >> order) == 0; |
| 355 | } |
| 356 | |
| 357 | static inline const struct opcode * |
| 358 | get_delay_slot(const struct opcode *list, u16 i) |
| 359 | { |
| 360 | return op_flag_no_ds(list[i].flags) ? &list[i - 1] : &list[i + 1]; |
| 361 | } |
| 362 | |
| 363 | #endif /* __LIGHTREC_PRIVATE_H__ */ |