1 // SPDX-License-Identifier: LGPL-2.1-or-later
3 * Copyright (C) 2019-2021 Paul Cercueil <paul@crapouillou.net>
6 #include "blockcache.h"
8 #include "interpreter.h"
9 #include "lightrec-private.h"
10 #include "memmanager.h"
15 #include <stdatomic.h>
/* Tail of struct block_rec (opening lines elided in this chunk):
 * a queue node linking a block into the pending-compilation list. */
25 struct slist_elm slist;

/* Per-worker-thread state.  NOTE(review): the 'thd' pthread handle and
 * 'tid' index used later (rec->thds[i].thd / .tid) are declared on
 * elided lines of this struct — confirm against the full file. */
29 struct recompiler_thd {
30 struct lightrec_cstate *cstate;

/* Members of struct recompiler (opening line elided).  Shared between
 * the emulator thread and the worker threads; 'mutex' guards 'slist',
 * 'stop' and 'must_flush' (see lock/unlock pairs below). */
36 struct lightrec_state *state;
39 pthread_mutex_t mutex;
/* 'stop' asks workers to exit; 'must_flush' means the code buffer is
 * full and a flush has been scheduled via the reaper. */
40 bool stop, must_flush;
/* Head of the queue of block_rec entries awaiting compilation. */
41 struct slist_elm slist;
/* Serializes code-buffer allocations (see lightrec_code_alloc_lock). */
43 pthread_mutex_t alloc_mutex;
/* Flexible array member: one entry per worker thread. */
46 struct recompiler_thd thds[];
/* Return the number of available CPUs, clamped to at least 1.
 * The result is used to size the worker-thread pool. */
49 static unsigned int get_processors_count(void)
/* pthreads-win32 exposes a dedicated helper. */
53 #if defined(PTW32_VERSION)
54 nb = pthread_num_processors_np();
/* macOS / FreeBSD: query the "hw.ncpu" sysctl; fall back to 1 on error. */
55 #elif defined(__APPLE__) || defined(__FreeBSD__)
57 size_t size = sizeof(count);
59 nb = sysctlbyname("hw.ncpu", &count, &size, NULL, 0) ? 1 : count;
/* POSIX: count of currently online processors. */
60 #elif defined(_SC_NPROCESSORS_ONLN)
/* sysconf() may return -1 on failure; the clamp below handles that. */
61 nb = sysconf(_SC_NPROCESSORS_ONLN);
/* Never report fewer than one processor. */
64 return nb < 1 ? 1 : nb;
/* Find the first queued entry that no worker has claimed yet.
 * Caller must hold rec->mutex (the list is mutex-protected).
 * NOTE(review): the return statements are on elided lines — presumably
 * returns the matching element, or NULL when every entry is already
 * being compiled; confirm against the full file. */
67 static struct slist_elm * lightrec_get_first_elm(struct slist_elm *head)
69 struct block_rec *block_rec;
70 struct slist_elm *elm;
72 for (elm = slist_first(head); elm; elm = elm->next) {
73 block_rec = container_of(elm, struct block_rec, slist);
/* Skip entries a worker thread has already claimed. */
75 if (!block_rec->compiling)
/* Cancel one pending compilation job.  Caller must hold rec->mutex.
 * If a worker is currently compiling the block, wait on cond2 (which
 * workers broadcast when they finish an entry) and tell the caller to
 * retry; otherwise unlink and free the queue entry.
 * NOTE(review): the return statements are elided — the bool presumably
 * signals "caller must restart the scan"; confirm in the full file. */
82 static bool lightrec_cancel_block_rec(struct recompiler *rec,
83 struct block_rec *block_rec)
85 if (block_rec->compiling) {
86 /* Block is being recompiled - wait for
/* pthread_cond_wait() atomically releases rec->mutex while blocked,
 * so the worker can make progress and signal completion. */
88 pthread_cond_wait(&rec->cond2, &rec->mutex);
90 /* We can't guarantee the signal was for us.
91 * Since block_rec may have been removed while
92 * we were waiting on the condition, we cannot
93 * check block_rec->compiling again. The best
94 * thing is just to restart the function. */
98 /* Block is not yet being processed - remove it from the list */
99 slist_remove(&rec->slist, &block_rec->slist);
100 lightrec_free(rec->state, MEM_FOR_LIGHTREC,
101 sizeof(*block_rec), block_rec);
/* Drain the whole pending-compilation queue.  Caller must hold
 * rec->mutex.  Always re-reads the list head after each cancellation:
 * lightrec_cancel_block_rec() may drop the mutex while waiting, so the
 * list can change under us between iterations. */
106 static void lightrec_cancel_list(struct recompiler *rec)
108 struct block_rec *block_rec;
109 struct slist_elm *elm, *head = &rec->slist;
111 for (elm = slist_first(head); elm; elm = slist_first(head)) {
112 block_rec = container_of(elm, struct block_rec, slist);
113 lightrec_cancel_block_rec(rec, block_rec);
/* Reaper callback scheduled when the code buffer fills up (see the
 * -ENOMEM path in lightrec_compile_list).  Evicts outdated blocks to
 * reclaim code-buffer space, then clears the must_flush flag so new
 * jobs can be queued again.
 * NOTE(review): elided lines likely take rec->mutex around the flag
 * update — confirm locking against the full file. */
117 static void lightrec_flush_code_buffer(struct lightrec_state *state, void *d)
119 struct recompiler *rec = d;
121 lightrec_remove_outdated_blocks(state->block_cache, NULL);
122 rec->must_flush = false;
/* Worker-thread compile loop: repeatedly take the first unclaimed job
 * from the queue and compile it.  Entered and exited with rec->mutex
 * held; the lock is dropped around the actual compilation so other
 * workers and the emulator thread can run concurrently. */
125 static void lightrec_compile_list(struct recompiler *rec,
126 struct recompiler_thd *thd)
128 struct block_rec *block_rec;
129 struct slist_elm *next;
133 while (!!(next = lightrec_get_first_elm(&rec->slist))) {
134 block_rec = container_of(next, struct block_rec, slist);
/* Claim the entry (under the mutex) before unlocking, so no other
 * worker picks up the same block. */
135 block_rec->compiling = true;
136 block = block_rec->block;
/* Compile without the lock held — compilation is the slow path. */
138 pthread_mutex_unlock(&rec->mutex);
/* Blocks flagged dead are skipped; they will be reaped elsewhere. */
140 if (likely(!block_has_flag(block, BLOCK_IS_DEAD))) {
141 ret = lightrec_compile_block(thd->cstate, block);
142 if (ret == -ENOMEM) {
143 /* Code buffer is full. Request the reaper to
146 pthread_mutex_lock(&rec->mutex);
/* Release our claim and wake anyone waiting on this entry. */
147 block_rec->compiling = false;
148 pthread_cond_broadcast(&rec->cond2);
/* Only the first worker to hit -ENOMEM schedules the flush;
 * must_flush also blocks new jobs (see lightrec_recompiler_add). */
150 if (!rec->must_flush) {
151 rec->must_flush = true;
/* Cancel all pending jobs — they would fail the same way. */
152 lightrec_cancel_list(rec);
154 lightrec_reaper_add(rec->state->reaper,
155 lightrec_flush_code_buffer,
/* Any other compile error is logged and the job is dropped. */
162 pr_err("Unable to compile block at PC 0x%x: %d\n",
/* Re-acquire the mutex to unlink and free the finished entry. */
167 pthread_mutex_lock(&rec->mutex);
169 slist_remove(&rec->slist, next);
170 lightrec_free(rec->state, MEM_FOR_LIGHTREC,
171 sizeof(*block_rec), block_rec);
/* Wake threads waiting in lightrec_cancel_block_rec() on cond2. */
172 pthread_cond_broadcast(&rec->cond2);
/* Entry point of each worker thread.  Sleeps on rec->cond until the
 * queue is non-empty, then drains it via lightrec_compile_list().
 * NOTE(review): the outer loop and the rec->stop exit check are on
 * elided lines — confirm the shutdown path against the full file. */
176 static void * lightrec_recompiler_thd(void *d)
178 struct recompiler_thd *thd = d;
/* Recover the owning recompiler from this thread's slot in thds[]. */
179 struct recompiler *rec = container_of(thd, struct recompiler, thds[thd->tid]);
181 pthread_mutex_lock(&rec->mutex);
/* Releases the mutex while waiting; re-acquired when signalled. */
185 pthread_cond_wait(&rec->cond, &rec->mutex);
/* Guard against spurious wakeups: only proceed with work queued. */
190 } while (slist_empty(&rec->slist));
/* Called (and returns) with rec->mutex held. */
192 lightrec_compile_list(rec, thd);
196 pthread_mutex_unlock(&rec->mutex);
/* Create the threaded recompiler: one worker per CPU minus one (the
 * emulator thread keeps a core for itself), each with its own cstate.
 * Returns the new recompiler, or NULL on failure (error paths below).
 * Resources are torn down in reverse order via the goto chain. */
200 struct recompiler *lightrec_recompiler_init(struct lightrec_state *state)
202 struct recompiler *rec;
203 unsigned int i, nb_recs, nb_cpus;
206 nb_cpus = get_processors_count();
/* Leave one core for the emulator thread; always keep >= 1 worker. */
207 nb_recs = nb_cpus < 2 ? 1 : nb_cpus - 1;
/* Flexible array member: allocate header plus one thd slot per worker. */
209 rec = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*rec)
210 + nb_recs * sizeof(*rec->thds));
212 pr_err("Cannot create recompiler: Out of memory\n");
/* Pre-null every cstate so the error path can free only the ones
 * actually created. */
216 for (i = 0; i < nb_recs; i++) {
217 rec->thds[i].tid = i;
218 rec->thds[i].cstate = NULL;
221 for (i = 0; i < nb_recs; i++) {
222 rec->thds[i].cstate = lightrec_create_cstate(state);
223 if (!rec->thds[i].cstate) {
224 pr_err("Cannot create recompiler: Out of memory\n");
225 goto err_free_cstates;
231 rec->must_flush = false;
232 rec->nb_recs = nb_recs;
233 slist_init(&rec->slist);
235 ret = pthread_cond_init(&rec->cond, NULL);
237 pr_err("Cannot init cond variable: %d\n", ret);
238 goto err_free_cstates;
241 ret = pthread_cond_init(&rec->cond2, NULL);
243 pr_err("Cannot init cond variable: %d\n", ret);
244 goto err_cnd_destroy;
247 ret = pthread_mutex_init(&rec->alloc_mutex, NULL);
249 pr_err("Cannot init alloc mutex variable: %d\n", ret);
250 goto err_cnd2_destroy;
253 ret = pthread_mutex_init(&rec->mutex, NULL);
255 pr_err("Cannot init mutex variable: %d\n", ret);
256 goto err_alloc_mtx_destroy;
/* Start the workers last, once every shared primitive exists. */
259 for (i = 0; i < nb_recs; i++) {
260 ret = pthread_create(&rec->thds[i].thd, NULL,
261 lightrec_recompiler_thd, &rec->thds[i]);
263 pr_err("Cannot create recompiler thread: %d\n", ret);
264 /* TODO: Handle cleanup properly */
/* Known gap: threads already created before the failure are
 * not stopped/joined here (see TODO above). */
265 goto err_mtx_destroy;
269 pr_info("Threaded recompiler started with %u workers.\n", nb_recs);
/* Unwind in strict reverse order of construction. */
274 pthread_mutex_destroy(&rec->mutex);
275 err_alloc_mtx_destroy:
276 pthread_mutex_destroy(&rec->alloc_mutex);
278 pthread_cond_destroy(&rec->cond2);
280 pthread_cond_destroy(&rec->cond);
282 for (i = 0; i < nb_recs; i++) {
283 if (rec->thds[i].cstate)
284 lightrec_free_cstate(rec->thds[i].cstate);
286 lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*rec), rec);
/* Shut down the recompiler: wake every worker, cancel all pending
 * jobs, join the threads, then destroy primitives and free memory.
 * NOTE(review): setting rec->stop = true appears to be on an elided
 * line before the broadcast — confirm against the full file. */
290 void lightrec_free_recompiler(struct recompiler *rec)
296 /* Stop the thread */
297 pthread_mutex_lock(&rec->mutex);
/* Broadcast so every worker blocked on rec->cond re-checks its
 * exit condition. */
298 pthread_cond_broadcast(&rec->cond);
299 lightrec_cancel_list(rec);
300 pthread_mutex_unlock(&rec->mutex);
302 for (i = 0; i < rec->nb_recs; i++)
303 pthread_join(rec->thds[i].thd, NULL);
/* Only after all workers have exited is it safe to free cstates. */
305 for (i = 0; i < rec->nb_recs; i++)
306 lightrec_free_cstate(rec->thds[i].cstate);
308 pthread_mutex_destroy(&rec->mutex);
309 pthread_mutex_destroy(&rec->alloc_mutex);
310 pthread_cond_destroy(&rec->cond);
311 pthread_cond_destroy(&rec->cond2);
312 lightrec_free(rec->state, MEM_FOR_LIGHTREC, sizeof(*rec), rec);
/* Queue 'block' for background compilation.  Deduplicates: if the
 * block is already queued it is bumped to the head of the list rather
 * than added twice.  Returns 0 or a negative error code (the return
 * statements are on elided lines). */
315 int lightrec_recompiler_add(struct recompiler *rec, struct block *block)
317 struct slist_elm *elm, *prev;
318 struct block_rec *block_rec;
321 pthread_mutex_lock(&rec->mutex);
323 /* If the recompiler must flush the code cache, we can't add the new
324 * job. It will be re-added next time the block's address is jumped to
329 /* If the block is marked as dead, don't compile it, it will be removed
330 * as soon as it's safe. */
331 if (block_has_flag(block, BLOCK_IS_DEAD))
/* Scan for an existing entry for this block, tracking 'prev' so the
 * entry can be unlinked with slist_remove_next(). */
334 for (elm = slist_first(&rec->slist), prev = NULL; elm;
335 prev = elm, elm = elm->next) {
336 block_rec = container_of(elm, struct block_rec, slist);
338 if (block_rec->block == block) {
339 /* The block to compile is already in the queue - bump
340 * it to the top of the list, unless the block is being
342 if (prev && !block_rec->compiling &&
343 !block_has_flag(block, BLOCK_SHOULD_RECOMPILE)) {
344 slist_remove_next(prev);
345 slist_append(&rec->slist, elm);
352 /* By the time this function was called, the block has been recompiled
353 * and ins't in the wait list anymore. Just return here. */
354 if (block->function && !block_has_flag(block, BLOCK_SHOULD_RECOMPILE))
357 block_rec = lightrec_malloc(rec->state, MEM_FOR_LIGHTREC,
364 pr_debug("Adding block PC 0x%x to recompiler\n", block->pc);
366 block_rec->block = block;
367 block_rec->compiling = false;
371 /* If the block is being recompiled, push it to the end of the queue;
372 * otherwise push it to the front of the queue. */
/* Walk to the tail so re-compilations don't starve first-time jobs. */
373 if (block_has_flag(block, BLOCK_SHOULD_RECOMPILE))
374 for (; elm->next; elm = elm->next);
376 slist_append(elm, &block_rec->slist);
378 /* Signal the thread */
379 pthread_cond_signal(&rec->cond);
382 pthread_mutex_unlock(&rec->mutex);
/* Remove any pending (or in-progress) compilation job for 'block'.
 * If a worker is compiling it, lightrec_cancel_block_rec() waits and
 * the scan restarts (the restart control flow is on elided lines).
 * Returns once no job for 'block' remains in the queue. */
387 void lightrec_recompiler_remove(struct recompiler *rec, struct block *block)
389 struct block_rec *block_rec;
390 struct slist_elm *elm;
392 pthread_mutex_lock(&rec->mutex);
395 for (elm = slist_first(&rec->slist); elm; elm = elm->next) {
396 block_rec = container_of(elm, struct block_rec, slist);
398 if (block_rec->block == block) {
/* A true return means we waited on cond2 and the mutex was
 * temporarily released — the list must be re-scanned. */
399 if (lightrec_cancel_block_rec(rec, block_rec))
411 pthread_mutex_unlock(&rec->mutex);
/* Fast path executed when a block is entered before it has been
 * compiled.  Either returns an existing native function pointer for
 * the caller to jump to, or runs the block in the interpreter
 * (updating *pc) and returns from an elided path.
 * The BLOCK_NO_OPCODE_LIST flag acts as an ownership marker on
 * block->opcode_list: whoever atomically sets it first (via
 * block_set_flags) is responsible for freeing the list. */
414 void * lightrec_recompiler_run_first_pass(struct lightrec_state *state,
415 struct block *block, u32 *pc)
419 /* There's no point in running the first pass if the block will never
420 * be compiled. Let the main loop run the interpreter instead. */
421 if (block_has_flag(block, BLOCK_NEVER_COMPILE))
424 /* The block is marked as dead, and will be removed the next time the
425 * reaper is run. In the meantime, the old function can still be
427 if (block_has_flag(block, BLOCK_IS_DEAD))
428 return block->function;
430 /* If the block is already fully tagged, there is no point in running
431 * the first pass. Request a recompilation of the block, and maybe the
432 * interpreter will run the block in the meantime. */
433 if (block_has_flag(block, BLOCK_FULLY_TAGGED))
434 lightrec_recompiler_add(state->rec, block);
436 if (likely(block->function)) {
437 if (block_has_flag(block, BLOCK_FULLY_TAGGED)) {
/* block_set_flags returns the previous flags: only the caller
 * that actually flipped the bit frees the opcode list. */
438 old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
440 if (!(old_flags & BLOCK_NO_OPCODE_LIST)) {
441 pr_debug("Block PC 0x%08x is fully tagged"
442 " - free opcode list\n", block->pc);
444 /* The block was already compiled but the opcode list
445 * didn't get freed yet - do it now */
446 lightrec_free_opcode_list(state, block->opcode_list);
450 return block->function;
453 /* Mark the opcode list as freed, so that the threaded compiler won't
454 * free it while we're using it in the interpreter. */
455 old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
457 /* Block wasn't compiled yet - run the interpreter */
458 *pc = lightrec_emulate_block(state, block, *pc);
/* Restore the flag only if we were the ones who set it above. */
460 if (!(old_flags & BLOCK_NO_OPCODE_LIST))
461 block_clear_flags(block, BLOCK_NO_OPCODE_LIST);
463 /* The block got compiled while the interpreter was running.
464 * We can free the opcode list now. */
465 if (block->function && block_has_flag(block, BLOCK_FULLY_TAGGED)) {
466 old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
468 if (!(old_flags & BLOCK_NO_OPCODE_LIST)) {
469 pr_debug("Block PC 0x%08x is fully tagged"
470 " - free opcode list\n", block->pc);
472 lightrec_free_opcode_list(state, block->opcode_list);
/* Take the mutex serializing code-buffer allocations across the
 * worker threads.  Pairs with lightrec_code_alloc_unlock(). */
479 void lightrec_code_alloc_lock(struct lightrec_state *state)
481 pthread_mutex_lock(&state->rec->alloc_mutex);
/* Release the code-buffer allocation mutex taken by
 * lightrec_code_alloc_lock(). */
484 void lightrec_code_alloc_unlock(struct lightrec_state *state)
486 pthread_mutex_unlock(&state->rec->alloc_mutex);