a6c8cd15cb53740b0b2f5ed5c3495efcd789e5da
[pcsx_rearmed.git] / deps / lightrec / recompiler.c
1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3  * Copyright (C) 2019-2021 Paul Cercueil <paul@crapouillou.net>
4  */
5
6 #include "debug.h"
7 #include "interpreter.h"
8 #include "lightrec-private.h"
9 #include "memmanager.h"
10 #include "slist.h"
11
12 #include <errno.h>
13 #include <stdatomic.h>
14 #include <stdbool.h>
15 #include <stdlib.h>
16 #include <pthread.h>
17 #ifdef __linux__
18 #include <unistd.h>
19 #endif
20
/* One entry of the recompiler's work queue: a block waiting to be
 * compiled, linked into struct recompiler::slist. */
struct block_rec {
	struct block *block;
	struct slist_elm slist;
	/* Set (under recompiler::mutex) by the worker that claimed this
	 * entry, so other workers skip it while compilation runs. */
	bool compiling;
};
26
/* Per-worker-thread state of the threaded recompiler. */
struct recompiler_thd {
	/* Private compilation state, one per worker so threads can
	 * compile different blocks concurrently. */
	struct lightrec_cstate *cstate;
	/* Index of this worker inside recompiler::thds[]. */
	unsigned int tid;
	pthread_t thd;
};
32
/* The threaded recompiler: a work queue of blocks plus a pool of
 * worker threads that drain it. */
struct recompiler {
	struct lightrec_state *state;
	/* Signaled when work is queued (workers wait on it). */
	pthread_cond_t cond;
	/* Signaled when a block finished compiling; used by
	 * lightrec_recompiler_remove() to wait for in-flight work. */
	pthread_cond_t cond2;
	/* Protects stop, slist and block_rec::compiling. */
	pthread_mutex_t mutex;
	bool stop;
	struct slist_elm slist;

	unsigned int nb_recs;
	/* Flexible array member: one slot per worker thread. */
	struct recompiler_thd thds[];
};
44
/* Return the number of online CPUs, clamped to at least 1.
 *
 * Fix: the counter must be a signed int. sysconf() returns -1 on
 * error; with the previous `unsigned int nb`, that -1 wrapped to
 * UINT_MAX and the `nb < 1` fallback could never trigger. The
 * variable is also initialized so that an unmatched #if chain
 * (e.g. an unanticipated platform) still yields a sane value. */
static unsigned int get_processors_count(void)
{
	int nb = 1;

#if defined(PTW32_VERSION)
	nb = pthread_num_processors_np();
#elif defined(__APPLE__) || defined(__FreeBSD__)
	int count;
	size_t size = sizeof(count);

	nb = sysctlbyname("hw.ncpu", &count, &size, NULL, 0) ? 1 : count;
#elif defined(__linux__)
	nb = sysconf(_SC_NPROCESSORS_ONLN);
#endif

	return nb < 1 ? 1 : (unsigned int)nb;
}
62
63 static struct slist_elm * lightrec_get_first_elm(struct slist_elm *head)
64 {
65         struct block_rec *block_rec;
66         struct slist_elm *elm;
67
68         for (elm = slist_first(head); elm; elm = elm->next) {
69                 block_rec = container_of(elm, struct block_rec, slist);
70
71                 if (!block_rec->compiling)
72                         return elm;
73         }
74
75         return NULL;
76 }
77
/* Drain the work queue, compiling one unclaimed block at a time.
 * Called with rec->mutex held; the mutex is released while the block
 * is actually being compiled so other threads can queue/dequeue work,
 * and re-acquired before touching the list again. Returns with the
 * mutex held. */
static void lightrec_compile_list(struct recompiler *rec,
				  struct recompiler_thd *thd)
{
	struct block_rec *block_rec;
	struct slist_elm *next;
	struct block *block;
	int ret;

	while (!!(next = lightrec_get_first_elm(&rec->slist))) {
		block_rec = container_of(next, struct block_rec, slist);
		/* Claim the entry before dropping the lock so other
		 * workers skip it. */
		block_rec->compiling = true;
		block = block_rec->block;

		pthread_mutex_unlock(&rec->mutex);

		/* Skip blocks that were marked dead while queued. */
		if (likely(!(block->flags & BLOCK_IS_DEAD))) {
			ret = lightrec_compile_block(thd->cstate, block);
			if (ret) {
				pr_err("Unable to compile block at PC 0x%x: %d\n",
				       block->pc, ret);
			}
		}

		pthread_mutex_lock(&rec->mutex);

		slist_remove(&rec->slist, next);
		lightrec_free(rec->state, MEM_FOR_LIGHTREC,
			      sizeof(*block_rec), block_rec);
		/* Wake lightrec_recompiler_remove() in case it is
		 * waiting for this block to finish compiling. */
		pthread_cond_signal(&rec->cond2);
	}
}
109
110 static void * lightrec_recompiler_thd(void *d)
111 {
112         struct recompiler_thd *thd = d;
113         struct recompiler *rec = container_of(thd, struct recompiler, thds[thd->tid]);
114
115         pthread_mutex_lock(&rec->mutex);
116
117         while (!rec->stop) {
118                 do {
119                         pthread_cond_wait(&rec->cond, &rec->mutex);
120
121                         if (rec->stop)
122                                 goto out_unlock;
123
124                 } while (slist_empty(&rec->slist));
125
126                 lightrec_compile_list(rec, thd);
127         }
128
129 out_unlock:
130         pthread_mutex_unlock(&rec->mutex);
131         return NULL;
132 }
133
134 struct recompiler *lightrec_recompiler_init(struct lightrec_state *state)
135 {
136         struct recompiler *rec;
137         unsigned int i, nb_recs, nb_cpus;
138         int ret;
139
140         nb_cpus = get_processors_count();
141         nb_recs = nb_cpus < 2 ? 1 : nb_cpus - 1;
142
143         rec = lightrec_malloc(state, MEM_FOR_LIGHTREC, sizeof(*rec)
144                               + nb_recs * sizeof(*rec->thds));
145         if (!rec) {
146                 pr_err("Cannot create recompiler: Out of memory\n");
147                 return NULL;
148         }
149
150         for (i = 0; i < nb_recs; i++) {
151                 rec->thds[i].tid = i;
152                 rec->thds[i].cstate = NULL;
153         }
154
155         for (i = 0; i < nb_recs; i++) {
156                 rec->thds[i].cstate = lightrec_create_cstate(state);
157                 if (!rec->state) {
158                         pr_err("Cannot create recompiler: Out of memory\n");
159                         goto err_free_cstates;
160                 }
161         }
162
163         rec->state = state;
164         rec->stop = false;
165         rec->nb_recs = nb_recs;
166         slist_init(&rec->slist);
167
168         ret = pthread_cond_init(&rec->cond, NULL);
169         if (ret) {
170                 pr_err("Cannot init cond variable: %d\n", ret);
171                 goto err_free_cstates;
172         }
173
174         ret = pthread_cond_init(&rec->cond2, NULL);
175         if (ret) {
176                 pr_err("Cannot init cond variable: %d\n", ret);
177                 goto err_cnd_destroy;
178         }
179
180         ret = pthread_mutex_init(&rec->mutex, NULL);
181         if (ret) {
182                 pr_err("Cannot init mutex variable: %d\n", ret);
183                 goto err_cnd2_destroy;
184         }
185
186         for (i = 0; i < nb_recs; i++) {
187                 ret = pthread_create(&rec->thds[i].thd, NULL,
188                                      lightrec_recompiler_thd, &rec->thds[i]);
189                 if (ret) {
190                         pr_err("Cannot create recompiler thread: %d\n", ret);
191                         /* TODO: Handle cleanup properly */
192                         goto err_mtx_destroy;
193                 }
194         }
195
196         pr_info("Threaded recompiler started with %u workers.\n", nb_recs);
197
198         return rec;
199
200 err_mtx_destroy:
201         pthread_mutex_destroy(&rec->mutex);
202 err_cnd2_destroy:
203         pthread_cond_destroy(&rec->cond2);
204 err_cnd_destroy:
205         pthread_cond_destroy(&rec->cond);
206 err_free_cstates:
207         for (i = 0; i < nb_recs; i++) {
208                 if (rec->thds[i].cstate)
209                         lightrec_free_cstate(rec->thds[i].cstate);
210         }
211         lightrec_free(state, MEM_FOR_LIGHTREC, sizeof(*rec), rec);
212         return NULL;
213 }
214
215 void lightrec_free_recompiler(struct recompiler *rec)
216 {
217         unsigned int i;
218
219         rec->stop = true;
220
221         /* Stop the thread */
222         pthread_mutex_lock(&rec->mutex);
223         pthread_cond_broadcast(&rec->cond);
224         pthread_mutex_unlock(&rec->mutex);
225
226         for (i = 0; i < rec->nb_recs; i++)
227                 pthread_join(rec->thds[i].thd, NULL);
228
229         for (i = 0; i < rec->nb_recs; i++)
230                 lightrec_free_cstate(rec->thds[i].cstate);
231
232         pthread_mutex_destroy(&rec->mutex);
233         pthread_cond_destroy(&rec->cond);
234         pthread_cond_destroy(&rec->cond2);
235         lightrec_free(rec->state, MEM_FOR_LIGHTREC, sizeof(*rec), rec);
236 }
237
/* Queue a block for compilation by the worker threads.
 *
 * Blocks needing a first compilation are pushed to the FRONT of the
 * queue (low latency); blocks flagged BLOCK_SHOULD_RECOMPILE go to
 * the BACK (they already have a usable version). If the block is
 * already queued, it is bumped to the head instead of re-added.
 *
 * Returns 0 on success or if nothing needed to be done, -ENOMEM if
 * the queue entry could not be allocated. */
int lightrec_recompiler_add(struct recompiler *rec, struct block *block)
{
	struct slist_elm *elm, *prev;
	struct block_rec *block_rec;
	int ret = 0;

	pthread_mutex_lock(&rec->mutex);

	/* If the block is marked as dead, don't compile it, it will be removed
	 * as soon as it's safe. */
	if (block->flags & BLOCK_IS_DEAD)
		goto out_unlock;

	for (elm = slist_first(&rec->slist), prev = NULL; elm;
	     prev = elm, elm = elm->next) {
		block_rec = container_of(elm, struct block_rec, slist);

		if (block_rec->block == block) {
			/* The block to compile is already in the queue - bump
			 * it to the top of the list, unless the block is being
			 * recompiled. */
			if (prev && !block_rec->compiling &&
			    !(block->flags & BLOCK_SHOULD_RECOMPILE)) {
				/* NOTE: slist_append() on the list head
				 * inserts right after the head, i.e. at
				 * the front of the queue. */
				slist_remove_next(prev);
				slist_append(&rec->slist, elm);
			}

			goto out_unlock;
		}
	}

	/* By the time this function was called, the block has been recompiled
	 * and isn't in the wait list anymore. Just return here. */
	if (block->function && !(block->flags & BLOCK_SHOULD_RECOMPILE))
		goto out_unlock;

	block_rec = lightrec_malloc(rec->state, MEM_FOR_LIGHTREC,
				    sizeof(*block_rec));
	if (!block_rec) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	pr_debug("Adding block PC 0x%x to recompiler\n", block->pc);

	block_rec->block = block;
	block_rec->compiling = false;

	elm = &rec->slist;

	/* If the block is being recompiled, push it to the end of the queue;
	 * otherwise push it to the front of the queue. */
	if (block->flags & BLOCK_SHOULD_RECOMPILE)
		for (; elm->next; elm = elm->next);

	slist_append(elm, &block_rec->slist);

	/* Signal the thread */
	pthread_cond_signal(&rec->cond);

out_unlock:
	pthread_mutex_unlock(&rec->mutex);

	return ret;
}
303
/* Remove a block from the work queue, blocking until it is safe:
 * if a worker is currently compiling the block, wait on cond2 for
 * the compilation to finish before returning. */
void lightrec_recompiler_remove(struct recompiler *rec, struct block *block)
{
	struct block_rec *block_rec;
	struct slist_elm *elm;

	pthread_mutex_lock(&rec->mutex);

	while (true) {
		for (elm = slist_first(&rec->slist); elm; elm = elm->next) {
			block_rec = container_of(elm, struct block_rec, slist);

			if (block_rec->block != block)
				continue;

			if (block_rec->compiling) {
				/* Block is being recompiled - wait for
				 * completion */
				pthread_cond_wait(&rec->cond2, &rec->mutex);

				/* We can't guarantee the signal was for us.
				 * Since block_rec may have been removed while
				 * we were waiting on the condition, we cannot
				 * check block_rec->compiling again. The best
				 * thing is just to restart the function. */
				break;
			} else {
				/* Block is not yet being processed - remove it
				 * from the list */
				slist_remove(&rec->slist, elm);
				lightrec_free(rec->state, MEM_FOR_LIGHTREC,
					      sizeof(*block_rec), block_rec);

				goto out_unlock;
			}
		}

		/* elm == NULL: the block is no longer queued (either it
		 * was never there, or its compilation completed while we
		 * waited) - we are done. */
		if (!elm)
			break;
	}

out_unlock:
	pthread_mutex_unlock(&rec->mutex);
}
347
/* Execute a block that may not be compiled yet. If native code exists,
 * return its function pointer for the caller to run; otherwise fall
 * back to the interpreter (updating *pc) and return NULL.
 *
 * The op_list_freed atomic flag serializes ownership of the block's
 * opcode list between this (interpreter) path and the worker threads:
 * whoever wins test-and-set is responsible for (eventually) freeing
 * the list once the block is fully tagged and compiled. */
void * lightrec_recompiler_run_first_pass(struct lightrec_state *state,
					  struct block *block, u32 *pc)
{
	bool freed;

	/* There's no point in running the first pass if the block will never
	 * be compiled. Let the main loop run the interpreter instead. */
	if (block->flags & BLOCK_NEVER_COMPILE)
		return NULL;

	/* If the block is already fully tagged, there is no point in running
	 * the first pass. Request a recompilation of the block, and maybe the
	 * interpreter will run the block in the meantime. */
	if (block->flags & BLOCK_FULLY_TAGGED)
		lightrec_recompiler_add(state->rec, block);

	if (likely(block->function)) {
		if (block->flags & BLOCK_FULLY_TAGGED) {
			/* test_and_set returns the PREVIOUS value: false
			 * means we just claimed the flag and must do the
			 * freeing ourselves. */
			freed = atomic_flag_test_and_set(&block->op_list_freed);

			if (!freed) {
				pr_debug("Block PC 0x%08x is fully tagged"
					 " - free opcode list\n", block->pc);

				/* The block was already compiled but the opcode list
				 * didn't get freed yet - do it now */
				lightrec_free_opcode_list(state, block);
				block->opcode_list = NULL;
			}
		}

		return block->function;
	}

	/* Mark the opcode list as freed, so that the threaded compiler won't
	 * free it while we're using it in the interpreter. */
	freed = atomic_flag_test_and_set(&block->op_list_freed);

	/* Block wasn't compiled yet - run the interpreter */
	*pc = lightrec_emulate_block(state, block, *pc);

	/* Release the flag only if we were the ones who set it above. */
	if (!freed)
		atomic_flag_clear(&block->op_list_freed);

	/* The block got compiled while the interpreter was running.
	 * We can free the opcode list now. */
	if (block->function && (block->flags & BLOCK_FULLY_TAGGED) &&
	    !atomic_flag_test_and_set(&block->op_list_freed)) {
		pr_debug("Block PC 0x%08x is fully tagged"
			 " - free opcode list\n", block->pc);

		lightrec_free_opcode_list(state, block);
		block->opcode_list = NULL;
	}

	return NULL;
}