X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=deps%2Flightrec%2Frecompiler.c;h=a6d2f322a71c03f89204399047abdecca9b8c0c4;hb=41bc558fbe8ff0d8bf822ab2a470ea6bd61a1835;hp=7350adba1d0586fefbec6158d94281d431a501f1;hpb=11357fef3e80fd3b788ffd9a937758de5d5fd622;p=pcsx_rearmed.git

diff --git a/deps/lightrec/recompiler.c b/deps/lightrec/recompiler.c
index 7350adba..a6d2f322 100644
--- a/deps/lightrec/recompiler.c
+++ b/deps/lightrec/recompiler.c
@@ -23,6 +23,7 @@ struct block_rec {
 	struct block *block;
 	struct slist_elm slist;
+	unsigned int requests;
 	bool compiling;
 };
 
@@ -64,19 +65,20 @@ static unsigned int get_processors_count(void)
 	return nb < 1 ? 1 : nb;
 }
 
-static struct slist_elm * lightrec_get_first_elm(struct slist_elm *head)
+static struct block_rec * lightrec_get_best_elm(struct slist_elm *head)
 {
-	struct block_rec *block_rec;
+	struct block_rec *block_rec, *best = NULL;
 	struct slist_elm *elm;
 
 	for (elm = slist_first(head); elm; elm = elm->next) {
 		block_rec = container_of(elm, struct block_rec, slist);
 
-		if (!block_rec->compiling)
-			return elm;
+		if (!block_rec->compiling
+		    && (!best || block_rec->requests > best->requests))
+			best = block_rec;
 	}
 
-	return NULL;
+	return best;
 }
 
 static bool lightrec_cancel_block_rec(struct recompiler *rec,
@@ -106,75 +108,68 @@ static void lightrec_cancel_list(struct recompiler *rec)
 {
 	struct block_rec *block_rec;
-	struct slist_elm *next;
-
-	while (!!(next = lightrec_get_first_elm(&rec->slist))) {
-		block_rec = container_of(next, struct block_rec, slist);
+	struct slist_elm *elm, *head = &rec->slist;
 
+	for (elm = slist_first(head); elm; elm = slist_first(head)) {
+		block_rec = container_of(elm, struct block_rec, slist);
 		lightrec_cancel_block_rec(rec, block_rec);
 	}
-
-	pthread_cond_broadcast(&rec->cond2);
 }
 
 static void lightrec_flush_code_buffer(struct lightrec_state *state, void *d)
 {
 	struct recompiler *rec = d;
 
-	pthread_mutex_lock(&rec->mutex);
-
-	if (rec->must_flush) {
-		lightrec_remove_outdated_blocks(state->block_cache, NULL);
-		rec->must_flush = false;
-	}
-
-	pthread_mutex_unlock(&rec->mutex);
+	lightrec_remove_outdated_blocks(state->block_cache, NULL);
+	rec->must_flush = false;
 }
 
 static void lightrec_compile_list(struct recompiler *rec,
 				  struct recompiler_thd *thd)
 {
 	struct block_rec *block_rec;
-	struct slist_elm *next;
 	struct block *block;
 	int ret;
 
-	while (!!(next = lightrec_get_first_elm(&rec->slist))) {
-		block_rec = container_of(next, struct block_rec, slist);
+	while (!!(block_rec = lightrec_get_best_elm(&rec->slist))) {
 		block_rec->compiling = true;
 		block = block_rec->block;
 
 		pthread_mutex_unlock(&rec->mutex);
 
-		if (likely(!(block->flags & BLOCK_IS_DEAD))) {
+		if (likely(!block_has_flag(block, BLOCK_IS_DEAD))) {
 			ret = lightrec_compile_block(thd->cstate, block);
 
 			if (ret == -ENOMEM) {
 				/* Code buffer is full. Request the reaper to
 				 * flush it. */
 
 				pthread_mutex_lock(&rec->mutex);
+				block_rec->compiling = false;
+				pthread_cond_broadcast(&rec->cond2);
+
+				if (!rec->must_flush) {
+					rec->must_flush = true;
+					lightrec_cancel_list(rec);
+
 					lightrec_reaper_add(rec->state->reaper,
 							    lightrec_flush_code_buffer,
 							    rec);
-				lightrec_cancel_list(rec);
-				rec->must_flush = true;
+				}
 
 				return;
 			}
 
 			if (ret) {
-				pr_err("Unable to compile block at PC 0x%x: %d\n",
+				pr_err("Unable to compile block at "PC_FMT": %d\n",
 				       block->pc, ret);
 			}
 		}
 
 		pthread_mutex_lock(&rec->mutex);
 
-		slist_remove(&rec->slist, next);
+		slist_remove(&rec->slist, &block_rec->slist);
 		lightrec_free(rec->state, MEM_FOR_LIGHTREC,
 			      sizeof(*block_rec), block_rec);
-		pthread_cond_signal(&rec->cond2);
+		pthread_cond_broadcast(&rec->cond2);
 	}
 }
 
@@ -319,8 +314,9 @@ void lightrec_free_recompiler(struct recompiler *rec)
 
 int lightrec_recompiler_add(struct recompiler *rec, struct block *block)
 {
-	struct slist_elm *elm, *prev;
+	struct slist_elm *elm;
 	struct block_rec *block_rec;
+	u32 pc1, pc2;
 	int ret = 0;
 
 	pthread_mutex_lock(&rec->mutex);
@@ -333,30 +329,33 @@ int lightrec_recompiler_add(struct recompiler *rec, struct block *block)
 
 	/* If the block is marked as dead, don't compile it, it will be removed
 	 * as soon as it's safe. */
-	if (block->flags & BLOCK_IS_DEAD)
+	if (block_has_flag(block, BLOCK_IS_DEAD))
 		goto out_unlock;
 
-	for (elm = slist_first(&rec->slist), prev = NULL; elm;
-	     prev = elm, elm = elm->next) {
+	for (elm = slist_first(&rec->slist); elm; elm = elm->next) {
 		block_rec = container_of(elm, struct block_rec, slist);
 
 		if (block_rec->block == block) {
-			/* The block to compile is already in the queue - bump
-			 * it to the top of the list, unless the block is being
-			 * recompiled. */
-			if (prev && !block_rec->compiling &&
-			    !(block->flags & BLOCK_SHOULD_RECOMPILE)) {
-				slist_remove_next(prev);
-				slist_append(&rec->slist, elm);
-			}
+			/* The block to compile is already in the queue -
+			 * increment its counter to increase its priority */
+			block_rec->requests++;
+			goto out_unlock;
+		}
 
+		pc1 = kunseg(block_rec->block->pc);
+		pc2 = kunseg(block->pc);
+		if (pc2 >= pc1 && pc2 < pc1 + block_rec->block->nb_ops * 4) {
+			/* The block we want to compile is already covered by
+			 * another one in the queue - increment its counter to
+			 * increase its priority */
+			block_rec->requests++;
 
 			goto out_unlock;
 		}
 	}
 
 	/* By the time this function was called, the block has been recompiled
 	 * and ins't in the wait list anymore. Just return here. */
-	if (block->function && !(block->flags & BLOCK_SHOULD_RECOMPILE))
+	if (block->function && !block_has_flag(block, BLOCK_SHOULD_RECOMPILE))
 		goto out_unlock;
 
 	block_rec = lightrec_malloc(rec->state, MEM_FOR_LIGHTREC,
@@ -366,18 +365,15 @@ int lightrec_recompiler_add(struct recompiler *rec, struct block *block)
 		goto out_unlock;
 	}
 
-	pr_debug("Adding block PC 0x%x to recompiler\n", block->pc);
+	pr_debug("Adding block "PC_FMT" to recompiler\n", block->pc);
 
 	block_rec->block = block;
 	block_rec->compiling = false;
+	block_rec->requests = 1;
 
 	elm = &rec->slist;
 
-	/* If the block is being recompiled, push it to the end of the queue;
-	 * otherwise push it to the front of the queue. */
-	if (block->flags & BLOCK_SHOULD_RECOMPILE)
-		for (; elm->next; elm = elm->next);
-
+	/* Push the new entry to the front of the queue */
 	slist_append(elm, &block_rec->slist);
 
 	/* Signal the thread */
@@ -419,31 +415,36 @@ out_unlock:
 void * lightrec_recompiler_run_first_pass(struct lightrec_state *state,
 					  struct block *block, u32 *pc)
 {
-	bool freed;
+	u8 old_flags;
 
 	/* There's no point in running the first pass if the block will never
 	 * be compiled. Let the main loop run the interpreter instead. */
-	if (block->flags & BLOCK_NEVER_COMPILE)
+	if (block_has_flag(block, BLOCK_NEVER_COMPILE))
 		return NULL;
 
+	/* The block is marked as dead, and will be removed the next time the
+	 * reaper is run. In the meantime, the old function can still be
+	 * executed. */
+	if (block_has_flag(block, BLOCK_IS_DEAD))
+		return block->function;
+
 	/* If the block is already fully tagged, there is no point in running
 	 * the first pass. Request a recompilation of the block, and maybe the
 	 * interpreter will run the block in the meantime. */
-	if (block->flags & BLOCK_FULLY_TAGGED)
+	if (block_has_flag(block, BLOCK_FULLY_TAGGED))
 		lightrec_recompiler_add(state->rec, block);
 
 	if (likely(block->function)) {
-		if (block->flags & BLOCK_FULLY_TAGGED) {
-			freed = atomic_flag_test_and_set(&block->op_list_freed);
+		if (block_has_flag(block, BLOCK_FULLY_TAGGED)) {
+			old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
 
-			if (!freed) {
-				pr_debug("Block PC 0x%08x is fully tagged"
+			if (!(old_flags & BLOCK_NO_OPCODE_LIST)) {
+				pr_debug("Block "PC_FMT" is fully tagged"
					 " - free opcode list\n", block->pc);
 
				/* The block was already compiled but the opcode list
				 * didn't get freed yet - do it now */
-				lightrec_free_opcode_list(state, block);
-				block->opcode_list = NULL;
+				lightrec_free_opcode_list(state, block->opcode_list);
 			}
 		}
 
@@ -452,23 +453,25 @@ void * lightrec_recompiler_run_first_pass(struct lightrec_state *state,
 
 	/* Mark the opcode list as freed, so that the threaded compiler won't
 	 * free it while we're using it in the interpreter. */
-	freed = atomic_flag_test_and_set(&block->op_list_freed);
+	old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
 
 	/* Block wasn't compiled yet - run the interpreter */
 	*pc = lightrec_emulate_block(state, block, *pc);
 
-	if (!freed)
-		atomic_flag_clear(&block->op_list_freed);
+	if (!(old_flags & BLOCK_NO_OPCODE_LIST))
+		block_clear_flags(block, BLOCK_NO_OPCODE_LIST);
 
 	/* The block got compiled while the interpreter was running.
 	 * We can free the opcode list now. */
-	if (block->function && (block->flags & BLOCK_FULLY_TAGGED) &&
-	    !atomic_flag_test_and_set(&block->op_list_freed)) {
-		pr_debug("Block PC 0x%08x is fully tagged"
-			 " - free opcode list\n", block->pc);
+	if (block->function && block_has_flag(block, BLOCK_FULLY_TAGGED)) {
+		old_flags = block_set_flags(block, BLOCK_NO_OPCODE_LIST);
+
+		if (!(old_flags & BLOCK_NO_OPCODE_LIST)) {
+			pr_debug("Block "PC_FMT" is fully tagged"
+				 " - free opcode list\n", block->pc);
 
-		lightrec_free_opcode_list(state, block);
-		block->opcode_list = NULL;
+			lightrec_free_opcode_list(state, block->opcode_list);
+		}
 	}
 
 	return NULL;
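
Editor's note: the functional core of this diff is that the recompiler queue stops being strictly FIFO. Every queued block_rec now carries a requests counter, lightrec_recompiler_add() bumps it whenever the same block (or a block covering the same PC range) is requested again, and the worker thread picks the pending entry with the highest count through lightrec_get_best_elm() instead of taking the first entry that is not being compiled. The standalone C sketch below only illustrates that selection policy; struct pending, pick_best() and the small driver are invented for the example and are not lightrec code.

/* Illustrative sketch only, not lightrec code: walk a pending list and
 * pick the most-requested entry that no other thread is compiling. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pending {
	unsigned int requests;   /* bumped each time the block is requested */
	bool compiling;          /* skipped while another thread owns it */
	struct pending *next;
};

/* Return the most-requested free entry, or NULL if none is available. */
static struct pending *pick_best(struct pending *head)
{
	struct pending *best = NULL;

	for (struct pending *p = head; p; p = p->next) {
		if (!p->compiling && (!best || p->requests > best->requests))
			best = p;
	}

	return best;
}

int main(void)
{
	struct pending c = { .requests = 1, .compiling = false, .next = NULL };
	struct pending b = { .requests = 5, .compiling = true,  .next = &c };
	struct pending a = { .requests = 3, .compiling = false, .next = &b };

	struct pending *best = pick_best(&a);

	/* Prints 3: 'b' has more requests but is currently being compiled. */
	printf("best->requests = %u\n", best ? best->requests : 0);
	return 0;
}

As in the diff's strict greater-than comparison, ties keep the earlier entry in the list.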
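
Editor's note: the other recurring change is the replacement of the atomic_flag op_list_freed with a BLOCK_NO_OPCODE_LIST block flag. block_set_flags() is used as an atomic test-and-set: it returns the previous flags, so the caller that observes the bit as previously clear is the one responsible for freeing the opcode list, and lightrec_free_opcode_list() now receives the opcode list pointer directly. The sketch below, assuming C11 atomics, mimics that pattern with simplified stand-in names (struct blk, block_set_flags, maybe_free_opcode_list); it is an illustration, not the library's implementation.

/* Illustrative sketch only, not lightrec code: atomically OR a flag in,
 * return the old flags, and let the "winner" free the resource once. */
#include <stdatomic.h>
#include <stdio.h>

#define BLOCK_NO_OPCODE_LIST 0x01u

struct blk {
	_Atomic unsigned int flags;
};

/* Atomically set 'mask' and return the flags as they were before. */
static unsigned int block_set_flags(struct blk *b, unsigned int mask)
{
	return atomic_fetch_or(&b->flags, mask);
}

static void maybe_free_opcode_list(struct blk *b)
{
	unsigned int old_flags = block_set_flags(b, BLOCK_NO_OPCODE_LIST);

	if (!(old_flags & BLOCK_NO_OPCODE_LIST)) {
		/* First caller to set the bit: safe to free exactly once. */
		printf("freeing opcode list\n");
	}
}

int main(void)
{
	struct blk b;

	atomic_init(&b.flags, 0);

	maybe_free_opcode_list(&b);  /* frees */
	maybe_free_opcode_list(&b);  /* no-op: bit already set */
	return 0;
}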