nir: Use u_worklist to back nir_block_worklist

u_worklist is nir_block_worklist, suitably generalized. All we need to do is
define the macros to translate between the APIs.

Signed-off-by: Alyssa Rosenzweig <alyssa@collabora.com>
Reviewed-by: Emma Anholt <emma@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16046>
Author: Alyssa Rosenzweig
Date: 2022-04-19 12:46:43 -04:00
Committed by: Marge Bot
Parent: 04a2c6efb1
Commit: 94b01ddcdd
2 changed files with 22 additions and 143 deletions
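
Conceptually, the change the message describes is a small pattern: a type-generic worklist of void pointers, de-duplicated by an integer index, plus a typedef and a handful of macros that map the type-specific API onto it. The following is a minimal, self-contained sketch of that pattern, not Mesa's actual u_worklist: generic_worklist, block_worklist, and block are invented names, and the real code lives in util/u_worklist.h.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* A queued entry: the pointer plus the index used for de-duplication. */
typedef struct {
   void *entry;
   unsigned index;
} generic_worklist_item;

/* Type-generic FIFO worklist with unique entries: a ring buffer plus a
 * "present" flag per index. */
typedef struct {
   unsigned size;                /* capacity (max index + 1)         */
   unsigned count;               /* entries currently queued         */
   unsigned start;               /* ring-buffer position of the head */
   bool *present;                /* present[i]: entry i is queued    */
   generic_worklist_item *items; /* the ring buffer itself           */
} generic_worklist;

static void
generic_worklist_init(generic_worklist *w, unsigned size)
{
   w->size = size;
   w->count = 0;
   w->start = 0;
   w->present = calloc(size, sizeof(bool));
   w->items = calloc(size, sizeof(generic_worklist_item));
}

static void
generic_worklist_fini(generic_worklist *w)
{
   free(w->present);
   free(w->items);
}

static void
generic_worklist_push_tail(generic_worklist *w, void *entry, unsigned index)
{
   /* Pushing an entry that is already queued is a no-op. */
   if (w->present[index])
      return;

   assert(w->count < w->size);
   unsigned tail = (w->start + w->count) % w->size;
   w->items[tail] = (generic_worklist_item){ entry, index };
   w->count++;
   w->present[index] = true;
}

static void *
generic_worklist_pop_head(generic_worklist *w)
{
   assert(w->count > 0);
   generic_worklist_item item = w->items[w->start];
   w->start = (w->start + 1) % w->size;
   w->count--;
   w->present[item.index] = false;
   return item.entry;
}

/* A stand-in for nir_block: anything with an integer index field works. */
typedef struct {
   unsigned index;
} block;

/* The "translation macros": one typedef plus one macro per entry point,
 * mirroring how nir_block_worklist now wraps u_worklist. */
typedef generic_worklist block_worklist;

#define block_worklist_init(w, n)      generic_worklist_init(w, n)
#define block_worklist_fini(w)         generic_worklist_fini(w)
#define block_worklist_is_empty(w)     ((w)->count == 0)
#define block_worklist_push_tail(w, b) generic_worklist_push_tail(w, b, (b)->index)
#define block_worklist_pop_head(w)     ((block *)generic_worklist_pop_head(w))

int
main(void)
{
   block blocks[4] = { {0}, {1}, {2}, {3} };
   block_worklist w;

   block_worklist_init(&w, 4);
   block_worklist_push_tail(&w, &blocks[2]);
   block_worklist_push_tail(&w, &blocks[0]);
   block_worklist_push_tail(&w, &blocks[2]); /* duplicate: ignored */

   while (!block_worklist_is_empty(&w))
      printf("visiting block %u\n", block_worklist_pop_head(&w)->index);

   block_worklist_fini(&w);
   return 0;
}

One deliberate simplification: the sketch stores the index next to each queued pointer, whereas the real u_worklist macros instead take the element type and the name of its index member (the `index` argument visible in the header diff below) so they can read it back from the entry itself.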

@@ -27,26 +27,6 @@
 #include "nir_worklist.h"

-void
-nir_block_worklist_init(nir_block_worklist *w, unsigned num_blocks,
-                        void *mem_ctx)
-{
-   w->size = num_blocks;
-   w->count = 0;
-   w->start = 0;
-   w->blocks_present = rzalloc_array(mem_ctx, BITSET_WORD,
-                                     BITSET_WORDS(num_blocks));
-   w->blocks = rzalloc_array(mem_ctx, nir_block *, num_blocks);
-}
-
-void
-nir_block_worklist_fini(nir_block_worklist *w)
-{
-   ralloc_free(w->blocks_present);
-   ralloc_free(w->blocks);
-}
-
 void
 nir_block_worklist_add_all(nir_block_worklist *w, nir_function_impl *impl)
 {
@@ -55,88 +35,6 @@ nir_block_worklist_add_all(nir_block_worklist *w, nir_function_impl *impl)
    }
 }

-void
-nir_block_worklist_push_head(nir_block_worklist *w, nir_block *block)
-{
-   /* Pushing a block we already have is a no-op */
-   if (BITSET_TEST(w->blocks_present, block->index))
-      return;
-
-   assert(w->count < w->size);
-
-   if (w->start == 0)
-      w->start = w->size - 1;
-   else
-      w->start--;
-
-   w->count++;
-   w->blocks[w->start] = block;
-   BITSET_SET(w->blocks_present, block->index);
-}
-
-nir_block *
-nir_block_worklist_peek_head(const nir_block_worklist *w)
-{
-   assert(w->count > 0);
-   return w->blocks[w->start];
-}
-
-nir_block *
-nir_block_worklist_pop_head(nir_block_worklist *w)
-{
-   assert(w->count > 0);
-   unsigned head = w->start;
-   w->start = (w->start + 1) % w->size;
-   w->count--;
-   BITSET_CLEAR(w->blocks_present, w->blocks[head]->index);
-   return w->blocks[head];
-}
-
-void
-nir_block_worklist_push_tail(nir_block_worklist *w, nir_block *block)
-{
-   /* Pushing a block we already have is a no-op */
-   if (BITSET_TEST(w->blocks_present, block->index))
-      return;
-
-   assert(w->count < w->size);
-
-   w->count++;
-   unsigned tail = (w->start + w->count - 1) % w->size;
-   w->blocks[tail] = block;
-   BITSET_SET(w->blocks_present, block->index);
-}
-
-nir_block *
-nir_block_worklist_peek_tail(const nir_block_worklist *w)
-{
-   assert(w->count > 0);
-   unsigned tail = (w->start + w->count - 1) % w->size;
-   return w->blocks[tail];
-}
-
-nir_block *
-nir_block_worklist_pop_tail(nir_block_worklist *w)
-{
-   assert(w->count > 0);
-   unsigned tail = (w->start + w->count - 1) % w->size;
-   w->count--;
-   BITSET_CLEAR(w->blocks_present, w->blocks[tail]->index);
-   return w->blocks[tail];
-}
-
 static bool
 nir_instr_worklist_add_srcs_cb(nir_src *src, void *state)
 {
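
Before the header changes, it may help to see how this API is typically driven by a pass. The loop below is a hedged sketch of the usual worklist-driven dataflow iteration in NIR; run_backward_analysis and analyze_block are hypothetical names and the NULL ralloc context is an assumption, but the nir_block_worklist_* entry points are exactly the ones touched by this commit.

#include "nir.h"
#include "nir_worklist.h"

/* Hypothetical per-block transfer function; returns true when the block's
 * result changed and its predecessors need to be revisited. */
static bool
analyze_block(nir_block *block)
{
   (void)block;
   return false; /* placeholder: a real pass updates per-block state here */
}

static void
run_backward_analysis(nir_function_impl *impl)
{
   nir_block_worklist worklist;

   /* The worklist de-duplicates on block->index, so block indices must be
    * up to date before any block is pushed. */
   nir_metadata_require(impl, nir_metadata_block_index);

   nir_block_worklist_init(&worklist, impl->num_blocks, NULL);
   nir_block_worklist_add_all(&worklist, impl);

   while (!nir_block_worklist_is_empty(&worklist)) {
      nir_block *block = nir_block_worklist_pop_head(&worklist);

      if (!analyze_block(block))
         continue;

      /* Re-queue predecessors; pushing a block that is already queued is
       * a no-op, so the worklist never grows past impl->num_blocks. */
      set_foreach(block->predecessors, entry)
         nir_block_worklist_push_tail(&worklist, (nir_block *)entry->key);
   }

   nir_block_worklist_fini(&worklist);
}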

@@ -32,60 +32,41 @@
 #include "nir.h"
 #include "util/set.h"
 #include "util/u_vector.h"
+#include "util/u_worklist.h"

 #ifdef __cplusplus
 extern "C" {
 #endif

-/** Represents a double-ended queue of unique blocks
- *
- * The worklist datastructure guarantees that each block is in the queue at
- * most once.  Pushing a block onto either end of the queue is a no-op if
- * the block is already in the queue.  In order for this to work, the
- * caller must ensure that the blocks are properly indexed.
- */
-typedef struct {
-   /* The total size of the worklist */
-   unsigned size;
+typedef u_worklist nir_block_worklist;

-   /* The number of blocks currently in the worklist */
-   unsigned count;
+#define nir_block_worklist_init(w, num_blocks, mem_ctx) \
+   u_worklist_init(w, num_blocks, mem_ctx)

-   /* The offset in the array of blocks at which the list starts */
-   unsigned start;
+#define nir_block_worklist_fini(w) u_worklist_fini(w)

-   /* A bitset of all of the blocks currently present in the worklist */
-   BITSET_WORD *blocks_present;
+#define nir_block_worklist_is_empty(w) u_worklist_is_empty(w)

-   /* The actual worklist */
-   nir_block **blocks;
-} nir_block_worklist;
+#define nir_block_worklist_push_head(w, block) \
+   u_worklist_push_head(w, block, index)

-void nir_block_worklist_init(nir_block_worklist *w, unsigned num_blocks,
-                             void *mem_ctx);
-void nir_block_worklist_fini(nir_block_worklist *w);
+#define nir_block_worklist_peek_head(w) \
+   u_worklist_peek_head(w, nir_block, index)
+
+#define nir_block_worklist_pop_head(w) \
+   u_worklist_pop_head(w, nir_block, index)
+
+#define nir_block_worklist_push_tail(w, block) \
+   u_worklist_push_tail(w, block, index)
+
+#define nir_block_worklist_peek_tail(w) \
+   u_worklist_peek_tail(w, nir_block, index)
+
+#define nir_block_worklist_pop_tail(w) \
+   u_worklist_pop_tail(w, nir_block, index)

 void nir_block_worklist_add_all(nir_block_worklist *w, nir_function_impl *impl);

-static inline bool
-nir_block_worklist_is_empty(const nir_block_worklist *w)
-{
-   return w->count == 0;
-}
-
-void nir_block_worklist_push_head(nir_block_worklist *w, nir_block *block);
-nir_block *nir_block_worklist_peek_head(const nir_block_worklist *w);
-nir_block *nir_block_worklist_pop_head(nir_block_worklist *w);
-void nir_block_worklist_push_tail(nir_block_worklist *w, nir_block *block);
-nir_block *nir_block_worklist_peek_tail(const nir_block_worklist *w);
-nir_block *nir_block_worklist_pop_tail(nir_block_worklist *w);

 /*
  * This worklist implementation, in contrast to the block worklist, does not
  * have unique entries, meaning a nir_instr can be inserted more than once