nir: Add a better out-of-SSA pass

This commit rewrites the out-of-SSA pass to not be nearly as naive.  It's
based on "Revisiting Out-of-SSA Translation for Correctness, Code Quality,
and Efficiency" by Boissinot et al.  It should be fairly close to
state of the art.

Reviewed-by: Connor Abbott <cwabbott0@gmail.com>
Jason Ekstrand 2014-10-31 11:17:09 -07:00
parent 4f44120ff5
commit 943ddb9458
1 changed file with 733 additions and 90 deletions


@@ -28,134 +28,763 @@
#include "nir.h"
/*
* This file implements an out-of-SSA pass as described in "Revisiting
* Out-of-SSA Translation for Correctness, Code Quality, and Efficiency" by
* Boissinot et al.
*/
struct from_ssa_state {
void *mem_ctx;
void *dead_ctx;
struct hash_table *ssa_table;
struct hash_table *merge_node_table;
nir_instr *instr;
nir_function_impl *impl;
};
/* Returns true if a dominates b */
static bool
ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
{
if (a->live_index == 0) {
/* SSA undefs always dominate */
return true;
} else if (b->live_index < a->live_index) {
return false;
} else if (a->parent_instr->block == b->parent_instr->block) {
return a->live_index <= b->live_index;
} else {
nir_block *block = b->parent_instr->block;
while (block->imm_dom != NULL) {
if (block->imm_dom == a->parent_instr->block)
return true;
block = block->imm_dom;
}
return false;
}
}
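/* Illustrative note (not part of this commit): live_index is assigned by the
 * liveness pass in dominance order, with 0 reserved for ssa_undef values, so
 * the two early-outs above are plain index comparisons.  For example, if
 * ssa_1 (live_index 1) is defined in a block that dominates the block
 * defining ssa_4 (live_index 4), ssa_def_dominates(ssa_4, ssa_1) is rejected
 * immediately by the live_index check, while ssa_def_dominates(ssa_1, ssa_4)
 * walks ssa_4's immediate-dominator chain until it reaches ssa_1's block.
 */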
/* The following data structure, which I have named merge_set, is a way of
* representing a set of non-interfering registers. This is based on the
* concept of a "dominance forest" presented in "Fast Copy Coalescing and
* Live-Range Identification" by Budimlic et al., but the implementation
* concept is taken from "Revisiting Out-of-SSA Translation for Correctness,
* Code Quality, and Efficiency" by Boissinot et al.
*
* Each SSA definition is associated with a merge_node and the association
* is represented by a combination of a hash table and the "def" parameter
* in the merge_node structure. The merge_set stores a linked list of
* merge_node's in dominance order of the SSA definitions. (Since the
* liveness analysis pass indexes the SSA values in dominance order for us,
* this is an easy invariant to maintain.) It is assumed that no pair of the
* nodes in a given set interfere. Merging two sets or checking for
* interference can be done in a single linear-time merge-sort walk of the
* two lists of nodes.
*/
struct merge_set;
typedef struct {
struct exec_node node;
struct merge_set *set;
nir_ssa_def *def;
} merge_node;
typedef struct merge_set {
struct exec_list nodes;
unsigned size;
nir_register *reg;
} merge_set;
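#if 0
/* Illustrative sketch, not part of this commit: a debug helper of the sort
 * one could write to check the invariant described above, namely that the
 * nodes of a merge_set stay sorted by live_index (i.e. in dominance order).
 * The name merge_set_assert_ordered is hypothetical.
 */
static void
merge_set_assert_ordered(merge_set *set)
{
   nir_ssa_def *prev = NULL;
   foreach_list_typed(merge_node, node, node, &set->nodes) {
      if (prev != NULL)
         assert(prev->live_index <= node->def->live_index);
      prev = node->def;
   }
}
#endif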
#if 0
static void
merge_set_dump(merge_set *set, FILE *fp)
{
nir_ssa_def *dom[set->size];
int dom_idx = -1;
foreach_list_typed(merge_node, node, node, &set->nodes) {
while (dom_idx >= 0 && !ssa_def_dominates(dom[dom_idx], node->def))
dom_idx--;
for (int i = 0; i <= dom_idx; i++)
fprintf(fp, " ");
if (node->def->name)
fprintf(fp, "ssa_%d /* %s */\n", node->def->index, node->def->name);
else
fprintf(fp, "ssa_%d\n", node->def->index);
dom[++dom_idx] = node->def;
}
}
#endif
static merge_node *
get_merge_node(nir_ssa_def *def, struct from_ssa_state *state)
{
struct hash_entry *entry =
_mesa_hash_table_search(state->merge_node_table, def);
if (entry)
return entry->data;
merge_set *set = ralloc(state->dead_ctx, merge_set);
exec_list_make_empty(&set->nodes);
set->size = 1;
set->reg = NULL;
merge_node *node = ralloc(state->dead_ctx, merge_node);
node->set = set;
node->def = def;
exec_list_push_head(&set->nodes, &node->node);
_mesa_hash_table_insert(state->merge_node_table, def, node);
return node;
}
static bool
merge_nodes_interfere(merge_node *a, merge_node *b)
{
return nir_ssa_defs_interfere(a->def, b->def);
}
/* Merges b into a */
static merge_set *
merge_merge_sets(merge_set *a, merge_set *b)
{
struct exec_node *an = exec_list_get_head(&a->nodes);
struct exec_node *bn = exec_list_get_head(&b->nodes);
while (!exec_node_is_tail_sentinel(bn)) {
merge_node *a_node = exec_node_data(merge_node, an, node);
merge_node *b_node = exec_node_data(merge_node, bn, node);
if (exec_node_is_tail_sentinel(an) ||
a_node->def->live_index > b_node->def->live_index) {
struct exec_node *next = bn->next;
exec_node_remove(bn);
exec_node_insert_node_before(an, bn);
exec_node_data(merge_node, bn, node)->set = a;
bn = next;
} else {
an = an->next;
}
}
a->size += b->size;
b->size = 0;
return a;
}
/* Checks for any interference between two merge sets
*
* This is an implementation of Algorithm 2 in "Revisiting Out-of-SSA
* Translation for Correctness, Code Quality, and Efficiency" by
* Boissinot et al.
*/
static bool
merge_sets_interfere(merge_set *a, merge_set *b)
{
merge_node *dom[a->size + b->size];
int dom_idx = -1;
struct exec_node *an = exec_list_get_head(&a->nodes);
struct exec_node *bn = exec_list_get_head(&b->nodes);
while (!exec_node_is_tail_sentinel(an) ||
!exec_node_is_tail_sentinel(bn)) {
merge_node *current;
if (exec_node_is_tail_sentinel(an)) {
current = exec_node_data(merge_node, bn, node);
bn = bn->next;
} else if (exec_node_is_tail_sentinel(bn)) {
current = exec_node_data(merge_node, an, node);
an = an->next;
} else {
merge_node *a_node = exec_node_data(merge_node, an, node);
merge_node *b_node = exec_node_data(merge_node, bn, node);
if (a_node->def->live_index <= b_node->def->live_index) {
current = a_node;
an = an->next;
} else {
current = b_node;
bn = bn->next;
}
}
while (dom_idx >= 0 &&
!ssa_def_dominates(dom[dom_idx]->def, current->def))
dom_idx--;
if (dom_idx >= 0 && merge_nodes_interfere(current, dom[dom_idx]))
return true;
dom[++dom_idx] = current;
}
return false;
}
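#if 0
/* Illustrative sketch, not part of this commit: the pattern the helpers above
 * enable.  Two merge sets may be unioned only if no pair of their members
 * interferes; merge_sets_interfere() checks that in one linear walk and
 * merge_merge_sets() performs the union.  This is what
 * coalesce_phi_nodes_block() and agressive_coalesce_parallel_copy() below do;
 * the helper name try_coalesce_defs is hypothetical.
 */
static bool
try_coalesce_defs(nir_ssa_def *a, nir_ssa_def *b, struct from_ssa_state *state)
{
   merge_node *a_node = get_merge_node(a, state);
   merge_node *b_node = get_merge_node(b, state);
   if (a_node->set == b_node->set)
      return true;
   if (merge_sets_interfere(a_node->set, b_node->set))
      return false;
   merge_merge_sets(a_node->set, b_node->set);
   return true;
}
#endif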
static nir_parallel_copy_instr *
block_get_parallel_copy_at_end(nir_block *block, void *mem_ctx)
{
nir_instr *last_instr = nir_block_last_instr(block);
/* First we try and find a parallel copy if it already exists. If the
* last instruction is a jump, it will be right before the jump;
* otherwise, it will be the last instruction.
*/
nir_instr *pcopy_instr;
if (last_instr != NULL && last_instr->type == nir_instr_type_jump)
pcopy_instr = nir_instr_prev(last_instr);
else
pcopy_instr = last_instr;
if (pcopy_instr != NULL &&
pcopy_instr->type == nir_instr_type_parallel_copy) {
/* A parallel copy already exists. */
nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(pcopy_instr);
/* This parallel copy may be the copy for the beginning of some
* block, so we need to check for that before we return it.
*/
if (pcopy->at_end)
return pcopy;
}
/* At this point, we haven't found a suitable parallel copy, so we
* have to create one.
*/
nir_parallel_copy_instr *pcopy = nir_parallel_copy_instr_create(mem_ctx);
pcopy->at_end = true;
if (last_instr && last_instr->type == nir_instr_type_jump) {
nir_instr_insert_before(last_instr, &pcopy->instr);
} else {
nir_instr_insert_after_block(block, &pcopy->instr);
}
return pcopy;
}
static bool
isolate_phi_nodes_block(nir_block *block, void *void_state)
{
struct from_ssa_state *state = void_state;
nir_instr *last_phi_instr = NULL;
nir_foreach_instr(block, instr) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi)
break;
last_phi_instr = instr;
}
/* If we don't have any phi's, then there's nothing for us to do. */
if (last_phi_instr == NULL)
return true;
/* If we have phi nodes, we need to create a parallel copy at the
* start of this block but after the phi nodes.
*/
nir_parallel_copy_instr *block_pcopy =
nir_parallel_copy_instr_create(state->dead_ctx);
nir_instr_insert_after(last_phi_instr, &block_pcopy->instr);
nir_foreach_instr(block, instr) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi)
break;
nir_phi_instr *phi = nir_instr_as_phi(instr);
assert(phi->dest.is_ssa);
foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
nir_parallel_copy_instr *pcopy =
block_get_parallel_copy_at_end(src->pred, state->dead_ctx);
nir_parallel_copy_copy *copy = ralloc(state->dead_ctx,
nir_parallel_copy_copy);
exec_list_push_tail(&pcopy->copies, &copy->node);
copy->src = nir_src_copy(src->src, state->dead_ctx);
_mesa_set_add(src->src.ssa->uses,
_mesa_hash_pointer(&pcopy->instr), &pcopy->instr);
copy->dest.is_ssa = true;
nir_ssa_def_init(state->impl, &pcopy->instr, &copy->dest.ssa,
phi->dest.ssa.num_components, src->src.ssa->name);
struct set_entry *entry = _mesa_set_search(src->src.ssa->uses,
_mesa_hash_pointer(instr),
instr);
if (entry)
/* It is possible that a phi node can use the same source twice
* but for different basic blocks. If that happens, entry will
* be NULL because we already deleted it. This is safe
* because, by the time the loop is done, we will have deleted
* all of the sources of the phi from their respective use sets
* and moved them to the parallel copy definitions.
*/
_mesa_set_remove(src->src.ssa->uses, entry);
src->src.ssa = &copy->dest.ssa;
_mesa_set_add(copy->dest.ssa.uses, _mesa_hash_pointer(instr), instr);
}
nir_parallel_copy_copy *copy = ralloc(state->dead_ctx,
nir_parallel_copy_copy);
exec_list_push_tail(&block_pcopy->copies, &copy->node);
copy->dest.is_ssa = true;
nir_ssa_def_init(state->impl, &block_pcopy->instr, &copy->dest.ssa,
phi->dest.ssa.num_components, phi->dest.ssa.name);
nir_src copy_dest_src = {
.ssa = &copy->dest.ssa,
.is_ssa = true,
};
nir_ssa_def_rewrite_uses(&phi->dest.ssa, copy_dest_src, state->mem_ctx);
copy->src.is_ssa = true;
copy->src.ssa = &phi->dest.ssa;
_mesa_set_add(phi->dest.ssa.uses,
_mesa_hash_pointer(&block_pcopy->instr),
&block_pcopy->instr);
}
return true;
}
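/* Illustrative note (not part of this commit): schematically, isolating a phi
 *
 *    block B2:  x = phi(B0: a, B1: b)
 *
 * yields
 *
 *    block B0:  ...; pcopy at end:   a' = a
 *    block B1:  ...; pcopy at end:   b' = b
 *    block B2:  x' = phi(B0: a', B1: b')
 *               pcopy after phis:    x = x'
 *
 * Every phi source and destination is now a parallel-copy temporary whose
 * live range is confined to the copies, which is the conventional-SSA (CSSA)
 * form that the coalescing below relies on.
 */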
static bool
coalesce_phi_nodes_block(nir_block *block, void *void_state)
{
struct from_ssa_state *state = void_state;
nir_foreach_instr(block, instr) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi)
break;
nir_phi_instr *phi = nir_instr_as_phi(instr);
assert(phi->dest.is_ssa);
merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);
foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
assert(src->src.is_ssa);
merge_node *src_node = get_merge_node(src->src.ssa, state);
if (src_node->set != dest_node->set)
merge_merge_sets(dest_node->set, src_node->set);
}
}
return true;
}
static void
agressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
struct from_ssa_state *state)
{
foreach_list_typed_safe(nir_parallel_copy_copy, copy, node, &pcopy->copies) {
if (!copy->src.is_ssa)
continue;
/* Don't try and coalesce these */
if (copy->dest.ssa.num_components != copy->src.ssa->num_components)
continue;
merge_node *src_node = get_merge_node(copy->src.ssa, state);
merge_node *dest_node = get_merge_node(&copy->dest.ssa, state);
if (src_node->set == dest_node->set)
continue;
if (!merge_sets_interfere(src_node->set, dest_node->set))
merge_merge_sets(src_node->set, dest_node->set);
}
}
static bool
agressive_coalesce_block(nir_block *block, void *void_state)
{
struct from_ssa_state *state = void_state;
nir_foreach_instr(block, instr) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi) {
if (instr->type != nir_instr_type_parallel_copy)
break; /* The parallel copy must be right after the phis */
nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(instr);
agressive_coalesce_parallel_copy(pcopy, state);
if (pcopy->at_end)
return true;
break;
}
}
nir_instr *last_instr = nir_block_last_instr(block);
if (last_instr && last_instr->type == nir_instr_type_parallel_copy) {
nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(last_instr);
if (pcopy->at_end)
agressive_coalesce_parallel_copy(pcopy, state);
}
return true;
}
static nir_register *
get_register_for_ssa_def(nir_ssa_def *def, struct from_ssa_state *state)
{
struct hash_entry *entry =
_mesa_hash_table_search(state->merge_node_table, def);
if (entry) {
merge_node *node = (merge_node *)entry->data;
/* If it doesn't have a register yet, create one. Note that all of
* the things in the merge set should be the same so it doesn't
* matter which node's definition we use.
*/
if (node->set->reg == NULL) {
node->set->reg = nir_local_reg_create(state->impl);
node->set->reg->name = def->name;
node->set->reg->num_components = def->num_components;
node->set->reg->num_array_elems = 0;
}
return node->set->reg;
}
entry = _mesa_hash_table_search(state->ssa_table, def);
if (entry) {
return (nir_register *)entry->data;
} else {
nir_register *reg = nir_local_reg_create(state->impl);
reg->name = def->name;
reg->num_components = def->num_components;
reg->num_array_elems = 0;
_mesa_hash_table_insert(state->ssa_table, def, reg);
return reg;
}
}
static bool
rewrite_ssa_src(nir_src *src, void *void_state)
{
struct from_ssa_state *state = void_state;
if (src->is_ssa) {
/* We don't need to remove it from the uses set because that is going
* away. We just need to add it to the one for the register. */
nir_register *reg = get_register_for_ssa_def(src->ssa, state);
memset(src, 0, sizeof *src);
src->reg.reg = reg;
_mesa_set_add(reg->uses, _mesa_hash_pointer(state->instr), state->instr);
}
return true;
}
static bool
rewrite_ssa_dest(nir_dest *dest, void *void_state)
{
struct from_ssa_state *state = void_state;
if (dest->is_ssa) {
_mesa_set_destroy(dest->ssa.uses, NULL);
_mesa_set_destroy(dest->ssa.if_uses, NULL);
nir_register *reg = get_register_for_ssa_def(&dest->ssa, state);
memset(dest, 0, sizeof *dest);
dest->reg.reg = reg;
_mesa_set_add(reg->defs, _mesa_hash_pointer(state->instr), state->instr);
}
return true;
}
/* Resolves ssa definitions to registers. While we're at it, we also
* remove phi nodes and ssa_undef instructions
*/
static bool
resolve_registers_block(nir_block *block, void *void_state)
{
struct from_ssa_state *state = void_state;
nir_foreach_instr_safe(block, instr) {
if (instr->type == nir_instr_type_ssa_undef ||
instr->type == nir_instr_type_phi) {
nir_instr_remove(instr);
ralloc_steal(state->dead_ctx, instr);
continue;
}
state->instr = instr;
nir_foreach_src(instr, rewrite_ssa_src, state);
nir_foreach_dest(instr, rewrite_ssa_dest, state);
}
state->instr = NULL;
nir_if *following_if = nir_block_following_if(block);
if (following_if && following_if->condition.is_ssa) {
nir_register *reg = get_register_for_ssa_def(following_if->condition.ssa,
state);
memset(&following_if->condition, 0, sizeof following_if->condition);
following_if->condition.reg.reg = reg;
_mesa_set_add(reg->if_uses, _mesa_hash_pointer(following_if),
following_if);
}
return true;
}
static void
emit_copy(nir_parallel_copy_instr *pcopy, nir_src src, nir_src dest_src,
void *mem_ctx)
{
assert(!dest_src.is_ssa &&
dest_src.reg.indirect == NULL &&
dest_src.reg.base_offset == 0);
nir_dest dest = {
.reg.reg = dest_src.reg.reg,
.reg.indirect = NULL,
.reg.base_offset = 0,
.is_ssa = false,
};
if (src.is_ssa)
assert(src.ssa->num_components >= dest.reg.reg->num_components);
else
assert(src.reg.reg->num_components >= dest.reg.reg->num_components);
nir_alu_instr *mov = nir_alu_instr_create(mem_ctx, nir_op_imov);
mov->src[0].src = nir_src_copy(src, mem_ctx);
mov->dest.dest = nir_dest_copy(dest, mem_ctx);
mov->dest.write_mask = (1 << dest.reg.reg->num_components) - 1;
nir_instr_insert_before(&pcopy->instr, &mov->instr);
}
/* Resolves a single parallel copy operation into a sequence of mov's
*
* This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
* Correctness, Code Quality, and Efficiency" by Boissinot et al.
* However, I never got the algorithm to work as written, so this version
* is slightly modified.
*
* The algorithm works by playing this little shell game with the values.
* We start by recording where every source value is and which source value
each destination value should receive. We then grab any copy whose
* destination is "empty", i.e. not used as a source, and do the following:
* - Find where its source value currently lives
* - Emit the move instruction
* - Set the location of the source value to the destination
* - Mark the location containing the source value
* - Mark the destination as no longer needing to be copied
*
* When we run out of "empty" destinations, we have a cycle and so we
* create a temporary register, copy to that register, and mark the value
* we copied as living in that temporary. Now, the cycle is broken, so we
* can continue with the above steps.
*/
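/* Illustrative note (not part of this commit): for the cyclic parallel copy
 *
 *    a = b;  b = a;
 *
 * neither destination starts out "empty" (both a and b are also sources), so
 * the loop below breaks the cycle with a temporary, e.g.
 *
 *    tmp = a;  a = b;  b = tmp;
 *
 * For an acyclic copy like  a = b; c = a;  the destination c is never used as
 * a source, so the moves can simply be ordered  c = a;  a = b;  and no
 * temporary is needed.
 */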
static void
resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
struct from_ssa_state *state)
{
unsigned num_copies = 0;
foreach_list_typed_safe(nir_parallel_copy_copy, copy, node, &pcopy->copies) {
/* Sources may be SSA */
if (!copy->src.is_ssa && copy->src.reg.reg == copy->dest.reg.reg)
continue;
num_copies++;
}
if (num_copies == 0) {
/* Hooray, we don't need any copies! */
nir_instr_remove(&pcopy->instr);
return;
}
/* The register/source corresponding to the given index */
nir_src values[num_copies * 2];
memset(values, 0, sizeof values);
/* The current location of a given piece of data */
int loc[num_copies * 2];
/* The piece of data that the given piece of data is to be copied from */
int pred[num_copies * 2];
/* Initialize loc and pred. We will use -1 for "null" */
memset(loc, -1, sizeof loc);
memset(pred, -1, sizeof pred);
/* The destinations we have yet to properly fill */
int to_do[num_copies * 2];
int to_do_idx = -1;
/* Now we set everything up:
* - All values get assigned a temporary index
* - Current locations are set from sources
* - Predecessors are recorded from sources and destinations
*/
int num_vals = 0;
foreach_list_typed(nir_parallel_copy_copy, copy, node, &pcopy->copies) {
/* Sources may be SSA */
if (!copy->src.is_ssa && copy->src.reg.reg == copy->dest.reg.reg)
continue;
int src_idx = -1;
for (int i = 0; i < num_vals; ++i) {
if (nir_srcs_equal(values[i], copy->src))
src_idx = i;
}
if (src_idx < 0) {
src_idx = num_vals++;
values[src_idx] = copy->src;
}
nir_src dest_src = {
.reg.reg = copy->dest.reg.reg,
.reg.indirect = NULL,
.reg.base_offset = 0,
.is_ssa = false,
};
int dest_idx = -1;
for (int i = 0; i < num_vals; ++i) {
if (nir_srcs_equal(values[i], dest_src)) {
/* Each destination of a parallel copy instruction should be
* unique. A destination may get used as a source, so we still
* have to walk the list. However, the predecessor should not,
* at this point, be set yet, so we should have -1 here.
*/
assert(pred[i] == -1);
dest_idx = i;
}
}
if (dest_idx < 0) {
dest_idx = num_vals++;
values[dest_idx] = dest_src;
}
loc[src_idx] = src_idx;
pred[dest_idx] = src_idx;
to_do[++to_do_idx] = dest_idx;
}
/* Currently empty destinations we can go ahead and fill */
int ready[num_copies * 2];
int ready_idx = -1;
/* Mark the ones that are ready for copying. We know an index is a
* destination if it has a predecessor and it's ready for copying if
* it's not marked as containing data.
*/
for (int i = 0; i < num_vals; i++) {
if (pred[i] != -1 && loc[i] == -1)
ready[++ready_idx] = i;
}
while (to_do_idx >= 0) {
while (ready_idx >= 0) {
int b = ready[ready_idx--];
int a = pred[b];
emit_copy(pcopy, values[loc[a]], values[b], state->mem_ctx);
/* If any other copies want a, they can find it at b */
loc[a] = b;
/* b has been filled, mark it as not needing to be copied */
pred[b] = -1;
/* If a needs to be filled, it's ready for copying now */
if (pred[a] != -1)
ready[++ready_idx] = a;
}
int b = to_do[to_do_idx--];
if (pred[b] == -1)
continue;
/* If we got here, then we don't have any more trivial copies that we
* can do. We have to break a cycle, so we create a new temporary
* register for that purpose. Normally, if going out of SSA after
* register allocation, you would want to avoid creating temporary
* registers. However, we are going out of SSA before register
* allocation, so we would rather not create extra register
* dependencies for the backend to deal with. If it wants, the
* backend can coalesce the (possibly multiple) temporaries.
*/
assert(num_vals < num_copies * 2);
nir_register *reg = nir_local_reg_create(state->impl);
reg->name = "copy_temp";
reg->num_array_elems = 0;
if (values[b].is_ssa)
reg->num_components = values[b].ssa->num_components;
else
reg->num_components = values[b].reg.reg->num_components;
values[num_vals].is_ssa = false;
values[num_vals].reg.reg = reg;
emit_copy(pcopy, values[b], values[num_vals], state->mem_ctx);
loc[b] = num_vals;
ready[++ready_idx] = b;
num_vals++;
}
nir_instr_remove(&pcopy->instr);
}
/* Resolves the parallel copies in a block. Each block can have at most
* two: one at the beginning, right after all the phi nodes, and one at
* the end (or right before the final jump if it exists).
*/
static bool
resolve_parallel_copies_block(nir_block *block, void *void_state)
{
struct from_ssa_state *state = void_state;
/* At this point, we have removed all of the phi nodes. If a parallel
* copy existed right after the phi nodes in this block, it is now the
* first instruction.
*/
nir_instr *first_instr = nir_block_first_instr(block);
if (first_instr == NULL)
return true; /* Empty, nothing to do. */
if (first_instr->type == nir_instr_type_parallel_copy) {
nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(first_instr);
resolve_parallel_copy(pcopy, state);
}
nir_instr *last_instr = nir_block_last_instr(block);
if (last_instr == NULL)
return true; /* Now empty, nothing to do. */
/* If the last instruction is a jump, the parallel copy will be before
* the jump.
*/
if (last_instr->type == nir_instr_type_jump)
last_instr = nir_instr_prev(last_instr);
if (last_instr && last_instr->type == nir_instr_type_parallel_copy) {
nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(last_instr);
if (pcopy->at_end)
resolve_parallel_copy(pcopy, state);
}
return true;
@@ -168,16 +797,30 @@ nir_convert_from_ssa_impl(nir_function_impl *impl)
state.mem_ctx = ralloc_parent(impl);
state.dead_ctx = ralloc_context(NULL);
state.impl = impl;
state.merge_node_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
nir_foreach_block(impl, isolate_phi_nodes_block, &state);
nir_metadata_dirty(impl, nir_metadata_block_index |
nir_metadata_dominance);
nir_metadata_require(impl, nir_metadata_live_variables |
nir_metadata_dominance);
nir_foreach_block(impl, coalesce_phi_nodes_block, &state);
nir_foreach_block(impl, agressive_coalesce_block, &state);
state.ssa_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
nir_foreach_block(impl, resolve_registers_block, &state);
nir_foreach_block(impl, resolve_parallel_copies_block, &state);
/* Clean up dead instructions and the hash tables */
_mesa_hash_table_destroy(state.ssa_table, NULL);
_mesa_hash_table_destroy(state.merge_node_table, NULL);
ralloc_free(state.dead_ctx);
}
void