2014-10-29 21:17:17 +00:00
|
|
|
/*
|
|
|
|
* Copyright © 2014 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Jason Ekstrand (jason@jlekstrand.net)
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "nir.h"
|
2014-12-19 19:49:58 +00:00
|
|
|
#include "nir_worklist.h"
|
2015-02-27 15:32:24 +00:00
|
|
|
#include "nir_vla.h"
|
2014-10-29 21:17:17 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Basic liveness analysis. This works only in SSA form.
|
|
|
|
*
|
|
|
|
* This liveness pass treats phi nodes as being melded to the space between
|
|
|
|
* blocks so that the destinations of a phi are in the livein of the block
|
|
|
|
* in which it resides and the sources are in the liveout of the
|
|
|
|
* corresponding block. By formulating the liveness information in this
|
|
|
|
* way, we ensure that the definition of any variable dominates its entire
|
|
|
|
* live range. This is true because the only way that the definition of an
|
|
|
|
* SSA value may not dominate a use is if the use is in a phi node and the
|
|
|
|
 * uses in phi nodes are in the live-out of the corresponding predecessor
|
|
|
|
* block but not in the live-in of the block containing the phi node.
|
|
|
|
*/
|
|
|
|
|
2015-11-04 01:15:24 +00:00
|
|
|
struct live_ssa_defs_state {
   /* Number of BITSET_WORDs in each live set; computed as
    * BITSET_WORDS(impl->ssa_alloc), one bit per SSA def index.
    */
   unsigned bitset_words;

   /* Used in propagate_across_edge() */
   BITSET_WORD *tmp_live;

   /* Blocks whose liveness still needs (re)processing; drained to a fixed
    * point by nir_live_ssa_defs_impl().
    */
   nir_block_worklist worklist;
};
|
|
|
|
|
2014-12-19 19:49:58 +00:00
|
|
|
/* Initialize the liveness data to zero and add the given block to the
|
|
|
|
* worklist.
|
|
|
|
*/
|
2014-10-29 21:17:17 +00:00
|
|
|
static bool
|
2016-04-08 20:30:02 +01:00
|
|
|
init_liveness_block(nir_block *block,
|
|
|
|
struct live_ssa_defs_state *state)
|
2014-10-29 21:17:17 +00:00
|
|
|
{
|
|
|
|
block->live_in = reralloc(block, block->live_in, BITSET_WORD,
|
|
|
|
state->bitset_words);
|
|
|
|
memset(block->live_in, 0, state->bitset_words * sizeof(BITSET_WORD));
|
|
|
|
|
|
|
|
block->live_out = reralloc(block, block->live_out, BITSET_WORD,
|
|
|
|
state->bitset_words);
|
|
|
|
memset(block->live_out, 0, state->bitset_words * sizeof(BITSET_WORD));
|
|
|
|
|
2014-12-19 19:49:58 +00:00
|
|
|
nir_block_worklist_push_head(&state->worklist, block);
|
|
|
|
|
2014-10-29 21:17:17 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
set_src_live(nir_src *src, void *void_live)
|
|
|
|
{
|
|
|
|
BITSET_WORD *live = void_live;
|
|
|
|
|
|
|
|
if (!src->is_ssa)
|
|
|
|
return true;
|
|
|
|
|
2020-09-29 23:44:57 +01:00
|
|
|
if (src->ssa->parent_instr->type == nir_instr_type_ssa_undef)
|
2014-10-29 21:17:17 +00:00
|
|
|
return true; /* undefined variables are never live */
|
|
|
|
|
2020-07-23 20:29:02 +01:00
|
|
|
BITSET_SET(live, src->ssa->index);
|
2014-10-29 21:17:17 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
2014-12-15 23:15:01 +00:00
|
|
|
set_ssa_def_dead(nir_ssa_def *def, void *void_live)
|
2014-10-29 21:17:17 +00:00
|
|
|
{
|
|
|
|
BITSET_WORD *live = void_live;
|
|
|
|
|
2020-07-23 20:29:02 +01:00
|
|
|
BITSET_CLEAR(live, def->index);
|
2014-10-29 21:17:17 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-12-19 19:49:58 +00:00
|
|
|
/** Propagates the live in of succ across the edge to the live out of pred
|
|
|
|
*
|
|
|
|
* Phi nodes exist "between" blocks and all the phi nodes at the start of a
|
2014-10-29 21:17:17 +00:00
|
|
|
* block act "in parallel". When we propagate from the live_in of one
|
|
|
|
* block to the live out of the other, we have to kill any writes from phis
|
|
|
|
* and make live any sources.
|
2014-12-19 19:49:58 +00:00
|
|
|
*
|
|
|
|
* Returns true if updating live out of pred added anything
|
2014-10-29 21:17:17 +00:00
|
|
|
*/
|
2014-12-19 19:49:58 +00:00
|
|
|
static bool
|
2014-10-29 21:17:17 +00:00
|
|
|
propagate_across_edge(nir_block *pred, nir_block *succ,
|
2015-11-04 01:15:24 +00:00
|
|
|
struct live_ssa_defs_state *state)
|
2014-10-29 21:17:17 +00:00
|
|
|
{
|
2019-07-22 06:51:24 +01:00
|
|
|
BITSET_WORD *live = state->tmp_live;
|
2015-02-26 16:37:48 +00:00
|
|
|
memcpy(live, succ->live_in, state->bitset_words * sizeof *live);
|
2014-10-29 21:17:17 +00:00
|
|
|
|
2016-04-27 02:34:19 +01:00
|
|
|
nir_foreach_instr(instr, succ) {
|
2014-10-29 21:17:17 +00:00
|
|
|
if (instr->type != nir_instr_type_phi)
|
|
|
|
break;
|
|
|
|
nir_phi_instr *phi = nir_instr_as_phi(instr);
|
|
|
|
|
2014-12-15 23:15:01 +00:00
|
|
|
assert(phi->dest.is_ssa);
|
|
|
|
set_ssa_def_dead(&phi->dest.ssa, live);
|
2014-10-29 21:17:17 +00:00
|
|
|
}
|
|
|
|
|
2016-04-27 02:34:19 +01:00
|
|
|
nir_foreach_instr(instr, succ) {
|
2014-10-29 21:17:17 +00:00
|
|
|
if (instr->type != nir_instr_type_phi)
|
|
|
|
break;
|
|
|
|
nir_phi_instr *phi = nir_instr_as_phi(instr);
|
|
|
|
|
2016-04-27 04:16:21 +01:00
|
|
|
nir_foreach_phi_src(src, phi) {
|
2014-10-29 21:17:17 +00:00
|
|
|
if (src->pred == pred) {
|
|
|
|
set_src_live(&src->src, live);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-19 19:49:58 +00:00
|
|
|
BITSET_WORD progress = 0;
|
2014-10-29 21:17:17 +00:00
|
|
|
for (unsigned i = 0; i < state->bitset_words; ++i) {
|
2014-12-19 19:49:58 +00:00
|
|
|
progress |= live[i] & ~pred->live_out[i];
|
2014-10-29 21:17:17 +00:00
|
|
|
pred->live_out[i] |= live[i];
|
|
|
|
}
|
2014-12-19 19:49:58 +00:00
|
|
|
return progress != 0;
|
2014-10-29 21:17:17 +00:00
|
|
|
}
|
|
|
|
|
2014-12-19 19:49:58 +00:00
|
|
|
void
|
2015-11-04 01:15:24 +00:00
|
|
|
nir_live_ssa_defs_impl(nir_function_impl *impl)
|
2014-10-29 21:17:17 +00:00
|
|
|
{
|
2020-07-23 20:29:02 +01:00
|
|
|
struct live_ssa_defs_state state = {
|
|
|
|
.bitset_words = BITSET_WORDS(impl->ssa_alloc),
|
|
|
|
};
|
|
|
|
state.tmp_live = rzalloc_array(impl, BITSET_WORD, state.bitset_words),
|
2014-10-29 21:17:17 +00:00
|
|
|
|
2020-09-29 23:44:57 +01:00
|
|
|
/* Number the instructions so we can do cheap interference tests using the
|
|
|
|
* instruction index.
|
2014-12-19 19:49:58 +00:00
|
|
|
*/
|
2020-09-29 23:44:57 +01:00
|
|
|
nir_metadata_require(impl, nir_metadata_instr_index);
|
2014-10-29 21:17:17 +00:00
|
|
|
|
2014-12-19 19:49:58 +00:00
|
|
|
nir_block_worklist_init(&state.worklist, impl->num_blocks, NULL);
|
2014-10-29 21:17:17 +00:00
|
|
|
|
2020-07-23 20:29:02 +01:00
|
|
|
/* Allocate live_in and live_out sets and add all of the blocks to the
|
|
|
|
* worklist.
|
2014-12-19 19:49:58 +00:00
|
|
|
*/
|
2016-04-08 20:30:02 +01:00
|
|
|
nir_foreach_block(block, impl) {
|
|
|
|
init_liveness_block(block, &state);
|
|
|
|
}
|
|
|
|
|
2014-10-29 21:17:17 +00:00
|
|
|
|
2014-12-19 19:49:58 +00:00
|
|
|
/* We're now ready to work through the worklist and update the liveness
|
|
|
|
* sets of each of the blocks. By the time we get to this point, every
|
|
|
|
* block in the function implementation has been pushed onto the
|
|
|
|
* worklist in reverse order. As long as we keep the worklist
|
|
|
|
* up-to-date as we go, everything will get covered.
|
|
|
|
*/
|
|
|
|
while (!nir_block_worklist_is_empty(&state.worklist)) {
|
|
|
|
/* We pop them off in the reverse order we pushed them on. This way
|
|
|
|
* the first walk of the instructions is backwards so we only walk
|
|
|
|
* once in the case of no control flow.
|
2014-10-29 21:17:17 +00:00
|
|
|
*/
|
2014-12-19 19:49:58 +00:00
|
|
|
nir_block *block = nir_block_worklist_pop_head(&state.worklist);
|
|
|
|
|
|
|
|
memcpy(block->live_in, block->live_out,
|
|
|
|
state.bitset_words * sizeof(BITSET_WORD));
|
2014-10-29 21:17:17 +00:00
|
|
|
|
2014-12-19 19:49:58 +00:00
|
|
|
nir_if *following_if = nir_block_get_following_if(block);
|
|
|
|
if (following_if)
|
|
|
|
set_src_live(&following_if->condition, block->live_in);
|
|
|
|
|
2016-04-27 02:34:19 +01:00
|
|
|
nir_foreach_instr_reverse(instr, block) {
|
2014-12-19 19:49:58 +00:00
|
|
|
/* Phi nodes are handled seperately so we want to skip them. Since
|
|
|
|
* we are going backwards and they are at the beginning, we can just
|
|
|
|
* break as soon as we see one.
|
|
|
|
*/
|
|
|
|
if (instr->type == nir_instr_type_phi)
|
|
|
|
break;
|
|
|
|
|
|
|
|
nir_foreach_ssa_def(instr, set_ssa_def_dead, block->live_in);
|
|
|
|
nir_foreach_src(instr, set_src_live, block->live_in);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Walk over all of the predecessors of the current block updating
|
|
|
|
* their live in with the live out of this one. If anything has
|
|
|
|
* changed, add the predecessor to the work list so that we ensure
|
|
|
|
* that the new information is used.
|
|
|
|
*/
|
|
|
|
set_foreach(block->predecessors, entry) {
|
|
|
|
nir_block *pred = (nir_block *)entry->key;
|
|
|
|
if (propagate_across_edge(pred, block, &state))
|
|
|
|
nir_block_worklist_push_tail(&state.worklist, pred);
|
|
|
|
}
|
2014-10-29 21:17:17 +00:00
|
|
|
}
|
|
|
|
|
2019-07-22 06:51:24 +01:00
|
|
|
ralloc_free(state.tmp_live);
|
2014-12-19 19:49:58 +00:00
|
|
|
nir_block_worklist_fini(&state.worklist);
|
2014-10-29 21:17:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
src_does_not_use_def(nir_src *src, void *def)
|
|
|
|
{
|
|
|
|
return !src->is_ssa || src->ssa != (nir_ssa_def *)def;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
search_for_use_after_instr(nir_instr *start, nir_ssa_def *def)
|
|
|
|
{
|
|
|
|
/* Only look for a use strictly after the given instruction */
|
|
|
|
struct exec_node *node = start->node.next;
|
|
|
|
while (!exec_node_is_tail_sentinel(node)) {
|
|
|
|
nir_instr *instr = exec_node_data(nir_instr, node, node);
|
|
|
|
if (!nir_foreach_src(instr, src_does_not_use_def, def))
|
|
|
|
return true;
|
|
|
|
node = node->next;
|
|
|
|
}
|
2020-09-22 22:56:42 +01:00
|
|
|
|
|
|
|
/* If uses are considered to be in the block immediately preceding the if
|
|
|
|
* so we need to also check the following if condition, if any.
|
|
|
|
*/
|
|
|
|
nir_if *following_if = nir_block_get_following_if(start->block);
|
|
|
|
if (following_if && following_if->condition.is_ssa &&
|
|
|
|
following_if->condition.ssa == def)
|
|
|
|
return true;
|
|
|
|
|
2014-10-29 21:17:17 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns true if def is live at instr assuming that def comes before
|
|
|
|
* instr in a pre DFS search of the dominance tree.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
nir_ssa_def_is_live_at(nir_ssa_def *def, nir_instr *instr)
|
|
|
|
{
|
2020-07-23 20:29:02 +01:00
|
|
|
if (BITSET_TEST(instr->block->live_out, def->index)) {
|
2014-10-29 21:17:17 +00:00
|
|
|
/* Since def dominates instr, if def is in the liveout of the block,
|
|
|
|
* it's live at instr
|
|
|
|
*/
|
|
|
|
return true;
|
|
|
|
} else {
|
2020-07-23 20:29:02 +01:00
|
|
|
if (BITSET_TEST(instr->block->live_in, def->index) ||
|
2014-10-29 21:17:17 +00:00
|
|
|
def->parent_instr->block == instr->block) {
|
|
|
|
/* In this case it is either live coming into instr's block or it
|
|
|
|
* is defined in the same block. In this case, we simply need to
|
|
|
|
* see if it is used after instr.
|
|
|
|
*/
|
|
|
|
return search_for_use_after_instr(instr, def);
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b)
|
|
|
|
{
|
|
|
|
if (a->parent_instr == b->parent_instr) {
|
|
|
|
/* Two variables defined at the same time interfere assuming at
|
|
|
|
* least one isn't dead.
|
|
|
|
*/
|
|
|
|
return true;
|
2020-09-29 23:44:57 +01:00
|
|
|
} else if (a->parent_instr->type == nir_instr_type_ssa_undef ||
|
|
|
|
b->parent_instr->type == nir_instr_type_ssa_undef) {
|
2014-10-29 21:17:17 +00:00
|
|
|
/* If either variable is an ssa_undef, then there's no interference */
|
|
|
|
return false;
|
2020-09-29 23:44:57 +01:00
|
|
|
} else if (a->parent_instr->index < b->parent_instr->index) {
|
2014-10-29 21:17:17 +00:00
|
|
|
return nir_ssa_def_is_live_at(a, b->parent_instr);
|
|
|
|
} else {
|
|
|
|
return nir_ssa_def_is_live_at(b, a->parent_instr);
|
|
|
|
}
|
|
|
|
}
|
2020-07-22 17:06:48 +01:00
|
|
|
|
|
|
|
/* Takes an SSA def's defs and uses and expands the live interval to cover
|
|
|
|
* that range. Control flow effects are handled separately.
|
|
|
|
*/
|
|
|
|
static bool def_cb(nir_ssa_def *def, void *state)
|
|
|
|
{
|
|
|
|
nir_instr_liveness *liveness = state;
|
|
|
|
nir_instr *instr = def->parent_instr;
|
|
|
|
int index = def->index;
|
|
|
|
|
|
|
|
liveness->defs[index].start = MIN2(liveness->defs[index].start, instr->index);
|
|
|
|
|
|
|
|
nir_foreach_use(src, def) {
|
|
|
|
liveness->defs[index].end = MAX2(liveness->defs[index].end,
|
|
|
|
src->parent_instr->index);
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
nir_instr_liveness *
|
|
|
|
nir_live_ssa_defs_per_instr(nir_function_impl *impl)
|
|
|
|
{
|
|
|
|
/* We'll use block-level live_ssa_defs to expand our per-instr ranges for
|
|
|
|
* control flow.
|
|
|
|
*/
|
|
|
|
nir_metadata_require(impl,
|
|
|
|
nir_metadata_block_index |
|
|
|
|
nir_metadata_instr_index |
|
|
|
|
nir_metadata_live_ssa_defs);
|
|
|
|
|
|
|
|
/* Make our struct. */
|
|
|
|
nir_instr_liveness *liveness = ralloc(NULL, nir_instr_liveness);
|
|
|
|
liveness->defs = rzalloc_array(liveness, nir_liveness_bounds,
|
|
|
|
impl->ssa_alloc);
|
|
|
|
|
|
|
|
/* Set our starts so we can use MIN2() as we accumulate bounds. */
|
|
|
|
for (int i = 0; i < impl->ssa_alloc; i++)
|
|
|
|
liveness->defs->start = ~0;
|
|
|
|
|
|
|
|
unsigned last_instr = 0;
|
|
|
|
nir_foreach_block(block, impl) {
|
|
|
|
unsigned index;
|
|
|
|
BITSET_FOREACH_SET(index, block->live_in, impl->ssa_alloc) {
|
|
|
|
liveness->defs[index].start = MIN2(liveness->defs[index].start,
|
|
|
|
last_instr);
|
|
|
|
}
|
|
|
|
|
|
|
|
nir_foreach_instr(instr, block) {
|
|
|
|
nir_foreach_ssa_def(instr, def_cb, liveness);
|
|
|
|
|
|
|
|
last_instr = instr->index;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* track an if src's use. We need to make sure that our value is live
|
|
|
|
* across the if reference, where we don't have an instr->index
|
|
|
|
* representing the use. Mark it as live through the next real
|
|
|
|
* instruction.
|
|
|
|
*/
|
|
|
|
nir_if *nif = nir_block_get_following_if(block);
|
|
|
|
if (nif) {
|
|
|
|
if (nif->condition.is_ssa) {
|
|
|
|
liveness->defs[nif->condition.ssa->index].end = MAX2(
|
|
|
|
liveness->defs[nif->condition.ssa->index].end,
|
|
|
|
last_instr + 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
BITSET_FOREACH_SET(index, block->live_out, impl->ssa_alloc) {
|
|
|
|
liveness->defs[index].end = MAX2(liveness->defs[index].end, last_instr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return liveness;
|
|
|
|
}
|