2019-09-17 12:22:17 +01:00
|
|
|
/*
|
|
|
|
* Copyright © 2018 Valve Corporation
|
|
|
|
* Copyright © 2018 Google
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2019-10-24 17:27:25 +01:00
|
|
|
#include "aco_builder.h"
|
2021-06-09 14:40:03 +01:00
|
|
|
#include "aco_ir.h"
|
2021-06-09 09:14:54 +01:00
|
|
|
|
2021-06-10 10:33:15 +01:00
|
|
|
#include "common/sid.h"
|
2019-10-24 17:27:25 +01:00
|
|
|
|
2021-07-20 10:48:15 +01:00
|
|
|
#include <algorithm>
|
aco/spill: Prefer unordered_map over map for next use distances
This changes the iteration order of next use distances, so some "random"
changes to shader metrics are expected.
fossil-db for Navi14:
Totals from 1239 (0.82% of 150305) affected shaders:
SpillSGPRs: 10559 -> 10562 (+0.03%); split: -0.05%, +0.08%
SpillVGPRs: 1632 -> 1863 (+14.15%)
CodeSize: 19321468 -> 19304164 (-0.09%); split: -0.09%, +0.01%
Instrs: 3593957 -> 3591647 (-0.06%); split: -0.07%, +0.01%
Latency: 103120695 -> 102475647 (-0.63%); split: -0.63%, +0.01%
InvThroughput: 23897614 -> 23575320 (-1.35%); split: -1.36%, +0.02%
VClause: 66406 -> 66943 (+0.81%); split: -0.01%, +0.81%
SClause: 118559 -> 118548 (-0.01%)
Copies: 310871 -> 308950 (-0.62%); split: -0.69%, +0.08%
Branches: 123386 -> 123413 (+0.02%); split: -0.00%, +0.03%
These numbers mostly come from parallel-rdp ubershaders. Small changes are
also found in the rdr2 and rage2 shader metrics, whereas others are not
significantly affected.
Reviewed-by: Daniel Schürmann <daniel@schuermann.dev>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11925>
2021-07-13 11:31:12 +01:00
|
|
|
#include <cstring>
|
2019-09-17 12:22:17 +01:00
|
|
|
#include <map>
|
2020-03-11 10:02:20 +00:00
|
|
|
#include <set>
|
2019-09-17 12:22:17 +01:00
|
|
|
#include <stack>
|
aco/spill: Prefer unordered_map over map for next use distances
This changes the iteration order of next use distances, so some "random"
changes to shader metrics are expected.
fossil-db for Navi14:
Totals from 1239 (0.82% of 150305) affected shaders:
SpillSGPRs: 10559 -> 10562 (+0.03%); split: -0.05%, +0.08%
SpillVGPRs: 1632 -> 1863 (+14.15%)
CodeSize: 19321468 -> 19304164 (-0.09%); split: -0.09%, +0.01%
Instrs: 3593957 -> 3591647 (-0.06%); split: -0.07%, +0.01%
Latency: 103120695 -> 102475647 (-0.63%); split: -0.63%, +0.01%
InvThroughput: 23897614 -> 23575320 (-1.35%); split: -1.36%, +0.02%
VClause: 66406 -> 66943 (+0.81%); split: -0.01%, +0.81%
SClause: 118559 -> 118548 (-0.01%)
Copies: 310871 -> 308950 (-0.62%); split: -0.69%, +0.08%
Branches: 123386 -> 123413 (+0.02%); split: -0.00%, +0.03%
These numbers mostly come from parallel-rdp ubershaders. Small changes are
also found in the rdr2 and rage2 shader metrics, whereas others are not
significantly affected.
Reviewed-by: Daniel Schürmann <daniel@schuermann.dev>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11925>
2021-07-13 11:31:12 +01:00
|
|
|
#include <unordered_map>
|
2021-06-09 14:40:03 +01:00
|
|
|
#include <unordered_set>
|
|
|
|
#include <vector>
|
2019-09-17 12:22:17 +01:00
|
|
|
|
aco/spill: Prefer unordered_map over map for next use distances
This changes the iteration order of next use distances, so some "random"
changes to shader metrics are expected.
fossil-db for Navi14:
Totals from 1239 (0.82% of 150305) affected shaders:
SpillSGPRs: 10559 -> 10562 (+0.03%); split: -0.05%, +0.08%
SpillVGPRs: 1632 -> 1863 (+14.15%)
CodeSize: 19321468 -> 19304164 (-0.09%); split: -0.09%, +0.01%
Instrs: 3593957 -> 3591647 (-0.06%); split: -0.07%, +0.01%
Latency: 103120695 -> 102475647 (-0.63%); split: -0.63%, +0.01%
InvThroughput: 23897614 -> 23575320 (-1.35%); split: -1.36%, +0.02%
VClause: 66406 -> 66943 (+0.81%); split: -0.01%, +0.81%
SClause: 118559 -> 118548 (-0.01%)
Copies: 310871 -> 308950 (-0.62%); split: -0.69%, +0.08%
Branches: 123386 -> 123413 (+0.02%); split: -0.00%, +0.03%
These numbers mostly come from parallel-rdp ubershaders. Small changes are
also found in the rdr2 and rage2 shader metrics, whereas others are not
significantly affected.
Reviewed-by: Daniel Schürmann <daniel@schuermann.dev>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11925>
2021-07-13 11:31:12 +01:00
|
|
|
namespace std {
|
|
|
|
template <> struct hash<aco::Temp> {
|
|
|
|
size_t operator()(aco::Temp temp) const noexcept
|
|
|
|
{
|
|
|
|
uint32_t v;
|
|
|
|
std::memcpy(&v, &temp, sizeof(temp));
|
|
|
|
return std::hash<uint32_t>{}(v);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace std
|
|
|
|
|
2019-09-17 12:22:17 +01:00
|
|
|
/*
|
|
|
|
* Implements the spilling algorithm on SSA-form from
|
|
|
|
* "Register Spilling and Live-Range Splitting for SSA-Form Programs"
|
|
|
|
* by Matthias Braun and Sebastian Hack.
|
|
|
|
*/
|
|
|
|
|
|
|
|
namespace aco {
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
/* Bookkeeping for a rematerializable value: instead of reloading the value
 * from spill storage, the spiller can re-execute this instruction to
 * recompute it (see should_rematerialize()/do_reload()). */
struct remat_info {
   /* The instruction whose (single) definition can be recomputed.
    * Non-owning; the instruction lives in its block's instruction list. */
   Instruction* instr;
};
|
|
|
|
|
|
|
|
/* All state shared across the phases of the spiller. One instance is built
 * per program; most vectors below are indexed by block index. */
struct spill_ctx {
   /* Register demand the spiller must bring the program down to. */
   RegisterDemand target_pressure;
   Program* program;
   /* Per block, per instruction: the register demand recorded by liveness
    * analysis (moved in via the constructor). */
   std::vector<std::vector<RegisterDemand>> register_demand;
   /* Per block: original temp -> renamed temp after reload. */
   std::vector<std::map<Temp, Temp>> renames;
   /* Per block: temps spilled at block entry / exit, mapped to spill ids. */
   std::vector<std::unordered_map<Temp, uint32_t>> spills_entry;
   std::vector<std::unordered_map<Temp, uint32_t>> spills_exit;
   /* Per block: whether the block has already been processed. */
   std::vector<bool> processed;
   /* Stack of enclosing loop header blocks during traversal. */
   std::stack<Block*, std::vector<Block*>> loop_header;
   /* Per block: next-use distance of each live temp at the block start/end.
    * The pair appears to be (block index of next use, instruction distance)
    * — values are merged across predecessors via get_dominator(). */
   std::vector<std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_start;
   std::vector<std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_end;
   std::vector<std::vector<std::pair<Temp, uint32_t>>> local_next_use_distance; /* Working buffer */
   /* Per spill id: the spilled value's register class and the set of spill
    * ids it interferes with (must not share a spill slot). */
   std::vector<std::pair<RegClass, std::unordered_set<uint32_t>>> interferences;
   /* Groups of spill ids that should be assigned the same spill slot. */
   std::vector<std::vector<uint32_t>> affinities;
   /* Per spill id: whether any reload was emitted (unused spills can be
    * dropped later). */
   std::vector<bool> is_reloaded;
   /* Temps that can be recomputed instead of reloaded from memory. */
   std::unordered_map<Temp, remat_info> remat;
   /* Rematerialization source instructions with no reload using them yet. */
   std::set<Instruction*> unused_remats;
   unsigned wave_size;

   spill_ctx(const RegisterDemand target_pressure_, Program* program_,
             std::vector<std::vector<RegisterDemand>> register_demand_)
       : target_pressure(target_pressure_), program(program_),
         register_demand(std::move(register_demand_)), renames(program->blocks.size()),
         spills_entry(program->blocks.size()), spills_exit(program->blocks.size()),
         processed(program->blocks.size(), false), wave_size(program->wave_size)
   {}

   /* Record that spill ids `first` and `second` should share a spill slot.
    * Affinity is transitive, so groups containing either id are merged. */
   void add_affinity(uint32_t first, uint32_t second)
   {
      /* Linear scan for the groups (if any) already containing each id.
       * affinities.size() acts as the "not found" sentinel. */
      unsigned found_first = affinities.size();
      unsigned found_second = affinities.size();
      for (unsigned i = 0; i < affinities.size(); i++) {
         std::vector<uint32_t>& vec = affinities[i];
         for (uint32_t entry : vec) {
            if (entry == first)
               found_first = i;
            else if (entry == second)
               found_second = i;
         }
      }
      if (found_first == affinities.size() && found_second == affinities.size()) {
         /* neither id is grouped yet: start a new group */
         affinities.emplace_back(std::vector<uint32_t>({first, second}));
      } else if (found_first < affinities.size() && found_second == affinities.size()) {
         affinities[found_first].push_back(second);
      } else if (found_second < affinities.size() && found_first == affinities.size()) {
         affinities[found_second].push_back(first);
      } else if (found_first != found_second) {
         /* merge second into first */
         affinities[found_first].insert(affinities[found_first].end(),
                                        affinities[found_second].begin(),
                                        affinities[found_second].end());
         affinities.erase(std::next(affinities.begin(), found_second));
      } else {
         /* both ids already share a group: nothing to do */
         assert(found_first == found_second);
      }
   }

   /* Record that two spill ids are live simultaneously and therefore must
    * not be assigned the same spill slot. Symmetric; cross-type (sgpr/vgpr)
    * pairs are ignored since they never compete for the same slot. */
   void add_interference(uint32_t first, uint32_t second)
   {
      if (interferences[first].first.type() != interferences[second].first.type())
         return;

      /* insert both directions; skip the mirror insert if already present */
      bool inserted = interferences[first].second.insert(second).second;
      if (inserted)
         interferences[second].second.insert(first);
   }

   /* Create a fresh spill id for a value of register class `rc` and set up
    * its (initially empty) interference and reload bookkeeping. */
   uint32_t allocate_spill_id(RegClass rc)
   {
      interferences.emplace_back(rc, std::unordered_set<uint32_t>());
      is_reloaded.push_back(false);
      return next_spill_id++;
   }

   uint32_t next_spill_id = 0;
};
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
int32_t
|
|
|
|
get_dominator(int idx_a, int idx_b, Program* program, bool is_linear)
|
2019-09-17 12:22:17 +01:00
|
|
|
{
|
|
|
|
|
|
|
|
if (idx_a == -1)
|
|
|
|
return idx_b;
|
|
|
|
if (idx_b == -1)
|
|
|
|
return idx_a;
|
|
|
|
if (is_linear) {
|
|
|
|
while (idx_a != idx_b) {
|
|
|
|
if (idx_a > idx_b)
|
|
|
|
idx_a = program->blocks[idx_a].linear_idom;
|
|
|
|
else
|
|
|
|
idx_b = program->blocks[idx_b].linear_idom;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
while (idx_a != idx_b) {
|
|
|
|
if (idx_a > idx_b)
|
|
|
|
idx_a = program->blocks[idx_a].logical_idom;
|
|
|
|
else
|
|
|
|
idx_b = program->blocks[idx_b].logical_idom;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert(idx_a != -1);
|
|
|
|
return idx_a;
|
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* One step of the backward dataflow that computes, for every block, the
 * next-use distance of each live temp at the block's start and end.
 * Whenever a predecessor's live-out information changes, `worklist` is
 * raised so that predecessor gets (re)visited by the caller's fix-point
 * loop in compute_global_next_uses(). */
void
next_uses_per_block(spill_ctx& ctx, unsigned block_idx, uint32_t& worklist)
{
   Block* block = &ctx.program->blocks[block_idx];
   /* start from the distances at the block end and walk backwards */
   ctx.next_use_distances_start[block_idx] = ctx.next_use_distances_end[block_idx];
   auto& next_use_distances_start = ctx.next_use_distances_start[block_idx];

   /* to compute the next use distance at the beginning of the block, we have to add the block's
    * size */
   for (std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>::iterator it =
           next_use_distances_start.begin();
        it != next_use_distances_start.end(); ++it)
      it->second.second = it->second.second + block->instructions.size();

   /* backward scan over the non-phi instructions: definitions kill a temp's
    * next use, operands establish a (closer) next use at this instruction */
   int idx = block->instructions.size() - 1;
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      if (instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi)
         break;

      for (const Definition& def : instr->definitions) {
         if (def.isTemp())
            next_use_distances_start.erase(def.getTemp());
      }

      for (const Operand& op : instr->operands) {
         /* omit exec mask */
         if (op.isFixed() && op.physReg() == exec)
            continue;
         if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
            continue;
         if (op.isTemp())
            next_use_distances_start[op.getTemp()] = {block_idx, idx};
      }
      idx--;
   }

   /* the entry block has no predecessors, so nothing may remain live-in */
   assert(block_idx != 0 || next_use_distances_start.empty());
   std::unordered_set<Temp> phi_defs;
   /* remaining instructions are phis: propagate each phi's next-use distance
    * to the corresponding operand in the matching predecessor */
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      assert(instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi);

      /* default: the use is the phi itself, at distance 0 in this block */
      std::pair<uint32_t, uint32_t> distance{block_idx, 0};

      /* if the phi result is live (has a later use), propagate that distance
       * instead — but only the first time we see this definition */
      auto it = instr->definitions[0].isTemp() ? next_use_distances_start.find(instr->definitions[0].getTemp())
                                               : next_use_distances_start.end();
      if (it != next_use_distances_start.end() &&
          phi_defs.insert(instr->definitions[0].getTemp()).second) {
         distance = it->second;
      }

      for (unsigned i = 0; i < instr->operands.size(); i++) {
         /* logical phis read along logical edges, linear phis along linear ones */
         unsigned pred_idx =
            instr->opcode == aco_opcode::p_phi ? block->logical_preds[i] : block->linear_preds[i];
         if (instr->operands[i].isTemp()) {
            auto insert_result = ctx.next_use_distances_end[pred_idx].insert(
               std::make_pair(instr->operands[i].getTemp(), distance));
            const bool inserted = insert_result.second;
            std::pair<uint32_t, uint32_t>& entry_distance = insert_result.first->second;
            /* re-queue the predecessor if its live-out info changed */
            if (inserted || entry_distance != distance)
               worklist = std::max(worklist, pred_idx + 1);
            entry_distance = distance;
         }
      }
      idx--;
   }

   /* all remaining live vars must be live-out at the predecessors */
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair : next_use_distances_start) {
      Temp temp = pair.first;
      /* phi definitions were already handled above */
      if (phi_defs.count(temp)) {
         continue;
      }
      uint32_t distance = pair.second.second;
      uint32_t dom = pair.second.first;
      std::vector<unsigned>& preds = temp.is_linear() ? block->linear_preds : block->logical_preds;
      for (unsigned pred_idx : preds) {
         /* penalize back-edges so loop-carried values look farther away */
         if (ctx.program->blocks[pred_idx].loop_nest_depth > block->loop_nest_depth)
            distance += 0xFFFF;
         auto insert_result = ctx.next_use_distances_end[pred_idx].insert(
            std::make_pair(temp, std::pair<uint32_t, uint32_t>{}));
         const bool inserted = insert_result.second;
         std::pair<uint32_t, uint32_t>& entry_distance = insert_result.first->second;

         /* merge with an existing entry: common dominator block, minimum distance */
         if (!inserted) {
            dom = get_dominator(dom, entry_distance.first, ctx.program, temp.is_linear());
            distance = std::min(entry_distance.second, distance);
         }
         if (entry_distance != std::pair<uint32_t, uint32_t>{dom, distance}) {
            worklist = std::max(worklist, pred_idx + 1);
            entry_distance = {dom, distance};
         }
      }
   }
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
void
|
|
|
|
compute_global_next_uses(spill_ctx& ctx)
|
2019-09-17 12:22:17 +01:00
|
|
|
{
|
|
|
|
ctx.next_use_distances_start.resize(ctx.program->blocks.size());
|
|
|
|
ctx.next_use_distances_end.resize(ctx.program->blocks.size());
|
2021-07-15 15:36:09 +01:00
|
|
|
|
|
|
|
uint32_t worklist = ctx.program->blocks.size();
|
|
|
|
while (worklist) {
|
|
|
|
unsigned block_idx = --worklist;
|
2019-09-17 12:22:17 +01:00
|
|
|
next_uses_per_block(ctx, block_idx, worklist);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
bool
|
|
|
|
should_rematerialize(aco_ptr<Instruction>& instr)
|
2019-09-17 12:22:17 +01:00
|
|
|
{
|
|
|
|
/* TODO: rematerialization is only supported for VOP1, SOP1 and PSEUDO */
|
2021-06-09 09:14:54 +01:00
|
|
|
if (instr->format != Format::VOP1 && instr->format != Format::SOP1 &&
|
|
|
|
instr->format != Format::PSEUDO && instr->format != Format::SOPK)
|
2019-09-17 12:22:17 +01:00
|
|
|
return false;
|
2021-06-09 09:14:54 +01:00
|
|
|
/* TODO: pseudo-instruction rematerialization is only supported for
|
|
|
|
* p_create_vector/p_parallelcopy */
|
2021-01-20 15:27:16 +00:00
|
|
|
if (instr->isPseudo() && instr->opcode != aco_opcode::p_create_vector &&
|
aco: always use p_parallelcopy for pre-RA copies
Most fossil-db changes are because literals are applied earlier
(in label_instruction), so use counts are more accurate and more literals
are applied.
fossil-db (Navi):
Totals from 79551 (57.89% of 137413) affected shaders:
SGPRs: 4549610 -> 4542802 (-0.15%); split: -0.19%, +0.04%
VGPRs: 3326764 -> 3324172 (-0.08%); split: -0.10%, +0.03%
SpillSGPRs: 38886 -> 34562 (-11.12%); split: -11.14%, +0.02%
CodeSize: 240143456 -> 240001008 (-0.06%); split: -0.11%, +0.05%
MaxWaves: 1078919 -> 1079281 (+0.03%); split: +0.04%, -0.01%
Instrs: 46627073 -> 46528490 (-0.21%); split: -0.22%, +0.01%
fossil-db (Polaris):
Totals from 98463 (70.90% of 138881) affected shaders:
SGPRs: 5164689 -> 5164353 (-0.01%); split: -0.02%, +0.01%
VGPRs: 3920936 -> 3921856 (+0.02%); split: -0.00%, +0.03%
SpillSGPRs: 56298 -> 52259 (-7.17%); split: -7.22%, +0.04%
CodeSize: 258680092 -> 258692712 (+0.00%); split: -0.02%, +0.03%
MaxWaves: 620863 -> 620823 (-0.01%); split: +0.00%, -0.01%
Instrs: 50776289 -> 50757577 (-0.04%); split: -0.04%, +0.00%
Signed-off-by: Rhys Perry <pendingchaos02@gmail.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Daniel Schürmann <daniel@schuermann.dev>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7216>
2020-10-14 13:50:24 +01:00
|
|
|
instr->opcode != aco_opcode::p_parallelcopy)
|
2019-09-17 12:22:17 +01:00
|
|
|
return false;
|
2021-01-20 15:27:16 +00:00
|
|
|
if (instr->isSOPK() && instr->opcode != aco_opcode::s_movk_i32)
|
2019-11-06 16:47:06 +00:00
|
|
|
return false;
|
2019-09-17 12:22:17 +01:00
|
|
|
|
|
|
|
for (const Operand& op : instr->operands) {
|
|
|
|
/* TODO: rematerialization using temporaries isn't yet supported */
|
2021-02-23 14:07:43 +00:00
|
|
|
if (!op.isConstant())
|
2019-09-17 12:22:17 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* TODO: rematerialization with multiple definitions isn't yet supported */
|
|
|
|
if (instr->definitions.size() > 1)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Create the instruction that brings a spilled value back into a register:
 * either a clone of its defining instruction (rematerialization) or a
 * p_reload pseudo-instruction referencing `spill_id`. The result defines
 * `new_name`; lowering of p_reload happens in a later pass. */
aco_ptr<Instruction>
do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t spill_id)
{
   std::unordered_map<Temp, remat_info>::iterator remat = ctx.remat.find(tmp);
   if (remat != ctx.remat.end()) {
      /* rematerialize: clone the defining instruction, retargeted at new_name */
      Instruction* instr = remat->second.instr;
      assert((instr->isVOP1() || instr->isSOP1() || instr->isPseudo() || instr->isSOPK()) &&
             "unsupported");
      assert((instr->format != Format::PSEUDO || instr->opcode == aco_opcode::p_create_vector ||
              instr->opcode == aco_opcode::p_parallelcopy) &&
             "unsupported");
      assert(instr->definitions.size() == 1 && "unsupported");

      /* the concrete subclass must match the source format */
      aco_ptr<Instruction> res;
      if (instr->isVOP1()) {
         res.reset(create_instruction<VOP1_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isSOP1()) {
         res.reset(create_instruction<SOP1_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isPseudo()) {
         res.reset(create_instruction<Pseudo_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isSOPK()) {
         res.reset(create_instruction<SOPK_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
         /* the immediate lives outside the common fields and must be copied
          * explicitly */
         res->sopk().imm = instr->sopk().imm;
      }
      for (unsigned i = 0; i < instr->operands.size(); i++) {
         res->operands[i] = instr->operands[i];
         if (instr->operands[i].isTemp()) {
            /* should_rematerialize() only accepts constant operands, so this
             * path is unreachable today; kept for when temp operands become
             * supported */
            assert(false && "unsupported");
            if (ctx.remat.count(instr->operands[i].getTemp()))
               ctx.unused_remats.erase(ctx.remat[instr->operands[i].getTemp()].instr);
         }
      }
      res->definitions[0] = Definition(new_name);
      return res;
   } else {
      /* not rematerializable: emit a p_reload referencing the spill id */
      aco_ptr<Pseudo_instruction> reload{
         create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 1, 1)};
      reload->operands[0] = Operand::c32(spill_id);
      reload->definitions[0] = Definition(new_name);
      /* mark the id as used so the matching p_spill is kept */
      ctx.is_reloaded[spill_id] = true;
      return reload;
   }
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
void
|
|
|
|
get_rematerialize_info(spill_ctx& ctx)
|
2019-09-17 12:22:17 +01:00
|
|
|
{
|
|
|
|
for (Block& block : ctx.program->blocks) {
|
|
|
|
bool logical = false;
|
|
|
|
for (aco_ptr<Instruction>& instr : block.instructions) {
|
|
|
|
if (instr->opcode == aco_opcode::p_logical_start)
|
|
|
|
logical = true;
|
|
|
|
else if (instr->opcode == aco_opcode::p_logical_end)
|
|
|
|
logical = false;
|
|
|
|
if (logical && should_rematerialize(instr)) {
|
|
|
|
for (const Definition& def : instr->definitions) {
|
|
|
|
if (def.isTemp()) {
|
2020-11-27 06:03:27 +00:00
|
|
|
ctx.remat[def.getTemp()] = remat_info{instr.get()};
|
2021-07-15 14:11:44 +01:00
|
|
|
ctx.unused_remats.insert(instr.get());
|
2019-09-17 12:22:17 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-19 18:32:38 +01:00
|
|
|
/* Fill `local_next_uses` with, per instruction of `block`, the list of live
 * temps and the instruction index of their next use (block size + global
 * distance for uses past the block end). Computed backwards from the
 * block's live-out set; stops at the phi boundary. */
void
update_local_next_uses(spill_ctx& ctx, Block* block,
                       std::vector<std::vector<std::pair<Temp, uint32_t>>>& local_next_uses)
{
   if (local_next_uses.size() < block->instructions.size()) {
      /* Allocate more next-use-maps. Note that by never reducing the vector size, we enable
       * future calls to this function to re-use already allocated map memory. */
      local_next_uses.resize(block->instructions.size());
   }

   /* seed the last instruction's list from the block's live-out distances */
   local_next_uses[block->instructions.size() - 1].clear();
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
        ctx.next_use_distances_end[block->index]) {
      local_next_uses[block->instructions.size() - 1].push_back(std::make_pair<Temp, uint32_t>(
         (Temp)pair.first, pair.second.second + block->instructions.size()));
   }

   for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      if (!instr)
         break;
      /* phis are handled separately by the caller */
      if (instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi)
         break;

      /* start from the successor instruction's list (already computed) */
      if (idx != (int)block->instructions.size() - 1) {
         local_next_uses[idx] = local_next_uses[idx + 1];
      }

      for (const Operand& op : instr->operands) {
         /* omit the exec mask and linear vgprs */
         if (op.isFixed() && op.physReg() == exec)
            continue;
         if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
            continue;
         if (op.isTemp()) {
            /* an operand here makes `idx` the temp's next use from this
             * point backwards; linear search since the lists are small */
            auto it = std::find_if(local_next_uses[idx].begin(), local_next_uses[idx].end(),
                                   [op](auto& pair) { return pair.first == op.getTemp(); });
            if (it == local_next_uses[idx].end()) {
               local_next_uses[idx].push_back(std::make_pair<Temp, uint32_t>(op.getTemp(), idx));
            } else {
               it->second = idx;
            }
         }
      }
      for (const Definition& def : instr->definitions) {
         if (def.isTemp()) {
            /* the temp is defined here, so it has no next use before `idx` */
            auto it = std::find_if(local_next_uses[idx].begin(), local_next_uses[idx].end(),
                                   [def](auto& pair) { return pair.first == def.getTemp(); });
            if (it != local_next_uses[idx].end()) {
               local_next_uses[idx].erase(it);
            }
         }
      }
   }
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Register demand immediately before instruction `idx` of block
 * `block_idx`: the recorded demand after the previous instruction, or a
 * demand derived from the first instruction when there is none. */
RegisterDemand
get_demand_before(spill_ctx& ctx, unsigned block_idx, unsigned idx)
{
   if (idx > 0)
      return ctx.register_demand[block_idx][idx - 1];

   /* block start: no previous instruction to read the demand from, so let
    * the aco_ir helper derive it from the first instruction itself */
   RegisterDemand demand_at_first = ctx.register_demand[block_idx][0];
   aco_ptr<Instruction>& first_instr = ctx.program->blocks[block_idx].instructions[0];
   aco_ptr<Instruction> no_instr_before(nullptr);
   return get_demand_before(demand_at_first, first_instr, no_instr_before);
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Register demand of everything live at the start of block `block_idx`,
 * including killed unspilled phi definitions and sgprs defined by the
 * branch instructions of linear predecessors. */
RegisterDemand
get_live_in_demand(spill_ctx& ctx, unsigned block_idx)
{
   Block& block = ctx.program->blocks[block_idx];
   RegisterDemand demand = RegisterDemand();

   /* count the phis at the top of the block */
   unsigned phi_count = 0;
   for (aco_ptr<Instruction>& phi : block.instructions) {
      if (!is_phi(phi))
         break;
      phi_count++;

      /* Killed phi definitions increase pressure in the predecessor but not
       * the block they're in. Since the loops below are both to control
       * pressure of the start of this block and the ends of it's
       * predecessors, we need to count killed unspilled phi definitions here. */
      const Definition& def = phi->definitions[0];
      if (def.isTemp() && def.isKill() && !ctx.spills_entry[block_idx].count(def.getTemp()))
         demand += def.getTemp();
   }

   demand += get_demand_before(ctx, block_idx, phi_count);

   /* Consider register pressure from linear predecessors. This can affect
    * reg_pressure if the branch instructions define sgprs. */
   for (unsigned pred : block.linear_preds)
      demand.sgpr = std::max<int16_t>(demand.sgpr, ctx.register_demand[pred].back().sgpr);

   return demand;
}
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
RegisterDemand
|
|
|
|
init_live_in_vars(spill_ctx& ctx, Block* block, unsigned block_idx)
|
2019-09-17 12:22:17 +01:00
|
|
|
{
|
|
|
|
RegisterDemand spilled_registers;
|
|
|
|
|
|
|
|
/* first block, nothing was spilled before */
|
|
|
|
if (block_idx == 0)
|
|
|
|
return {0, 0};
|
|
|
|
|
2021-03-26 13:12:43 +00:00
|
|
|
/* next use distances at the beginning of the current block */
|
2021-07-16 10:33:32 +01:00
|
|
|
const auto& next_use_distances = ctx.next_use_distances_start[block_idx];
|
2021-03-26 13:12:43 +00:00
|
|
|
|
2019-09-17 12:22:17 +01:00
|
|
|
/* loop header block */
|
|
|
|
if (block->loop_nest_depth > ctx.program->blocks[block_idx - 1].loop_nest_depth) {
|
|
|
|
assert(block->linear_preds[0] == block_idx - 1);
|
|
|
|
assert(block->logical_preds[0] == block_idx - 1);
|
|
|
|
|
|
|
|
/* create new loop_info */
|
|
|
|
ctx.loop_header.emplace(block);
|
|
|
|
|
|
|
|
/* check how many live-through variables should be spilled */
|
2021-02-22 17:02:24 +00:00
|
|
|
RegisterDemand reg_pressure = get_live_in_demand(ctx, block_idx);
|
|
|
|
RegisterDemand loop_demand = reg_pressure;
|
2019-09-17 12:22:17 +01:00
|
|
|
unsigned i = block_idx;
|
|
|
|
while (ctx.program->blocks[i].loop_nest_depth >= block->loop_nest_depth) {
|
|
|
|
assert(ctx.program->blocks.size() > i);
|
2021-02-22 17:02:24 +00:00
|
|
|
loop_demand.update(ctx.program->blocks[i++].register_demand);
|
2019-09-17 12:22:17 +01:00
|
|
|
}
|
|
|
|
unsigned loop_end = i;
|
|
|
|
|
2020-11-25 18:57:05 +00:00
|
|
|
for (auto spilled : ctx.spills_exit[block_idx - 1]) {
|
2021-03-26 13:12:43 +00:00
|
|
|
auto it = next_use_distances.find(spilled.first);
|
2020-08-06 17:38:41 +01:00
|
|
|
|
2021-03-26 13:12:43 +00:00
|
|
|
/* variable is not live at loop entry: probably a phi operand */
|
|
|
|
if (it == next_use_distances.end())
|
2020-08-06 17:38:41 +01:00
|
|
|
continue;
|
|
|
|
|
2020-11-25 18:57:05 +00:00
|
|
|
/* keep constants and live-through variables spilled */
|
|
|
|
if (it->second.first >= loop_end || ctx.remat.count(spilled.first)) {
|
|
|
|
ctx.spills_entry[block_idx][spilled.first] = spilled.second;
|
|
|
|
spilled_registers += spilled.first;
|
2021-02-22 17:02:24 +00:00
|
|
|
loop_demand -= spilled.first;
|
2020-11-25 18:57:05 +00:00
|
|
|
}
|
2020-08-06 17:38:41 +01:00
|
|
|
}
|
|
|
|
|
2021-02-22 14:58:46 +00:00
|
|
|
/* select live-through variables and constants */
|
|
|
|
RegType type = RegType::vgpr;
|
2021-02-22 17:02:24 +00:00
|
|
|
while (loop_demand.exceeds(ctx.target_pressure)) {
|
2021-02-22 14:58:46 +00:00
|
|
|
/* if VGPR demand is low enough, select SGPRs */
|
2021-02-22 17:02:24 +00:00
|
|
|
if (type == RegType::vgpr && loop_demand.vgpr <= ctx.target_pressure.vgpr)
|
2021-02-22 14:58:46 +00:00
|
|
|
type = RegType::sgpr;
|
|
|
|
/* if SGPR demand is low enough, break */
|
2021-02-22 17:02:24 +00:00
|
|
|
if (type == RegType::sgpr && loop_demand.sgpr <= ctx.target_pressure.sgpr)
|
2019-09-17 12:22:17 +01:00
|
|
|
break;
|
|
|
|
|
|
|
|
unsigned distance = 0;
|
|
|
|
Temp to_spill;
|
2021-07-16 10:33:32 +01:00
|
|
|
for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
|
|
|
|
next_use_distances) {
|
2021-02-22 14:58:46 +00:00
|
|
|
if (pair.first.type() == type &&
|
2021-06-09 09:14:54 +01:00
|
|
|
(pair.second.first >= loop_end ||
|
|
|
|
(ctx.remat.count(pair.first) && type == RegType::sgpr)) &&
|
2021-07-13 11:59:58 +01:00
|
|
|
pair.second.second > distance && !ctx.spills_entry[block_idx].count(pair.first)) {
|
2019-09-17 12:22:17 +01:00
|
|
|
to_spill = pair.first;
|
|
|
|
distance = pair.second.second;
|
|
|
|
}
|
|
|
|
}
|
2021-02-22 14:58:46 +00:00
|
|
|
|
|
|
|
/* select SGPRs or break */
|
|
|
|
if (distance == 0) {
|
|
|
|
if (type == RegType::sgpr)
|
|
|
|
break;
|
|
|
|
type = RegType::sgpr;
|
|
|
|
continue;
|
|
|
|
}
|
2019-09-17 12:22:17 +01:00
|
|
|
|
|
|
|
uint32_t spill_id;
|
2021-07-13 11:59:58 +01:00
|
|
|
if (!ctx.spills_exit[block_idx - 1].count(to_spill)) {
|
2019-09-17 12:22:17 +01:00
|
|
|
spill_id = ctx.allocate_spill_id(to_spill.regClass());
|
|
|
|
} else {
|
|
|
|
spill_id = ctx.spills_exit[block_idx - 1][to_spill];
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx.spills_entry[block_idx][to_spill] = spill_id;
|
2021-02-22 14:58:46 +00:00
|
|
|
spilled_registers += to_spill;
|
2021-02-22 17:02:24 +00:00
|
|
|
loop_demand -= to_spill;
|
2019-09-17 12:22:17 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* shortcut */
|
2021-02-22 17:02:24 +00:00
|
|
|
if (!loop_demand.exceeds(ctx.target_pressure))
|
2019-09-17 12:22:17 +01:00
|
|
|
return spilled_registers;
|
|
|
|
|
|
|
|
/* if reg pressure is too high at beginning of loop, add variables with furthest use */
|
2021-02-22 17:02:24 +00:00
|
|
|
reg_pressure -= spilled_registers;
|
2020-08-04 17:08:43 +01:00
|
|
|
|
2021-02-22 14:58:46 +00:00
|
|
|
while (reg_pressure.exceeds(ctx.target_pressure)) {
|
2019-09-17 12:22:17 +01:00
|
|
|
unsigned distance = 0;
|
|
|
|
Temp to_spill;
|
2021-02-22 14:58:46 +00:00
|
|
|
type = reg_pressure.vgpr > ctx.target_pressure.vgpr ? RegType::vgpr : RegType::sgpr;
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2021-07-16 10:33:32 +01:00
|
|
|
for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
|
|
|
|
next_use_distances) {
|
2021-06-09 09:14:54 +01:00
|
|
|
if (pair.first.type() == type && pair.second.second > distance &&
|
2021-07-13 11:59:58 +01:00
|
|
|
!ctx.spills_entry[block_idx].count(pair.first)) {
|
2019-09-17 12:22:17 +01:00
|
|
|
to_spill = pair.first;
|
|
|
|
distance = pair.second.second;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert(distance != 0);
|
|
|
|
ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
|
2021-02-22 14:58:46 +00:00
|
|
|
spilled_registers += to_spill;
|
|
|
|
reg_pressure -= to_spill;
|
2019-09-17 12:22:17 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return spilled_registers;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* branch block */
|
2019-10-30 11:04:22 +00:00
|
|
|
if (block->linear_preds.size() == 1 && !(block->kind & block_kind_loop_exit)) {
|
2019-09-17 12:22:17 +01:00
|
|
|
/* keep variables spilled if they are alive and not used in the current block */
|
|
|
|
unsigned pred_idx = block->linear_preds[0];
|
|
|
|
for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
|
2021-07-13 11:59:58 +01:00
|
|
|
if (pair.first.type() != RegType::sgpr) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
auto next_use_distance_it = next_use_distances.find(pair.first);
|
|
|
|
if (next_use_distance_it != next_use_distances.end() &&
|
|
|
|
next_use_distance_it->second.first != block_idx) {
|
2019-09-17 12:22:17 +01:00
|
|
|
ctx.spills_entry[block_idx].insert(pair);
|
|
|
|
spilled_registers.sgpr += pair.first.size();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (block->logical_preds.size() == 1) {
|
|
|
|
pred_idx = block->logical_preds[0];
|
|
|
|
for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
|
2021-07-13 11:59:58 +01:00
|
|
|
if (pair.first.type() != RegType::vgpr) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
auto next_use_distance_it = next_use_distances.find(pair.first);
|
|
|
|
if (next_use_distance_it != next_use_distances.end() &&
|
|
|
|
next_use_distance_it->second.first != block_idx) {
|
2019-09-17 12:22:17 +01:00
|
|
|
ctx.spills_entry[block_idx].insert(pair);
|
|
|
|
spilled_registers.vgpr += pair.first.size();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* if register demand is still too high, we just keep all spilled live vars
|
|
|
|
* and process the block */
|
2019-09-17 12:22:17 +01:00
|
|
|
if (block->register_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
|
|
|
|
pred_idx = block->linear_preds[0];
|
|
|
|
for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
|
2021-07-13 11:59:58 +01:00
|
|
|
if (pair.first.type() == RegType::sgpr && next_use_distances.count(pair.first) &&
|
2019-09-17 12:22:17 +01:00
|
|
|
ctx.spills_entry[block_idx].insert(pair).second) {
|
|
|
|
spilled_registers.sgpr += pair.first.size();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-06-09 09:14:54 +01:00
|
|
|
if (block->register_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr &&
|
|
|
|
block->logical_preds.size() == 1) {
|
2019-09-17 12:22:17 +01:00
|
|
|
pred_idx = block->logical_preds[0];
|
|
|
|
for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
|
2021-07-13 11:59:58 +01:00
|
|
|
if (pair.first.type() == RegType::vgpr && next_use_distances.count(pair.first) &&
|
2019-09-17 12:22:17 +01:00
|
|
|
ctx.spills_entry[block_idx].insert(pair).second) {
|
|
|
|
spilled_registers.vgpr += pair.first.size();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return spilled_registers;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* else: merge block */
|
|
|
|
std::set<Temp> partial_spills;
|
|
|
|
|
|
|
|
/* keep variables spilled on all incoming paths */
|
2021-07-16 10:33:32 +01:00
|
|
|
for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair : next_use_distances) {
|
2021-06-09 09:14:54 +01:00
|
|
|
std::vector<unsigned>& preds =
|
|
|
|
pair.first.is_linear() ? block->linear_preds : block->logical_preds;
|
|
|
|
/* If it can be rematerialized, keep the variable spilled if all predecessors do not reload
|
|
|
|
* it. Otherwise, if any predecessor reloads it, ensure it's reloaded on all other
|
|
|
|
* predecessors. The idea is that it's better in practice to rematerialize redundantly than to
|
|
|
|
* create lots of phis. */
|
|
|
|
/* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db
|
|
|
|
* doesn't seem to exercise this path much) */
|
2019-09-17 12:22:17 +01:00
|
|
|
bool remat = ctx.remat.count(pair.first);
|
|
|
|
bool spill = !remat;
|
|
|
|
uint32_t spill_id = 0;
|
|
|
|
for (unsigned pred_idx : preds) {
|
|
|
|
/* variable is not even live at the predecessor: probably from a phi */
|
2021-07-13 11:59:58 +01:00
|
|
|
if (!ctx.next_use_distances_end[pred_idx].count(pair.first)) {
|
2019-09-17 12:22:17 +01:00
|
|
|
spill = false;
|
|
|
|
break;
|
|
|
|
}
|
2021-07-13 11:59:58 +01:00
|
|
|
if (!ctx.spills_exit[pred_idx].count(pair.first)) {
|
2019-09-17 12:22:17 +01:00
|
|
|
if (!remat)
|
|
|
|
spill = false;
|
|
|
|
} else {
|
|
|
|
partial_spills.insert(pair.first);
|
2021-06-09 09:14:54 +01:00
|
|
|
/* it might be that on one incoming path, the variable has a different spill_id, but
|
|
|
|
* add_couple_code() will take care of that. */
|
2019-09-17 12:22:17 +01:00
|
|
|
spill_id = ctx.spills_exit[pred_idx][pair.first];
|
|
|
|
if (remat)
|
|
|
|
spill = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (spill) {
|
|
|
|
ctx.spills_entry[block_idx][pair.first] = spill_id;
|
|
|
|
partial_spills.erase(pair.first);
|
|
|
|
spilled_registers += pair.first;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* same for phis */
|
2021-02-22 17:02:24 +00:00
|
|
|
for (aco_ptr<Instruction>& phi : block->instructions) {
|
|
|
|
if (!is_phi(phi))
|
|
|
|
break;
|
2021-04-27 16:26:09 +01:00
|
|
|
if (!phi->definitions[0].isTemp())
|
|
|
|
continue;
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
std::vector<unsigned>& preds =
|
|
|
|
phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
|
2019-09-17 12:22:17 +01:00
|
|
|
bool spill = true;
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < phi->operands.size(); i++) {
|
2021-02-22 13:06:05 +00:00
|
|
|
/* non-temp operands can increase the register pressure */
|
|
|
|
if (!phi->operands[i].isTemp()) {
|
|
|
|
partial_spills.insert(phi->definitions[0].getTemp());
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2021-07-13 11:59:58 +01:00
|
|
|
if (!ctx.spills_exit[preds[i]].count(phi->operands[i].getTemp()))
|
2019-09-17 12:22:17 +01:00
|
|
|
spill = false;
|
|
|
|
else
|
|
|
|
partial_spills.insert(phi->definitions[0].getTemp());
|
|
|
|
}
|
|
|
|
if (spill) {
|
2021-06-09 09:14:54 +01:00
|
|
|
ctx.spills_entry[block_idx][phi->definitions[0].getTemp()] =
|
|
|
|
ctx.allocate_spill_id(phi->definitions[0].regClass());
|
2019-09-17 12:22:17 +01:00
|
|
|
partial_spills.erase(phi->definitions[0].getTemp());
|
|
|
|
spilled_registers += phi->definitions[0].getTemp();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if reg pressure at first instruction is still too high, add partially spilled variables */
|
2021-02-22 17:02:24 +00:00
|
|
|
RegisterDemand reg_pressure = get_live_in_demand(ctx, block_idx);
|
|
|
|
reg_pressure -= spilled_registers;
|
2020-08-04 17:08:43 +01:00
|
|
|
|
2021-02-22 14:58:46 +00:00
|
|
|
while (reg_pressure.exceeds(ctx.target_pressure)) {
|
2019-09-17 12:22:17 +01:00
|
|
|
assert(!partial_spills.empty());
|
|
|
|
std::set<Temp>::iterator it = partial_spills.begin();
|
2020-08-05 13:29:58 +01:00
|
|
|
Temp to_spill = Temp();
|
|
|
|
unsigned distance = 0;
|
2021-02-22 14:58:46 +00:00
|
|
|
RegType type = reg_pressure.vgpr > ctx.target_pressure.vgpr ? RegType::vgpr : RegType::sgpr;
|
2019-09-17 12:22:17 +01:00
|
|
|
|
|
|
|
while (it != partial_spills.end()) {
|
2021-07-13 11:59:58 +01:00
|
|
|
assert(!ctx.spills_entry[block_idx].count(*it));
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2021-07-16 10:33:32 +01:00
|
|
|
if (it->type() == type && next_use_distances.at(*it).second > distance) {
|
|
|
|
distance = next_use_distances.at(*it).second;
|
2019-09-17 12:22:17 +01:00
|
|
|
to_spill = *it;
|
|
|
|
}
|
|
|
|
++it;
|
|
|
|
}
|
|
|
|
assert(distance != 0);
|
|
|
|
|
|
|
|
ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
|
|
|
|
partial_spills.erase(to_spill);
|
2021-02-22 14:58:46 +00:00
|
|
|
spilled_registers += to_spill;
|
|
|
|
reg_pressure -= to_spill;
|
2019-09-17 12:22:17 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return spilled_registers;
|
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Inserts the "coupling" code that stitches a block to its already-processed
 * predecessors:
 * - reloads variables that are spilled at a predecessor's exit but must be in
 *   registers at this block's entry (per ctx.spills_entry / spills_exit),
 * - spills phi operands and other variables that ctx.spills_entry marks as
 *   spilled at this block's entry, replacing spilled phis by p_spill at the
 *   predecessors,
 * - records spill-id affinities/interferences and propagates temp renames,
 *   creating new phis where predecessors renamed a variable differently.
 * Also fixes up ctx.register_demand for the instructions it inserts. */
void
add_coupling_code(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   /* no coupling code necessary: entry block has no predecessors */
   if (block->linear_preds.size() == 0)
      return;

   /* new instruction list for this block: reloads/phis are prepended here and
    * the original instructions are moved in afterwards */
   std::vector<aco_ptr<Instruction>> instructions;
   /* branch block: TODO take other branch into consideration */
   if (block->linear_preds.size() == 1 &&
       !(block->kind & (block_kind_loop_exit | block_kind_loop_header))) {
      assert(ctx.processed[block->linear_preds[0]]);
      assert(ctx.register_demand[block_idx].size() == block->instructions.size());
      std::vector<RegisterDemand> reg_demand;
      unsigned insert_idx = 0;
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, 0);

      /* first pass: linear (SGPR/exec-like) variables, reloaded at the very
       * top of the block */
      for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& live :
           ctx.next_use_distances_start[block_idx]) {
         const unsigned pred_idx = block->linear_preds[0];

         if (!live.first.is_linear())
            continue;
         /* still spilled */
         if (ctx.spills_entry[block_idx].count(live.first))
            continue;

         /* in register at end of predecessor */
         auto spills_exit_it = ctx.spills_exit[pred_idx].find(live.first);
         if (spills_exit_it == ctx.spills_exit[pred_idx].end()) {
            /* only propagate the predecessor's rename to this block */
            std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
            if (it != ctx.renames[pred_idx].end())
               ctx.renames[block_idx].insert(*it);
            continue;
         }

         /* variable is spilled at predecessor and live at current block: create reload instruction */
         Temp new_name = ctx.program->allocateTmp(live.first.regClass());
         aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, spills_exit_it->second);
         instructions.emplace_back(std::move(reload));
         reg_demand.push_back(demand_before);
         ctx.renames[block_idx][live.first] = new_name;
      }

      if (block->logical_preds.size() == 1) {
         /* move the original instructions up to and including p_logical_start,
          * so logical (VGPR) reloads land after it */
         do {
            assert(insert_idx < block->instructions.size());
            instructions.emplace_back(std::move(block->instructions[insert_idx]));
            reg_demand.push_back(ctx.register_demand[block_idx][insert_idx]);
            insert_idx++;
         } while (instructions.back()->opcode != aco_opcode::p_logical_start);

         unsigned pred_idx = block->logical_preds[0];
         /* second pass: logical (VGPR) variables */
         for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& live :
              ctx.next_use_distances_start[block_idx]) {
            if (live.first.is_linear())
               continue;
            /* still spilled */
            if (ctx.spills_entry[block_idx].count(live.first))
               continue;

            /* in register at end of predecessor */
            auto spills_exit_it = ctx.spills_exit[pred_idx].find(live.first);
            if (spills_exit_it == ctx.spills_exit[pred_idx].end()) {
               std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
               if (it != ctx.renames[pred_idx].end())
                  ctx.renames[block_idx].insert(*it);
               continue;
            }

            /* variable is spilled at predecessor and live at current block:
             * create reload instruction */
            Temp new_name = ctx.program->allocateTmp(live.first.regClass());
            aco_ptr<Instruction> reload =
               do_reload(ctx, live.first, new_name, spills_exit_it->second);
            instructions.emplace_back(std::move(reload));
            reg_demand.emplace_back(reg_demand.back());
            ctx.renames[block_idx][live.first] = new_name;
         }
      }

      /* combine new reload instructions with original block */
      if (!instructions.empty()) {
         reg_demand.insert(reg_demand.end(),
                           std::next(ctx.register_demand[block->index].begin(), insert_idx),
                           ctx.register_demand[block->index].end());
         ctx.register_demand[block_idx] = std::move(reg_demand);
         instructions.insert(instructions.end(),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(
                                std::next(block->instructions.begin(), insert_idx)),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(
                                block->instructions.end()));
         block->instructions = std::move(instructions);
      }
      return;
   }

   /* loop header and merge blocks: check if all (linear) predecessors have been processed */
   for (ASSERTED unsigned pred : block->linear_preds)
      assert(ctx.processed[pred]);

   /* iterate the phi nodes for which operands to spill at the predecessor */
   for (aco_ptr<Instruction>& phi : block->instructions) {
      if (!is_phi(phi))
         break;

      /* if the phi is not spilled, add to instructions */
      if (!phi->definitions[0].isTemp() ||
          !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp())) {
         instructions.emplace_back(std::move(phi));
         continue;
      }

      /* p_phi joins logical preds, p_linear_phi joins linear preds */
      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      uint32_t def_spill_id = ctx.spills_entry[block_idx][phi->definitions[0].getTemp()];

      /* the spilled phi is replaced by one p_spill per (defined) operand */
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (phi->operands[i].isUndefined())
            continue;

         unsigned pred_idx = preds[i];
         Operand spill_op = phi->operands[i];

         if (spill_op.isTemp()) {
            assert(phi->operands[i].isKill());
            Temp var = phi->operands[i].getTemp();

            std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
            /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            if (rename_it == ctx.renames[preds[i]].end() && ctx.remat.count(var))
               ctx.unused_remats.erase(ctx.remat[var].instr);

            /* check if variable is already spilled at predecessor */
            auto spilled = ctx.spills_exit[pred_idx].find(var);
            if (spilled != ctx.spills_exit[pred_idx].end()) {
               /* reuse that spill slot: just tie its id to the phi's id */
               if (spilled->second != def_spill_id)
                  ctx.add_affinity(def_spill_id, spilled->second);
               continue;
            }

            /* rename if necessary */
            if (rename_it != ctx.renames[pred_idx].end()) {
               spill_op.setTemp(rename_it->second);
               ctx.renames[pred_idx].erase(rename_it);
            }
         }

         uint32_t spill_id = ctx.allocate_spill_id(phi->definitions[0].regClass());

         /* add interferences and affinity */
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx])
            ctx.add_interference(spill_id, pair.second);
         ctx.add_affinity(def_spill_id, spill_id);

         aco_ptr<Pseudo_instruction> spill{
            create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = spill_op;
         spill->operands[1] = Operand::c32(spill_id);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         /* logical-phi operands must be spilled before p_logical_end of the
          * predecessor; linear operands go at the very end of the block */
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));
         if (spill_op.isTemp())
            ctx.spills_exit[pred_idx][spill_op.getTemp()] = spill_id;
      }

      /* remove phi from instructions */
      phi.reset();
   }

   /* iterate all (other) spilled variables for which to spill at the predecessor */
   // TODO: would be better to have them sorted: first vgprs and first with longest distance
   for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block_idx]) {
      std::vector<unsigned> preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      for (unsigned pred_idx : preds) {
         /* variable is already spilled at predecessor */
         auto spilled = ctx.spills_exit[pred_idx].find(pair.first);
         if (spilled != ctx.spills_exit[pred_idx].end()) {
            if (spilled->second != pair.second)
               ctx.add_affinity(pair.second, spilled->second);
            continue;
         }

         /* variable is dead at predecessor, it must be from a phi: this works because of CSSA form */
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first))
            continue;

         /* add interferences between spilled variable and predecessors exit spills */
         for (std::pair<Temp, uint32_t> exit_spill : ctx.spills_exit[pred_idx]) {
            if (exit_spill.first == pair.first)
               continue;
            ctx.add_interference(exit_spill.second, pair.second);
         }

         /* variable is in register at predecessor and has to be spilled */
         /* rename if necessary */
         Temp var = pair.first;
         std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
         if (rename_it != ctx.renames[pred_idx].end()) {
            var = rename_it->second;
            ctx.renames[pred_idx].erase(rename_it);
         }

         aco_ptr<Pseudo_instruction> spill{
            create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = Operand(var);
         spill->operands[1] = Operand::c32(pair.second);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         /* VGPR spills must happen before the predecessor's p_logical_end */
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));
         ctx.spills_exit[pred.index][pair.first] = pair.second;
      }
   }

   /* iterate phis for which operands to reload */
   for (aco_ptr<Instruction>& phi : instructions) {
      assert(phi->opcode == aco_opcode::p_phi || phi->opcode == aco_opcode::p_linear_phi);
      /* spilled phis were removed from 'instructions' above */
      assert(!phi->definitions[0].isTemp() ||
             !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp()));

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (!phi->operands[i].isTemp())
            continue;
         unsigned pred_idx = preds[i];

         /* if the operand was reloaded, rename */
         if (!ctx.spills_exit[pred_idx].count(phi->operands[i].getTemp())) {
            std::map<Temp, Temp>::iterator it =
               ctx.renames[pred_idx].find(phi->operands[i].getTemp());
            if (it != ctx.renames[pred_idx].end()) {
               phi->operands[i].setTemp(it->second);
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            } else {
               auto remat_it = ctx.remat.find(phi->operands[i].getTemp());
               if (remat_it != ctx.remat.end()) {
                  ctx.unused_remats.erase(remat_it->second.instr);
               }
            }
            continue;
         }

         Temp tmp = phi->operands[i].getTemp();

         /* reload phi operand at end of predecessor block */
         Temp new_name = ctx.program->allocateTmp(tmp.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         /* logical-phi operands are reloaded before p_logical_end */
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         aco_ptr<Instruction> reload =
            do_reload(ctx, tmp, new_name, ctx.spills_exit[pred_idx][tmp]);

         /* reload spilled exec mask directly to exec */
         if (!phi->definitions[0].isTemp()) {
            assert(phi->definitions[0].isFixed() && phi->definitions[0].physReg() == exec);
            reload->definitions[0] = phi->definitions[0];
            phi->operands[i] = Operand(exec, ctx.program->lane_mask);
         } else {
            ctx.spills_exit[pred_idx].erase(tmp);
            ctx.renames[pred_idx][tmp] = new_name;
            phi->operands[i].setTemp(new_name);
         }

         pred.instructions.insert(it, std::move(reload));
      }
   }

   /* iterate live variables for which to reload */
   // TODO: reload at current block if variable is spilled on all predecessors
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
        ctx.next_use_distances_start[block_idx]) {
      /* skip spilled variables */
      if (ctx.spills_entry[block_idx].count(pair.first))
         continue;
      std::vector<unsigned> preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      /* variable is dead at predecessor, it must be from a phi */
      bool is_dead = false;
      for (unsigned pred_idx : preds) {
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first))
            is_dead = true;
      }
      if (is_dead)
         continue;
      for (unsigned pred_idx : preds) {
         /* the variable is not spilled at the predecessor */
         if (!ctx.spills_exit[pred_idx].count(pair.first))
            continue;

         /* variable is spilled at predecessor and has to be reloaded */
         Temp new_name = ctx.program->allocateTmp(pair.first.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         /* VGPR reloads go before the predecessor's p_logical_end */
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);

         aco_ptr<Instruction> reload =
            do_reload(ctx, pair.first, new_name, ctx.spills_exit[pred.index][pair.first]);
         pred.instructions.insert(it, std::move(reload));

         ctx.spills_exit[pred.index].erase(pair.first);
         ctx.renames[pred.index][pair.first] = new_name;
      }

      /* check if we have to create a new phi for this variable */
      Temp rename = Temp();
      bool is_same = true;
      for (unsigned pred_idx : preds) {
         if (!ctx.renames[pred_idx].count(pair.first)) {
            if (rename == Temp())
               rename = pair.first;
            else
               is_same = rename == pair.first;
         } else {
            if (rename == Temp())
               rename = ctx.renames[pred_idx][pair.first];
            else
               is_same = rename == ctx.renames[pred_idx][pair.first];
         }

         if (!is_same)
            break;
      }

      if (!is_same) {
         /* the variable was renamed differently in the predecessors: we have to create a phi */
         aco_opcode opcode = pair.first.is_linear() ? aco_opcode::p_linear_phi : aco_opcode::p_phi;
         aco_ptr<Pseudo_instruction> phi{
            create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, preds.size(), 1)};
         rename = ctx.program->allocateTmp(pair.first.regClass());
         for (unsigned i = 0; i < phi->operands.size(); i++) {
            Temp tmp;
            if (ctx.renames[preds[i]].count(pair.first)) {
               tmp = ctx.renames[preds[i]][pair.first];
            } else if (preds[i] >= block_idx) {
               /* back-edge predecessor not yet renamed: use the phi's own
                * result as a placeholder */
               tmp = rename;
            } else {
               tmp = pair.first;
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
               if (ctx.remat.count(tmp))
                  ctx.unused_remats.erase(ctx.remat[tmp].instr);
            }
            phi->operands[i] = Operand(tmp);
         }
         phi->definitions[0] = Definition(rename);
         instructions.emplace_back(std::move(phi));
      }

      /* the variable was renamed: add new name to renames */
      if (!(rename == Temp() || rename == pair.first))
         ctx.renames[block_idx][pair.first] = rename;
   }

   /* combine phis with instructions */
   unsigned idx = 0;
   /* skip the phis that were reset() above */
   while (!block->instructions[idx]) {
      idx++;
   }

   if (!ctx.processed[block_idx]) {
      assert(!(block->kind & block_kind_loop_header));
      /* replace the demand entries of the removed phis by one entry per new
       * instruction, all using the demand before the first real instruction */
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, idx);
      ctx.register_demand[block->index].erase(ctx.register_demand[block->index].begin(),
                                              ctx.register_demand[block->index].begin() + idx);
      ctx.register_demand[block->index].insert(ctx.register_demand[block->index].begin(),
                                               instructions.size(), demand_before);
   }

   std::vector<aco_ptr<Instruction>>::iterator start = std::next(block->instructions.begin(), idx);
   instructions.insert(
      instructions.end(), std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(start),
      std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
   block->instructions = std::move(instructions);
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Lowers register pressure inside a single basic block to ctx.target_pressure.
 * Walks the block's instructions in order, renaming operands that were renamed
 * by earlier spilling, reloading spilled operands right before their use, and
 * spilling the live variable with the furthest next use whenever the demand at
 * the current instruction still exceeds the target.
 * `spilled_registers` is the demand currently parked in spill slots; it is
 * adjusted as variables are reloaded/spilled. On return, the block's
 * instruction list is replaced by the rewritten one. */
void
process_block(spill_ctx& ctx, unsigned block_idx, Block* block, RegisterDemand spilled_registers)
{
   assert(!ctx.processed[block_idx]);

   std::vector<aco_ptr<Instruction>> instructions;
   unsigned idx = 0;

   /* phis are handled separately (by add_coupling_code); copy them through unchanged */
   while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
          block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
      instructions.emplace_back(std::move(block->instructions[idx++]));
   }

   /* per-instruction next-use info is only needed when we might have to spill */
   if (block->register_demand.exceeds(ctx.target_pressure)) {
      update_local_next_uses(ctx, block, ctx.local_next_use_distance);
   } else {
      /* We won't use local_next_use_distance, so no initialization needed */
   }

   /* variables spilled at the current point of the walk; starts as the
    * entry set (spills_exit was seeded from spills_entry by the caller) */
   auto& current_spills = ctx.spills_exit[block_idx];

   while (idx < block->instructions.size()) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      /* new temp -> (original temp, spill id) for operands reloaded at this instruction */
      std::map<Temp, std::pair<Temp, uint32_t>> reloads;

      /* rename and reload operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         if (!current_spills.count(op.getTemp())) {
            /* the Operand is in register: check if it was renamed */
            auto rename_it = ctx.renames[block_idx].find(op.getTemp());
            if (rename_it != ctx.renames[block_idx].end()) {
               op.setTemp(rename_it->second);
            } else {
               /* prevent its defining instruction from being DCE'd if it could be rematerialized */
               auto remat_it = ctx.remat.find(op.getTemp());
               if (remat_it != ctx.remat.end()) {
                  ctx.unused_remats.erase(remat_it->second.instr);
               }
            }
            continue;
         }
         /* the Operand is spilled: add it to reloads */
         Temp new_tmp = ctx.program->allocateTmp(op.regClass());
         ctx.renames[block_idx][op.getTemp()] = new_tmp;
         reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
         current_spills.erase(op.getTemp());
         op.setTemp(new_tmp);
         /* the reloaded value now occupies registers again */
         spilled_registers -= new_tmp;
      }

      /* check if register demand is low enough before and after the current instruction */
      if (block->register_demand.exceeds(ctx.target_pressure)) {

         RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
         new_demand.update(get_demand_before(ctx, block_idx, idx));

         assert(!ctx.local_next_use_distance.empty());

         /* if reg pressure is too high, spill variable with furthest next use */
         while ((new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
            unsigned distance = 0;
            Temp to_spill;
            bool do_rematerialize = false;
            /* spill the register type that is actually over the target */
            RegType type = RegType::sgpr;
            if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr)
               type = RegType::vgpr;

            /* prefer rematerializable candidates; among equals, the furthest next use wins */
            for (std::pair<Temp, uint32_t> pair : ctx.local_next_use_distance[idx]) {
               if (pair.first.type() != type)
                  continue;
               bool can_rematerialize = ctx.remat.count(pair.first);
               if (((pair.second > distance && can_rematerialize == do_rematerialize) ||
                    (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
                   !current_spills.count(pair.first)) {
                  to_spill = pair.first;
                  distance = pair.second;
                  do_rematerialize = can_rematerialize;
               }
            }

            /* a candidate used strictly after the current instruction must exist */
            assert(distance != 0 && distance > idx);
            uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());

            /* add interferences with currently spilled variables */
            for (std::pair<Temp, uint32_t> pair : current_spills)
               ctx.add_interference(spill_id, pair.second);
            for (std::pair<const Temp, std::pair<Temp, uint32_t>>& pair : reloads)
               ctx.add_interference(spill_id, pair.second.second);

            current_spills[to_spill] = spill_id;
            spilled_registers += to_spill;

            /* rename if necessary */
            if (ctx.renames[block_idx].count(to_spill)) {
               to_spill = ctx.renames[block_idx][to_spill];
            }

            /* add spill to new instructions */
            aco_ptr<Pseudo_instruction> spill{
               create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
            spill->operands[0] = Operand(to_spill);
            spill->operands[1] = Operand::c32(spill_id);
            instructions.emplace_back(std::move(spill));
         }
      }

      /* add reloads and instruction to new instructions */
      for (std::pair<const Temp, std::pair<Temp, uint32_t>>& pair : reloads) {
         aco_ptr<Instruction> reload =
            do_reload(ctx, pair.second.first, pair.first, pair.second.second);
         instructions.emplace_back(std::move(reload));
      }
      instructions.emplace_back(std::move(instr));
      idx++;
   }

   block->instructions = std::move(instructions);
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Runs the spiller on one basic block: determines the live-in spill set, adds
 * interferences and control-flow coupling code, processes the block body if
 * needed, and — when this block is the last block of a loop — propagates the
 * renames created by the loop header's coupling code through the whole loop
 * to repair SSA form. */
void
spill_block(spill_ctx& ctx, unsigned block_idx)
{
   Block* block = &ctx.program->blocks[block_idx];

   /* determine set of variables which are spilled at the beginning of the block */
   RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);

   /* add interferences for spilled variables: all pairwise combinations of the entry set */
   for (auto it = ctx.spills_entry[block_idx].begin(); it != ctx.spills_entry[block_idx].end();
        ++it) {
      for (auto it2 = std::next(it); it2 != ctx.spills_entry[block_idx].end(); ++it2)
         ctx.add_interference(it->second, it2->second);
   }

   /* loop headers are handled below, after the whole loop has been processed */
   bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
   if (!is_loop_header) {
      /* add spill/reload code on incoming control flow edges */
      add_coupling_code(ctx, block, block_idx);
   }

   const auto& current_spills = ctx.spills_entry[block_idx];

   /* check conditions to process this block */
   bool process = (block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
                  !ctx.renames[block_idx].empty() || ctx.unused_remats.size();

   /* also process if any spilled variable's next use happens inside this block */
   for (auto it = current_spills.begin(); !process && it != current_spills.end(); ++it) {
      if (ctx.next_use_distances_start[block_idx].at(it->first).first == block_idx)
         process = true;
   }

   /* the exit set starts as the entry set; process_block() mutates it in place */
   assert(ctx.spills_exit[block_idx].empty());
   ctx.spills_exit[block_idx] = current_spills;
   if (process) {
      process_block(ctx, block_idx, block, spilled_registers);
   }

   ctx.processed[block_idx] = true;

   /* check if the next block leaves the current loop */
   if (block->loop_nest_depth == 0 ||
       ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
      return;

   /* this was the last block of the innermost loop: fix up the loop header now */
   Block* loop_header = ctx.loop_header.top();

   /* preserve original renames at end of loop header block */
   std::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);

   /* add coupling code to all loop header predecessors */
   add_coupling_code(ctx, loop_header, loop_header->index);

   /* propagate new renames through loop: i.e. repair the SSA */
   renames.swap(ctx.renames[loop_header->index]);
   for (std::pair<Temp, Temp> rename : renames) {
      /* visit every block of the loop body (header .. current block) */
      for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
         Block& current = ctx.program->blocks[idx];
         std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();

         /* first rename phis */
         while (instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& phi = *instr_it;
            if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
               break;
            /* no need to rename the loop header phis once again. this happened in
             * add_coupling_code() */
            if (idx == loop_header->index) {
               instr_it++;
               continue;
            }

            for (Operand& op : phi->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first)
                  op.setTemp(rename.second);
            }
            instr_it++;
         }

         /* variable is not live at beginning of this block */
         if (ctx.next_use_distances_start[idx].count(rename.first) == 0)
            continue;

         /* if the variable is live at the block's exit, add rename */
         if (ctx.next_use_distances_end[idx].count(rename.first) != 0)
            ctx.renames[idx].insert(rename);

         /* rename all uses in this block */
         bool renamed = false;
         while (!renamed && instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& instr = *instr_it;
            for (Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first) {
                  op.setTemp(rename.second);
                  /* we can stop with this block as soon as the variable is spilled */
                  if (instr->opcode == aco_opcode::p_spill)
                     renamed = true;
               }
            }
            instr_it++;
         }
      }
   }

   /* remove loop header info from stack */
   ctx.loop_header.pop();
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Emits code that materializes the scratch buffer resource descriptor (an s4)
 * used by the MUBUF spill/reload instructions. When `offset` is non-zero, the
 * scratch offset SGPR is advanced by that amount (and the reference updated).
 * If `is_top_level`, instructions are appended at the end of `instructions`;
 * otherwise they are inserted just before the block's p_logical_end. */
Temp
load_scratch_resource(spill_ctx& ctx, Temp& scratch_offset,
                      std::vector<aco_ptr<Instruction>>& instructions, unsigned offset,
                      bool is_top_level)
{
   Builder bld(ctx.program);
   if (is_top_level) {
      bld.reset(&instructions);
   } else {
      /* not a top-level block: emit right before the p_logical_end marker */
      unsigned insert_idx = instructions.size() - 1;
      while (instructions[insert_idx]->opcode != aco_opcode::p_logical_end)
         insert_idx--;
      bld.reset(&instructions, std::next(instructions.begin(), insert_idx));
   }

   /* non-compute stages carry a pointer to the buffer, so load the first two dwords */
   Temp private_segment_buffer = ctx.program->private_segment_buffer;
   bool needs_deref = ctx.program->stage != compute_cs;
   if (needs_deref) {
      private_segment_buffer = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2),
                                        private_segment_buffer, Operand::zero());
   }

   if (offset) {
      scratch_offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc),
                                scratch_offset, Operand::c32(offset));
   }

   /* build dword 3 of the descriptor: swizzled per-lane addressing */
   unsigned index_stride = ctx.program->wave_size == 64 ? 3 : 2;
   uint32_t desc_word3 = S_008F0C_ADD_TID_ENABLE(1) | S_008F0C_INDEX_STRIDE(index_stride);

   if (ctx.program->chip_class >= GFX10) {
      desc_word3 |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
                    S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
   }
   if (ctx.program->chip_class <= GFX7) {
      /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
      desc_word3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                    S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
   }
   /* older generations need element size = 4 bytes. element size removed in GFX9 */
   if (ctx.program->chip_class <= GFX8)
      desc_word3 |= S_008F0C_ELEMENT_SIZE(1);

   /* descriptor = { base (2 dwords), num_records = ~0, config word } */
   return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), private_segment_buffer,
                     Operand::c32(-1u), Operand::c32(desc_word3));
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
void
|
|
|
|
add_interferences(spill_ctx& ctx, std::vector<bool>& is_assigned, std::vector<uint32_t>& slots,
|
|
|
|
std::vector<bool>& slots_used, unsigned id)
|
2020-07-07 13:10:38 +01:00
|
|
|
{
|
|
|
|
for (unsigned other : ctx.interferences[id].second) {
|
|
|
|
if (!is_assigned[other])
|
|
|
|
continue;
|
2019-10-16 15:39:06 +01:00
|
|
|
|
2020-07-07 13:10:38 +01:00
|
|
|
RegClass other_rc = ctx.interferences[other].first;
|
2020-07-16 11:08:50 +01:00
|
|
|
unsigned slot = slots[other];
|
|
|
|
std::fill(slots_used.begin() + slot, slots_used.begin() + slot + other_rc.size(), true);
|
2020-07-07 13:10:38 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Finds the first contiguous run of `size` free slots in `used`, marks it as
 * occupied and returns its start index. `used` grows as needed. For SGPR
 * spills (is_sgpr), slots map to lanes of a linear VGPR, so the run must not
 * straddle a wave_size-aligned boundary. `num_slots` is unused here; the
 * caller derives the final slot count from used.size() afterwards. */
unsigned
find_available_slot(std::vector<bool>& used, unsigned wave_size, unsigned size, bool is_sgpr,
                    unsigned* num_slots)
{
   unsigned wave_size_minus_one = wave_size - 1;
   unsigned slot = 0;

   while (true) {
      /* check whether [slot, slot + size) is currently free;
       * indices beyond used.size() count as free */
      bool available = true;
      for (unsigned i = 0; i < size; i++) {
         if (slot + i < used.size() && used[slot + i]) {
            available = false;
            break;
         }
      }
      if (!available) {
         slot++;
         continue;
      }

      /* SGPR runs must not cross a wave boundary: round up to the next
       * wave_size multiple. wave_size is a power of two (32 or 64), so the
       * mask computes align(slot, wave_size). */
      if (is_sgpr && ((slot & wave_size_minus_one) > wave_size - size)) {
         slot = (slot + wave_size_minus_one) & ~wave_size_minus_one;
         continue;
      }

      /* grow first so the fill below stays within bounds */
      if (slot + size > used.size())
         used.resize(slot + size);

      /* Mark the chosen range as occupied so later calls don't hand it out
       * again. (Previously the whole bitset was cleared here instead, which
       * discarded all earlier reservations and allowed overlapping slots.) */
      std::fill(used.begin() + slot, used.begin() + slot + size, true);

      return slot;
   }
}
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
void
|
|
|
|
assign_spill_slots_helper(spill_ctx& ctx, RegType type, std::vector<bool>& is_assigned,
|
|
|
|
std::vector<uint32_t>& slots, unsigned* num_slots)
|
2020-07-07 13:10:38 +01:00
|
|
|
{
|
|
|
|
std::vector<bool> slots_used(*num_slots);
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2020-07-07 13:10:38 +01:00
|
|
|
/* assign slots for ids with affinities first */
|
|
|
|
for (std::vector<uint32_t>& vec : ctx.affinities) {
|
|
|
|
if (ctx.interferences[vec[0]].first.type() != type)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
for (unsigned id : vec) {
|
|
|
|
if (!ctx.is_reloaded[id])
|
2019-09-17 12:22:17 +01:00
|
|
|
continue;
|
|
|
|
|
2020-07-07 13:10:38 +01:00
|
|
|
add_interferences(ctx, is_assigned, slots, slots_used, id);
|
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
unsigned slot =
|
|
|
|
find_available_slot(slots_used, ctx.wave_size, ctx.interferences[vec[0]].first.size(),
|
|
|
|
type == RegType::sgpr, num_slots);
|
2020-07-07 13:10:38 +01:00
|
|
|
|
|
|
|
for (unsigned id : vec) {
|
|
|
|
assert(!is_assigned[id]);
|
|
|
|
|
|
|
|
if (ctx.is_reloaded[id]) {
|
|
|
|
slots[id] = slot;
|
|
|
|
is_assigned[id] = true;
|
2019-10-16 15:39:06 +01:00
|
|
|
}
|
2019-09-17 12:22:17 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-07 13:10:38 +01:00
|
|
|
/* assign slots for ids without affinities */
|
|
|
|
for (unsigned id = 0; id < ctx.interferences.size(); id++) {
|
|
|
|
if (is_assigned[id] || !ctx.is_reloaded[id] || ctx.interferences[id].first.type() != type)
|
|
|
|
continue;
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2020-07-07 13:10:38 +01:00
|
|
|
add_interferences(ctx, is_assigned, slots, slots_used, id);
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
unsigned slot =
|
|
|
|
find_available_slot(slots_used, ctx.wave_size, ctx.interferences[id].first.size(),
|
|
|
|
type == RegType::sgpr, num_slots);
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2020-07-07 13:10:38 +01:00
|
|
|
slots[id] = slot;
|
|
|
|
is_assigned[id] = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
*num_slots = slots_used.size();
|
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
/* Assigns a concrete spill slot to every reloaded spill id and then lowers all
 * p_spill/p_reload pseudo instructions: VGPR values become MUBUF stores/loads
 * to scratch memory, SGPR values become lane writes/reads of linear VGPRs
 * (p_spill/p_reload with a lane operand, lowered later). Also manages creation
 * and destruction of those linear VGPRs and updates scratch_bytes_per_wave. */
void
assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr)
{
   std::vector<uint32_t> slots(ctx.interferences.size());
   std::vector<bool> is_assigned(ctx.interferences.size());

   /* first, handle affinities: just merge all interferences into both spill ids */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(vec[i] != vec[j]);
            /* if any member of the group is reloaded, all must be spilled */
            bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
            ctx.is_reloaded[vec[i]] = reloaded;
            ctx.is_reloaded[vec[j]] = reloaded;
         }
      }
   }
   /* sanity check: no spill id interferes with itself */
   for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
      for (ASSERTED uint32_t id : ctx.interferences[i].second)
         assert(i != id);

   /* for each spill slot, assign as many spill ids as possible */
   unsigned sgpr_spill_slots = 0, vgpr_spill_slots = 0;
   assign_spill_slots_helper(ctx, RegType::sgpr, is_assigned, slots, &sgpr_spill_slots);
   assign_spill_slots_helper(ctx, RegType::vgpr, is_assigned, slots, &vgpr_spill_slots);

   for (unsigned id = 0; id < is_assigned.size(); id++)
      assert(is_assigned[id] || !ctx.is_reloaded[id]);

   /* verify affinity groups ended up fully consistent */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
            if (!is_assigned[vec[i]])
               continue;
            assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
            assert(ctx.interferences[vec[i]].first.type() ==
                   ctx.interferences[vec[j]].first.type());
            assert(slots[vec[i]] == slots[vec[j]]);
         }
      }
   }

   /* hope, we didn't mess up */
   /* one linear VGPR holds wave_size SGPR spill slots (one per lane) */
   std::vector<Temp> vgpr_spill_temps((sgpr_spill_slots + ctx.wave_size - 1) / ctx.wave_size);
   assert(vgpr_spill_temps.size() <= spills_to_vgpr);

   /* replace pseudo instructions with actual hardware instructions */
   Temp scratch_offset = ctx.program->scratch_offset, scratch_rsrc = Temp();
   unsigned last_top_level_block_idx = 0;
   std::vector<bool> reload_in_loop(vgpr_spill_temps.size());
   for (Block& block : ctx.program->blocks) {

      /* after loops, we insert a user if there was a reload inside the loop */
      if (block.loop_nest_depth == 0) {
         int end_vgprs = 0;
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (reload_in_loop[i])
               end_vgprs++;
         }

         if (end_vgprs > 0) {
            aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(
               aco_opcode::p_end_linear_vgpr, Format::PSEUDO, end_vgprs, 0)};
            int k = 0;
            for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
               if (reload_in_loop[i])
                  destr->operands[k++] = Operand(vgpr_spill_temps[i]);
               reload_in_loop[i] = false;
            }
            /* find insertion point */
            std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
            while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
               ++it;
            block.instructions.insert(it, std::move(destr));
         }
      }

      if (block.kind & block_kind_top_level && !block.linear_preds.empty()) {
         last_top_level_block_idx = block.index;

         /* check if any spilled variables use a created linear vgpr, otherwise destroy them */
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (vgpr_spill_temps[i] == Temp())
               continue;

            bool can_destroy = true;
            for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[block.linear_preds[0]]) {

               /* an SGPR still spilled into this linear VGPR keeps it alive */
               if (ctx.interferences[pair.second].first.type() == RegType::sgpr &&
                   slots[pair.second] / ctx.wave_size == i) {
                  can_destroy = false;
                  break;
               }
            }
            if (can_destroy)
               vgpr_spill_temps[i] = Temp();
         }
      }

      /* rebuild the block's instruction list with pseudos lowered */
      std::vector<aco_ptr<Instruction>>::iterator it;
      std::vector<aco_ptr<Instruction>> instructions;
      instructions.reserve(block.instructions.size());
      Builder bld(ctx.program, &instructions);
      for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {

         if ((*it)->opcode == aco_opcode::p_spill) {
            uint32_t spill_id = (*it)->operands[1].constantValue();

            if (!ctx.is_reloaded[spill_id]) {
               /* never reloaded, so don't spill */
            } else if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               /* spill vgpr */
               ctx.program->config->spilled_vgprs += (*it)->operands[0].size();
               uint32_t spill_slot = slots[spill_id];
               /* MUBUF immediate offsets are limited to 4095, so fold the base
                * into the scratch offset SGPR when it would not fit */
               bool add_offset_to_sgpr =
                  ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size +
                     vgpr_spill_slots * 4 >
                  4096;
               unsigned base_offset =
                  add_offset_to_sgpr
                     ? 0
                     : ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size;

               /* check if the scratch resource descriptor already exists */
               if (scratch_rsrc == Temp()) {
                  unsigned offset =
                     add_offset_to_sgpr ? ctx.program->config->scratch_bytes_per_wave : 0;
                  scratch_rsrc = load_scratch_resource(
                     ctx, scratch_offset,
                     last_top_level_block_idx == block.index
                        ? instructions
                        : ctx.program->blocks[last_top_level_block_idx].instructions,
                     offset, last_top_level_block_idx == block.index);
               }

               unsigned offset = base_offset + spill_slot * 4;
               aco_opcode opcode = aco_opcode::buffer_store_dword;
               assert((*it)->operands[0].isTemp());
               Temp temp = (*it)->operands[0].getTemp();
               assert(temp.type() == RegType::vgpr && !temp.is_linear());
               if (temp.size() > 1) {
                  /* multi-dword temp: split and store one dword at a time */
                  Instruction* split{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_split_vector, Format::PSEUDO, 1, temp.size())};
                  split->operands[0] = Operand(temp);
                  for (unsigned i = 0; i < temp.size(); i++)
                     split->definitions[i] = bld.def(v1);
                  bld.insert(split);
                  for (unsigned i = 0; i < temp.size(); i++) {
                     Instruction* instr =
                        bld.mubuf(opcode, scratch_rsrc, Operand(v1), scratch_offset,
                                  split->definitions[i].getTemp(), offset + i * 4, false, true);
                     instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
                  }
               } else {
                  Instruction* instr = bld.mubuf(opcode, scratch_rsrc, Operand(v1), scratch_offset,
                                                 temp, offset, false, true);
                  instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
               }
            } else {
               ctx.program->config->spilled_sgprs += (*it)->operands[0].size();

               uint32_t spill_slot = slots[spill_id];

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& block_instrs =
                        ctx.program->blocks[last_top_level_block_idx].instructions;
                     block_instrs.insert(std::prev(block_instrs.end()), std::move(create));
                  }
               }

               /* spill sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* spill =
                  create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
               spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               spill->operands[1] = Operand::c32(spill_slot % ctx.wave_size);
               spill->operands[2] = (*it)->operands[0];
               instructions.emplace_back(aco_ptr<Instruction>(spill));
            }

         } else if ((*it)->opcode == aco_opcode::p_reload) {
            uint32_t spill_id = (*it)->operands[0].constantValue();
            assert(ctx.is_reloaded[spill_id]);

            if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               /* reload vgpr */
               uint32_t spill_slot = slots[spill_id];
               /* same offset-folding logic as the spill path above */
               bool add_offset_to_sgpr =
                  ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size +
                     vgpr_spill_slots * 4 >
                  4096;
               unsigned base_offset =
                  add_offset_to_sgpr
                     ? 0
                     : ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size;

               /* check if the scratch resource descriptor already exists */
               if (scratch_rsrc == Temp()) {
                  unsigned offset =
                     add_offset_to_sgpr ? ctx.program->config->scratch_bytes_per_wave : 0;
                  scratch_rsrc = load_scratch_resource(
                     ctx, scratch_offset,
                     last_top_level_block_idx == block.index
                        ? instructions
                        : ctx.program->blocks[last_top_level_block_idx].instructions,
                     offset, last_top_level_block_idx == block.index);
               }

               unsigned offset = base_offset + spill_slot * 4;
               aco_opcode opcode = aco_opcode::buffer_load_dword;
               Definition def = (*it)->definitions[0];
               if (def.size() > 1) {
                  /* multi-dword destination: load dwords and recombine */
                  Instruction* vec{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_create_vector, Format::PSEUDO, def.size(), 1)};
                  vec->definitions[0] = def;
                  for (unsigned i = 0; i < def.size(); i++) {
                     Temp tmp = bld.tmp(v1);
                     vec->operands[i] = Operand(tmp);
                     Instruction* instr =
                        bld.mubuf(opcode, Definition(tmp), scratch_rsrc, Operand(v1),
                                  scratch_offset, offset + i * 4, false, true);
                     instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
                  }
                  bld.insert(vec);
               } else {
                  Instruction* instr = bld.mubuf(opcode, def, scratch_rsrc, Operand(v1),
                                                 scratch_offset, offset, false, true);
                  instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
               }
            } else {
               uint32_t spill_slot = slots[spill_id];
               /* remember loop reloads so a user is inserted after the loop */
               reload_in_loop[spill_slot / ctx.wave_size] = block.loop_nest_depth > 0;

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& block_instrs =
                        ctx.program->blocks[last_top_level_block_idx].instructions;
                     block_instrs.insert(std::prev(block_instrs.end()), std::move(create));
                  }
               }

               /* reload sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(
                  aco_opcode::p_reload, Format::PSEUDO, 2, 1);
               reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               reload->operands[1] = Operand::c32(spill_slot % ctx.wave_size);
               reload->definitions[0] = (*it)->definitions[0];
               instructions.emplace_back(aco_ptr<Instruction>(reload));
            }
         } else if (!ctx.unused_remats.count(it->get())) {
            /* keep everything else, dropping rematerialized defs with no remaining users */
            instructions.emplace_back(std::move(*it));
         }
      }
      block.instructions = std::move(instructions);
   }

   /* update required scratch memory */
   ctx.program->config->scratch_bytes_per_wave +=
      align(vgpr_spill_slots * 4 * ctx.program->wave_size, 1024);

   /* SSA elimination inserts copies for logical phis right before p_logical_end
    * So if a linear vgpr is used between that p_logical_end and the branch,
    * we need to ensure logical phis don't choose a definition which aliases
    * the linear vgpr.
    * TODO: Moving the spills and reloads to before p_logical_end might produce
    * slightly better code. */
   for (Block& block : ctx.program->blocks) {
      /* loops exits are already handled */
      if (block.logical_preds.size() <= 1)
         continue;

      bool has_logical_phis = false;
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->opcode == aco_opcode::p_phi) {
            has_logical_phis = true;
            break;
         } else if (instr->opcode != aco_opcode::p_linear_phi) {
            break;
         }
      }
      if (!has_logical_phis)
         continue;

      /* collect linear vgprs used by spills/reloads after p_logical_end in any predecessor */
      std::set<Temp> vgprs;
      for (unsigned pred_idx : block.logical_preds) {
         Block& pred = ctx.program->blocks[pred_idx];
         for (int i = pred.instructions.size() - 1; i >= 0; i--) {
            aco_ptr<Instruction>& pred_instr = pred.instructions[i];
            if (pred_instr->opcode == aco_opcode::p_logical_end) {
               break;
            } else if (pred_instr->opcode == aco_opcode::p_spill ||
                       pred_instr->opcode == aco_opcode::p_reload) {
               vgprs.insert(pred_instr->operands[0].getTemp());
            }
         }
      }
      if (!vgprs.size())
         continue;

      /* keep those vgprs alive past the phis with an explicit user */
      aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(
         aco_opcode::p_end_linear_vgpr, Format::PSEUDO, vgprs.size(), 0)};
      int k = 0;
      for (Temp tmp : vgprs) {
         destr->operands[k++] = Operand(tmp);
      }
      /* find insertion point */
      std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
      while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
         ++it;
      block.instructions.insert(it, std::move(destr));
   }
}
|
|
|
|
|
|
|
|
} /* end namespace */
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
void
|
|
|
|
spill(Program* program, live& live_vars)
|
2019-09-17 12:22:17 +01:00
|
|
|
{
|
|
|
|
program->config->spilled_vgprs = 0;
|
|
|
|
program->config->spilled_sgprs = 0;
|
|
|
|
|
2021-04-20 17:35:41 +01:00
|
|
|
program->progress = CompilationProgress::after_spilling;
|
|
|
|
|
2019-10-24 10:38:37 +01:00
|
|
|
/* no spilling when register pressure is low enough */
|
|
|
|
if (program->num_waves > 0)
|
2019-09-17 12:22:17 +01:00
|
|
|
return;
|
|
|
|
|
2019-10-15 17:23:52 +01:00
|
|
|
/* lower to CSSA before spilling to ensure correctness w.r.t. phis */
|
2020-10-08 09:12:58 +01:00
|
|
|
lower_to_cssa(program, live_vars);
|
2019-10-15 17:23:52 +01:00
|
|
|
|
2019-09-17 12:22:17 +01:00
|
|
|
/* calculate target register demand */
|
2021-05-11 21:58:27 +01:00
|
|
|
const RegisterDemand demand = program->max_reg_demand; /* current max */
|
|
|
|
const uint16_t sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
|
|
|
|
const uint16_t vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
|
|
|
|
uint16_t extra_vgprs = 0;
|
|
|
|
uint16_t extra_sgprs = 0;
|
|
|
|
|
|
|
|
/* calculate extra VGPRs required for spilling SGPRs */
|
|
|
|
if (demand.sgpr > sgpr_limit) {
|
|
|
|
unsigned sgpr_spills = demand.sgpr - sgpr_limit;
|
|
|
|
extra_vgprs = DIV_ROUND_UP(sgpr_spills, program->wave_size) + 1;
|
|
|
|
}
|
|
|
|
/* add extra SGPRs required for spilling VGPRs */
|
|
|
|
if (demand.vgpr + extra_vgprs > vgpr_limit) {
|
|
|
|
extra_sgprs = 5; /* scratch_resource (s4) + scratch_offset (s1) */
|
|
|
|
if (demand.sgpr + extra_sgprs > sgpr_limit) {
|
|
|
|
/* re-calculate in case something has changed */
|
|
|
|
unsigned sgpr_spills = demand.sgpr + extra_sgprs - sgpr_limit;
|
|
|
|
extra_vgprs = DIV_ROUND_UP(sgpr_spills, program->wave_size) + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* the spiller has to target the following register demand */
|
|
|
|
const RegisterDemand target(vgpr_limit - extra_vgprs, sgpr_limit - extra_sgprs);
|
2019-09-17 12:22:17 +01:00
|
|
|
|
|
|
|
/* initialize ctx */
|
2021-05-11 21:58:27 +01:00
|
|
|
spill_ctx ctx(target, program, live_vars.register_demand);
|
2020-03-11 10:02:20 +00:00
|
|
|
compute_global_next_uses(ctx);
|
2019-09-17 12:22:17 +01:00
|
|
|
get_rematerialize_info(ctx);
|
|
|
|
|
|
|
|
/* create spills and reloads */
|
|
|
|
for (unsigned i = 0; i < program->blocks.size(); i++)
|
|
|
|
spill_block(ctx, i);
|
|
|
|
|
|
|
|
/* assign spill slots and DCE rematerialized code */
|
2021-05-11 21:58:27 +01:00
|
|
|
assign_spill_slots(ctx, extra_vgprs);
|
2019-09-17 12:22:17 +01:00
|
|
|
|
|
|
|
/* update live variable information */
|
2020-10-08 09:12:58 +01:00
|
|
|
live_vars = live_var_analysis(program);
|
2019-09-17 12:22:17 +01:00
|
|
|
|
2020-01-10 16:16:43 +00:00
|
|
|
assert(program->num_waves > 0);
|
2019-09-17 12:22:17 +01:00
|
|
|
}
|
|
|
|
|
2021-06-09 09:14:54 +01:00
|
|
|
} // namespace aco
|