/*
 * Copyright 2016 Red Hat Inc.
 * Based on anv:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "nir/nir_builder.h"
#include "radv_meta.h"
#include "radv_private.h"
#include "radv_cs.h"
#include "sid.h"

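/* Timestamp query slots are initialized to this sentinel value; the GPU
 * overwrites it with the real timestamp, so a slot is available iff it no
 * longer reads back as UINT64_MAX.
 */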
#define TIMESTAMP_NOT_READY UINT64_MAX

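/* A pipeline statistics block written by SAMPLE_PIPELINESTAT holds 11
 * 64-bit counters. pipeline_statistics_indices maps each Vulkan
 * VkQueryPipelineStatisticFlagBits position to the slot the hardware uses
 * within that block.
 */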
static const int pipelinestat_block_size = 11 * 8;
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};

static unsigned get_max_db(struct radv_device *device)
{
	unsigned num_db = device->physical_device->rad_info.num_render_backends;
	MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;

	/* Otherwise we need to change the query reset procedure */
	assert(rb_mask == ((1ull << num_db) - 1));

	return num_db;
}
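
/* Emits "if (counter >= count) break; counter++;" at the current builder
 * cursor, which must be inside a NIR loop.
 */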
static void radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
	nir_ssa_def *counter = nir_load_var(b, var);

	nir_if *if_stmt = nir_if_create(b->shader);
	if_stmt->condition = nir_src_for_ssa(nir_uge(b, counter, count));
	nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

	b->cursor = nir_after_cf_list(&if_stmt->then_list);

	nir_jump_instr *instr = nir_jump_instr_create(b->shader, nir_jump_break);
	nir_builder_instr_insert(b, &instr->instr);

	b->cursor = nir_after_cf_node(&if_stmt->cf_node);
	counter = nir_iadd(b, counter, nir_imm_int(b, 1));
	nir_store_var(b, var, counter, 0x1);
}
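
/* Loads a 32-bit push constant from the given byte offset; the 16-byte range
 * matches the push-constant block declared in the pipeline layout below.
 */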
static struct nir_ssa_def *
radv_load_push_int(nir_builder *b, unsigned offset, const char *name)
{
	nir_intrinsic_instr *flags = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_push_constant);
	nir_intrinsic_set_base(flags, 0);
	nir_intrinsic_set_range(flags, 16);
	flags->src[0] = nir_src_for_ssa(nir_imm_int(b, offset));
	flags->num_components = 1;
	nir_ssa_dest_init(&flags->instr, &flags->dest, 1, 32, name);
	nir_builder_instr_insert(b, &flags->instr);
	return &flags->dest.ssa;
}

static nir_shader *
build_occlusion_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 * 	uint32_t flags;
	 * 	uint32_t dst_stride;
	 * };
	 *
	 * uint32_t src_stride = 16 * db_count;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t result = 0;
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_stride * global_id.x;
	 * 	bool available = true;
	 * 	for (int i = 0; i < db_count; ++i) {
	 * 		uint64_t start = src_buf[src_offset + 16 * i];
	 * 		uint64_t end = src_buf[src_offset + 16 * i + 8];
	 * 		if ((start & (1ull << 63)) && (end & (1ull << 63)))
	 * 			result += end - start;
	 * 		else
	 * 			available = false;
	 * 	}
	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
	 * 		if (flags & VK_QUERY_RESULT_64_BIT)
	 * 			dst_buf[dst_offset] = result;
	 * 		else
	 * 			dst_buf[dst_offset] = (uint32_t)result;
	 * 	}
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + elem_size] = available;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "occlusion_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
	nir_variable *outer_counter = nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
	nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
	nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
	nir_variable *available = nir_local_variable_create(b.impl, glsl_int_type(), "available");
	unsigned db_count = get_max_db(device);

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
	                                        b.shader->info.cs.local_size[0],
	                                        b.shader->info.cs.local_size[1],
	                                        b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
	nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
	nir_store_var(&b, available, nir_imm_int(&b, 1), 0x1);

	nir_loop *outer_loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &outer_loop->cf_node);
	b.cursor = nir_after_cf_list(&outer_loop->body);

	nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
	radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

	nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
	load_offset = nir_iadd(&b, input_base, load_offset);

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(load_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 2, 64, NULL);
	load->num_components = 2;
	nir_builder_instr_insert(&b, &load->instr);

	nir_store_var(&b, start, nir_channel(&b, &load->dest.ssa, 0), 0x1);
	nir_store_var(&b, end, nir_channel(&b, &load->dest.ssa, 1), 0x1);
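
	/* The DB sets bit 63 of each counter once the snapshot has been
	 * written; a signed "value < 0" compare on the 64-bit load is a
	 * compact way to test that bit.
	 */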
	nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
	nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));

	nir_if *update_if = nir_if_create(b.shader);
	update_if->condition = nir_src_for_ssa(nir_iand(&b, start_done, end_done));
	nir_cf_node_insert(b.cursor, &update_if->cf_node);

	b.cursor = nir_after_cf_list(&update_if->then_list);

	nir_store_var(&b, result,
	              nir_iadd(&b, nir_load_var(&b, result),
	                       nir_isub(&b, nir_load_var(&b, end),
	                                nir_load_var(&b, start))), 0x1);

	b.cursor = nir_after_cf_list(&update_if->else_list);

	nir_store_var(&b, available, nir_imm_int(&b, 0), 0x1);

	b.cursor = nir_after_cf_node(&outer_loop->cf_node);

	/* Store the result if complete or if partial results have been requested. */

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
	                                        nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *result_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

	nir_if *store_if = nir_if_create(b.shader);
	store_if->condition = nir_src_for_ssa(nir_ior(&b, nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)), nir_load_var(&b, available)));
	nir_cf_node_insert(b.cursor, &store_if->cf_node);

	b.cursor = nir_after_cf_list(&store_if->then_list);

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, result));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_u2u32(&b, nir_load_var(&b, result)));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_base);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&store_if->cf_node);

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_load_var(&b, available));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, result_size, output_base));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	return b.shader;
}

static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device) {
	/* the shader this builds is roughly
	 *
	 * push constants {
	 * 	uint32_t flags;
	 * 	uint32_t dst_stride;
	 * 	uint32_t stats_mask;
	 * 	uint32_t avail_offset;
	 * };
	 *
	 * uint32_t src_stride = pipelinestat_block_size * 2;
	 *
	 * location(binding = 0) buffer dst_buf;
	 * location(binding = 1) buffer src_buf;
	 *
	 * void main() {
	 * 	uint64_t src_offset = src_stride * global_id.x;
	 * 	uint64_t dst_base = dst_stride * global_id.x;
	 * 	uint64_t dst_offset = dst_base;
	 * 	uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
	 * 	uint32_t elem_count = stats_mask >> 16;
	 * 	uint32_t available = src_buf[avail_offset + 4 * global_id.x];
	 * 	if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
	 * 		dst_buf[dst_offset + elem_count * elem_size] = available;
	 * 	}
	 * 	if (available) {
	 * 		// repeat 11 times:
	 * 		if (stats_mask & (1 << 0)) {
	 * 			uint64_t start = src_buf[src_offset + 8 * indices[0]];
	 * 			uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
	 * 			uint64_t result = end - start;
	 * 			if (flags & VK_QUERY_RESULT_64_BIT)
	 * 				dst_buf[dst_offset] = result;
	 * 			else
	 * 				dst_buf[dst_offset] = (uint32_t)result;
	 * 			dst_offset += elem_size;
	 * 		}
	 * 	} else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
	 * 		// Set everything to 0 as we don't know what is valid.
	 * 		for (int i = 0; i < elem_count; ++i)
	 * 			dst_buf[dst_base + elem_size * i] = 0;
	 * 	}
	 * }
	 */
	nir_builder b;
	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "pipeline_statistics_query");
	b.shader->info.cs.local_size[0] = 64;
	b.shader->info.cs.local_size[1] = 1;
	b.shader->info.cs.local_size[2] = 1;

	nir_variable *output_offset = nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

	nir_ssa_def *flags = radv_load_push_int(&b, 0, "flags");
	nir_ssa_def *stats_mask = radv_load_push_int(&b, 8, "stats_mask");
	nir_ssa_def *avail_offset = radv_load_push_int(&b, 12, "avail_offset");

	nir_intrinsic_instr *dst_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	dst_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(dst_buf, 0);
	nir_intrinsic_set_binding(dst_buf, 0);
	nir_ssa_dest_init(&dst_buf->instr, &dst_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &dst_buf->instr);

	nir_intrinsic_instr *src_buf = nir_intrinsic_instr_create(b.shader,
	                                                          nir_intrinsic_vulkan_resource_index);
	src_buf->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_intrinsic_set_desc_set(src_buf, 0);
	nir_intrinsic_set_binding(src_buf, 1);
	nir_ssa_dest_init(&src_buf->instr, &src_buf->dest, 1, 32, NULL);
	nir_builder_instr_insert(&b, &src_buf->instr);

	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
	                                        b.shader->info.cs.local_size[0],
	                                        b.shader->info.cs.local_size[1],
	                                        b.shader->info.cs.local_size[2], 0);
	nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
	global_id = nir_channel(&b, global_id, 0); // We only care about x here.

	nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
	nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
	nir_ssa_def *output_stride = radv_load_push_int(&b, 4, "output_stride");
	nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

	avail_offset = nir_iadd(&b, avail_offset,
	                        nir_imul(&b, global_id, nir_imm_int(&b, 4)));

	nir_intrinsic_instr *load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
	load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
	load->src[1] = nir_src_for_ssa(avail_offset);
	nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
	load->num_components = 1;
	nir_builder_instr_insert(&b, &load->instr);
	nir_ssa_def *available = &load->dest.ssa;

	nir_ssa_def *result_is_64bit = nir_iand(&b, flags,
	                                        nir_imm_int(&b, VK_QUERY_RESULT_64_BIT));
	nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
	nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

	/* Store the availability bit if requested. */

	nir_if *availability_if = nir_if_create(b.shader);
	availability_if->condition = nir_src_for_ssa(nir_iand(&b, flags, nir_imm_int(&b, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)));
	nir_cf_node_insert(b.cursor, &availability_if->cf_node);

	b.cursor = nir_after_cf_list(&availability_if->then_list);

	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(available);
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)));
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&availability_if->cf_node);

	nir_if *available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(available);
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	nir_store_var(&b, output_offset, output_base, 0x1);
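
	/* Emit one guarded copy per possible statistic; results are packed
	 * tightly, so output_offset only advances when the corresponding bit
	 * is set in stats_mask.
	 */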
	for (int i = 0; i < 11; ++i) {
		nir_if *store_if = nir_if_create(b.shader);
		store_if->condition = nir_src_for_ssa(nir_iand(&b, stats_mask, nir_imm_int(&b, 1u << i)));
		nir_cf_node_insert(b.cursor, &store_if->cf_node);

		b.cursor = nir_after_cf_list(&store_if->then_list);

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
		                                        nir_imm_int(&b, pipeline_statistics_indices[i] * 8)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *start = &load->dest.ssa;

		load = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ssbo);
		load->src[0] = nir_src_for_ssa(&src_buf->dest.ssa);
		load->src[1] = nir_src_for_ssa(nir_iadd(&b, input_base,
		                                        nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size)));
		nir_ssa_dest_init(&load->instr, &load->dest, 1, 64, NULL);
		load->num_components = 1;
		nir_builder_instr_insert(&b, &load->instr);
		nir_ssa_def *end = &load->dest.ssa;

		nir_ssa_def *result = nir_isub(&b, end, start);

		/* Store result */
		nir_if *store_64bit_if = nir_if_create(b.shader);
		store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
		nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

		b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

		nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(result);
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

		store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
		store->src[0] = nir_src_for_ssa(nir_u2u32(&b, result));
		store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
		store->src[2] = nir_src_for_ssa(nir_load_var(&b, output_offset));
		nir_intrinsic_set_write_mask(store, 0x1);
		store->num_components = 1;
		nir_builder_instr_insert(&b, &store->instr);

		b.cursor = nir_after_cf_node(&store_64bit_if->cf_node);

		nir_store_var(&b, output_offset,
		              nir_iadd(&b, nir_load_var(&b, output_offset),
		                       elem_size), 0x1);

		b.cursor = nir_after_cf_node(&store_if->cf_node);
	}

	b.cursor = nir_after_cf_list(&available_if->else_list);

	available_if = nir_if_create(b.shader);
	available_if->condition = nir_src_for_ssa(nir_iand(&b, flags,
	                                                   nir_imm_int(&b, VK_QUERY_RESULT_PARTIAL_BIT)));
	nir_cf_node_insert(b.cursor, &available_if->cf_node);

	b.cursor = nir_after_cf_list(&available_if->then_list);

	/* Stores zeros in all outputs. */

	nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
	nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

	nir_loop *loop = nir_loop_create(b.shader);
	nir_builder_cf_insert(&b, &loop->cf_node);
	b.cursor = nir_after_cf_list(&loop->body);

	nir_ssa_def *current_counter = nir_load_var(&b, counter);
	radv_break_on_count(&b, counter, elem_count);

	nir_ssa_def *output_elem = nir_iadd(&b, output_base,
	                                    nir_imul(&b, elem_size, current_counter));

	nir_if *store_64bit_if = nir_if_create(b.shader);
	store_64bit_if->condition = nir_src_for_ssa(result_is_64bit);
	nir_cf_node_insert(b.cursor, &store_64bit_if->cf_node);

	b.cursor = nir_after_cf_list(&store_64bit_if->then_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int64(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_list(&store_64bit_if->else_list);

	store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_ssbo);
	store->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
	store->src[1] = nir_src_for_ssa(&dst_buf->dest.ssa);
	store->src[2] = nir_src_for_ssa(output_elem);
	nir_intrinsic_set_write_mask(store, 0x1);
	store->num_components = 1;
	nir_builder_instr_insert(&b, &store->instr);

	b.cursor = nir_after_cf_node(&loop->cf_node);
	return b.shader;
}

static VkResult radv_device_init_meta_query_state_internal(struct radv_device *device)
{
	VkResult result;
	struct radv_shader_module occlusion_cs = { .nir = NULL };
	struct radv_shader_module pipeline_statistics_cs = { .nir = NULL };

	mtx_lock(&device->meta_state.mtx);
	if (device->meta_state.query.pipeline_statistics_query_pipeline) {
		mtx_unlock(&device->meta_state.mtx);
		return VK_SUCCESS;
	}
	occlusion_cs.nir = build_occlusion_query_shader(device);
	pipeline_statistics_cs.nir = build_pipeline_statistics_query_shader(device);

	VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
		.bindingCount = 2,
		.pBindings = (VkDescriptorSetLayoutBinding[]) {
			{
				.binding = 0,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
			{
				.binding = 1,
				.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				.descriptorCount = 1,
				.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
				.pImmutableSamplers = NULL
			},
		}
	};

	result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device),
						&occlusion_ds_create_info,
						&device->meta_state.alloc,
						&device->meta_state.query.ds_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.setLayoutCount = 1,
		.pSetLayouts = &device->meta_state.query.ds_layout,
		.pushConstantRangeCount = 1,
		.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
	};

	result = radv_CreatePipelineLayout(radv_device_to_handle(device),
					   &occlusion_pl_create_info,
					   &device->meta_state.alloc,
					   &device->meta_state.query.p_layout);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&occlusion_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = occlusion_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &occlusion_vk_pipeline_info, NULL,
					     &device->meta_state.query.occlusion_query_pipeline);
	if (result != VK_SUCCESS)
		goto fail;

	VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		.stage = VK_SHADER_STAGE_COMPUTE_BIT,
		.module = radv_shader_module_to_handle(&pipeline_statistics_cs),
		.pName = "main",
		.pSpecializationInfo = NULL,
	};

	VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
		.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		.stage = pipeline_statistics_pipeline_shader_stage,
		.flags = 0,
		.layout = device->meta_state.query.p_layout,
	};

	result = radv_CreateComputePipelines(radv_device_to_handle(device),
					     radv_pipeline_cache_to_handle(&device->meta_state.cache),
					     1, &pipeline_statistics_vk_pipeline_info, NULL,
					     &device->meta_state.query.pipeline_statistics_query_pipeline);

fail:
	if (result != VK_SUCCESS)
		radv_device_finish_meta_query_state(device);
	ralloc_free(occlusion_cs.nir);
	ralloc_free(pipeline_statistics_cs.nir);
	mtx_unlock(&device->meta_state.mtx);
	return result;
}
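
/* With on-demand compilation the query pipelines are built lazily, on first
 * use, by radv_query_shader() below.
 */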
VkResult radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
{
	if (on_demand)
		return VK_SUCCESS;

	return radv_device_init_meta_query_state_internal(device);
}

void radv_device_finish_meta_query_state(struct radv_device *device)
{
	if (device->meta_state.query.pipeline_statistics_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.pipeline_statistics_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.occlusion_query_pipeline)
		radv_DestroyPipeline(radv_device_to_handle(device),
				     device->meta_state.query.occlusion_query_pipeline,
				     &device->meta_state.alloc);

	if (device->meta_state.query.p_layout)
		radv_DestroyPipelineLayout(radv_device_to_handle(device),
					   device->meta_state.query.p_layout,
					   &device->meta_state.alloc);

	if (device->meta_state.query.ds_layout)
		radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
						device->meta_state.query.ds_layout,
						&device->meta_state.alloc);
}
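
/* Runs one of the resolve shaders built above over "count" queries, copying
 * results from the query pool BO into the destination buffer.
 */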
static void radv_query_shader(struct radv_cmd_buffer *cmd_buffer,
			      VkPipeline *pipeline,
			      struct radeon_winsys_bo *src_bo,
			      struct radeon_winsys_bo *dst_bo,
			      uint64_t src_offset, uint64_t dst_offset,
			      uint32_t src_stride, uint32_t dst_stride,
			      uint32_t count, uint32_t flags,
			      uint32_t pipeline_stats_mask, uint32_t avail_offset)
{
	struct radv_device *device = cmd_buffer->device;
	struct radv_meta_saved_state saved_state;

	if (!*pipeline) {
		VkResult ret = radv_device_init_meta_query_state_internal(device);
		if (ret != VK_SUCCESS) {
			cmd_buffer->record_result = ret;
			return;
		}
	}

	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_COMPUTE_PIPELINE |
		       RADV_META_SAVE_CONSTANTS |
		       RADV_META_SAVE_DESCRIPTORS);

	struct radv_buffer dst_buffer = {
		.bo = dst_bo,
		.offset = dst_offset,
		.size = dst_stride * count
	};

	struct radv_buffer src_buffer = {
		.bo = src_bo,
		.offset = src_offset,
		.size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)
	};

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

	radv_meta_push_descriptor_set(cmd_buffer,
				      VK_PIPELINE_BIND_POINT_COMPUTE,
				      device->meta_state.query.p_layout,
				      0, /* set */
				      2, /* descriptorWriteCount */
				      (VkWriteDescriptorSet[]) {
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 0,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&dst_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      },
					      {
						      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
						      .dstBinding = 1,
						      .dstArrayElement = 0,
						      .descriptorCount = 1,
						      .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
						      .pBufferInfo = &(VkDescriptorBufferInfo) {
							      .buffer = radv_buffer_to_handle(&src_buffer),
							      .offset = 0,
							      .range = VK_WHOLE_SIZE
						      }
					      }
				      });

	/* Encode the number of elements for easy access by the shader. */
	pipeline_stats_mask &= 0x7ff;
	pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
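
	/* The shader addresses the availability words relative to the start
	 * of the source buffer binding, so rebase avail_offset.
	 */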
	avail_offset -= src_offset;

	struct {
		uint32_t flags;
		uint32_t dst_stride;
		uint32_t pipeline_stats_mask;
		uint32_t avail_offset;
	} push_constants = {
		flags,
		dst_stride,
		pipeline_stats_mask,
		avail_offset
	};

	radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
			      device->meta_state.query.p_layout,
			      VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
			      &push_constants);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1;

	if (flags & VK_QUERY_RESULT_WAIT_BIT)
		cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

	radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH;

	radv_meta_restore(&saved_state, cmd_buffer);
}

VkResult radv_CreateQueryPool(
	VkDevice                                    _device,
	const VkQueryPoolCreateInfo*                pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkQueryPool*                                pQueryPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
						 sizeof(*pool), 8,
						 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	uint32_t initial_value = pCreateInfo->queryType == VK_QUERY_TYPE_TIMESTAMP
				 ? TIMESTAMP_NOT_READY : 0;

	if (!pool)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	switch(pCreateInfo->queryType) {
	case VK_QUERY_TYPE_OCCLUSION:
		pool->stride = 16 * get_max_db(device);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		pool->stride = pipelinestat_block_size * 2;
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		pool->stride = 8;
		break;
	default:
		unreachable("creating unhandled query type");
	}

	pool->type = pCreateInfo->queryType;
	pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
	pool->availability_offset = pool->stride * pCreateInfo->queryCount;
	pool->size = pool->availability_offset;
	if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
		pool->size += 4 * pCreateInfo->queryCount;

	pool->bo = device->ws->buffer_create(device->ws, pool->size,
					     64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING);

	if (!pool->bo) {
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}

	pool->ptr = device->ws->buffer_map(pool->bo);

	if (!pool->ptr) {
		device->ws->buffer_destroy(pool->bo);
		vk_free2(&device->alloc, pAllocator, pool);
		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
	}
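
	/* memset() with 0xff bytes yields TIMESTAMP_NOT_READY in every 64-bit
	 * timestamp slot; other pool types start out zeroed.
	 */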
	memset(pool->ptr, initial_value, pool->size);

	*pQueryPool = radv_query_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyQueryPool(
	VkDevice                                    _device,
	VkQueryPool                                 _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

	if (!pool)
		return;

	device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_GetQueryPoolResults(
	VkDevice                                    _device,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	size_t                                      dataSize,
	void*                                       pData,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	char *data = pData;
	VkResult result = VK_SUCCESS;

	for(unsigned i = 0; i < queryCount; ++i, data += stride) {
		char *dest = data;
		unsigned query = firstQuery + i;
		char *src = pool->ptr + query * pool->stride;
		uint32_t available;

		if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
			if (flags & VK_QUERY_RESULT_WAIT_BIT)
				while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
					;
			available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
		}

		switch (pool->type) {
		case VK_QUERY_TYPE_TIMESTAMP: {
			available = *(uint64_t *)src != TIMESTAMP_NOT_READY;

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
				while (*(volatile uint64_t *)src == TIMESTAMP_NOT_READY)
					;
				available = *(uint64_t *)src != TIMESTAMP_NOT_READY;
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = *(uint64_t*)src;
				dest += 8;
			} else {
				*(uint32_t*)dest = *(uint32_t*)src;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_OCCLUSION: {
			volatile uint64_t const *src64 = (volatile uint64_t const *)src;
			uint64_t sample_count = 0;
			int db_count = get_max_db(device);
			available = 1;
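
			/* Each render backend writes a (begin, end) counter
			 * pair; with WAIT_BIT set, re-read the pair until
			 * both snapshots carry the valid bit (bit 63).
			 */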
			for (int i = 0; i < db_count; ++i) {
				uint64_t start, end;
				do {
					start = src64[2 * i];
					end = src64[2 * i + 1];
				} while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) && (flags & VK_QUERY_RESULT_WAIT_BIT));

				if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
					available = 0;
				else {
					sample_count += end - start;
				}
			}

			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = sample_count;
				dest += 8;
			} else {
				*(uint32_t*)dest = sample_count;
				dest += 4;
			}
			break;
		}
		case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
			if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
				result = VK_NOT_READY;
				break;
			}

			const uint64_t *start = (uint64_t*)src;
			const uint64_t *stop = (uint64_t*)(src + pipelinestat_block_size);
			if (flags & VK_QUERY_RESULT_64_BIT) {
				uint64_t *dst = (uint64_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 8;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							start[pipeline_statistics_indices[i]];

			} else {
				uint32_t *dst = (uint32_t*)dest;
				dest += util_bitcount(pool->pipeline_stats_mask) * 4;
				for(int i = 0; i < 11; ++i)
					if(pool->pipeline_stats_mask & (1u << i))
						*dst++ = stop[pipeline_statistics_indices[i]] -
							start[pipeline_statistics_indices[i]];
			}
			break;
		}
		default:
			unreachable("trying to get results of unhandled query type");
		}

		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			if (flags & VK_QUERY_RESULT_64_BIT) {
				*(uint64_t*)dest = available;
			} else {
				*(uint32_t*)dest = available;
			}
		}
	}

	return result;
}

void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	VkBuffer                                    dstBuffer,
	VkDeviceSize                                dstOffset,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;
				uint64_t src_va = va + query * pool->stride + pool->stride - 4;

				/* Waits on the upper word of the last DB entry */
				radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
				radeon_emit(cs, 5 | WAIT_REG_MEM_MEM_SPACE(1));
				radeon_emit(cs, src_va);
				radeon_emit(cs, src_va >> 32);
				radeon_emit(cs, 0x80000000); /* reference value */
				radeon_emit(cs, 0xffffffff); /* mask */
				radeon_emit(cs, 4); /* poll interval */
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  get_max_db(cmd_buffer->device) * 16, stride,
				  queryCount, flags, 0, 0);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
				unsigned query = firstQuery + i;

				radeon_check_space(cmd_buffer->device->ws, cs, 7);

				uint64_t avail_va = va + pool->availability_offset + 4 * query;

				/* This waits on the ME. All copies below are done on the ME */
				si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
			}
		}
		radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
				  pool->bo, dst_buffer->bo, firstQuery * pool->stride,
				  dst_buffer->offset + dstOffset,
				  pipelinestat_block_size * 2, stride, queryCount, flags,
				  pool->pipeline_stats_mask,
				  pool->availability_offset + 4 * firstQuery);
		break;
	case VK_QUERY_TYPE_TIMESTAMP:
		for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
			unsigned query = firstQuery + i;
			uint64_t local_src_va = va + query * pool->stride;

			MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);

			if (flags & VK_QUERY_RESULT_WAIT_BIT) {
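				/* The slot was initialized to
				 * TIMESTAMP_NOT_READY, so poll until its
				 * first dword stops reading back as the
				 * 0xffffffff fill pattern.
				 */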
				radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
				radeon_emit(cs, WAIT_REG_MEM_NOT_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
				radeon_emit(cs, local_src_va);
				radeon_emit(cs, local_src_va >> 32);
				radeon_emit(cs, TIMESTAMP_NOT_READY >> 32);
				radeon_emit(cs, 0xffffffff);
				radeon_emit(cs, 4);
			}
			if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
				uint64_t avail_dest_va = dest_va + elem_size;

				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_MEM));
				radeon_emit(cs, local_src_va);
				radeon_emit(cs, local_src_va >> 32);
				radeon_emit(cs, avail_dest_va);
				radeon_emit(cs, avail_dest_va >> 32);
			}

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_MEM) |
					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
			radeon_emit(cs, local_src_va);
			radeon_emit(cs, local_src_va >> 32);
			radeon_emit(cs, dest_va);
			radeon_emit(cs, dest_va >> 32);

			assert(cs->cdw <= cdw_max);
		}
		break;
	default:
		unreachable("trying to get results of unhandled query type");
	}
}

void radv_CmdResetQueryPool(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP
			 ? TIMESTAMP_NOT_READY : 0;
	uint32_t flush_bits = 0;

	flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
				       firstQuery * pool->stride,
				       queryCount * pool->stride, value);

	if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
		flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
					       pool->availability_offset + firstQuery * 4,
					       queryCount * 4, 0);
	}

	if (flush_bits) {
		/* Only need to flush caches for the compute shader path. */
		cmd_buffer->pending_reset_query = true;
		cmd_buffer->state.flush_bits |= flush_bits;
	}
}
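
/* Emits the packets that start a query of the given type; "va" points at the
 * query slot inside the pool BO.
 */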
static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
			     uint64_t va,
			     VkQueryType query_type,
			     VkQueryControlFlags flags)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1) {
			if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
				/* This is the first occlusion query, enable
				 * the hint if the precision bit is set.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;
			}

			radv_set_db_count_control(cmd_buffer);
		} else {
			if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
			    !cmd_buffer->state.perfect_occlusion_queries_enabled) {
				/* This is not the first query, but this one
				 * needs to enable precision, DB_COUNT_CONTROL
				 * has to be updated accordingly.
				 */
				cmd_buffer->state.perfect_occlusion_queries_enabled = true;

				radv_set_db_count_control(cmd_buffer);
			}
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 4);

		++cmd_buffer->state.active_pipeline_queries;
		if (cmd_buffer->state.active_pipeline_queries == 1) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}
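
/* Emits the packets that end a query: a second counter snapshot plus, for
 * pipeline statistics, a bottom-of-pipe write of the availability word.
 */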
static void emit_end_query(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t va, uint64_t avail_va,
			   VkQueryType query_type)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	switch (query_type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0) {
			radv_set_db_count_control(cmd_buffer);

			/* Reset the perfect occlusion queries hint now that no
			 * queries are active.
			 */
			cmd_buffer->state.perfect_occlusion_queries_enabled = false;
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);

		break;
	case VK_QUERY_TYPE_PIPELINE_STATISTICS:
		radeon_check_space(cmd_buffer->device->ws, cs, 16);

		cmd_buffer->state.active_pipeline_queries--;
		if (cmd_buffer->state.active_pipeline_queries == 0) {
			cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
		}
		va += pipelinestat_block_size;

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   radv_cmd_buffer_uses_mec(cmd_buffer),
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   EOP_DATA_SEL_VALUE_32BIT,
					   avail_va, 0, 1,
					   cmd_buffer->gfx9_eop_bug_va);
		break;
	default:
		unreachable("ending unhandled query type");
	}
}

void radv_CmdBeginQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	if (cmd_buffer->pending_reset_query) {
		if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
			/* Only need to flush caches if the query pool size is
			 * large enough to be reset using the compute shader
			 * path. Small pools don't need any cache flushes
			 * because we use a CP dma clear.
			 */
			si_emit_cache_flush(cmd_buffer);
			cmd_buffer->pending_reset_query = false;
		}
	}

	va += pool->stride * query;

	emit_begin_query(cmd_buffer, va, pool->type, flags);
}


void radv_CmdEndQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	/* Do not need to add the pool BO to the list because the query must
	 * currently be active, which means the BO is already in the list.
	 */
	emit_end_query(cmd_buffer, va, avail_va, pool->type);

	/*
	 * For multiview we have to emit a query for each bit in the mask,
	 * however the first query we emit will get the totals for all the
	 * operations, so we don't want to get a real value in the other
	 * queries. This emits a fake begin/end sequence so the waiting
	 * code gets a completed query value and doesn't hang, but the
	 * query returns 0.
	 */
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
		uint64_t avail_va = va + pool->availability_offset + 4 * query;

		for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
			va += pool->stride;
			avail_va += 4;
			emit_begin_query(cmd_buffer, va, pool->type, 0);
			emit_end_query(cmd_buffer, va, avail_va, pool->type);
		}
	}
}

void radv_CmdWriteTimestamp(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlagBits                     pipelineStage,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(pool->bo);
	uint64_t query_va = va + pool->stride * query;

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

	int num_queries = 1;
	if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
		num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);

	for (unsigned i = 0; i < num_queries; i++) {
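		/* TOP_OF_PIPE can copy the current GPU clock right away; any
		 * other stage uses a bottom-of-pipe timestamp event so the
		 * value lands only after prior work completes.
		 */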
		switch(pipelineStage) {
		case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
				    COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
				    COPY_DATA_DST_SEL(V_370_MEM_ASYNC));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, query_va);
			radeon_emit(cs, query_va >> 32);
			break;
		default:
			si_cs_emit_write_event_eop(cs,
						   cmd_buffer->device->physical_device->rad_info.chip_class,
						   mec,
						   V_028A90_BOTTOM_OF_PIPE_TS, 0,
						   EOP_DATA_SEL_TIMESTAMP,
						   query_va, 0, 0,
						   cmd_buffer->gfx9_eop_bug_va);
			break;
		}
		query_va += pool->stride;
	}
	assert(cmd_buffer->cs->cdw <= cdw_max);
}