zink: move update_descriptors & related funcs to zink_descriptors.c
keep zink_draw.c for draw stuff Reviewed-by: Dave Airlie <airlied@redhat.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9883>
This commit is contained in:
parent
812f7ecb13
commit
c7f9dc5891
|
@ -675,3 +675,557 @@ zink_descriptor_pool_init(struct zink_context *ctx)
|
|||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
desc_set_res_add(struct zink_descriptor_set *zds, struct zink_resource *res, unsigned int i, bool cache_hit)
|
||||
{
|
||||
/* if we got a cache hit, we have to verify that the cached set is still valid;
|
||||
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
|
||||
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
|
||||
* whenever a resource is destroyed
|
||||
*/
|
||||
assert(!cache_hit || zds->res_objs[i] == (res ? res->obj : NULL));
|
||||
if (!cache_hit)
|
||||
zink_resource_desc_set_add(res, zds, i);
|
||||
}
|
||||
|
||||
static void
|
||||
desc_set_sampler_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_sampler_view *sv,
|
||||
struct zink_sampler_state *state, unsigned int i, bool is_buffer, bool cache_hit)
|
||||
{
|
||||
/* if we got a cache hit, we have to verify that the cached set is still valid;
|
||||
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
|
||||
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
|
||||
* whenever a resource is destroyed
|
||||
*/
|
||||
#ifndef NDEBUG
|
||||
uint32_t cur_hash = zink_get_sampler_view_hash(ctx, zds->sampler_views[i], is_buffer);
|
||||
uint32_t new_hash = zink_get_sampler_view_hash(ctx, sv, is_buffer);
|
||||
#endif
|
||||
assert(!cache_hit || cur_hash == new_hash);
|
||||
assert(!cache_hit || zds->sampler_states[i] == state);
|
||||
if (!cache_hit) {
|
||||
zink_sampler_view_desc_set_add(sv, zds, i);
|
||||
zink_sampler_state_desc_set_add(state, zds, i);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
desc_set_image_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_image_view *image_view,
|
||||
unsigned int i, bool is_buffer, bool cache_hit)
|
||||
{
|
||||
/* if we got a cache hit, we have to verify that the cached set is still valid;
|
||||
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
|
||||
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
|
||||
* whenever a resource is destroyed
|
||||
*/
|
||||
#ifndef NDEBUG
|
||||
uint32_t cur_hash = zink_get_image_view_hash(ctx, zds->image_views[i], is_buffer);
|
||||
uint32_t new_hash = zink_get_image_view_hash(ctx, image_view, is_buffer);
|
||||
#endif
|
||||
assert(!cache_hit || cur_hash == new_hash);
|
||||
if (!cache_hit)
|
||||
zink_image_view_desc_set_add(image_view, zds, i);
|
||||
}
|
||||
|
||||
static bool
|
||||
barrier_equals(const void *a, const void *b)
|
||||
{
|
||||
const struct zink_descriptor_barrier *t1 = a, *t2 = b;
|
||||
if (t1->res != t2->res)
|
||||
return false;
|
||||
if ((t1->access & t2->access) != t2->access)
|
||||
return false;
|
||||
if (t1->layout != t2->layout)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
barrier_hash(const void *key)
|
||||
{
|
||||
return _mesa_hash_data(key, offsetof(struct zink_descriptor_barrier, stage));
|
||||
}
|
||||
|
||||
static inline void
|
||||
add_barrier(struct zink_resource *res, VkImageLayout layout, VkAccessFlags flags, enum pipe_shader_type stage, struct util_dynarray *barriers, struct set *ht)
|
||||
{
|
||||
VkPipelineStageFlags pipeline = zink_pipeline_flags_from_stage(zink_shader_stage(stage));
|
||||
struct zink_descriptor_barrier key = {res, layout, flags, 0}, *t;
|
||||
|
||||
uint32_t hash = barrier_hash(&key);
|
||||
struct set_entry *entry = _mesa_set_search_pre_hashed(ht, hash, &key);
|
||||
if (entry)
|
||||
t = (struct zink_descriptor_barrier*)entry->key;
|
||||
else {
|
||||
util_dynarray_append(barriers, struct zink_descriptor_barrier, key);
|
||||
t = util_dynarray_element(barriers, struct zink_descriptor_barrier,
|
||||
util_dynarray_num_elements(barriers, struct zink_descriptor_barrier) - 1);
|
||||
t->stage = 0;
|
||||
t->layout = layout;
|
||||
t->res = res;
|
||||
t->access = flags;
|
||||
_mesa_set_add_pre_hashed(ht, hash, t);
|
||||
}
|
||||
t->stage |= pipeline;
|
||||
}
|
||||
|
||||
/* qsort comparator ordering dynamic-UBO offset records by their binding
 * number, as required by the vkCmdBindDescriptorSets dynamic-offset rules.
 *
 * Uses explicit comparisons instead of the original subtraction:
 * `*binding_a - *binding_b` on uint32_t is converted to int, so bindings
 * whose difference exceeds INT_MAX produce a wrong sign and misorder the
 * array.  The three-way comparison cannot overflow.
 */
static int
cmp_dynamic_offset_binding(const void *a, const void *b)
{
   const uint32_t *binding_a = a, *binding_b = b;
   if (*binding_a < *binding_b)
      return -1;
   return *binding_a > *binding_b;
}
|
||||
|
||||
/* Flush a descriptor set's pending writes to the driver and (re)apply the
 * barriers recorded on the set.
 *
 * cache_hit          - set contents are already valid; vkUpdateDescriptorSets
 *                      is skipped, but the stored barriers are still replayed
 * need_resource_refs - the current batch must take read/write references on
 *                      every resource the set's barriers touch
 */
static void
write_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds, unsigned num_wds, VkWriteDescriptorSet *wds,
                  bool cache_hit, bool need_resource_refs)
{
   struct zink_batch *batch = &ctx->batch;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(zds->desc_set);

   if (!cache_hit && num_wds)
      vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
   /* barriers persist on the set across cache hits, so this loop is not
    * gated on cache_hit; a pool with zero descriptors has nothing stored */
   for (int i = 0; zds->pool->key.num_descriptors && i < util_dynarray_num_elements(&zds->barriers, struct zink_descriptor_barrier); ++i) {
      struct zink_descriptor_barrier *barrier = util_dynarray_element(&zds->barriers, struct zink_descriptor_barrier, i);
      if (need_resource_refs)
         zink_batch_reference_resource_rw(batch, barrier->res, zink_resource_access_is_write(barrier->access));
      zink_resource_barrier(ctx, NULL, barrier->res,
                            barrier->layout, barrier->access, barrier->stage);
   }
}
|
||||
|
||||
static unsigned
|
||||
init_write_descriptor(struct zink_shader *shader, struct zink_descriptor_set *zds, int idx, VkWriteDescriptorSet *wd, unsigned num_wds)
|
||||
{
|
||||
wd->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
|
||||
wd->pNext = NULL;
|
||||
wd->dstBinding = shader->bindings[zds->pool->type][idx].binding;
|
||||
wd->dstArrayElement = 0;
|
||||
wd->descriptorCount = shader->bindings[zds->pool->type][idx].size;
|
||||
wd->descriptorType = shader->bindings[zds->pool->type][idx].type;
|
||||
wd->dstSet = zds->desc_set;
|
||||
return num_wds + 1;
|
||||
}
|
||||
|
||||
/* Gather every UBO binding of the current gfx/compute program into
 * VkWriteDescriptorSet entries, record uniform-read barriers, collect the
 * dynamic-UBO offsets, and flush everything through write_descriptors().
 *
 * dynamic_offsets / dynamic_offset_idx return the offsets (sorted by binding
 * number) that the caller must pass to vkCmdBindDescriptorSets for this set.
 */
static void
update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                       bool is_compute, bool cache_hit, bool need_resource_refs,
                       uint32_t *dynamic_offsets, unsigned *dynamic_offset_idx)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned num_descriptors = pg->pool[zds->pool->type]->key.num_descriptors;
   unsigned num_bindings = zds->pool->num_resources;
   /* VLAs sized from the pool; num_descriptors/num_bindings bound the asserts below */
   VkWriteDescriptorSet wds[num_descriptors];
   VkDescriptorBufferInfo buffer_infos[num_bindings];
   unsigned num_wds = 0;
   unsigned num_buffer_info = 0;
   unsigned num_resources = 0;
   struct zink_shader **stages;
   /* dynamic-offset records kept as (binding, offset) pairs so they can be
    * qsorted by binding before being handed back */
   struct {
      uint32_t binding;
      uint32_t offset;
   } dynamic_buffers[PIPE_MAX_CONSTANT_BUFFERS];
   unsigned dynamic_offset_count = 0;
   /* barrier dedup table; only needed when the set contents are being rebuilt */
   struct set *ht = NULL;
   if (!cache_hit) {
      ht = _mesa_set_create(NULL, barrier_hash, barrier_equals);
      _mesa_set_resize(ht, num_bindings);
   }

   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
   if (is_compute)
      stages = &ctx->curr_compute->shader;
   else
      stages = &ctx->gfx_stages[0];

   /* NOTE(review): unlike the ssbo/sampler/image updaters, this loop is not
    * gated on (!cache_hit || need_resource_refs) — presumably because the
    * dynamic offsets must be regenerated on every bind; confirm before
    * changing */
   for (int i = 0; i < num_stages; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;
      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

      for (int j = 0; j < shader->num_bindings[zds->pool->type]; j++) {
         int index = shader->bindings[zds->pool->type][j].index;
         assert(shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
         assert(ctx->ubos[stage][index].buffer_size <= screen->info.props.limits.maxUniformBufferRange);
         struct zink_resource *res = zink_resource(ctx->ubos[stage][index].buffer);
         assert(!res || ctx->ubos[stage][index].buffer_size > 0);
         assert(!res || ctx->ubos[stage][index].buffer);
         assert(num_resources < num_bindings);
         desc_set_res_add(zds, res, num_resources++, cache_hit);
         assert(num_buffer_info < num_bindings);
         /* no bound buffer: use a null descriptor when supported, otherwise
          * fall back to the dummy vertex buffer */
         buffer_infos[num_buffer_info].buffer = res ? res->obj->buffer :
                                                (screen->info.rb2_feats.nullDescriptor ?
                                                 VK_NULL_HANDLE :
                                                 zink_resource(ctx->dummy_vertex_buffer)->obj->buffer);
         if (shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
            /* dynamic UBOs bake the offset into pDynamicOffsets instead of
             * the buffer info */
            buffer_infos[num_buffer_info].offset = 0;
            /* we're storing this to qsort later */
            dynamic_buffers[dynamic_offset_count].binding = shader->bindings[zds->pool->type][j].binding;
            dynamic_buffers[dynamic_offset_count++].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
         } else
            buffer_infos[num_buffer_info].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
         buffer_infos[num_buffer_info].range = res ? ctx->ubos[stage][index].buffer_size : VK_WHOLE_SIZE;
         if (res && !cache_hit)
            add_barrier(res, 0, VK_ACCESS_UNIFORM_READ_BIT, stage, &zds->barriers, ht);
         wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
         ++num_buffer_info;

         num_wds = init_write_descriptor(shader, zds, j, &wds[num_wds], num_wds);
      }
   }
   _mesa_set_destroy(ht, NULL);
   /* Values are taken from pDynamicOffsets in an order such that all entries for set N come before set N+1;
    * within a set, entries are ordered by the binding numbers in the descriptor set layouts
    * - vkCmdBindDescriptorSets spec
    *
    * because of this, we have to sort all the dynamic offsets by their associated binding to ensure they
    * match what the driver expects
    */
   if (dynamic_offset_count > 1)
      qsort(dynamic_buffers, dynamic_offset_count, sizeof(uint32_t) * 2, cmp_dynamic_offset_binding);
   for (int i = 0; i < dynamic_offset_count; i++)
      dynamic_offsets[i] = dynamic_buffers[i].offset;
   *dynamic_offset_idx = dynamic_offset_count;

   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
|
||||
|
||||
/* Gather every SSBO binding of the current gfx/compute program into
 * VkWriteDescriptorSet entries, record shader read/write barriers, and
 * flush through write_descriptors().  Skips the per-binding work entirely
 * when the set is a cache hit and no resource refs are needed.
 */
static void
update_ssbo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                        bool is_compute, bool cache_hit, bool need_resource_refs)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
   /* only read inside asserts, hence ASSERTED */
   ASSERTED struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned num_descriptors = pg->pool[zds->pool->type]->key.num_descriptors;
   unsigned num_bindings = zds->pool->num_resources;
   VkWriteDescriptorSet wds[num_descriptors];
   VkDescriptorBufferInfo buffer_infos[num_bindings];
   unsigned num_wds = 0;
   unsigned num_buffer_info = 0;
   unsigned num_resources = 0;
   struct zink_shader **stages;
   /* barrier dedup table; only needed when the set contents are being rebuilt */
   struct set *ht = NULL;
   if (!cache_hit) {
      ht = _mesa_set_create(NULL, barrier_hash, barrier_equals);
      _mesa_set_resize(ht, num_bindings);
   }

   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
   if (is_compute)
      stages = &ctx->curr_compute->shader;
   else
      stages = &ctx->gfx_stages[0];

   for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;
      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

      for (int j = 0; j < shader->num_bindings[zds->pool->type]; j++) {
         int index = shader->bindings[zds->pool->type][j].index;
         assert(shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
         assert(num_resources < num_bindings);
         struct zink_resource *res = zink_resource(ctx->ssbos[stage][index].buffer);
         desc_set_res_add(zds, res, num_resources++, cache_hit);
         if (res) {
            assert(ctx->ssbos[stage][index].buffer_size > 0);
            assert(ctx->ssbos[stage][index].buffer_size <= screen->info.props.limits.maxStorageBufferRange);
            assert(num_buffer_info < num_bindings);
            /* barrier access depends on whether this slot is writable */
            unsigned flag = VK_ACCESS_SHADER_READ_BIT;
            if (ctx->writable_ssbos[stage] & (1 << index))
               flag |= VK_ACCESS_SHADER_WRITE_BIT;
            if (!cache_hit)
               add_barrier(res, 0, flag, stage, &zds->barriers, ht);
            buffer_infos[num_buffer_info].buffer = res->obj->buffer;
            buffer_infos[num_buffer_info].offset = ctx->ssbos[stage][index].buffer_offset;
            buffer_infos[num_buffer_info].range = ctx->ssbos[stage][index].buffer_size;
         } else {
            /* unbound slot: requires nullDescriptor support */
            assert(screen->info.rb2_feats.nullDescriptor);
            buffer_infos[num_buffer_info].buffer = VK_NULL_HANDLE;
            buffer_infos[num_buffer_info].offset = 0;
            buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
         }
         wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
         ++num_buffer_info;

         num_wds = init_write_descriptor(shader, zds, j, &wds[num_wds], num_wds);
      }
   }
   _mesa_set_destroy(ht, NULL);
   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
|
||||
|
||||
/* Fill the image or texel-buffer payload of a write-descriptor entry for one
 * element of a sampler/image binding.
 *
 * Exactly one of image_info/buffer_info is consumed depending on the resource
 * target (or the descriptor type, for the null-resource path), and the
 * matching counter is incremented.  do_set is true only for the first element
 * of an array binding (k == 0 at the callers), so the wd payload pointer is
 * aimed at the start of the run just once.
 */
static void
handle_image_descriptor(struct zink_screen *screen, struct zink_resource *res, enum zink_descriptor_type type, VkDescriptorType vktype, VkWriteDescriptorSet *wd,
                        VkImageLayout layout, unsigned *num_image_info, VkDescriptorImageInfo *image_info,
                        unsigned *num_buffer_info, VkBufferView *buffer_info,
                        struct zink_sampler_state *sampler,
                        VkImageView imageview, VkBufferView bufferview, bool do_set)
{
   if (!res) {
      /* if we're hitting this assert often, we can probably just throw a junk buffer in since
       * the results of this codepath are undefined in ARB_texture_buffer_object spec
       */
      assert(screen->info.rb2_feats.nullDescriptor);

      switch (vktype) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         *buffer_info = VK_NULL_HANDLE;
         if (do_set)
            wd->pTexelBufferView = buffer_info;
         ++(*num_buffer_info);
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         image_info->imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
         image_info->imageView = VK_NULL_HANDLE;
         image_info->sampler = sampler ? sampler->sampler : VK_NULL_HANDLE;
         if (do_set)
            wd->pImageInfo = image_info;
         ++(*num_image_info);
         break;
      default:
         unreachable("unknown descriptor type");
      }
   } else if (res->base.target != PIPE_BUFFER) {
      /* real image resource */
      assert(layout != VK_IMAGE_LAYOUT_UNDEFINED);
      image_info->imageLayout = layout;
      image_info->imageView = imageview;
      image_info->sampler = sampler ? sampler->sampler : VK_NULL_HANDLE;
      if (do_set)
         wd->pImageInfo = image_info;
      ++(*num_image_info);
   } else {
      /* buffer resource: consumes a texel buffer view */
      if (do_set)
         wd->pTexelBufferView = buffer_info;
      *buffer_info = bufferview;
      ++(*num_buffer_info);
   }
}
|
||||
|
||||
/* Gather every sampler-view binding (combined image/sampler or uniform texel
 * buffer) of the current program into write-descriptor entries, record
 * shader-read barriers, track view/state usage on the set and batch, and
 * flush through write_descriptors().
 */
static void
update_sampler_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                           bool is_compute, bool cache_hit, bool need_resource_refs)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned num_descriptors = pg->pool[zds->pool->type]->key.num_descriptors;
   unsigned num_bindings = zds->pool->num_resources;
   VkWriteDescriptorSet wds[num_descriptors];
   VkDescriptorImageInfo image_infos[num_bindings];
   VkBufferView buffer_views[num_bindings];
   unsigned num_wds = 0;
   unsigned num_image_info = 0;
   unsigned num_buffer_info = 0;
   unsigned num_resources = 0;
   struct zink_shader **stages;
   /* barrier dedup table; only needed when the set contents are being rebuilt */
   struct set *ht = NULL;
   if (!cache_hit) {
      ht = _mesa_set_create(NULL, barrier_hash, barrier_equals);
      _mesa_set_resize(ht, num_bindings);
   }

   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
   if (is_compute)
      stages = &ctx->curr_compute->shader;
   else
      stages = &ctx->gfx_stages[0];

   for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;
      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

      for (int j = 0; j < shader->num_bindings[zds->pool->type]; j++) {
         int index = shader->bindings[zds->pool->type][j].index;
         assert(shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ||
                shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);

         /* one write-descriptor entry per binding, but arrays contribute one
          * payload element per k */
         for (unsigned k = 0; k < shader->bindings[zds->pool->type][j].size; k++) {
            VkImageView imageview = VK_NULL_HANDLE;
            VkBufferView bufferview = VK_NULL_HANDLE;
            struct zink_resource *res = NULL;
            VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
            struct zink_sampler_state *sampler = NULL;

            struct pipe_sampler_view *psampler_view = ctx->sampler_views[stage][index + k];
            struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);
            res = psampler_view ? zink_resource(psampler_view->texture) : NULL;
            if (res && res->base.target == PIPE_BUFFER) {
               bufferview = sampler_view->buffer_view->buffer_view;
            } else if (res) {
               imageview = sampler_view->image_view->image_view;
               layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
               sampler = ctx->sampler_states[stage][index + k];
            }
            assert(num_resources < num_bindings);
            if (res) {
               if (!cache_hit)
                  add_barrier(res, layout, VK_ACCESS_SHADER_READ_BIT, stage, &zds->barriers, ht);
            }
            assert(num_image_info < num_bindings);
            /* !k: only the first array element sets the wd payload pointer */
            handle_image_descriptor(screen, res, zds->pool->type, shader->bindings[zds->pool->type][j].type,
                                    &wds[num_wds], layout, &num_image_info, &image_infos[num_image_info],
                                    &num_buffer_info, &buffer_views[num_buffer_info],
                                    sampler, imageview, bufferview, !k);
            desc_set_sampler_add(ctx, zds, sampler_view, sampler, num_resources++,
                                 zink_shader_descriptor_is_buffer(shader, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, j),
                                 cache_hit);
            struct zink_batch *batch = &ctx->batch;
            if (sampler_view)
               zink_batch_reference_sampler_view(batch, sampler_view);
            if (sampler)
               /* this only tracks the most recent usage for now */
               zink_batch_usage_set(&sampler->batch_uses, batch->state->fence.batch_id);
         }
         assert(num_wds < num_descriptors);

         num_wds = init_write_descriptor(shader, zds, j, &wds[num_wds], num_wds);
      }
   }
   _mesa_set_destroy(ht, NULL);
   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
|
||||
|
||||
/* Gather every shader-image binding (storage image or storage texel buffer)
 * of the current program into write-descriptor entries, record read/write
 * barriers according to the image view's declared access, and flush through
 * write_descriptors().
 */
static void
update_image_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                         bool is_compute, bool cache_hit, bool need_resource_refs)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned num_descriptors = pg->pool[zds->pool->type]->key.num_descriptors;
   unsigned num_bindings = zds->pool->num_resources;
   VkWriteDescriptorSet wds[num_descriptors];
   VkDescriptorImageInfo image_infos[num_bindings];
   VkBufferView buffer_views[num_bindings];
   unsigned num_wds = 0;
   unsigned num_image_info = 0;
   unsigned num_buffer_info = 0;
   unsigned num_resources = 0;
   struct zink_shader **stages;
   /* barrier dedup table; only needed when the set contents are being rebuilt */
   struct set *ht = NULL;
   if (!cache_hit) {
      ht = _mesa_set_create(NULL, barrier_hash, barrier_equals);
      _mesa_set_resize(ht, num_bindings);
   }

   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
   if (is_compute)
      stages = &ctx->curr_compute->shader;
   else
      stages = &ctx->gfx_stages[0];

   for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;
      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

      for (int j = 0; j < shader->num_bindings[zds->pool->type]; j++) {
         int index = shader->bindings[zds->pool->type][j].index;
         assert(shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
                shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);

         /* one write-descriptor entry per binding, but arrays contribute one
          * payload element per k */
         for (unsigned k = 0; k < shader->bindings[zds->pool->type][j].size; k++) {
            VkImageView imageview = VK_NULL_HANDLE;
            VkBufferView bufferview = VK_NULL_HANDLE;
            struct zink_resource *res = NULL;
            VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
            struct zink_image_view *image_view = &ctx->image_views[stage][index + k];
            assert(image_view);
            res = zink_resource(image_view->base.resource);

            if (res && image_view->base.resource->target == PIPE_BUFFER) {
               bufferview = image_view->buffer_view->buffer_view;
            } else if (res) {
               imageview = image_view->surface->image_view;
               /* storage images are accessed in GENERAL layout */
               layout = VK_IMAGE_LAYOUT_GENERAL;
            }
            assert(num_resources < num_bindings);
            desc_set_image_add(ctx, zds, image_view, num_resources++,
                               zink_shader_descriptor_is_buffer(shader, ZINK_DESCRIPTOR_TYPE_IMAGE, j),
                               cache_hit);
            if (res) {
               /* barrier access mirrors the PIPE_IMAGE_ACCESS_* bits declared
                * on the image view */
               VkAccessFlags flags = 0;
               if (image_view->base.access & PIPE_IMAGE_ACCESS_READ)
                  flags |= VK_ACCESS_SHADER_READ_BIT;
               if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
                  flags |= VK_ACCESS_SHADER_WRITE_BIT;
               if (!cache_hit)
                  add_barrier(res, layout, flags, stage, &zds->barriers, ht);
            }

            assert(num_image_info < num_bindings);
            /* !k: only the first array element sets the wd payload pointer */
            handle_image_descriptor(screen, res, zds->pool->type, shader->bindings[zds->pool->type][j].type,
                                    &wds[num_wds], layout, &num_image_info, &image_infos[num_image_info],
                                    &num_buffer_info, &buffer_views[num_buffer_info],
                                    NULL, imageview, bufferview, !k);

            struct zink_batch *batch = &ctx->batch;
            if (res)
               zink_batch_reference_image_view(batch, image_view);
         }
         assert(num_wds < num_descriptors);

         num_wds = init_write_descriptor(shader, zds, j, &wds[num_wds], num_wds);
      }
   }
   _mesa_set_destroy(ht, NULL);
   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
|
||||
|
||||
/* Public entry point: look up (or allocate) the descriptor set for each
 * descriptor type of the current gfx/compute program, rebuild any set that
 * missed the cache, then bind every live set on the current command buffer
 * (passing UBO dynamic offsets only for the UBO set).
 *
 * NOTE(review): the 'screen' parameter is not referenced in this body —
 * possibly kept for API symmetry; confirm before removing.
 */
void
zink_descriptors_update(struct zink_context *ctx, struct zink_screen *screen, bool is_compute)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;

   zink_context_update_descriptor_states(ctx, is_compute);
   bool cache_hit[ZINK_DESCRIPTOR_TYPES];
   bool need_resource_refs[ZINK_DESCRIPTOR_TYPES];
   struct zink_descriptor_set *zds[ZINK_DESCRIPTOR_TYPES];
   /* resolve one set per descriptor type; a program with no pool for a type
    * simply has no set to bind */
   for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
      if (pg->pool[h])
         zds[h] = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit[h], &need_resource_refs[h]);
      else
         zds[h] = NULL;
   }
   struct zink_batch *batch = &ctx->batch;
   zink_batch_reference_program(batch, pg);

   uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
   unsigned dynamic_offset_idx = 0;

   /* the UBO updater also produces the dynamic offsets consumed at bind time */
   if (zds[ZINK_DESCRIPTOR_TYPE_UBO])
      update_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_UBO],
                             is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_UBO],
                             need_resource_refs[ZINK_DESCRIPTOR_TYPE_UBO], dynamic_offsets, &dynamic_offset_idx);
   if (zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW])
      update_sampler_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
                                 is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
                                 need_resource_refs[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW]);
   if (zds[ZINK_DESCRIPTOR_TYPE_SSBO])
      update_ssbo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SSBO],
                              is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SSBO],
                              need_resource_refs[ZINK_DESCRIPTOR_TYPE_SSBO]);
   if (zds[ZINK_DESCRIPTOR_TYPE_IMAGE])
      update_image_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_IMAGE],
                               is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_IMAGE],
                               need_resource_refs[ZINK_DESCRIPTOR_TYPE_IMAGE]);

   for (unsigned h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
      if (zds[h]) {
         /* dynamic offsets only apply to the UBO set */
         vkCmdBindDescriptorSets(batch->state->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
                                 pg->layout, zds[h]->pool->type, 1, &zds[h]->desc_set,
                                 zds[h]->pool->type == ZINK_DESCRIPTOR_TYPE_UBO ? dynamic_offset_idx : 0, dynamic_offsets);
      }
   }
}
|
||||
|
|
|
@ -170,4 +170,7 @@ zink_descriptor_pool_reference(struct zink_screen *screen,
|
|||
zink_descriptor_pool_free(screen, old_dst);
|
||||
if (dst) *dst = src;
|
||||
}
|
||||
|
||||
void
|
||||
zink_descriptors_update(struct zink_context *ctx, struct zink_screen *screen, bool is_compute);
|
||||
#endif
|
||||
|
|
|
@ -17,58 +17,6 @@
|
|||
#include "util/u_prim_restart.h"
|
||||
|
||||
|
||||
static void
|
||||
desc_set_res_add(struct zink_descriptor_set *zds, struct zink_resource *res, unsigned int i, bool cache_hit)
|
||||
{
|
||||
/* if we got a cache hit, we have to verify that the cached set is still valid;
|
||||
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
|
||||
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
|
||||
* whenever a resource is destroyed
|
||||
*/
|
||||
assert(!cache_hit || zds->res_objs[i] == (res ? res->obj : NULL));
|
||||
if (!cache_hit)
|
||||
zink_resource_desc_set_add(res, zds, i);
|
||||
}
|
||||
|
||||
static void
|
||||
desc_set_sampler_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_sampler_view *sv,
|
||||
struct zink_sampler_state *state, unsigned int i, bool is_buffer, bool cache_hit)
|
||||
{
|
||||
/* if we got a cache hit, we have to verify that the cached set is still valid;
|
||||
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
|
||||
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
|
||||
* whenever a resource is destroyed
|
||||
*/
|
||||
#ifndef NDEBUG
|
||||
uint32_t cur_hash = zink_get_sampler_view_hash(ctx, zds->sampler_views[i], is_buffer);
|
||||
uint32_t new_hash = zink_get_sampler_view_hash(ctx, sv, is_buffer);
|
||||
#endif
|
||||
assert(!cache_hit || cur_hash == new_hash);
|
||||
assert(!cache_hit || zds->sampler_states[i] == state);
|
||||
if (!cache_hit) {
|
||||
zink_sampler_view_desc_set_add(sv, zds, i);
|
||||
zink_sampler_state_desc_set_add(state, zds, i);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
desc_set_image_add(struct zink_context *ctx, struct zink_descriptor_set *zds, struct zink_image_view *image_view,
|
||||
unsigned int i, bool is_buffer, bool cache_hit)
|
||||
{
|
||||
/* if we got a cache hit, we have to verify that the cached set is still valid;
|
||||
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
|
||||
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
|
||||
* whenever a resource is destroyed
|
||||
*/
|
||||
#ifndef NDEBUG
|
||||
uint32_t cur_hash = zink_get_image_view_hash(ctx, zds->image_views[i], is_buffer);
|
||||
uint32_t new_hash = zink_get_image_view_hash(ctx, image_view, is_buffer);
|
||||
#endif
|
||||
assert(!cache_hit || cur_hash == new_hash);
|
||||
if (!cache_hit)
|
||||
zink_image_view_desc_set_add(image_view, zds, i);
|
||||
}
|
||||
|
||||
static void
|
||||
zink_emit_xfb_counter_barrier(struct zink_context *ctx)
|
||||
{
|
||||
|
@ -287,510 +235,6 @@ get_gfx_program(struct zink_context *ctx)
|
|||
return ctx->curr_program;
|
||||
}
|
||||
|
||||
#define MAX_DESCRIPTORS (PIPE_SHADER_TYPES * (PIPE_MAX_CONSTANT_BUFFERS + PIPE_MAX_SAMPLERS + PIPE_MAX_SHADER_BUFFERS + PIPE_MAX_SHADER_IMAGES))
|
||||
|
||||
static bool
|
||||
barrier_equals(const void *a, const void *b)
|
||||
{
|
||||
const struct zink_descriptor_barrier *t1 = a, *t2 = b;
|
||||
if (t1->res != t2->res)
|
||||
return false;
|
||||
if ((t1->access & t2->access) != t2->access)
|
||||
return false;
|
||||
if (t1->layout != t2->layout)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
barrier_hash(const void *key)
|
||||
{
|
||||
return _mesa_hash_data(key, offsetof(struct zink_descriptor_barrier, stage));
|
||||
}
|
||||
|
||||
static inline void
|
||||
add_barrier(struct zink_resource *res, VkImageLayout layout, VkAccessFlags flags, enum pipe_shader_type stage, struct util_dynarray *barriers, struct set *ht)
|
||||
{
|
||||
VkPipelineStageFlags pipeline = zink_pipeline_flags_from_stage(zink_shader_stage(stage));
|
||||
struct zink_descriptor_barrier key = {res, layout, flags, 0}, *t;
|
||||
|
||||
uint32_t hash = barrier_hash(&key);
|
||||
struct set_entry *entry = _mesa_set_search_pre_hashed(ht, hash, &key);
|
||||
if (entry)
|
||||
t = (struct zink_descriptor_barrier*)entry->key;
|
||||
else {
|
||||
util_dynarray_append(barriers, struct zink_descriptor_barrier, key);
|
||||
t = util_dynarray_element(barriers, struct zink_descriptor_barrier,
|
||||
util_dynarray_num_elements(barriers, struct zink_descriptor_barrier) - 1);
|
||||
t->stage = 0;
|
||||
t->layout = layout;
|
||||
t->res = res;
|
||||
t->access = flags;
|
||||
_mesa_set_add_pre_hashed(ht, hash, t);
|
||||
}
|
||||
t->stage |= pipeline;
|
||||
}
|
||||
|
||||
/* qsort comparator ordering dynamic-UBO offset records by their binding
 * number, as required by the vkCmdBindDescriptorSets dynamic-offset rules.
 *
 * Uses explicit comparisons instead of the original subtraction:
 * `*binding_a - *binding_b` on uint32_t is converted to int, so bindings
 * whose difference exceeds INT_MAX produce a wrong sign and misorder the
 * array.  The three-way comparison cannot overflow.
 */
static int
cmp_dynamic_offset_binding(const void *a, const void *b)
{
   const uint32_t *binding_a = a, *binding_b = b;
   if (*binding_a < *binding_b)
      return -1;
   return *binding_a > *binding_b;
}
|
||||
|
||||
/* Flush a descriptor set's pending writes to the driver and (re)apply the
 * barriers recorded on the set.
 *
 * cache_hit          - set contents are already valid; vkUpdateDescriptorSets
 *                      is skipped, but the stored barriers are still replayed
 * need_resource_refs - the current batch must take read/write references on
 *                      every resource the set's barriers touch
 */
static void
write_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds, unsigned num_wds, VkWriteDescriptorSet *wds,
                  bool cache_hit, bool need_resource_refs)
{
   struct zink_batch *batch = &ctx->batch;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(zds->desc_set);

   if (!cache_hit && num_wds)
      vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);

   /* barriers persist on the set across cache hits, so this loop is not
    * gated on cache_hit; a pool with zero descriptors has nothing stored */
   for (int i = 0; zds->pool->key.num_descriptors && i < util_dynarray_num_elements(&zds->barriers, struct zink_descriptor_barrier); ++i) {
      struct zink_descriptor_barrier *barrier = util_dynarray_element(&zds->barriers, struct zink_descriptor_barrier, i);
      if (need_resource_refs)
         zink_batch_reference_resource_rw(batch, barrier->res, zink_resource_access_is_write(barrier->access));
      zink_resource_barrier(ctx, NULL, barrier->res,
                            barrier->layout, barrier->access, barrier->stage);
   }
}
|
||||
|
||||
static unsigned
|
||||
init_write_descriptor(struct zink_shader *shader, struct zink_descriptor_set *zds, int idx, VkWriteDescriptorSet *wd, unsigned num_wds)
|
||||
{
|
||||
wd->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
|
||||
wd->pNext = NULL;
|
||||
wd->dstBinding = shader->bindings[zds->pool->type][idx].binding;
|
||||
wd->dstArrayElement = 0;
|
||||
wd->descriptorCount = shader->bindings[zds->pool->type][idx].size;
|
||||
wd->descriptorType = shader->bindings[zds->pool->type][idx].type;
|
||||
wd->dstSet = zds->desc_set;
|
||||
return num_wds + 1;
|
||||
}
|
||||
|
||||
/* fill the UBO descriptor set for the current (gfx or compute) program:
 * walks every stage's UBO bindings, builds VkDescriptorBufferInfo entries,
 * queues barriers for the bound resources, and collects the dynamic offsets
 * that the caller must pass to vkCmdBindDescriptorSets.
 *
 * on a cache hit the vkUpdateDescriptorSets call is skipped (the set contents
 * are already correct), but the loop still runs to (re)compute dynamic offsets
 * and validate the cached bindings via desc_set_res_add().
 */
static void
update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                       bool is_compute, bool cache_hit, bool need_resource_refs,
                       uint32_t *dynamic_offsets, unsigned *dynamic_offset_idx)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned num_descriptors = pg->pool[zds->pool->type]->key.num_descriptors;
   unsigned num_bindings = zds->pool->num_resources;
   /* VLAs sized by the pool layout; num_wds/num_buffer_info/num_resources
    * track the next free slot of each
    */
   VkWriteDescriptorSet wds[num_descriptors];
   VkDescriptorBufferInfo buffer_infos[num_bindings];
   unsigned num_wds = 0;
   unsigned num_buffer_info = 0;
   unsigned num_resources = 0;
   struct zink_shader **stages;
   struct {
      uint32_t binding;
      uint32_t offset;
   } dynamic_buffers[PIPE_MAX_CONSTANT_BUFFERS];
   unsigned dynamic_offset_count = 0;
   /* barrier dedup cache; only needed when the set actually gets (re)written */
   struct set *ht = NULL;
   if (!cache_hit) {
      ht = _mesa_set_create(NULL, barrier_hash, barrier_equals);
      _mesa_set_resize(ht, num_bindings);
   }

   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
   if (is_compute)
      stages = &ctx->curr_compute->shader;
   else
      stages = &ctx->gfx_stages[0];

   for (int i = 0; i < num_stages; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;
      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

      for (int j = 0; j < shader->num_bindings[zds->pool->type]; j++) {
         int index = shader->bindings[zds->pool->type][j].index;
         assert(shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
         assert(ctx->ubos[stage][index].buffer_size <= screen->info.props.limits.maxUniformBufferRange);
         struct zink_resource *res = zink_resource(ctx->ubos[stage][index].buffer);
         assert(!res || ctx->ubos[stage][index].buffer_size > 0);
         assert(!res || ctx->ubos[stage][index].buffer);
         assert(num_resources < num_bindings);
         desc_set_res_add(zds, res, num_resources++, cache_hit);
         assert(num_buffer_info < num_bindings);
         /* no bound buffer: use a null descriptor if the driver supports it,
          * otherwise fall back to the dummy buffer
          */
         buffer_infos[num_buffer_info].buffer = res ? res->obj->buffer :
                                                (screen->info.rb2_feats.nullDescriptor ?
                                                 VK_NULL_HANDLE :
                                                 zink_resource(ctx->dummy_vertex_buffer)->obj->buffer);
         if (shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
            /* dynamic UBOs get offset 0 in the descriptor; the real offset is
             * supplied at bind time through pDynamicOffsets
             */
            buffer_infos[num_buffer_info].offset = 0;
            /* we're storing this to qsort later */
            dynamic_buffers[dynamic_offset_count].binding = shader->bindings[zds->pool->type][j].binding;
            dynamic_buffers[dynamic_offset_count++].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
         } else
            buffer_infos[num_buffer_info].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
         buffer_infos[num_buffer_info].range = res ? ctx->ubos[stage][index].buffer_size : VK_WHOLE_SIZE;
         if (res && !cache_hit)
            add_barrier(res, 0, VK_ACCESS_UNIFORM_READ_BIT, stage, &zds->barriers, ht);
         wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
         ++num_buffer_info;

         num_wds = init_write_descriptor(shader, zds, j, &wds[num_wds], num_wds);
      }
   }
   /* ht is NULL on a cache hit; _mesa_set_destroy tolerates that */
   _mesa_set_destroy(ht, NULL);
   /* Values are taken from pDynamicOffsets in an order such that all entries for set N come before set N+1;
    * within a set, entries are ordered by the binding numbers in the descriptor set layouts
    * - vkCmdBindDescriptorSets spec
    *
    * because of this, we have to sort all the dynamic offsets by their associated binding to ensure they
    * match what the driver expects
    */
   if (dynamic_offset_count > 1)
      qsort(dynamic_buffers, dynamic_offset_count, sizeof(uint32_t) * 2, cmp_dynamic_offset_binding);
   for (int i = 0; i < dynamic_offset_count; i++)
      dynamic_offsets[i] = dynamic_buffers[i].offset;
   *dynamic_offset_idx = dynamic_offset_count;

   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
|
||||
|
||||
/* fill the SSBO descriptor set for the current (gfx or compute) program:
 * builds one VkDescriptorBufferInfo per SSBO binding in every stage, queues
 * read (and, for writable slots, write) barriers, and flushes the writes via
 * write_descriptors().
 *
 * unlike the UBO path, the stage loop is skipped entirely on a cache hit with
 * no pending batch refs, since there are no dynamic offsets to recompute.
 */
static void
update_ssbo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                        bool is_compute, bool cache_hit, bool need_resource_refs)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
   /* only referenced from asserts in release builds */
   ASSERTED struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned num_descriptors = pg->pool[zds->pool->type]->key.num_descriptors;
   unsigned num_bindings = zds->pool->num_resources;
   VkWriteDescriptorSet wds[num_descriptors];
   VkDescriptorBufferInfo buffer_infos[num_bindings];
   unsigned num_wds = 0;
   unsigned num_buffer_info = 0;
   unsigned num_resources = 0;
   struct zink_shader **stages;
   /* barrier dedup cache; only needed when the set actually gets (re)written */
   struct set *ht = NULL;
   if (!cache_hit) {
      ht = _mesa_set_create(NULL, barrier_hash, barrier_equals);
      _mesa_set_resize(ht, num_bindings);
   }

   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
   if (is_compute)
      stages = &ctx->curr_compute->shader;
   else
      stages = &ctx->gfx_stages[0];

   /* nothing to do at all on a cache hit unless batch refs are needed */
   for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;
      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

      for (int j = 0; j < shader->num_bindings[zds->pool->type]; j++) {
         int index = shader->bindings[zds->pool->type][j].index;
         assert(shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
         assert(num_resources < num_bindings);
         struct zink_resource *res = zink_resource(ctx->ssbos[stage][index].buffer);
         desc_set_res_add(zds, res, num_resources++, cache_hit);
         if (res) {
            assert(ctx->ssbos[stage][index].buffer_size > 0);
            assert(ctx->ssbos[stage][index].buffer_size <= screen->info.props.limits.maxStorageBufferRange);
            assert(num_buffer_info < num_bindings);
            /* writable_ssbos is a per-stage bitmask of writable slots */
            unsigned flag = VK_ACCESS_SHADER_READ_BIT;
            if (ctx->writable_ssbos[stage] & (1 << index))
               flag |= VK_ACCESS_SHADER_WRITE_BIT;
            if (!cache_hit)
               add_barrier(res, 0, flag, stage, &zds->barriers, ht);
            buffer_infos[num_buffer_info].buffer = res->obj->buffer;
            buffer_infos[num_buffer_info].offset = ctx->ssbos[stage][index].buffer_offset;
            buffer_infos[num_buffer_info].range = ctx->ssbos[stage][index].buffer_size;
         } else {
            /* unbound slot: requires null-descriptor support */
            assert(screen->info.rb2_feats.nullDescriptor);
            buffer_infos[num_buffer_info].buffer = VK_NULL_HANDLE;
            buffer_infos[num_buffer_info].offset = 0;
            buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
         }
         wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
         ++num_buffer_info;

         num_wds = init_write_descriptor(shader, zds, j, &wds[num_wds], num_wds);
      }
   }
   /* ht is NULL on a cache hit; _mesa_set_destroy tolerates that */
   _mesa_set_destroy(ht, NULL);
   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
|
||||
|
||||
static void
|
||||
handle_image_descriptor(struct zink_screen *screen, struct zink_resource *res, enum zink_descriptor_type type, VkDescriptorType vktype, VkWriteDescriptorSet *wd,
|
||||
VkImageLayout layout, unsigned *num_image_info, VkDescriptorImageInfo *image_info,
|
||||
unsigned *num_buffer_info, VkBufferView *buffer_info,
|
||||
struct zink_sampler_state *sampler,
|
||||
VkImageView imageview, VkBufferView bufferview, bool do_set)
|
||||
{
|
||||
if (!res) {
|
||||
/* if we're hitting this assert often, we can probably just throw a junk buffer in since
|
||||
* the results of this codepath are undefined in ARB_texture_buffer_object spec
|
||||
*/
|
||||
assert(screen->info.rb2_feats.nullDescriptor);
|
||||
|
||||
switch (vktype) {
|
||||
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
|
||||
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
|
||||
*buffer_info = VK_NULL_HANDLE;
|
||||
if (do_set)
|
||||
wd->pTexelBufferView = buffer_info;
|
||||
++(*num_buffer_info);
|
||||
break;
|
||||
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
|
||||
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
|
||||
image_info->imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
|
||||
image_info->imageView = VK_NULL_HANDLE;
|
||||
image_info->sampler = sampler ? sampler->sampler : VK_NULL_HANDLE;
|
||||
if (do_set)
|
||||
wd->pImageInfo = image_info;
|
||||
++(*num_image_info);
|
||||
break;
|
||||
default:
|
||||
unreachable("unknown descriptor type");
|
||||
}
|
||||
} else if (res->base.target != PIPE_BUFFER) {
|
||||
assert(layout != VK_IMAGE_LAYOUT_UNDEFINED);
|
||||
image_info->imageLayout = layout;
|
||||
image_info->imageView = imageview;
|
||||
image_info->sampler = sampler ? sampler->sampler : VK_NULL_HANDLE;
|
||||
if (do_set)
|
||||
wd->pImageInfo = image_info;
|
||||
++(*num_image_info);
|
||||
} else {
|
||||
if (do_set)
|
||||
wd->pTexelBufferView = buffer_info;
|
||||
*buffer_info = bufferview;
|
||||
++(*num_buffer_info);
|
||||
}
|
||||
}
|
||||
|
||||
/* fill the sampler-view descriptor set for the current (gfx or compute)
 * program: for each (possibly arrayed) sampler binding in every stage, pick
 * either a buffer view (texel buffers) or an image view + sampler state,
 * queue read barriers, and record batch usage for the views/samplers.
 *
 * the stage loop is skipped entirely on a cache hit with no pending batch refs.
 */
static void
update_sampler_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                           bool is_compute, bool cache_hit, bool need_resource_refs)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned num_descriptors = pg->pool[zds->pool->type]->key.num_descriptors;
   unsigned num_bindings = zds->pool->num_resources;
   /* VLAs sized by the pool layout; the num_* counters track next free slots */
   VkWriteDescriptorSet wds[num_descriptors];
   VkDescriptorImageInfo image_infos[num_bindings];
   VkBufferView buffer_views[num_bindings];
   unsigned num_wds = 0;
   unsigned num_image_info = 0;
   unsigned num_buffer_info = 0;
   unsigned num_resources = 0;
   struct zink_shader **stages;
   /* barrier dedup cache; only needed when the set actually gets (re)written */
   struct set *ht = NULL;
   if (!cache_hit) {
      ht = _mesa_set_create(NULL, barrier_hash, barrier_equals);
      _mesa_set_resize(ht, num_bindings);
   }

   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
   if (is_compute)
      stages = &ctx->curr_compute->shader;
   else
      stages = &ctx->gfx_stages[0];

   for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;
      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

      for (int j = 0; j < shader->num_bindings[zds->pool->type]; j++) {
         int index = shader->bindings[zds->pool->type][j].index;
         assert(shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ||
                shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);

         /* one info entry per element of an arrayed binding */
         for (unsigned k = 0; k < shader->bindings[zds->pool->type][j].size; k++) {
            VkImageView imageview = VK_NULL_HANDLE;
            VkBufferView bufferview = VK_NULL_HANDLE;
            struct zink_resource *res = NULL;
            VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
            struct zink_sampler_state *sampler = NULL;

            struct pipe_sampler_view *psampler_view = ctx->sampler_views[stage][index + k];
            struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);
            res = psampler_view ? zink_resource(psampler_view->texture) : NULL;
            /* buffer-backed views need no sampler; image views sample in
             * SHADER_READ_ONLY_OPTIMAL with the bound sampler state
             */
            if (res && res->base.target == PIPE_BUFFER) {
               bufferview = sampler_view->buffer_view->buffer_view;
            } else if (res) {
               imageview = sampler_view->image_view->image_view;
               layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
               sampler = ctx->sampler_states[stage][index + k];
            }
            assert(num_resources < num_bindings);
            if (res) {
               if (!cache_hit)
                  add_barrier(res, layout, VK_ACCESS_SHADER_READ_BIT, stage, &zds->barriers, ht);
            }
            assert(num_image_info < num_bindings);
            /* '!k' → only element 0 sets the write's info pointer; later
             * elements just append to the contiguous info arrays
             */
            handle_image_descriptor(screen, res, zds->pool->type, shader->bindings[zds->pool->type][j].type,
                                    &wds[num_wds], layout, &num_image_info, &image_infos[num_image_info],
                                    &num_buffer_info, &buffer_views[num_buffer_info],
                                    sampler, imageview, bufferview, !k);
            desc_set_sampler_add(ctx, zds, sampler_view, sampler, num_resources++,
                                 zink_shader_descriptor_is_buffer(shader, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, j),
                                 cache_hit);
            struct zink_batch *batch = &ctx->batch;
            if (sampler_view)
               zink_batch_reference_sampler_view(batch, sampler_view);
            if (sampler)
               /* this only tracks the most recent usage for now */
               zink_batch_usage_set(&sampler->batch_uses, batch->state->fence.batch_id);
         }
         assert(num_wds < num_descriptors);

         num_wds = init_write_descriptor(shader, zds, j, &wds[num_wds], num_wds);
      }
   }
   /* ht is NULL on a cache hit; _mesa_set_destroy tolerates that */
   _mesa_set_destroy(ht, NULL);
   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
|
||||
|
||||
/* fill the shader-image descriptor set for the current (gfx or compute)
 * program: for each (possibly arrayed) image binding in every stage, pick
 * either a buffer view (texel buffers) or a GENERAL-layout image view, queue
 * read/write barriers based on the image's declared access, and record batch
 * usage for the image views.
 *
 * the stage loop is skipped entirely on a cache hit with no pending batch refs.
 */
static void
update_image_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                         bool is_compute, bool cache_hit, bool need_resource_refs)
{
   struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned num_descriptors = pg->pool[zds->pool->type]->key.num_descriptors;
   unsigned num_bindings = zds->pool->num_resources;
   /* VLAs sized by the pool layout; the num_* counters track next free slots */
   VkWriteDescriptorSet wds[num_descriptors];
   VkDescriptorImageInfo image_infos[num_bindings];
   VkBufferView buffer_views[num_bindings];
   unsigned num_wds = 0;
   unsigned num_image_info = 0;
   unsigned num_buffer_info = 0;
   unsigned num_resources = 0;
   struct zink_shader **stages;
   /* barrier dedup cache; only needed when the set actually gets (re)written */
   struct set *ht = NULL;
   if (!cache_hit) {
      ht = _mesa_set_create(NULL, barrier_hash, barrier_equals);
      _mesa_set_resize(ht, num_bindings);
   }

   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
   if (is_compute)
      stages = &ctx->curr_compute->shader;
   else
      stages = &ctx->gfx_stages[0];

   for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
      struct zink_shader *shader = stages[i];
      if (!shader)
         continue;
      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);

      for (int j = 0; j < shader->num_bindings[zds->pool->type]; j++) {
         int index = shader->bindings[zds->pool->type][j].index;
         assert(shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
                shader->bindings[zds->pool->type][j].type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);

         /* one info entry per element of an arrayed binding */
         for (unsigned k = 0; k < shader->bindings[zds->pool->type][j].size; k++) {
            VkImageView imageview = VK_NULL_HANDLE;
            VkBufferView bufferview = VK_NULL_HANDLE;
            struct zink_resource *res = NULL;
            VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
            struct zink_image_view *image_view = &ctx->image_views[stage][index + k];
            assert(image_view);
            res = zink_resource(image_view->base.resource);

            /* storage images are always accessed in GENERAL layout */
            if (res && image_view->base.resource->target == PIPE_BUFFER) {
               bufferview = image_view->buffer_view->buffer_view;
            } else if (res) {
               imageview = image_view->surface->image_view;
               layout = VK_IMAGE_LAYOUT_GENERAL;
            }
            assert(num_resources < num_bindings);
            desc_set_image_add(ctx, zds, image_view, num_resources++,
                               zink_shader_descriptor_is_buffer(shader, ZINK_DESCRIPTOR_TYPE_IMAGE, j),
                               cache_hit);
            if (res) {
               /* barrier access mask mirrors the image's declared access */
               VkAccessFlags flags = 0;
               if (image_view->base.access & PIPE_IMAGE_ACCESS_READ)
                  flags |= VK_ACCESS_SHADER_READ_BIT;
               if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
                  flags |= VK_ACCESS_SHADER_WRITE_BIT;
               if (!cache_hit)
                  add_barrier(res, layout, flags, stage, &zds->barriers, ht);
            }

            assert(num_image_info < num_bindings);
            /* '!k' → only element 0 sets the write's info pointer; later
             * elements just append to the contiguous info arrays
             */
            handle_image_descriptor(screen, res, zds->pool->type, shader->bindings[zds->pool->type][j].type,
                                    &wds[num_wds], layout, &num_image_info, &image_infos[num_image_info],
                                    &num_buffer_info, &buffer_views[num_buffer_info],
                                    NULL, imageview, bufferview, !k);

            struct zink_batch *batch = &ctx->batch;
            if (res)
               zink_batch_reference_image_view(batch, image_view);
         }
         assert(num_wds < num_descriptors);

         num_wds = init_write_descriptor(shader, zds, j, &wds[num_wds], num_wds);
      }
   }
   /* ht is NULL on a cache hit; _mesa_set_destroy tolerates that */
   _mesa_set_destroy(ht, NULL);
   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
|
||||
|
||||
static void
|
||||
update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is_compute)
|
||||
{
|
||||
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
|
||||
|
||||
zink_context_update_descriptor_states(ctx, is_compute);
|
||||
bool cache_hit[ZINK_DESCRIPTOR_TYPES];
|
||||
bool need_resource_refs[ZINK_DESCRIPTOR_TYPES];
|
||||
struct zink_descriptor_set *zds[ZINK_DESCRIPTOR_TYPES];
|
||||
for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
|
||||
if (pg->pool[h])
|
||||
zds[h] = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit[h], &need_resource_refs[h]);
|
||||
else
|
||||
zds[h] = NULL;
|
||||
}
|
||||
struct zink_batch *batch = &ctx->batch;
|
||||
zink_batch_reference_program(batch, pg);
|
||||
|
||||
uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
|
||||
unsigned dynamic_offset_idx = 0;
|
||||
|
||||
if (zds[ZINK_DESCRIPTOR_TYPE_UBO])
|
||||
update_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_UBO],
|
||||
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_UBO],
|
||||
need_resource_refs[ZINK_DESCRIPTOR_TYPE_UBO], dynamic_offsets, &dynamic_offset_idx);
|
||||
if (zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW])
|
||||
update_sampler_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
|
||||
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
|
||||
need_resource_refs[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW]);
|
||||
if (zds[ZINK_DESCRIPTOR_TYPE_SSBO])
|
||||
update_ssbo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SSBO],
|
||||
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SSBO],
|
||||
need_resource_refs[ZINK_DESCRIPTOR_TYPE_SSBO]);
|
||||
if (zds[ZINK_DESCRIPTOR_TYPE_IMAGE])
|
||||
update_image_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_IMAGE],
|
||||
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_IMAGE],
|
||||
need_resource_refs[ZINK_DESCRIPTOR_TYPE_IMAGE]);
|
||||
|
||||
for (unsigned h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
|
||||
if (zds[h]) {
|
||||
vkCmdBindDescriptorSets(batch->state->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
|
||||
pg->layout, zds[h]->pool->type, 1, &zds[h]->desc_set,
|
||||
zds[h]->pool->type == ZINK_DESCRIPTOR_TYPE_UBO ? dynamic_offset_idx : 0, dynamic_offsets);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
line_width_needed(enum pipe_prim_type reduced_prim,
|
||||
VkPolygonMode polygon_mode)
|
||||
|
@ -965,7 +409,7 @@ zink_draw_vbo(struct pipe_context *pctx,
|
|||
}
|
||||
|
||||
if (zink_program_has_descriptors(&gfx_program->base))
|
||||
update_descriptors(ctx, screen, false);
|
||||
zink_descriptors_update(ctx, screen, false);
|
||||
|
||||
struct zink_batch *batch = zink_batch_rp(ctx);
|
||||
VkViewport viewports[PIPE_MAX_VIEWPORTS] = {};
|
||||
|
@ -1183,7 +627,7 @@ zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
|
|||
&ctx->compute_pipeline_state);
|
||||
|
||||
if (zink_program_has_descriptors(&comp_program->base))
|
||||
update_descriptors(ctx, screen, true);
|
||||
zink_descriptors_update(ctx, screen, true);
|
||||
|
||||
|
||||
vkCmdBindPipeline(batch->state->cmdbuf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
|
||||
|
|
Loading…
Reference in New Issue