/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"
#include "vk_format_info.h"

#include "common/gen_l3_config.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
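/* Small helpers for loading an MMIO register either from a dword in a
 * buffer object (MI_LOAD_REGISTER_MEM) or from an immediate value
 * (MI_LOAD_REGISTER_IMM), e.g. emit_lri(&cmd_buffer->batch,
 * GENX(L3CNTLREG_num), l3cr) as used by the L3 configuration code below.
 */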
static void
emit_lrm(struct anv_batch *batch,
         uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
   }
}

static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset   = reg;
      lri.DataDWord        = imm;
   }
}
void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   /* XXX: Do we need this on more than just BDW? */
#if (GEN_GEN >= 8)
   /* Emit a render target cache flush.
    *
    * This isn't documented anywhere in the PRM. However, it seems to be
    * necessary prior to changing the surface state base address. Without
    * this, we get GPU hangs when using multi-level command buffers which
    * clear depth, reset state base address, and then go render stuff.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable = true;
   }
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
      sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
      sba.GeneralStateBaseAddressModifyEnable = true;

      sba.SurfaceStateBaseAddress =
         anv_cmd_buffer_surface_base_address(cmd_buffer);
      sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
      sba.SurfaceStateBaseAddressModifyEnable = true;

      sba.DynamicStateBaseAddress =
         (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
      sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
      sba.DynamicStateBaseAddressModifyEnable = true;

      sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
      sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
      sba.IndirectObjectBaseAddressModifyEnable = true;

      sba.InstructionBaseAddress =
         (struct anv_address) { &device->instruction_block_pool.bo, 0 };
      sba.InstructionMemoryObjectControlState = GENX(MOCS);
      sba.InstructionBaseAddressModifyEnable = true;

#  if (GEN_GEN >= 8)
      /* Broadwell requires that we specify a buffer size for a bunch of
       * these fields. However, since we will be growing the BOs live, we
       * just set them all to the maximum.
       */
      sba.GeneralStateBufferSize = 0xfffff;
      sba.GeneralStateBufferSizeModifyEnable = true;
      sba.DynamicStateBufferSize = 0xfffff;
      sba.DynamicStateBufferSizeModifyEnable = true;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.IndirectObjectBufferSizeModifyEnable = true;
      sba.InstructionBufferSize = 0xfffff;
      sba.InstructionBuffersizeModifyEnable = true;
#  endif
   }

   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software. It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables. Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through experimentation,
    * flushing the texture cache appears to be sufficient. The theory here
    * is that all of the sampling/rendering units cache the binding table in
    * the texture cache. However, we have yet to be able to actually confirm
    * this.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
   }
}
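/* Record a relocation for the address field of a RENDER_SURFACE_STATE so
 * that the final BO address gets patched in when the batch is executed.
 */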
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state,
                        struct anv_bo *bo, uint32_t offset)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + isl_dev->ss.addr_offset, bo, offset);
}
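/* Like add_surface_state_reloc(), but for an image view: also records a
 * relocation for the auxiliary surface address when aux is in use.
 */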
static void
add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image_view *iview,
                      enum isl_aux_usage aux_usage,
                      struct anv_state state)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + isl_dev->ss.addr_offset,
                      iview->bo, iview->offset);

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      uint32_t aux_offset = iview->offset + iview->image->aux_surface.offset;

      /* On gen7 and prior, the bottom 12 bits of the MCS base address are
       * used to store other information. This should be ok, however, because
       * surface buffer addresses are always 4K page aligned.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr_dw = state.map + isl_dev->ss.aux_addr_offset;
      aux_offset += *aux_addr_dw & 0xfff;

      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                         state.offset + isl_dev->ss.aux_addr_offset,
                         iview->bo, aux_offset);
   }
}
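/* Fast clears on Broadwell and earlier can only handle clear colors whose
 * channels are all exactly zero or one (see the check in
 * color_attachment_compute_aux_usage below), so we need to know whether a
 * given clear color qualifies.
 */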
static bool
color_is_zero_one(VkClearColorValue value, enum isl_format format)
{
   if (isl_format_has_int_channel(format)) {
      for (unsigned i = 0; i < 4; i++) {
         if (value.int32[i] != 0 && value.int32[i] != 1)
            return false;
      }
   } else {
      for (unsigned i = 0; i < 4; i++) {
         if (value.float32[i] != 0.0f && value.float32[i] != 1.0f)
            return false;
      }
   }

   return true;
}
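/* Decide, for one color attachment, which auxiliary surface usage to use
 * for rendering (aux_usage) and for sampling as an input attachment
 * (input_aux_usage), and whether the pending clear can be done as a fast
 * clear. If it can, the clear color is copied into *fast_clear_color.
 */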
static void
color_attachment_compute_aux_usage(struct anv_device *device,
                                   struct anv_attachment_state *att_state,
                                   struct anv_image_view *iview,
                                   VkRect2D render_area,
                                   union isl_color_value *fast_clear_color)
{
   if (iview->image->aux_surface.isl.size == 0) {
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
      att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
      att_state->fast_clear = false;
      return;
   }

   assert(iview->image->aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT);

   att_state->clear_color_is_zero_one =
      color_is_zero_one(att_state->clear_value.color, iview->isl.format);

   if (att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
      /* Start off assuming fast clears are possible */
      att_state->fast_clear = true;

      /* Potentially, we could do partial fast-clears but doing so has crazy
       * alignment restrictions. It's easier to just restrict to full size
       * fast clears for now.
       */
      if (render_area.offset.x != 0 ||
          render_area.offset.y != 0 ||
          render_area.extent.width != iview->extent.width ||
          render_area.extent.height != iview->extent.height)
         att_state->fast_clear = false;

      if (GEN_GEN <= 7) {
         /* On gen7, we can't do multi-LOD or multi-layer fast-clears. We
          * technically can, but it comes with crazy restrictions that we
          * don't want to deal with now.
          */
         if (iview->isl.base_level > 0 ||
             iview->isl.base_array_layer > 0 ||
             iview->isl.array_len > 1)
            att_state->fast_clear = false;
      }

      /* On Broadwell and earlier, we can only handle 0/1 clear colors */
      if (GEN_GEN <= 8 && !att_state->clear_color_is_zero_one)
         att_state->fast_clear = false;

      if (att_state->fast_clear) {
         memcpy(fast_clear_color->u32, att_state->clear_value.color.uint32,
                sizeof(fast_clear_color->u32));
      }
   } else {
      att_state->fast_clear = false;
   }

   if (isl_format_supports_lossless_compression(&device->info,
                                                iview->isl.format)) {
      att_state->aux_usage = ISL_AUX_USAGE_CCS_E;
      att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
   } else if (att_state->fast_clear) {
      att_state->aux_usage = ISL_AUX_USAGE_CCS_D;
      if (GEN_GEN >= 9) {
         /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
          *
          *    "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
          *    setting is only allowed if Surface Format supported for Fast
          *    Clear. In addition, if the surface is bound to the sampling
          *    engine, Surface Format must be supported for Render Target
          *    Compression for surfaces bound to the sampling engine."
          *
          * In other words, we can't sample from a fast-cleared image if it
          * doesn't also support color compression.
          */
         att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
      } else if (GEN_GEN == 8) {
         /* Broadwell can sample from fast-cleared images */
         att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D;
      } else {
         /* Ivy Bridge and Haswell cannot */
         att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
      }
   } else {
      att_state->aux_usage = ISL_AUX_USAGE_NONE;
      att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
   }
}
static bool
need_input_attachment_state(const struct anv_render_pass_attachment *att)
{
   if (!(att->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
      return false;

   /* We only allocate input attachment states for color and depth surfaces.
    * Stencil doesn't allow compression so we can just use the texture surface
    * state from the view.
    */
   return vk_format_is_color(att->format) || vk_format_has_depth(att->format);
}
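/* HiZ is only considered enabled while the image is in the
 * depth-stencil-attachment-optimal layout; every other layout maps to the
 * plain depth buffer.
 */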
static enum isl_aux_usage
layout_to_hiz_usage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return ISL_AUX_USAGE_HIZ;
   default:
      return ISL_AUX_USAGE_NONE;
   }
}

/* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
 * the initial layout is undefined, the HiZ buffer and depth buffer will
 * represent the same data at the end of this operation.
 */
static void
transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        VkImageLayout initial_layout,
                        VkImageLayout final_layout)
{
   assert(image);

   if (image->aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   const bool hiz_enabled = layout_to_hiz_usage(initial_layout) ==
                            ISL_AUX_USAGE_HIZ;
   const bool enable_hiz = layout_to_hiz_usage(final_layout) ==
                           ISL_AUX_USAGE_HIZ;

   /* We've already initialized the aux HiZ buffer at BindImageMemory time,
    * so there's no need to perform a HIZ resolve or clear to avoid GPU hangs.
    * This initial layout indicates that the user doesn't care about the data
    * that's currently in the buffer, so no resolves are necessary.
    */
   if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED)
      return;

   if (hiz_enabled == enable_hiz) {
      /* The same buffer will be used, no resolves are necessary */
   } else if (hiz_enabled && !enable_hiz) {
      anv_gen8_hiz_op_resolve(cmd_buffer, image, BLORP_HIZ_OP_DEPTH_RESOLVE);
   } else {
      assert(!hiz_enabled && enable_hiz);
      anv_gen8_hiz_op_resolve(cmd_buffer, image, BLORP_HIZ_OP_HIZ_RESOLVE);
   }
}
/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
static void
genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
                                   struct anv_render_pass *pass,
                                   const VkRenderPassBeginInfo *begin)
{
   const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   struct anv_cmd_state *state = &cmd_buffer->state;

   vk_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return;
   }

   state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
                                 pass->attachment_count *
                                      sizeof(state->attachments[0]),
                                 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
      abort();
   }

   bool need_null_state = false;
   unsigned num_states = 0;
   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      if (vk_format_is_color(pass->attachments[i].format)) {
         num_states++;
      } else {
         /* We need a null state for any depth-stencil-only subpasses.
          * Importantly, this includes depth/stencil clears so we create one
          * whenever we have depth or stencil.
          */
         need_null_state = true;
      }

      if (need_input_attachment_state(&pass->attachments[i]))
         num_states++;
   }
   num_states += need_null_state;

   const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align);
   state->render_pass_states =
      anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                             num_states * ss_stride, isl_dev->ss.align);

   struct anv_state next_state = state->render_pass_states;
   next_state.alloc_size = isl_dev->ss.size;

   if (need_null_state) {
      state->null_surface_state = next_state;
      next_state.offset += ss_stride;
      next_state.map += ss_stride;
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      if (vk_format_is_color(pass->attachments[i].format)) {
         state->attachments[i].color_rt_state = next_state;
         next_state.offset += ss_stride;
         next_state.map += ss_stride;
      }

      if (need_input_attachment_state(&pass->attachments[i])) {
         state->attachments[i].input_att_state = next_state;
         next_state.offset += ss_stride;
         next_state.map += ss_stride;
      }
   }
   assert(next_state.offset == state->render_pass_states.offset +
                               state->render_pass_states.alloc_size);

   if (begin) {
      ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer);
      assert(pass->attachment_count == framebuffer->attachment_count);

      if (need_null_state) {
         struct GENX(RENDER_SURFACE_STATE) null_ss = {
            .SurfaceType = SURFTYPE_NULL,
            .SurfaceArray = framebuffer->layers > 0,
            .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
#if GEN_GEN >= 8
            .TileMode = YMAJOR,
#else
            .TiledSurface = true,
#endif
            .Width = framebuffer->width - 1,
            .Height = framebuffer->height - 1,
            .Depth = framebuffer->layers - 1,
            .RenderTargetViewExtent = framebuffer->layers - 1,
         };
         GENX(RENDER_SURFACE_STATE_pack)(NULL, state->null_surface_state.map,
                                         &null_ss);
      }

      for (uint32_t i = 0; i < pass->attachment_count; ++i) {
         struct anv_render_pass_attachment *att = &pass->attachments[i];
         VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
         VkImageAspectFlags clear_aspects = 0;

         if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
            /* color attachment */
            if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
            }
         } else {
            /* depthstencil attachment */
            if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
                att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            }
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
            }
         }

         state->attachments[i].current_layout = att->initial_layout;
         state->attachments[i].pending_clear_aspects = clear_aspects;
         if (clear_aspects)
            state->attachments[i].clear_value = begin->pClearValues[i];

         struct anv_image_view *iview = framebuffer->attachments[i];
         assert(iview->vk_format == att->format);

         union isl_color_value clear_color = { .u32 = { 0, } };
         if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
            color_attachment_compute_aux_usage(cmd_buffer->device,
                                               &state->attachments[i],
                                               iview, begin->renderArea,
                                               &clear_color);

            struct isl_view view = iview->isl;
            view.usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
            isl_surf_fill_state(isl_dev,
                                state->attachments[i].color_rt_state.map,
                                .surf = &iview->image->color_surface.isl,
                                .view = &view,
                                .aux_surf = &iview->image->aux_surface.isl,
                                .aux_usage = state->attachments[i].aux_usage,
                                .clear_color = clear_color,
                                .mocs = cmd_buffer->device->default_mocs);

            add_image_view_relocs(cmd_buffer, iview,
                                  state->attachments[i].aux_usage,
                                  state->attachments[i].color_rt_state);
         } else {
            state->attachments[i].aux_usage = iview->image->aux_usage;
            state->attachments[i].input_aux_usage = ISL_AUX_USAGE_NONE;
         }

         if (need_input_attachment_state(&pass->attachments[i])) {
            const struct isl_surf *surf;
            if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
               surf = &iview->image->color_surface.isl;
            } else {
               surf = &iview->image->depth_surface.isl;
            }

            struct isl_view view = iview->isl;
            view.usage |= ISL_SURF_USAGE_TEXTURE_BIT;
            isl_surf_fill_state(isl_dev,
                                state->attachments[i].input_att_state.map,
                                .surf = surf,
                                .view = &view,
                                .aux_surf = &iview->image->aux_surface.isl,
                                .aux_usage = state->attachments[i].input_aux_usage,
                                .clear_color = clear_color,
                                .mocs = cmd_buffer->device->default_mocs);

            add_image_view_relocs(cmd_buffer, iview,
                                  state->attachments[i].input_aux_usage,
                                  state->attachments[i].input_att_state);
         }
      }

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(state->render_pass_states);
   }
}
VkResult
genX(BeginCommandBuffer)(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state. Otherwise, we must *reset* its state. In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_cmd_buffer_reset(cmd_buffer);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
      cmd_buffer->state.framebuffer = NULL;

      genX(cmd_buffer_setup_attachments)(cmd_buffer, cmd_buffer->state.pass,
                                         NULL);

      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
   }

   return VK_SUCCESS;
}
VkResult
genX(EndCommandBuffer)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   return VK_SUCCESS;
}
void
genX(CmdExecuteCommands)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      if (secondary->usage_flags &
          VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
         /* If we're continuing a render pass from the primary, we need to
          * copy the surface states for the current subpass into the storage
          * we allocated for them in BeginCommandBuffer.
          */
         struct anv_bo *ss_bo = &primary->device->surface_state_block_pool.bo;
         struct anv_state src_state = primary->state.render_pass_states;
         struct anv_state dst_state = secondary->state.render_pass_states;
         assert(src_state.alloc_size == dst_state.alloc_size);

         genX(cmd_buffer_gpu_memcpy)(primary, ss_bo, dst_state.offset,
                                     ss_bo, src_state.offset,
                                     src_state.alloc_size);
      }

      anv_cmd_buffer_add_secondary(primary, secondary);
   }

   /* Each of the secondary command buffers will use its own state base
    * address. We need to re-emit state base address for the primary after
    * all of the secondaries are done.
    *
    * TODO: Maybe we want to make this a dirty bit to avoid extra state base
    * address calls?
    */
   genX(cmd_buffer_emit_state_base_address)(primary);
}
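/* Per-platform default values of the SQGHPCI field of L3SQCREG1; the packed
 * register value is OR'ed with the matching default below.
 */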
#define IVB_L3SQCREG1_SQGHPCI_DEFAULT 0x00730000
#define VLV_L3SQCREG1_SQGHPCI_DEFAULT 0x00d30000
#define HSW_L3SQCREG1_SQGHPCI_DEFAULT 0x00610000

/**
 * Program the hardware to use the specified L3 configuration.
 */
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
                           const struct gen_l3_config *cfg)
{
   assert(cfg);
   if (cfg == cmd_buffer->state.current_l3_config)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_L3)) {
      fprintf(stderr, "L3 config transition: ");
      gen_dump_l3_config(cfg, stderr);
   }

   const bool has_slm = cfg->n[GEN_L3P_SLM];

   /* According to the hardware docs, the L3 partitioning can only be changed
    * while the pipeline is completely drained and the caches are flushed,
    * which involves a first PIPE_CONTROL flush which stalls the pipeline...
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

   /* ...followed by a second pipelined PIPE_CONTROL that initiates
    * invalidation of the relevant caches. Note that because RO invalidation
    * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
    * command is processed by the CS) we cannot combine it with the previous
    * stalling flush as the hardware documentation suggests, because that
    * would cause the CS to stall on previous rendering *after* RO
    * invalidation and wouldn't prevent the RO caches from being polluted by
    * concurrent rendering before the stall completes. This intentionally
    * doesn't implement the SKL+ hardware workaround suggesting to enable CS
    * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
    * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
    * already guarantee that there is no concurrent GPGPU kernel execution
    * (see SKL HSD 2132585).
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.StateCacheInvalidationEnable = true;
      pc.PostSyncOperation = NoWrite;
   }

   /* Now send a third stalling flush to make sure that invalidation is
    * complete when the L3 configuration registers are modified.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

#if GEN_GEN >= 8

   assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);

   uint32_t l3cr;
   anv_pack_struct(&l3cr, GENX(L3CNTLREG),
                   .SLMEnable = has_slm,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC],
                   .AllAllocation = cfg->n[GEN_L3P_ALL]);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr);

#else

   const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
   const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] ||
                       cfg->n[GEN_L3P_ALL];
   const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];
   const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];

   assert(!cfg->n[GEN_L3P_ALL]);

   /* When enabled SLM only uses a portion of the L3 on half of the banks,
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const struct gen_device_info *devinfo = &cmd_buffer->device->info;
   const bool urb_low_bw = has_slm && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   MAYBE_UNUSED const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
   assert(cfg->n[GEN_L3P_URB] >= n0_urb);

   uint32_t l3sqcr1, l3cr2, l3cr3;
   anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1),
                   .ConvertDC_UC = !has_dc,
                   .ConvertIS_UC = !has_is,
                   .ConvertC_UC = !has_c,
                   .ConvertT_UC = !has_t);
   l3sqcr1 |=
      GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT :
      devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT :
      IVB_L3SQCREG1_SQGHPCI_DEFAULT;

   anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
                   .SLMEnable = has_slm,
                   .URBLowBandwidth = urb_low_bw,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
#if !GEN_IS_HASWELL
                   .ALLAllocation = cfg->n[GEN_L3P_ALL],
#endif
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC]);

   anv_pack_struct(&l3cr3, GENX(L3CNTLREG3),
                   .ISAllocation = cfg->n[GEN_L3P_IS],
                   .ISLowBandwidth = 0,
                   .CAllocation = cfg->n[GEN_L3P_C],
                   .CLowBandwidth = 0,
                   .TAllocation = cfg->n[GEN_L3P_T],
                   .TLowBandwidth = 0);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);

#if GEN_IS_HASWELL
   if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      uint32_t scratch1, chicken3;
      anv_pack_struct(&scratch1, GENX(SCRATCH1),
                      .L3AtomicDisable = !has_dc);
      anv_pack_struct(&chicken3, GENX(CHICKEN3),
                      .L3AtomicDisableMask = true,
                      .L3AtomicDisable = !has_dc);
      emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1);
      emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3);
   }
#endif

#endif

   cmd_buffer->state.current_l3_config = cfg;
}
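/* Turn the flush/invalidate bits accumulated in pending_pipe_bits into
 * actual PIPE_CONTROLs. Flushes (with any required CS stall) are emitted
 * first, then invalidations, since invalidations take effect at the top of
 * the pipe and must not overtake outstanding flushes.
 */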
void
genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
{
   enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;

   /* Flushes are pipelined while invalidations are handled immediately.
    * Therefore, if we're flushing anything then we need to schedule a stall
    * before any invalidations can happen.
    */
   if (bits & ANV_PIPE_FLUSH_BITS)
      bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;

   /* If we're going to do an invalidate and we have a pending CS stall that
    * has yet to be resolved, we do the CS stall now.
    */
   if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
       (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
      bits |= ANV_PIPE_CS_STALL_BIT;
      bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
   }

   if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         pipe.RenderTargetCacheFlushEnable =
            bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

         pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
         pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
         pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;

         /*
          * According to the Broadwell documentation, any PIPE_CONTROL with the
          * "Command Streamer Stall" bit set must also have another bit set,
          * with six different options:
          *
          *  - Render Target Cache Flush
          *  - Depth Cache Flush
          *  - Stall at Pixel Scoreboard
          *  - Post-Sync Operation
          *  - Depth Stall
          *  - DC Flush Enable
          *
          * I chose "Stall at Pixel Scoreboard" since that's what we use in
          * mesa and it seems to work fine. The choice is fairly arbitrary.
          */
         if ((bits & ANV_PIPE_CS_STALL_BIT) &&
             !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
                       ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
            pipe.StallAtPixelScoreboard = true;
      }

      bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
   }

   if (bits & ANV_PIPE_INVALIDATE_BITS) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.StateCacheInvalidationEnable =
            bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
         pipe.ConstantCacheInvalidationEnable =
            bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe.VFCacheInvalidationEnable =
            bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         pipe.TextureCacheInvalidationEnable =
            bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         pipe.InstructionCacheInvalidateEnable =
            bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
      }

      bits &= ~ANV_PIPE_INVALIDATE_BITS;
   }

   cmd_buffer->state.pending_pipe_bits = bits;
}
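/* vkCmdPipelineBarrier: accumulate the flushes and invalidations implied by
 * the barrier's access masks into pending_pipe_bits; the PIPE_CONTROLs are
 * emitted later by genX(cmd_buffer_apply_pipe_flushes).
 */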
void genX(CmdPipelineBarrier)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkBool32                                    byRegion,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   uint32_t b;

   /* XXX: Right now, we're really dumb and just flush whatever categories
    * the app asks for. One of these days we may make this a bit better
    * but right now that's all the hardware allows for in most areas.
    */
   VkAccessFlags src_flags = 0;
   VkAccessFlags dst_flags = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      src_flags |= pMemoryBarriers[i].srcAccessMask;
      dst_flags |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
      dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      src_flags |= pImageMemoryBarriers[i].srcAccessMask;
      dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
   }

   enum anv_pipe_bits pipe_bits = 0;

   for_each_bit(b, src_flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_SHADER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_TRANSFER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   for_each_bit(b, dst_flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
      case VK_ACCESS_INDEX_READ_BIT:
      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_UNIFORM_READ_BIT:
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_SHADER_READ_BIT:
      case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
      case VK_ACCESS_TRANSFER_READ_BIT:
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   cmd_buffer->state.pending_pipe_bits |= pipe_bits;
}
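/* Partition the push constant space among the active graphics stages with
 * 3DSTATE_PUSH_CONSTANT_ALLOC_*. The PS always gets whatever space is left
 * over after the other stages.
 */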
static void
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;

   /* In order to avoid thrash, we assume that vertex and fragment stages
    * always exist. In the rare case where one is missing *and* the other
    * uses push constants, this may be suboptimal. However, avoiding stalls
    * seems more important.
    */
   stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;

   if (stages == cmd_buffer->state.push_constant_stages)
      return;

#if GEN_GEN >= 8
   const unsigned push_constant_kb = 32;
#elif GEN_IS_HASWELL
   const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
#else
   const unsigned push_constant_kb = 16;
#endif

   const unsigned num_stages =
      _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
   unsigned size_per_stage = push_constant_kb / num_stages;

   /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
    * units of 2KB. Incidentally, these are the same platforms that have
    * 32KB worth of push constant space.
    */
   if (push_constant_kb == 32)
      size_per_stage &= ~1u;

   uint32_t kb_used = 0;
   for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
      unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode  = 18 + i;
         alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
         alloc.ConstantBufferSize   = push_size;
      }
      kb_used += push_size;
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
      alloc.ConstantBufferOffset = kb_used;
      alloc.ConstantBufferSize = push_constant_kb - kb_used;
   }

   cmd_buffer->state.push_constant_stages = stages;

   /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
    *
    *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
    *    the next 3DPRIMITIVE command after programming the
    *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
    *
    * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
    * pipeline setup, we need to dirty push constants.
    */
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}
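/* Build the binding table for one shader stage: one surface state pointer
 * per binding, plus (for compute shaders that use gl_NumWorkGroups) an
 * extra entry at slot 0 for the num_workgroups buffer.
 */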
static VkResult
emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                   gl_shader_stage stage,
                   struct anv_state *bt_state)
{
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline *pipeline;
   uint32_t bias, state_offset;

   switch (stage) {
   case MESA_SHADER_COMPUTE:
      pipeline = cmd_buffer->state.compute_pipeline;
      bias = 1;
      break;
   default:
      pipeline = cmd_buffer->state.pipeline;
      bias = 0;
      break;
   }

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const enum isl_format format =
         anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];

      struct anv_state surface_state;

      if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
         /* Color attachment binding */
         assert(stage == MESA_SHADER_FRAGMENT);
         assert(binding->binding == 0);
         if (binding->index < subpass->color_count) {
            const unsigned att = subpass->color_attachments[binding->index];
            surface_state = cmd_buffer->state.attachments[att].color_rt_state;
         } else {
            surface_state = cmd_buffer->state.null_surface_state;
         }

         bt_map[bias + s] = surface_state.offset + state_offset;
         continue;
      }

      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         add_image_view_relocs(cmd_buffer, desc->image_view,
                               desc->image_view->image->aux_usage,
                               surface_state);
         break;

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         assert(stage == MESA_SHADER_FRAGMENT);
         if (desc->image_view->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
            /* For stencil input attachments, we treat it like any old texture
             * that a user may have bound.
             */
            surface_state = desc->image_view->sampler_surface_state;
            assert(surface_state.alloc_size);
            add_image_view_relocs(cmd_buffer, desc->image_view,
                                  desc->image_view->image->aux_usage,
                                  surface_state);
         } else {
            /* For depth and color input attachments, we create the surface
             * state at vkBeginRenderPass time so that we can include aux
             * and clear color information.
             */
            assert(binding->input_attachment_index < subpass->input_count);
            const unsigned subpass_att = binding->input_attachment_index;
            const unsigned att = subpass->input_attachments[subpass_att];
            surface_state = cmd_buffer->state.attachments[att].input_att_state;
         }
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         add_image_view_relocs(cmd_buffer, desc->image_view,
                               desc->image_view->image->aux_usage,
                               surface_state);

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->image_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         add_surface_state_reloc(cmd_buffer, surface_state,
                                 desc->buffer_view->bo,
                                 desc->buffer_view->offset);
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         add_surface_state_reloc(cmd_buffer, surface_state,
                                 desc->buffer_view->bo,
                                 desc->buffer_view->offset);

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->buffer_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
   }
   assert(image == map->image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}
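/* Gather the SAMPLER_STATE structures for all samplers used by one stage
 * into a single dynamic-state allocation, 16 bytes per sampler.
 */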
static VkResult
emit_samplers(struct anv_cmd_buffer *cmd_buffer,
              gl_shader_stage stage,
              struct anv_state *state)
{
   struct anv_pipeline *pipeline;

   if (stage == MESA_SHADER_COMPUTE)
      pipeline = cmd_buffer->state.compute_pipeline;
   else
      pipeline = cmd_buffer->state.pipeline;

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}
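/* Emit samplers and binding tables for every dirty stage. If a binding
 * table allocation fails, grab a new binding table block, re-emit
 * STATE_BASE_ADDRESS, and retry for all active stages. Returns the mask of
 * stages that were actually flushed.
 */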
static uint32_t
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = emit_binding_table(cmd_buffer, s,
                                  &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = emit_binding_table(cmd_buffer, s,
                                     &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
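/* The state-pointer packets for the different shader stages share a layout
 * and differ only in their sub-opcode, so we emit the VS flavor of each
 * packet and patch _3DCommandSubOpcode from the tables below.
 */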
static void
|
|
|
|
cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
uint32_t stages)
|
|
|
|
{
|
|
|
|
static const uint32_t sampler_state_opcodes[] = {
|
|
|
|
[MESA_SHADER_VERTEX] = 43,
|
|
|
|
[MESA_SHADER_TESS_CTRL] = 44, /* HS */
|
|
|
|
[MESA_SHADER_TESS_EVAL] = 45, /* DS */
|
|
|
|
[MESA_SHADER_GEOMETRY] = 46,
|
|
|
|
[MESA_SHADER_FRAGMENT] = 47,
|
|
|
|
[MESA_SHADER_COMPUTE] = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const uint32_t binding_table_opcodes[] = {
|
|
|
|
[MESA_SHADER_VERTEX] = 38,
|
|
|
|
[MESA_SHADER_TESS_CTRL] = 39,
|
|
|
|
[MESA_SHADER_TESS_EVAL] = 40,
|
|
|
|
[MESA_SHADER_GEOMETRY] = 41,
|
|
|
|
[MESA_SHADER_FRAGMENT] = 42,
|
|
|
|
[MESA_SHADER_COMPUTE] = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
anv_foreach_stage(s, stages) {
|
|
|
|
if (cmd_buffer->state.samplers[s].alloc_size > 0) {
|
|
|
|
anv_batch_emit(&cmd_buffer->batch,
|
|
|
|
GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
|
|
|
|
ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
|
|
|
|
ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Always emit binding table pointers if we're asked to, since on SKL
|
|
|
|
* this is what flushes push constants. */
|
|
|
|
anv_batch_emit(&cmd_buffer->batch,
|
|
|
|
GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
|
|
|
|
btp._3DCommandSubOpcode = binding_table_opcodes[s];
|
|
|
|
btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
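
/* Upload dirty graphics push constants and emit 3DSTATE_CONSTANT_* for
 * each stage.  Returns the mask of stages that were flushed so that, on
 * gen9+, the caller can re-emit the binding table pointers that actually
 * make the new constants take effect.
 */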
static uint32_t
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
            c._3DCommandSubOpcode = push_constant_opcodes[stage];
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
            c._3DCommandSubOpcode = push_constant_opcodes[stage];
            c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
#if GEN_GEN >= 9
               .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
               .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
#else
               .PointerToConstantBuffer0 = { .offset = state.offset },
               .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
#endif
            };
         }
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;

   return flushed;
}
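
/* Flush all dirty 3D state before a draw: L3 config and pipeline select
 * first, then vertex buffers, the pipeline batch, descriptors and push
 * constants, the descriptor pointers that commit them, and finally
 * viewport, scissor, and the remaining dynamic state.
 */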
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,

#if GEN_GEN >= 8
            .MemoryObjectControlState = GENX(MOCS),
#else
            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
            .InstanceDataStepRate = 1,
            .VertexBufferMemoryObjectControlState = GENX(MOCS),
#endif

            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },

#if GEN_GEN >= 8
            .BufferSize = buffer->size - offset
#else
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1 },
#endif
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* The exact descriptor layout is pulled from the pipeline, so we need
       * to re-emit binding tables on every pipeline change.
       */
      cmd_buffer->state.descriptors_dirty |=
         cmd_buffer->state.pipeline->active_stages;

      /* If the pipeline changed, we may need to re-allocate push constant
       * space in the URB.
       */
      cmd_buffer_alloc_push_constants(cmd_buffer);
   }

#if GEN_GEN <= 7
   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthStallEnable  = true;
         pc.PostSyncOperation = WriteImmediateData;
         pc.Address =
            (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
      }
   }
#endif

   /* Render targets live in the same binding table as fragment descriptors */
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;

   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images). After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty) {
#if GEN_GEN >= 9
      /* On Sky Lake and later, the binding table pointers commands are
       * what actually flush the changes to push constant state so we need
       * to dirty them so they get re-emitted below.
       */
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
#else
      cmd_buffer_flush_push_constants(cmd_buffer);
#endif
   }

   if (dirty)
      cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
                                  ANV_CMD_DIRTY_PIPELINE)) {
      gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
                                          pipeline->depth_clamp_enable);
   }

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
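
/* Shaders that use gl_BaseVertex or gl_BaseInstance read the pair of
 * values from a reserved vertex buffer slot.  These helpers upload the two
 * dwords (either from an existing BO or from freshly written dynamic
 * state) and point vertex buffer 32 at them.
 */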
static void
emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_bo *bo, uint32_t offset)
{
   uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
                                 GENX(3DSTATE_VERTEX_BUFFERS));

   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferPitch = 0,
#if (GEN_GEN >= 8)
         .MemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .BufferSize = 8
#else
         .VertexBufferMemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .EndAddress = { bo, offset + 8 },
#endif
      });
}

static void
emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
                          uint32_t base_vertex, uint32_t base_instance)
{
   struct anv_state id_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);

   ((uint32_t *)id_state.map)[0] = base_vertex;
   ((uint32_t *)id_state.map)[1] = base_instance;

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(id_state);

   emit_base_vertex_instance_bo(cmd_buffer,
      &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
}

void genX(CmdDraw)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    vertexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstVertex,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = vertexCount;
      prim.StartVertexLocation      = firstVertex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = 0;
   }
}

void genX(CmdDrawIndexed)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    indexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstIndex,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = indexCount;
      prim.StartVertexLocation      = firstIndex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = vertexOffset;
   }
}

/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440
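
/* With IndirectParameterEnable set, 3DPRIMITIVE sources its arguments from
 * the registers above.  VkDrawIndirectCommand packs vertexCount,
 * instanceCount, firstVertex, and firstInstance as consecutive dwords,
 * which is why the MI_LOAD_REGISTER_MEM sequences below walk the buffer in
 * 4-byte steps.
 */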
void genX(CmdDrawIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);

   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.IndirectParameterEnable  = true;
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
   }
}

void genX(CmdDrawIndexedIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   /* TODO: We need to stomp base vertex to 0 somehow */
   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);

   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.IndirectParameterEnable  = true;
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
   }
}
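
/* Compute descriptors are not bound through 3DSTATE_*_POINTERS packets.
 * Instead, we build an INTERFACE_DESCRIPTOR_DATA pointing at the freshly
 * emitted binding and sampler tables, merge it with the template baked
 * into the pipeline, and load it with MEDIA_INTERFACE_DESCRIPTOR_LOAD.
 */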
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

      result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
      assert(result == VK_SUCCESS);
   }

   result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers);
   assert(result == VK_SUCCESS);

   uint32_t iface_desc_data_dw[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
   struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
      .BindingTablePointer = surfaces.offset,
      .SamplerStatePointer = samplers.offset,
   };
   GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, iface_desc_data_dw, &desc);

   struct anv_state state =
      anv_cmd_buffer_merge_dynamic(cmd_buffer, iface_desc_data_dw,
                                   pipeline->interface_descriptor_data,
                                   GENX(INTERFACE_DESCRIPTOR_DATA_length),
                                   64);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
      mid.InterfaceDescriptorTotalLength        = size;
      mid.InterfaceDescriptorDataStartAddress   = state.offset;
   }

   return VK_SUCCESS;
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   MAYBE_UNUSED VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
       *
       *    "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
       *    the only bits that are changed are scoreboard related: Scoreboard
       *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
       *    these scoreboard related states, a MEDIA_STATE_FLUSH is
       *    sufficient."
       */
      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
      genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) {
      struct anv_state push_state =
         anv_cmd_buffer_cs_push_constants(cmd_buffer);

      if (push_state.alloc_size) {
         anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
            curbe.CURBETotalDataLength    = push_state.alloc_size;
            curbe.CURBEDataStartAddress   = push_state.offset;
         }
      }
   }

   cmd_buffer->state.compute_dirty = 0;

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
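
/* On gen7, the registers used for indirect dispatch can only be written
 * when the kernel's command parser is new enough.  Check its version up
 * front and report VK_ERROR_FEATURE_NOT_PRESENT rather than emitting
 * register writes the kernel would refuse.
 */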
#if GEN_GEN == 7

static bool
verify_cmd_parser(const struct anv_device *device,
                  int required_version,
                  const char *function)
{
   if (device->instance->physicalDevice.cmd_parser_version < required_version) {
      vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
                "cmd parser version %d is required for %s",
                required_version, function);
      return false;
   } else {
      return true;
   }
}

#endif

void genX(CmdDispatch)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);

   if (prog_data->uses_num_work_groups) {
      struct anv_state state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
      uint32_t *sizes = state.map;
      sizes[0] = x;
      sizes[1] = y;
      sizes[2] = z;
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(state);
      cmd_buffer->state.num_workgroups_offset = state.offset;
      cmd_buffer->state.num_workgroups_bo =
         &cmd_buffer->device->dynamic_state_block_pool.bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.ThreadGroupIDXDimension      = x;
      ggw.ThreadGroupIDYDimension      = y;
      ggw.ThreadGroupIDZDimension      = z;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
}

#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

#define MI_PREDICATE_SRC0  0x2400
#define MI_PREDICATE_SRC1  0x2408
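
/* Each MI_PREDICATE below compares SRC0 against SRC1 and folds the result
 * into the predicate state.  The sequence in CmdDispatchIndirect computes
 * "any dispatch dimension is zero" and then inverts it, so the predicated
 * GPGPU_WALKER on gen7 only runs for a non-empty dispatch.
 */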
void genX(CmdDispatchIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;
   struct anv_batch *batch = &cmd_buffer->batch;

#if GEN_GEN == 7
   /* Linux 4.4 added command parser version 5 which allows the GPGPU
    * indirect dispatch registers to be written.
    */
   if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
      return;
#endif

   if (prog_data->uses_num_work_groups) {
      cmd_buffer->state.num_workgroups_offset = bo_offset;
      cmd_buffer->state.num_workgroups_bo = bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
   emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
   emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);

#if GEN_GEN <= 7
   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_SET;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* predicate = !predicate; */
#define COMPARE_FALSE                           1
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOADINV;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_FALSE;
   }
#endif

   anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable      = true;
      ggw.PredicateEnable              = GEN_GEN <= 7;
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
}

static void
flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
                                      uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#elif GEN_GEN <= 7
   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *   Project: DEVSNB+
    *
    *   Software must ensure all the write caches are flushed through a
    *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *   command to invalidate read only caches prior to programming
    *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable  = true;
      pc.DepthCacheFlushEnable         = true;
      pc.DCFlushEnable                 = true;
      pc.PostSyncOperation             = NoWrite;
      pc.CommandStreamerStallEnable    = true;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable   = true;
      pc.ConstantCacheInvalidationEnable  = true;
      pc.StateCacheInvalidationEnable     = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.PostSyncOperation                = NoWrite;
   }
#endif
}
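
/* PIPELINE_SELECT switches require heavyweight flushes, so the helpers
 * below track the current mode in cmd_buffer->state and no-op when it
 * already matches.
 */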
void
genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.current_pipeline != _3D) {
      flush_pipeline_before_pipeline_select(cmd_buffer, _3D);

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
#if GEN_GEN >= 9
         ps.MaskBits = 3;
#endif
         ps.PipelineSelection = _3D;
      }

      cmd_buffer->state.current_pipeline = _3D;
   }
}

void
genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.current_pipeline != GPGPU) {
      flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
#if GEN_GEN >= 9
         ps.MaskBits = 3;
#endif
         ps.PipelineSelection = GPGPU;
      }

      cmd_buffer->state.current_pipeline = GPGPU;
   }
}

void
genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer)
{
   if (GEN_GEN >= 8)
      return;

   /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER:
    *
    *    "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any
    *    combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
    *    3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
    *    issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
    *    set), followed by a pipelined depth cache flush (PIPE_CONTROL with
    *    Depth Flush Bit set, followed by another pipelined depth stall
    *    (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
    *    guarantee that the pipeline from WM onwards is already flushed (e.g.,
    *    via a preceding MI_FLUSH)."
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthStallEnable = true;
   }
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthCacheFlushEnable = true;
   }
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthStallEnable = true;
   }
}

static uint32_t
depth_stencil_surface_type(enum isl_surf_dim dim)
{
   switch (dim) {
   case ISL_SURF_DIM_1D:
      if (GEN_GEN >= 9) {
         /* From the Sky Lake PRM, 3DSTATE_DEPTH_BUFFER::SurfaceType
          *
          *    Programming Notes:
          *    The Surface Type of the depth buffer must be the same as the
          *    Surface Type of the render target(s) (defined in
          *    SURFACE_STATE), unless either the depth buffer or render
          *    targets are SURFTYPE_NULL (see exception below for SKL).  1D
          *    surface type not allowed for depth surface and stencil surface.
          *
          *    Workaround:
          *    If depth/stencil is enabled with 1D render target,
          *    depth/stencil surface type needs to be set to 2D surface type
          *    and height set to 1.  Depth will use (legacy) TileY and stencil
          *    will use TileW.  For this case only, the Surface Type of the
          *    depth buffer can be 2D while the Surface Type of the render
          *    target(s) are 1D, representing an exception to a programming
          *    note above.
          */
         return SURFTYPE_2D;
      } else {
         return SURFTYPE_1D;
      }
   case ISL_SURF_DIM_2D:
      return SURFTYPE_2D;
   case ISL_SURF_DIM_3D:
      if (GEN_GEN >= 9) {
         /* The Sky Lake docs list the value for 3D as "Reserved".  However,
          * they have the exact same layout as 2D arrays on gen9+, so we can
          * just use 2D here.
          */
         return SURFTYPE_2D;
      } else {
         return SURFTYPE_3D;
      }
   default:
      unreachable("Invalid surface dimension");
   }
}

static void
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
   const struct anv_image *image = iview ? iview->image : NULL;
   const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
   const uint32_t ds = cmd_buffer->state.subpass->depth_stencil_attachment;
   const bool has_hiz = image != NULL &&
      cmd_buffer->state.attachments[ds].aux_usage == ISL_AUX_USAGE_HIZ;
   const bool has_stencil =
      image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);

   /* FIXME: Implement the PMA stall W/A */
   /* FIXME: Width and Height are wrong */

   genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);

   /* Emit 3DSTATE_DEPTH_BUFFER */
   if (has_depth) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
         db.SurfaceType =
            depth_stencil_surface_type(image->depth_surface.isl.dim);
         db.DepthWriteEnable = true;
         db.StencilWriteEnable = has_stencil;
         db.HierarchicalDepthBufferEnable = has_hiz;

         db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
                                                      &image->depth_surface.isl);

         db.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->depth_surface.offset,
         };
         db.DepthBufferObjectControlState = GENX(MOCS);

         db.SurfacePitch = image->depth_surface.isl.row_pitch - 1;
         db.Height = image->extent.height - 1;
         db.Width = image->extent.width - 1;
         db.LOD = iview->isl.base_level;
         db.MinimumArrayElement = iview->isl.base_array_layer;

         assert(image->depth_surface.isl.dim != ISL_SURF_DIM_3D);
         db.Depth =
         db.RenderTargetViewExtent =
            iview->isl.array_len - iview->isl.base_array_layer - 1;

#if GEN_GEN >= 8
         db.SurfaceQPitch =
            isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
#endif
      }
   } else {
      /* Even when no depth buffer is present, the hardware requires that
       * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
       *
       *    If a null depth buffer is bound, the driver must instead bind depth as:
       *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
       *       3DSTATE_DEPTH.Width = 1
       *       3DSTATE_DEPTH.Height = 1
       *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
       *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
       *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
       *
       * The PRM is wrong, though. The width and height must be programmed to
       * actual framebuffer's width and height, even when neither depth buffer
       * nor stencil buffer is present.  Also, D16_UNORM is not allowed to
       * be combined with a stencil buffer so we use D32_FLOAT instead.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
         if (has_stencil) {
            db.SurfaceType =
               depth_stencil_surface_type(image->stencil_surface.isl.dim);
         } else {
            db.SurfaceType = SURFTYPE_2D;
         }
         db.SurfaceFormat = D32_FLOAT;
         db.Width = MAX2(fb->width, 1) - 1;
         db.Height = MAX2(fb->height, 1) - 1;
         db.StencilWriteEnable = has_stencil;
      }
   }

   if (has_hiz) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb) {
         hdb.HierarchicalDepthBufferObjectControlState = GENX(MOCS);
         hdb.SurfacePitch = image->aux_surface.isl.row_pitch - 1;
         hdb.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->aux_surface.offset,
         };
#if GEN_GEN >= 8
         /* From the SKL PRM Vol2a:
          *
          *    The interpretation of this field is dependent on Surface Type
          *    as follows:
          *    - SURFTYPE_1D: distance in pixels between array slices
          *    - SURFTYPE_2D/CUBE: distance in rows between array slices
          *    - SURFTYPE_3D: distance in rows between R - slices
          *
          * Unfortunately, the docs aren't 100% accurate here.  They fail to
          * mention that the 1-D rule only applies to linear 1-D images.
          * Since depth and HiZ buffers are always tiled, they are treated as
          * 2-D images.  Prior to Sky Lake, this field is always in rows.
          */
         hdb.SurfaceQPitch =
            isl_surf_get_array_pitch_sa_rows(&image->aux_surface.isl) >> 2;
#endif
      }
   } else {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb);
   }

   /* Emit 3DSTATE_STENCIL_BUFFER */
   if (has_stencil) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
         sb.StencilBufferEnable = true;
#endif
         sb.StencilBufferObjectControlState = GENX(MOCS);

         sb.SurfacePitch = image->stencil_surface.isl.row_pitch - 1;

#if GEN_GEN >= 8
         sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
#endif
         sb.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->stencil_surface.offset,
         };
      }
   } else {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
   }

   /* From the IVB PRM Vol2P1, 11.5.5.4 3DSTATE_CLEAR_PARAMS:
    *
    *    3DSTATE_CLEAR_PARAMS must always be programmed along with the other
    *    Depth/Stencil state commands (i.e. 3DSTATE_DEPTH_BUFFER,
    *    3DSTATE_STENCIL_BUFFER, or 3DSTATE_HIER_DEPTH_BUFFER)
    *
    * Testing also shows that some variant of this restriction may exist HSW+.
    * On BDW+, it is not possible to emit 2 of these packets consecutively when
    * both have DepthClearValueValid set.  An analysis of such state programming
    * on SKL showed that the GPU doesn't register the latter packet's clear
    * value.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp) {
      if (has_hiz) {
         cp.DepthClearValueValid = true;
         cp.DepthClearValue = ANV_HZ_FC_VAL;
      }
   }
}
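
/* Starting a subpass re-emits the depth/stencil buffer packets, so first
 * resolve HiZ for the incoming depth/stencil attachment, then emit the new
 * state and perform any clears the subpass requires.
 */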
static void
genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   cmd_buffer->state.subpass = subpass;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;

   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);

   if (iview) {
      anv_gen8_hiz_op_resolve(cmd_buffer, iview->image,
                              BLORP_HIZ_OP_HIZ_RESOLVE);
   }

   cmd_buffer_emit_depth_stencil(cmd_buffer);

   anv_cmd_buffer_clear_subpass(cmd_buffer);
}

void genX(CmdBeginRenderPass)(
    VkCommandBuffer                             commandBuffer,
    const VkRenderPassBeginInfo*                pRenderPassBegin,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
   ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

   cmd_buffer->state.framebuffer = framebuffer;
   cmd_buffer->state.pass = pass;
   cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
   genX(cmd_buffer_setup_attachments)(cmd_buffer, pass, pRenderPassBegin);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
}

void genX(CmdNextSubpass)(
    VkCommandBuffer                             commandBuffer,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);

   if (iview) {
      anv_gen8_hiz_op_resolve(cmd_buffer, iview->image,
                              BLORP_HIZ_OP_DEPTH_RESOLVE);
   }

   anv_cmd_buffer_resolve_subpass(cmd_buffer);
   genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
}

void genX(CmdEndRenderPass)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);

   if (iview) {
      anv_gen8_hiz_op_resolve(cmd_buffer, iview->image,
                              BLORP_HIZ_OP_DEPTH_RESOLVE);
   }

   anv_cmd_buffer_resolve_subpass(cmd_buffer);

#ifndef NDEBUG
   anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
#endif
}
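
/* Occlusion query slots follow struct anv_query_pool_slot: the begin
 * PS_DEPTH_COUNT at offset 0, the end count at offset 8, and an
 * availability word at offset 16.  The helpers below write those fields
 * from the command streamer.
 */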
static void
emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType  = DAT_PPGTT;
      pc.PostSyncOperation       = WritePSDepthCount;
      pc.DepthStallEnable        = true;
      pc.Address                 = (struct anv_address) { bo, offset };

      if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
         pc.CommandStreamerStallEnable = true;
   }
}

static void
emit_query_availability(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType  = DAT_PPGTT;
      pc.PostSyncOperation       = WriteImmediateData;
      pc.Address                 = (struct anv_address) { bo, offset };
      pc.ImmediateData           = 1;
   }
}

void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks. What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear. Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthCacheFlushEnable   = true;
         pc.DepthStallEnable        = true;
      }
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(cmd_buffer, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}

void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(cmd_buffer, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(cmd_buffer, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}

#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = TIMESTAMP;
         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset };
      }
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = TIMESTAMP + 4;
         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset + 4 };
      }
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DestinationAddressType  = DAT_PPGTT;
         pc.PostSyncOperation       = WriteTimestamp;
         pc.Address = (struct anv_address) { &pool->bo, offset };

         if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
            pc.CommandStreamerStallEnable = true;
      }
      break;
   }

   /* The availability word lives 16 bytes into the slot, matching
    * CmdEndQuery and CmdCopyQueryPoolResults; offset already accounts for
    * the slot stride.
    */
   emit_query_availability(cmd_buffer, &pool->bo, offset + 16);
}
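
/* MI_MATH drives the command streamer ALU: each instruction dword encodes
 * an opcode in bits 31:20, operand 1 in bits 19:10, and operand 2 in bits
 * 9:0, which is exactly what the alu() macro below assembles.
 */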
#if GEN_GEN > 7 || GEN_IS_HASWELL

#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v),  0,  9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP      0x000
#define OPCODE_LOAD      0x080
#define OPCODE_LOADINV   0x480
#define OPCODE_LOAD0     0x081
#define OPCODE_LOAD1     0x481
#define OPCODE_ADD       0x100
#define OPCODE_SUB       0x101
#define OPCODE_AND       0x102
#define OPCODE_OR        0x103
#define OPCODE_XOR       0x104
#define OPCODE_STORE     0x180
#define OPCODE_STOREINV  0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
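
/* MI_LOAD_REGISTER_MEM and MI_STORE_REGISTER_MEM each move a single dword,
 * so loading a 64-bit GPR takes two packets; stores write the low dword
 * and, when VK_QUERY_RESULT_64_BIT is set, the high dword as well.
 */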
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
   }
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg + 4;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
   }
}

static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress  = reg;
      srm.MemoryAddress    = (struct anv_address) { bo, offset };
   }

   if (flags & VK_QUERY_RESULT_64_BIT) {
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = reg + 4;
         srm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
      }
   }
}

void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.CommandStreamerStallEnable = true;
         pc.StallAtPixelScoreboard     = true;
      }
   }

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {

      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */

         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}

#else
void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   anv_finishme("Queries not yet supported on Ivy Bridge");
}
#endif