radeonsi: flatten / remove struct r600_ring

Acked-by: Timothy Arceri <tarceri@itsqueeze.com>
Marek Olšák 2018-04-01 15:07:58 -04:00
parent f7de8686de
commit 2b70dd8c8a
24 changed files with 175 additions and 179 deletions
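The change is mechanical: struct r600_ring wrapped nothing but a single command-stream pointer, so the wrapper is dropped and the pointer is stored directly in r600_common_context. A condensed before/after sketch of the flattening, taken from the header hunk below that deletes struct r600_ring (other context members elided):

    /* Before: a one-member wrapper; helpers took struct r600_ring *
     * and had to dereference ring->cs. */
    struct r600_ring {
        struct radeon_winsys_cs *cs;
    };
    struct r600_common_context {
        ...
        struct r600_ring gfx;
        struct r600_ring dma;
        ...
    };

    /* After: the context stores the winsys command streams directly. */
    struct r600_common_context {
        ...
        struct radeon_winsys_cs *gfx_cs;
        struct radeon_winsys_cs *dma_cs;
        ...
    };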

View File

@@ -32,11 +32,11 @@ bool si_rings_is_buffer_referenced(struct r600_common_context *ctx,
struct pb_buffer *buf,
enum radeon_bo_usage usage)
{
- if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
+ if (ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, buf, usage)) {
return true;
}
- if (radeon_emitted(ctx->dma.cs, 0) &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
+ if (radeon_emitted(ctx->dma_cs, 0) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, buf, usage)) {
return true;
}
return false;
@@ -60,8 +60,8 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
rusage = RADEON_USAGE_WRITE;
}
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
+ if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
@@ -71,8 +71,8 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
busy = true;
}
}
- if (radeon_emitted(ctx->dma.cs, 0) &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
+ if (radeon_emitted(ctx->dma_cs, 0) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
@@ -89,9 +89,9 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
} else {
/* We will be wait for the GPU. Wait for any offloaded
* CS flush to complete to avoid busy-waiting in the winsys. */
- ctx->ws->cs_sync_flush(ctx->gfx.cs);
- if (ctx->dma.cs)
- ctx->ws->cs_sync_flush(ctx->dma.cs);
+ ctx->ws->cs_sync_flush(ctx->gfx_cs);
+ if (ctx->dma_cs)
+ ctx->ws->cs_sync_flush(ctx->dma_cs);
}
}

View File

@@ -65,14 +65,14 @@ radeon_cs_memory_below_limit(struct si_screen *screen,
* rebuilt.
*/
static inline void radeon_add_to_buffer_list(struct r600_common_context *rctx,
- struct r600_ring *ring,
+ struct radeon_winsys_cs *cs,
struct r600_resource *rbo,
enum radeon_bo_usage usage,
enum radeon_bo_priority priority)
{
assert(usage);
rctx->ws->cs_add_buffer(
- ring->cs, rbo->buf,
+ cs, rbo->buf,
(enum radeon_bo_usage)(usage | RADEON_USAGE_SYNCHRONIZED),
rbo->domains, priority);
}
@@ -102,12 +102,12 @@ radeon_add_to_gfx_buffer_list_check_mem(struct si_context *sctx,
bool check_mem)
{
if (check_mem &&
- !radeon_cs_memory_below_limit(sctx->screen, sctx->b.gfx.cs,
+ !radeon_cs_memory_below_limit(sctx->screen, sctx->b.gfx_cs,
sctx->b.vram + rbo->vram_usage,
sctx->b.gtt + rbo->gart_usage))
si_flush_gfx_cs(&sctx->b, PIPE_FLUSH_ASYNC, NULL);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rbo, usage, priority);
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, rbo, usage, priority);
}
static inline void radeon_set_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
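Every call site changes in the same way: instead of passing the wrapper's address (&ctx->gfx or &ctx->dma), callers now pass the command stream itself. A representative old/new pair, taken from the query-emission hunks below:

    /* old */ radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_READ, RADEON_PRIO_QUERY);
    /* new */ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ, RADEON_PRIO_QUERY);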

View File

@@ -89,19 +89,19 @@ static bool r600_resource_commit(struct pipe_context *pctx,
* (b) wait for threaded submit to finish, including those that were
* triggered by some other, earlier operation.
*/
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
+ if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
res->buf, RADEON_USAGE_READWRITE)) {
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
- if (radeon_emitted(ctx->dma.cs, 0) &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
+ if (radeon_emitted(ctx->dma_cs, 0) &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
res->buf, RADEON_USAGE_READWRITE)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
- ctx->ws->cs_sync_flush(ctx->dma.cs);
- ctx->ws->cs_sync_flush(ctx->gfx.cs);
+ ctx->ws->cs_sync_flush(ctx->dma_cs);
+ ctx->ws->cs_sync_flush(ctx->gfx_cs);
assert(resource->target == PIPE_BUFFER);
@@ -174,7 +174,7 @@ bool si_common_context_init(struct r600_common_context *rctx,
return false;
if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
- rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
+ rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
si_flush_dma_cs,
rctx);
}
@@ -201,10 +201,10 @@ void si_common_context_cleanup(struct r600_common_context *rctx)
if (rctx->query_result_shader)
rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);
- if (rctx->gfx.cs)
- rctx->ws->cs_destroy(rctx->gfx.cs);
- if (rctx->dma.cs)
- rctx->ws->cs_destroy(rctx->dma.cs);
+ if (rctx->gfx_cs)
+ rctx->ws->cs_destroy(rctx->gfx_cs);
+ if (rctx->dma_cs)
+ rctx->ws->cs_destroy(rctx->dma_cs);
if (rctx->ctx)
rctx->ws->ctx_destroy(rctx->ctx);

View File

@@ -368,10 +368,6 @@ struct r600_atom {
unsigned short id;
};
- struct r600_ring {
- struct radeon_winsys_cs *cs;
- };
/* Saved CS data for debugging features. */
struct radeon_saved_cs {
uint32_t *ib;
@@ -389,8 +385,8 @@ struct r600_common_context {
struct radeon_winsys_ctx *ctx;
enum radeon_family family;
enum chip_class chip_class;
- struct r600_ring gfx;
- struct r600_ring dma;
+ struct radeon_winsys_cs *gfx_cs;
+ struct radeon_winsys_cs *dma_cs;
struct pipe_fence_handle *last_gfx_fence;
struct pipe_fence_handle *last_sdma_fence;
struct r600_resource *eop_bug_scratch;

View File

@@ -743,7 +743,7 @@ static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
@@ -786,7 +786,7 @@ static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
default:
assert(0);
}
- radeon_add_to_buffer_list(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
@@ -827,7 +827,7 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
struct r600_resource *buffer,
uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
uint64_t fence_va = 0;
switch (query->b.type) {
@@ -878,7 +878,7 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
default:
assert(0);
}
- radeon_add_to_buffer_list(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
if (fence_va)
@@ -918,7 +918,7 @@ static void emit_set_predicate(struct r600_common_context *ctx,
struct r600_resource *buf, uint64_t va,
uint32_t op)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
if (ctx->chip_class >= GFX9) {
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
@@ -930,7 +930,7 @@ static void emit_set_predicate(struct r600_common_context *ctx,
radeon_emit(cs, va);
radeon_emit(cs, op | ((va >> 32) & 0xFF));
}
- radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
RADEON_PRIO_QUERY);
}

View File

@@ -49,7 +49,7 @@ bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
unsigned src_level,
const struct pipe_box *src_box)
{
- if (!rctx->dma.cs)
+ if (!rctx->dma_cs)
return false;
if (rdst->surface.bpe != rsrc->surface.bpe)

View File

@@ -32,7 +32,7 @@ static void cik_sdma_copy_buffer(struct si_context *ctx,
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = ctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma_cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = r600_resource(dst);
struct r600_resource *rsrc = r600_resource(src);
@@ -73,7 +73,7 @@ static void cik_sdma_clear_buffer(struct pipe_context *ctx,
unsigned clear_value)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = r600_resource(dst);
@@ -230,7 +230,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
sctx->b.family != CHIP_KAVERI) ||
(srcx + copy_width != (1 << 14) &&
srcy + copy_height != (1 << 14)))) {
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
si_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);
@@ -392,7 +392,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
copy_width_aligned <= (1 << 14) &&
copy_height <= (1 << 14) &&
copy_depth <= (1 << 11)) {
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
uint32_t direction = linear == rdst ? 1u << 31 : 0;
si_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);
@@ -487,7 +487,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
(srcx + copy_width_aligned != (1 << 14) &&
srcy + copy_height_aligned != (1 << 14) &&
dstx + copy_width != (1 << 14)))) {
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
si_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);
@@ -531,7 +531,7 @@ static void cik_sdma_copy(struct pipe_context *ctx,
{
struct si_context *sctx = (struct si_context *)ctx;
- if (!sctx->b.dma.cs ||
+ if (!sctx->b.dma_cs ||
src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
dst->flags & PIPE_RESOURCE_FLAG_SPARSE)
goto fallback;

View File

@@ -293,7 +293,7 @@ static void si_set_global_binding(
static void si_initialize_compute(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint64_t bc_va;
radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
@@ -388,7 +388,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
const amd_kernel_code_t *code_object,
unsigned offset)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader_config inline_config = {0};
struct si_shader_config *config;
uint64_t shader_va;
@@ -438,7 +438,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
config->scratch_bytes_per_wave *
sctx->scratch_waves);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
shader->scratch_bo, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
}
@@ -462,7 +462,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
shader_va += sizeof(amd_kernel_code_t);
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, shader->bo,
RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
@@ -492,7 +492,7 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
const amd_kernel_code_t *code_object,
unsigned user_sgpr)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
unsigned max_private_element_size = AMD_HSA_BITS_GET(
@@ -537,7 +537,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
uint64_t kernel_args_va)
{
struct si_compute *program = sctx->cs_shader_state.program;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
static const enum amd_code_property_mask_t workgroup_count_masks [] = {
AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
@@ -586,7 +586,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
fprintf(stderr, "Error: Failed to allocate dispatch "
"packet.");
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, dispatch_buf,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
@@ -626,7 +626,7 @@ static bool si_upload_compute_input(struct si_context *sctx,
const amd_kernel_code_t *code_object,
const struct pipe_grid_info *info)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_compute *program = sctx->cs_shader_state.program;
struct r600_resource *input_buffer = NULL;
unsigned kernel_args_size;
@@ -669,7 +669,7 @@ static bool si_upload_compute_input(struct si_context *sctx,
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, input_buffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
if (code_object) {
@@ -690,7 +690,7 @@ static void si_setup_tgsi_grid(struct si_context *sctx,
const struct pipe_grid_info *info)
{
struct si_compute *program = sctx->cs_shader_state.program;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
4 * SI_NUM_RESOURCE_SGPRS;
unsigned block_size_reg = grid_size_reg +
@@ -703,7 +703,7 @@ static void si_setup_tgsi_grid(struct si_context *sctx,
uint64_t va = base_va + info->indirect_offset;
int i;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
@@ -737,7 +737,7 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
const struct pipe_grid_info *info)
{
struct si_screen *sscreen = sctx->screen;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
unsigned waves_per_threadgroup =
DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
@@ -774,7 +774,7 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
if (info->indirect) {
uint64_t base_va = r600_resource(info->indirect)->gpu_address;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource *)info->indirect,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
@@ -883,7 +883,7 @@ static void si_launch_grid(
if (!buffer) {
continue;
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_COMPUTE_GLOBAL);
}

View File

@@ -62,7 +62,7 @@ static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
uint64_t src_va, unsigned size, unsigned flags,
enum r600_coherency coher)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint32_t header = 0, command = 0;
assert(size);
@@ -175,11 +175,11 @@ static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst
/* This must be done after need_cs_space. */
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)dst,
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
if (src)
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)src,
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
}
@@ -228,7 +228,7 @@ void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
/* dma_clear_buffer can use clear_buffer on failure. Make sure that
* doesn't happen. We don't want an infinite recursion: */
- if (sctx->b.dma.cs &&
+ if (sctx->b.dma_cs &&
!(dst->flags & PIPE_RESOURCE_FLAG_SPARSE) &&
(offset % 4 == 0) &&
/* CP DMA is very slow. Always use SDMA for big clears. This
@@ -240,7 +240,7 @@ void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
* si_emit_framebuffer_state (in a draw call) adds them.
* For example, DeusEx:MD has 21 buffer clears per frame and all
* of them are moved to SDMA thanks to this. */
- !ws->cs_is_buffer_referenced(sctx->b.gfx.cs, rdst->buf,
+ !ws->cs_is_buffer_referenced(sctx->b.gfx_cs, rdst->buf,
RADEON_USAGE_READWRITE))) {
sctx->b.dma_clear_buffer(ctx, dst, offset, dma_clear_size, value);

View File

@@ -425,7 +425,7 @@ static void si_log_chunk_type_cs_print(void *data, FILE *f)
&last_trace_id, map ? 1 : 0, "IB", ctx->b.chip_class,
NULL, NULL);
} else {
- si_parse_current_ib(f, ctx->b.gfx.cs, chunk->gfx_begin,
+ si_parse_current_ib(f, ctx->b.gfx_cs, chunk->gfx_begin,
chunk->gfx_end, &last_trace_id, map ? 1 : 0,
"IB", ctx->b.chip_class);
}
@@ -450,7 +450,7 @@ static void si_log_cs(struct si_context *ctx, struct u_log_context *log,
assert(ctx->current_saved_cs);
struct si_saved_cs *scs = ctx->current_saved_cs;
- unsigned gfx_cur = ctx->b.gfx.cs->prev_dw + ctx->b.gfx.cs->current.cdw;
+ unsigned gfx_cur = ctx->b.gfx_cs->prev_dw + ctx->b.gfx_cs->current.cdw;
if (!dump_bo_list &&
gfx_cur == scs->gfx_last_dw)

View File

@@ -181,7 +181,7 @@ static bool si_upload_descriptors(struct si_context *sctx,
upload_size);
desc->gpu_list = ptr - first_slot_offset / 4;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, desc->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
/* The shader pointer should point to slot 0. */
@@ -202,7 +202,7 @@ si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc
if (!desc->buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, desc->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
}
@@ -926,7 +926,7 @@ void si_update_ps_colorbuf0_slot(struct si_context *sctx)
si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);
pipe_resource_reference(&buffers->buffers[slot], &tex->resource.b.b);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
&tex->resource, RADEON_USAGE_READ,
RADEON_PRIO_SHADER_RW_IMAGE);
buffers->enabled_mask |= 1u << slot;
@@ -1031,7 +1031,7 @@ static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
while (mask) {
int i = u_bit_scan(&mask);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
r600_resource(buffers->buffers[i]),
i < SI_NUM_SHADER_BUFFERS ? buffers->shader_usage :
buffers->shader_usage_constbuf,
@@ -1076,14 +1076,14 @@ static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
if (!sctx->vertex_buffer[vb].buffer.resource)
continue;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)sctx->vertex_buffer[vb].buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
if (!sctx->vb_descriptors_buffer)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
}
@@ -1124,7 +1124,7 @@ bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
}
sctx->vb_descriptors_gpu_list = ptr;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
@@ -1162,7 +1162,7 @@ bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
desc[3] = velems->rsrc_word3[i];
if (first_vb_use_mask & (1 << i)) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)vb->buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
@@ -1474,7 +1474,7 @@ void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);
pipe_resource_reference(&buffers->buffers[slot], buffer);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->enabled_mask |= 1u << slot;
@@ -1814,7 +1814,7 @@ static void si_upload_bindless_descriptor(struct si_context *sctx,
unsigned num_dwords)
{
struct si_descriptors *desc = &sctx->bindless_descriptors;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned desc_slot_offset = desc_slot * 16;
uint32_t *data;
uint64_t va;
@@ -2084,7 +2084,7 @@ static void si_emit_shader_pointer(struct si_context *sctx,
struct si_descriptors *desc,
unsigned sh_base)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned sh_offset = sh_base + desc->shader_userdata_offset;
si_emit_shader_pointer_head(cs, sh_offset, 1);
@@ -2098,7 +2098,7 @@ static void si_emit_consecutive_shader_pointers(struct si_context *sctx,
if (!sh_base)
return;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned mask = sctx->shader_pointers_dirty & pointer_mask;
while (mask) {
@@ -2122,7 +2122,7 @@ static void si_emit_disjoint_shader_pointers(struct si_context *sctx,
if (!sh_base)
return;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned mask = sctx->shader_pointers_dirty & pointer_mask;
while (mask) {
@@ -2190,7 +2190,7 @@ void si_emit_graphics_shader_pointers(struct si_context *sctx,
~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
if (sctx->vertex_buffer_pointer_dirty) {
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
/* Find the location of the VB descriptor pointer. */
/* TODO: In the future, the pointer will be packed in unused

View File

@@ -33,7 +33,7 @@ static void si_dma_copy_buffer(struct si_context *ctx,
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = ctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma_cs;
unsigned i, ncopy, count, max_size, sub_cmd, shift;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
@@ -82,7 +82,7 @@ static void si_dma_clear_buffer(struct pipe_context *ctx,
unsigned clear_value)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct radeon_winsys_cs *cs = sctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = sctx->b.dma_cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = r600_resource(dst);
@@ -130,7 +130,7 @@ static void si_dma_copy_tile(struct si_context *ctx,
unsigned pitch,
unsigned bpp)
{
- struct radeon_winsys_cs *cs = ctx->b.dma.cs;
+ struct radeon_winsys_cs *cs = ctx->b.dma_cs;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
@@ -231,7 +231,7 @@ static void si_dma_copy(struct pipe_context *ctx,
unsigned src_x, src_y;
unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
- if (sctx->b.dma.cs == NULL ||
+ if (sctx->b.dma_cs == NULL ||
src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
goto fallback;

View File

@@ -26,7 +26,7 @@
static void si_dma_emit_wait_idle(struct r600_common_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->dma.cs;
+ struct radeon_winsys_cs *cs = rctx->dma_cs;
/* NOP waits for idle on Evergreen and later. */
if (rctx->chip_class >= CIK)
@@ -38,8 +38,8 @@ static void si_dma_emit_wait_idle(struct r600_common_context *rctx)
void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
struct r600_resource *dst, struct r600_resource *src)
{
- uint64_t vram = ctx->dma.cs->used_vram;
- uint64_t gtt = ctx->dma.cs->used_gart;
+ uint64_t vram = ctx->dma_cs->used_vram;
+ uint64_t gtt = ctx->dma_cs->used_gart;
if (dst) {
vram += dst->vram_usage;
@@ -51,12 +51,12 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
}
/* Flush the GFX IB if DMA depends on it. */
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
+ if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
RADEON_USAGE_READWRITE)) ||
(src &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
RADEON_USAGE_WRITE))))
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
@@ -73,31 +73,31 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
* engine busy while uploads are being submitted.
*/
num_dw++; /* for emit_wait_idle below */
- if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
- ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
- !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
+ if (!ctx->ws->cs_check_space(ctx->dma_cs, num_dw) ||
+ ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
+ !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
+ assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
}
/* Wait for idle if either buffer has been used in the IB before to
* prevent read-after-write hazards.
*/
if ((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
RADEON_USAGE_READWRITE)) ||
(src &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, src->buf,
+ ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
RADEON_USAGE_WRITE)))
si_dma_emit_wait_idle(ctx);
if (dst) {
- radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
+ radeon_add_to_buffer_list(ctx, ctx->dma_cs, dst,
RADEON_USAGE_WRITE,
RADEON_PRIO_SDMA_BUFFER);
}
if (src) {
- radeon_add_to_buffer_list(ctx, &ctx->dma, src,
+ radeon_add_to_buffer_list(ctx, ctx->dma_cs, src,
RADEON_USAGE_READ,
RADEON_PRIO_SDMA_BUFFER);
}
@@ -109,7 +109,7 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct radeon_winsys_cs *cs = rctx->dma.cs;
+ struct radeon_winsys_cs *cs = rctx->dma_cs;
struct radeon_saved_cs saved;
bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));

View File

@@ -70,7 +70,7 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
struct r600_resource *buf, uint64_t va,
uint32_t new_fence, unsigned query_type)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
unsigned op = EVENT_TYPE(event) |
EVENT_INDEX(5) |
event_flags;
@@ -102,7 +102,7 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
radeon_emit(cs, scratch->gpu_address);
radeon_emit(cs, scratch->gpu_address >> 32);
- radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
@@ -131,7 +131,7 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
radeon_emit(cs, 0); /* immediate data */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
@@ -144,7 +144,7 @@ void si_gfx_write_event_eop(struct r600_common_context *ctx,
}
if (buf) {
- radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
}
}
@@ -163,7 +163,7 @@ unsigned si_gfx_write_fence_dwords(struct si_screen *screen)
void si_gfx_wait_fence(struct r600_common_context *ctx,
uint64_t va, uint32_t ref, uint32_t mask)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
@@ -179,9 +179,9 @@ static void si_add_fence_dependency(struct r600_common_context *rctx,
{
struct radeon_winsys *ws = rctx->ws;
- if (rctx->dma.cs)
- ws->cs_add_fence_dependency(rctx->dma.cs, fence);
- ws->cs_add_fence_dependency(rctx->gfx.cs, fence);
+ if (rctx->dma_cs)
+ ws->cs_add_fence_dependency(rctx->dma_cs, fence);
+ ws->cs_add_fence_dependency(rctx->gfx_cs, fence);
}
static void si_add_syncobj_signal(struct r600_common_context *rctx,
@@ -189,7 +189,7 @@ static void si_add_syncobj_signal(struct r600_common_context *rctx,
{
struct radeon_winsys *ws = rctx->ws;
- ws->cs_add_syncobj_signal(rctx->gfx.cs, fence);
+ ws->cs_add_syncobj_signal(rctx->gfx_cs, fence);
}
static void si_fence_reference(struct pipe_screen *screen,
@@ -265,10 +265,10 @@ static void si_fine_fence_set(struct si_context *ctx,
uint64_t fence_va = fine->buf->gpu_address + fine->offset;
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx, fine->buf,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, fine->buf,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
S_370_WR_CONFIRM(1) |
@@ -515,14 +515,14 @@ static void si_flush_from_st(struct pipe_context *ctx,
}
/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
- if (rctx->dma.cs)
+ if (rctx->dma_cs)
si_flush_dma_cs(rctx, rflags, fence ? &sdma_fence : NULL);
- if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
+ if (!radeon_emitted(rctx->gfx_cs, rctx->initial_gfx_cs_size)) {
if (fence)
ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
if (!(flags & PIPE_FLUSH_DEFERRED))
- ws->cs_sync_flush(rctx->gfx.cs);
+ ws->cs_sync_flush(rctx->gfx_cs);
} else {
/* Instead of flushing, create a deferred fence. Constraints:
* - The state tracker must allow a deferred flush.
@@ -533,7 +533,7 @@ static void si_flush_from_st(struct pipe_context *ctx,
if (flags & PIPE_FLUSH_DEFERRED &&
!(flags & PIPE_FLUSH_FENCE_FD) &&
fence) {
- gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx.cs);
+ gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx_cs);
deferred_fence = true;
} else {
si_flush_gfx_cs(rctx, rflags, fence ? &gfx_fence : NULL);
@@ -579,9 +579,9 @@ static void si_flush_from_st(struct pipe_context *ctx,
assert(!fine.buf);
finish:
if (!(flags & PIPE_FLUSH_DEFERRED)) {
- if (rctx->dma.cs)
- ws->cs_sync_flush(rctx->dma.cs);
- ws->cs_sync_flush(rctx->gfx.cs);
+ if (rctx->dma_cs)
+ ws->cs_sync_flush(rctx->dma_cs);
+ ws->cs_sync_flush(rctx->gfx_cs);
}
}

View File

@@ -29,7 +29,7 @@
/* initialize */
void si_need_gfx_cs_space(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
/* There is no need to flush the DMA IB here, because
* r600_need_dma_space always flushes the GFX IB if there is
@@ -42,7 +42,7 @@ void si_need_gfx_cs_space(struct si_context *ctx)
* that have been added (cs_add_buffer) and two counters in the pipe
* driver for those that haven't been added yet.
*/
- if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
+ if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx_cs,
ctx->b.vram, ctx->b.gtt))) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
@@ -67,7 +67,7 @@ void si_flush_gfx_cs(void *context, unsigned flags,
struct pipe_fence_handle **fence)
{
struct si_context *ctx = context;
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct radeon_winsys *ws = ctx->b.ws;
if (ctx->gfx_flush_in_progress)
@@ -87,7 +87,7 @@ void si_flush_gfx_cs(void *context, unsigned flags,
* This code is only needed when the driver flushes the GFX IB
* internally, and it never asks for a fence handle.
*/
- if (radeon_emitted(ctx->b.dma.cs, 0)) {
+ if (radeon_emitted(ctx->b.dma_cs, 0)) {
assert(fence == NULL); /* internal flushes only */
si_flush_dma_cs(ctx, flags, NULL);
}
@@ -175,7 +175,7 @@ static void si_begin_gfx_cs_debug(struct si_context *ctx)
si_trace_emit(ctx);
- radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx, ctx->current_saved_cs->trace_buf,
+ radeon_add_to_buffer_list(&ctx->b, ctx->b.gfx_cs, ctx->current_saved_cs->trace_buf,
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
@@ -275,8 +275,8 @@ void si_begin_new_gfx_cs(struct si_context *ctx)
if (!LIST_IS_EMPTY(&ctx->b.active_queries))
si_resume_queries(&ctx->b);
- assert(!ctx->b.gfx.cs->prev_dw);
- ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
+ assert(!ctx->b.gfx_cs->prev_dw);
+ ctx->b.initial_gfx_cs_size = ctx->b.gfx_cs->current.cdw;
/* Invalidate various draw states so that they are emitted before
* the first draw call. */

View File

@@ -426,7 +426,7 @@ static struct si_pc_block groups_gfx9[] = {
static void si_pc_emit_instance(struct r600_common_context *ctx,
int se, int instance)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
unsigned value = S_030800_SH_BROADCAST_WRITES(1);
if (se >= 0) {
@@ -447,7 +447,7 @@ static void si_pc_emit_instance(struct r600_common_context *ctx,
static void si_pc_emit_shaders(struct r600_common_context *ctx,
unsigned shaders)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
radeon_set_uconfig_reg_seq(cs, R_036780_SQ_PERFCOUNTER_CTRL, 2);
radeon_emit(cs, shaders & 0x7f);
@@ -460,7 +460,7 @@ static void si_pc_emit_select(struct r600_common_context *ctx,
{
struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
struct si_pc_block_base *regs = sigroup->b;
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
unsigned idx;
unsigned layout_multi = regs->layout & SI_PC_MULTI_MASK;
unsigned dw;
@@ -553,9 +553,9 @@ static void si_pc_emit_select(struct r600_common_context *ctx,
static void si_pc_emit_start(struct r600_common_context *ctx,
struct r600_resource *buffer, uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
- radeon_add_to_buffer_list(ctx, &ctx->gfx, buffer,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buffer,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
@@ -579,7 +579,7 @@ static void si_pc_emit_start(struct r600_common_context *ctx,
static void si_pc_emit_stop(struct r600_common_context *ctx,
struct r600_resource *buffer, uint64_t va)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
EOP_DATA_SEL_VALUE_32BIT,
@@ -602,7 +602,7 @@ static void si_pc_emit_read(struct r600_common_context *ctx,
{
struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
struct si_pc_block_base *regs = sigroup->b;
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->gfx_cs;
unsigned idx;
unsigned reg = regs->counter0_lo;
unsigned reg_delta = 8;

View File

@@ -285,7 +285,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
sctx->b.b.create_video_buffer = vl_video_buffer_create;
}
- sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
+ sctx->b.gfx_cs = ws->cs_create(sctx->b.ctx, RING_GFX,
si_flush_gfx_cs, sctx);
/* Border colors. */
@@ -340,7 +340,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
goto fail;
/* Initialize the memory. */
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
S_370_WR_CONFIRM(1) |

View File

@@ -123,10 +123,10 @@ void si_pm4_free_state(struct si_context *sctx,
void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
for (int i = 0; i < state->nbo; ++i) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, state->bo[i],
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, state->bo[i],
state->bo_usage[i], state->bo_priority[i]);
}
@@ -135,7 +135,7 @@ void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state)
} else {
struct r600_resource *ib = state->indirect_buffer;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, ib,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs, ib,
RADEON_USAGE_READ,
RADEON_PRIO_IB2);

View File

@@ -86,7 +86,7 @@ static unsigned si_pack_float_12p4(float x)
*/
static void si_emit_cb_render_state(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_state_blend *blend = sctx->queued.named.blend;
/* CB_COLORn_INFO.FORMAT=INVALID should disable unbound colorbuffers,
* but you never know. */
@@ -724,7 +724,7 @@ static void si_set_blend_color(struct pipe_context *ctx,
static void si_emit_blend_color(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
radeon_emit_array(cs, (uint32_t*)sctx->blend_color.state.color, 4);
@@ -758,7 +758,7 @@ static void si_set_clip_state(struct pipe_context *ctx,
static void si_emit_clip_state(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP_0_X, 6*4);
radeon_emit_array(cs, (uint32_t*)sctx->clip_state.state.ucp, 6*4);
@@ -766,7 +766,7 @@ static void si_emit_clip_state(struct si_context *sctx, struct r600_atom *atom)
static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader *vs = si_get_vs_state(sctx);
struct si_shader_selector *vs_sel = vs->selector;
struct tgsi_shader_info *info = &vs_sel->info;
@@ -1080,7 +1080,7 @@ static void si_delete_rs_state(struct pipe_context *ctx, void *state)
*/
static void si_emit_stencil_ref(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct pipe_stencil_ref *ref = &sctx->stencil_ref.state;
struct si_dsa_stencil_ref_part *dsa = &sctx->stencil_ref.dsa_part;
@@ -1372,7 +1372,7 @@ void si_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *state)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned db_shader_control;
@@ -2959,7 +2959,7 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
unsigned i, nr_cbufs = state->nr_cbufs;
struct r600_texture *tex = NULL;
@@ -2982,20 +2982,20 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
}
tex = (struct r600_texture *)cb->base.texture;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
&tex->resource, RADEON_USAGE_READWRITE,
tex->resource.b.b.nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
RADEON_PRIO_COLOR_BUFFER);
if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_CMASK);
}
if (tex->dcc_separate_buffer)
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
tex->dcc_separate_buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_DCC);
@@ -3132,7 +3132,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
struct r600_texture *rtex = (struct r600_texture*)zb->base.texture;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
&rtex->resource, RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
@@ -3209,7 +3209,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
static void si_emit_msaa_sample_locs(struct si_context *sctx,
struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned nr_samples = sctx->framebuffer.nr_samples;
bool has_msaa_sample_loc_bug = sctx->screen->has_msaa_sample_loc_bug;
@@ -3320,7 +3320,7 @@ static bool si_out_of_order_rasterization(struct si_context *sctx)
static void si_emit_msaa_config(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned num_tile_pipes = sctx->screen->info.num_tile_pipes;
/* 33% faster rendering to linear color buffers */
bool dst_is_linear = sctx->framebuffer.any_dst_linear;
@@ -4175,7 +4175,7 @@ static void si_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
static void si_emit_sample_mask(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned mask = sctx->sample_mask.sample_mask;
/* Needed for line and polygon smoothing as well as for the Polaris

View File

@@ -326,7 +326,7 @@ static struct uvec2 si_get_depth_bin_size(struct si_context *sctx)
static void si_emit_dpbb_disable(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
@@ -432,7 +432,7 @@ void si_emit_dpbb_state(struct si_context *sctx, struct r600_atom *state)
if (bin_size.y >= 32)
bin_size_extend.y = util_logbase2(bin_size.y) - 5;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
S_028C44_BIN_SIZE_X(bin_size.x == 16) |

View File

@@ -96,7 +96,7 @@ static void si_emit_derived_tess_state(struct si_context *sctx,
const struct pipe_draw_info *info,
unsigned *num_patches)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader *ls_current;
struct si_shader_selector *ls;
/* The TES pointer will only be used for sctx->last_tcs.
@@ -535,7 +535,7 @@ static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
@@ -575,7 +575,7 @@ static void si_emit_vs_state(struct si_context *sctx,
}
if (sctx->current_vs_state != sctx->last_vs_state) {
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_sh_reg(cs,
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
@@ -590,7 +590,7 @@ static void si_emit_draw_registers(struct si_context *sctx,
const struct pipe_draw_info *info,
unsigned num_patches)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
unsigned ia_multi_vgt_param;
@@ -650,7 +650,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
unsigned index_offset)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
uint32_t index_max_size = 0;
@@ -674,7 +674,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
t->buf_filled_size, RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
}
@@ -719,7 +719,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
index_size;
index_va = r600_resource(indexbuf)->gpu_address + index_offset;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource *)indexbuf,
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else {
@@ -742,7 +742,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
radeon_emit(cs, indirect_va);
radeon_emit(cs, indirect_va >> 32);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
(struct r600_resource *)indirect->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
@@ -776,7 +776,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
(struct r600_resource *)indirect->indirect_draw_count;
radeon_add_to_buffer_list(
- &sctx->b, &sctx->b.gfx, params_buf,
+ &sctx->b, sctx->b.gfx_cs, params_buf,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
@@ -852,7 +852,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
static void si_emit_surface_sync(struct r600_common_context *rctx,
unsigned cp_coher_cntl)
{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->gfx_cs;
if (rctx->chip_class >= GFX9) {
/* Flush caches and wait for the caches to assert idle. */
@@ -876,7 +876,7 @@ static void si_emit_surface_sync(struct r600_common_context *rctx,
void si_emit_cache_flush(struct si_context *sctx)
{
struct r600_common_context *rctx = &sctx->b;
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->gfx_cs;
uint32_t cp_coher_cntl = 0;
uint32_t flush_cb_db = rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
SI_CONTEXT_FLUSH_AND_INV_DB);
@@ -1557,7 +1557,7 @@ void si_draw_rectangle(struct blitter_context *blitter,
void si_trace_emit(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint64_t va = sctx->current_saved_cs->trace_buf->gpu_address;
uint32_t trace_id = ++sctx->current_saved_cs->trace_id;

View File

@@ -2601,7 +2601,7 @@ static unsigned si_get_ps_input_cntl(struct si_context *sctx,
static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader *ps = sctx->ps_shader.current;
struct si_shader *vs = si_get_vs_state(sctx);
struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
@@ -3332,13 +3332,13 @@ bool si_update_shaders(struct si_context *sctx)
static void si_emit_scratch_state(struct si_context *sctx,
struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
sctx->spi_tmpring_size);
if (sctx->scratch_buffer) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
sctx->scratch_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
}

View File

@@ -231,7 +231,7 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
static void si_flush_vgt_streamout(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned reg_strmout_cntl;
/* The register is at different places on different ASICs. */
@@ -258,7 +258,7 @@ static void si_flush_vgt_streamout(struct si_context *sctx)
static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
{
struct si_context *sctx = (struct si_context*)rctx;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_streamout_target **t = sctx->streamout.targets;
uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
unsigned i;
@@ -292,7 +292,7 @@ static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r60
radeon_emit(cs, va); /* src address lo */
radeon_emit(cs, va >> 32); /* src address hi */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
t[i]->buf_filled_size,
RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
@@ -313,7 +313,7 @@ static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r60
void si_emit_streamout_end(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_streamout_target **t = sctx->streamout.targets;
unsigned i;
uint64_t va;
@@ -334,7 +334,7 @@ void si_emit_streamout_end(struct si_context *sctx)
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
t[i]->buf_filled_size,
RADEON_USAGE_WRITE,
RADEON_PRIO_SO_FILLED_SIZE);
@@ -363,14 +363,14 @@ static void si_emit_streamout_enable(struct r600_common_context *rctx,
{
struct si_context *sctx = (struct si_context*)rctx;
- radeon_set_context_reg_seq(sctx->b.gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
- radeon_emit(sctx->b.gfx.cs,
+ radeon_set_context_reg_seq(sctx->b.gfx_cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
+ radeon_emit(sctx->b.gfx_cs,
S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
S_028B94_RAST_STREAM(0) |
S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
- radeon_emit(sctx->b.gfx.cs,
+ radeon_emit(sctx->b.gfx_cs,
sctx->streamout.hw_enabled_mask &
sctx->streamout.enabled_stream_buffers_mask);
}

View File

@@ -140,7 +140,7 @@ static void si_emit_one_scissor(struct si_context *ctx,
static void si_emit_guardband(struct si_context *ctx,
struct si_signed_scissor *vp_as_scissor)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_viewport_state vp;
float left, top, right, bottom, max_range, guardband_x, guardband_y;
float discard_x, discard_y;
@@ -214,7 +214,7 @@ static void si_emit_guardband(struct si_context *ctx,
static void si_emit_scissors(struct r600_common_context *rctx, struct r600_atom *atom)
{
struct si_context *ctx = (struct si_context *)rctx;
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_scissor_state *states = ctx->scissors.states;
unsigned mask = ctx->scissors.dirty_mask;
bool scissor_enabled = false;
@@ -288,7 +288,7 @@ static void si_set_viewport_states(struct pipe_context *pctx,
static void si_emit_one_viewport(struct si_context *ctx,
struct pipe_viewport_state *state)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
radeon_emit(cs, fui(state->scale[0]));
radeon_emit(cs, fui(state->translate[0]));
@@ -300,7 +300,7 @@ static void si_emit_one_viewport(struct si_context *ctx,
static void si_emit_viewports(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_viewport_state *states = ctx->viewports.states;
unsigned mask = ctx->viewports.dirty_mask;
@@ -342,7 +342,7 @@ si_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
static void si_emit_depth_ranges(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
struct pipe_viewport_state *states = ctx->viewports.states;
unsigned mask = ctx->viewports.depth_range_dirty_mask;
bool clip_halfz = false;