freedreno: driver-thread annotations

Use clang's thread-safety annotations to implement a virtual lock
protecting context fields that should only be accessed from driver-
thread.  This should let the compiler help us detect problems where
ctx is used unsafely from things that could be called by the fe/st
thread.

This does end up sprinkled far and wide; it would be nice if the
compiler could be a bit smarter about understanding call-graphs
(at least with static functions), but at least it makes it clear where
things are called from which thread.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9061>
This commit is contained in:
Rob Clark 2021-02-12 12:42:01 -08:00 committed by Marge Bot
parent e7e19c6a4f
commit 3f77604623
72 changed files with 471 additions and 149 deletions

View File

@ -38,6 +38,7 @@
static void
fd2_context_destroy(struct pipe_context *pctx)
in_dt
{
fd_context_destroy(pctx);
free(pctx);

View File

@ -53,6 +53,7 @@ emit_cacheflush(struct fd_ringbuffer *ring)
static void
emit_vertexbufs(struct fd_context *ctx)
assert_dt
{
struct fd_vertex_stateobj *vtx = ctx->vtx.vtx;
struct fd_vertexbuf_stateobj *vertexbuf = &ctx->vtx.vertexbuf;
@ -80,8 +81,9 @@ emit_vertexbufs(struct fd_context *ctx)
static void
draw_impl(struct fd_context *ctx, const struct pipe_draw_info *info,
const struct pipe_draw_start_count *draw,
struct fd_ringbuffer *ring, unsigned index_offset, bool binning)
const struct pipe_draw_start_count *draw,
struct fd_ringbuffer *ring, unsigned index_offset, bool binning)
assert_dt
{
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
OUT_RING(ring, CP_REG(REG_A2XX_VGT_INDX_OFFSET));
@ -156,6 +158,7 @@ fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *pinfo,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *pdraw,
unsigned index_offset)
assert_dt
{
if (!ctx->prog.fs || !ctx->prog.vs)
return false;
@ -213,7 +216,8 @@ fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *pinfo,
static void
clear_state(struct fd_batch *batch, struct fd_ringbuffer *ring,
unsigned buffers, bool fast_clear)
unsigned buffers, bool fast_clear)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
@ -400,6 +404,7 @@ clear_fast(struct fd_batch *batch, struct fd_ringbuffer *ring,
static bool
fd2_clear_fast(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
assert_dt
{
/* using 4x MSAA allows clearing ~2x faster
* then we can use higher bpp clearing to clear lower bpp
@ -512,6 +517,7 @@ fd2_clear_fast(struct fd_context *ctx, unsigned buffers,
static bool
fd2_clear(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
assert_dt
{
struct fd_ringbuffer *ring = ctx->batch->draw;
struct pipe_framebuffer_state *fb = &ctx->batch->framebuffer;
@ -626,6 +632,7 @@ dirty:
void
fd2_draw_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd2_draw_vbo;

View File

@ -40,8 +40,8 @@ struct fd2_vertex_buf {
void fd2_emit_vertex_bufs(struct fd_ringbuffer *ring, uint32_t val,
struct fd2_vertex_buf *vbufs, uint32_t n);
void fd2_emit_state_binning(struct fd_context *ctx, const enum fd_dirty_3d_state dirty);
void fd2_emit_state(struct fd_context *ctx, const enum fd_dirty_3d_state dirty);
void fd2_emit_state_binning(struct fd_context *ctx, const enum fd_dirty_3d_state dirty) assert_dt;
void fd2_emit_state(struct fd_context *ctx, const enum fd_dirty_3d_state dirty) assert_dt;
void fd2_emit_restore(struct fd_context *ctx, struct fd_ringbuffer *ring);
void fd2_emit_init_screen(struct pipe_screen *pscreen);

View File

@ -133,6 +133,7 @@ emit_gmem2mem_surf(struct fd_batch *batch, uint32_t base,
static void
prepare_tile_fini_ib(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
@ -274,6 +275,7 @@ emit_mem2gmem_surf(struct fd_batch *batch, uint32_t base,
static void
fd2_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
@ -474,6 +476,7 @@ fd2_emit_sysmem_prep(struct fd_batch *batch)
/* before first tile */
static void
fd2_emit_tile_init(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
@ -678,6 +681,7 @@ fd2_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
/* before IB to rendering cmds: */
static void
fd2_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
@ -739,6 +743,7 @@ fd2_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
void
fd2_gmem_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -166,7 +166,8 @@ fd2_vp_state_delete(struct pipe_context *pctx, void *hwcso)
static void
patch_vtx_fetch(struct fd_context *ctx, struct pipe_vertex_element *elem,
instr_fetch_vtx_t *instr, uint16_t dst_swiz)
instr_fetch_vtx_t *instr, uint16_t dst_swiz)
assert_dt
{
struct surface_format fmt = fd2_pipe2surface(elem->src_format);
@ -181,7 +182,8 @@ patch_vtx_fetch(struct fd_context *ctx, struct pipe_vertex_element *elem,
static void
patch_fetches(struct fd_context *ctx, struct ir2_shader_info *info,
struct fd_vertex_stateobj *vtx, struct fd_texture_stateobj *tex)
struct fd_vertex_stateobj *vtx, struct fd_texture_stateobj *tex)
assert_dt
{
for (int i = 0; i < info->num_fetch_instrs; i++) {
struct ir2_fetch_info *fi = &info->fetch_info[i];

View File

@ -63,7 +63,7 @@ struct fd2_shader_stateobj {
};
void fd2_program_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd_program_stateobj *prog);
struct fd_program_stateobj *prog) assert_dt;
void fd2_prog_init(struct pipe_context *pctx);

View File

@ -74,6 +74,7 @@ struct fd_batch_query_data {
static void
perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
@ -113,6 +114,7 @@ perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
perfcntr_pause(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
@ -234,6 +236,7 @@ error:
void
fd2_query_context_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -126,6 +126,7 @@ static void
fd2_sampler_states_bind(struct pipe_context *pctx,
enum pipe_shader_type shader, unsigned start,
unsigned nr, void **hwcso)
in_dt
{
if (!hwcso)
nr = 0;
@ -214,6 +215,7 @@ static void
fd2_set_sampler_views(struct pipe_context *pctx, enum pipe_shader_type shader,
unsigned start, unsigned nr, unsigned unbind_num_trailing_slots,
struct pipe_sampler_view **views)
in_dt
{
if (shader == PIPE_SHADER_FRAGMENT) {
struct fd_context *ctx = fd_context(pctx);
@ -243,6 +245,7 @@ fd2_set_sampler_views(struct pipe_context *pctx, enum pipe_shader_type shader,
unsigned
fd2_get_const_idx(struct fd_context *ctx, struct fd_texture_stateobj *tex,
unsigned samp_id)
assert_dt
{
if (tex == &ctx->tex[PIPE_SHADER_FRAGMENT])
return samp_id;

View File

@ -39,6 +39,7 @@
static void
fd3_context_destroy(struct pipe_context *pctx)
in_dt
{
struct fd3_context *fd3_ctx = fd3_context(fd_context(pctx));

View File

@ -54,6 +54,7 @@ add_sat(uint32_t a, int32_t b)
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd3_emit *emit, unsigned index_offset)
assert_dt
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
@ -93,6 +94,7 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
assert_dt
{
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct ir3_shader_key *last_key = &fd3_ctx->last_key;
@ -117,6 +119,7 @@ fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset)
in_dt
{
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd3_emit emit = {
@ -178,6 +181,7 @@ fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
void
fd3_draw_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd3_draw_vbo;

View File

@ -88,12 +88,12 @@ fd3_emit_get_fp(struct fd3_emit *emit)
return emit->fs;
}
void fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit);
void fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit) assert_dt;
void fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd3_emit *emit);
struct fd3_emit *emit) assert_dt;
void fd3_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);
void fd3_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
void fd3_emit_init_screen(struct pipe_screen *pscreen);
void fd3_emit_init(struct pipe_context *pctx);
@ -106,6 +106,7 @@ fd3_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
static inline void
fd3_emit_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
assert_dt
{
fd_wfi(batch, ring);
OUT_PKT0(ring, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);

View File

@ -159,6 +159,7 @@ use_hw_binning(struct fd_batch *batch)
static void update_vsc_pipe(struct fd_batch *batch);
static void
emit_binning_workaround(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -353,6 +354,7 @@ emit_gmem2mem_surf(struct fd_batch *batch,
static void
fd3_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
@ -532,6 +534,7 @@ emit_mem2gmem_surf(struct fd_batch *batch, const uint32_t bases[],
static void
fd3_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -717,6 +720,7 @@ patch_rbrc(struct fd_batch *batch, uint32_t val)
/* for rendering directly to system memory: */
static void
fd3_emit_sysmem_prep(struct fd_batch *batch)
assert_dt
{
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring = batch->gmem;
@ -761,6 +765,7 @@ fd3_emit_sysmem_prep(struct fd_batch *batch)
static void
update_vsc_pipe(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -791,6 +796,7 @@ update_vsc_pipe(struct fd_batch *batch)
static void
emit_binning_pass(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -919,6 +925,7 @@ emit_binning_pass(struct fd_batch *batch)
/* before first tile */
static void
fd3_emit_tile_init(struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -972,6 +979,7 @@ fd3_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
/* before IB to rendering cmds: */
static void
fd3_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
@ -1050,6 +1058,7 @@ fd3_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
void
fd3_gmem_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -135,6 +135,7 @@ static const struct fd_hw_sample_provider occlusion_predicate_conservative = {
};
void fd3_query_context_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -39,6 +39,7 @@
static void
fd4_context_destroy(struct pipe_context *pctx)
in_dt
{
struct fd4_context *fd4_ctx = fd4_context(fd_context(pctx));

View File

@ -43,6 +43,7 @@
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd4_emit *emit, unsigned index_offset)
assert_dt
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
@ -77,6 +78,7 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
assert_dt
{
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct ir3_shader_key *last_key = &fd4_ctx->last_key;
@ -101,6 +103,7 @@ fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset)
in_dt
{
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd4_emit emit = {
@ -183,6 +186,7 @@ fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
void
fd4_draw_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd4_draw_vbo;

View File

@ -96,12 +96,12 @@ fd4_emit_get_fp(struct fd4_emit *emit)
return emit->fs;
}
void fd4_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd4_emit *emit);
void fd4_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd4_emit *emit) assert_dt;
void fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd4_emit *emit);
struct fd4_emit *emit) assert_dt;
void fd4_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);
void fd4_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
void fd4_emit_init_screen(struct pipe_screen *pscreen);
void fd4_emit_init(struct pipe_context *pctx);

View File

@ -185,6 +185,7 @@ emit_gmem2mem_surf(struct fd_batch *batch, bool stencil,
static void
fd4_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -320,6 +321,7 @@ emit_mem2gmem_surf(struct fd_batch *batch, const uint32_t *bases,
static void
fd4_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -512,6 +514,7 @@ patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
/* for rendering directly to system memory: */
static void
fd4_emit_sysmem_prep(struct fd_batch *batch)
assert_dt
{
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring = batch->gmem;
@ -548,6 +551,7 @@ fd4_emit_sysmem_prep(struct fd_batch *batch)
static void
update_vsc_pipe(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -584,6 +588,7 @@ update_vsc_pipe(struct fd_batch *batch)
static void
emit_binning_pass(struct fd_batch *batch)
assert_dt
{
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -649,6 +654,7 @@ emit_binning_pass(struct fd_batch *batch)
/* before first tile */
static void
fd4_emit_tile_init(struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -741,6 +747,7 @@ fd4_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
/* before IB to rendering cmds: */
static void
fd4_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd4_context *fd4_ctx = fd4_context(ctx);
@ -799,6 +806,7 @@ fd4_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
void
fd4_gmem_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -110,6 +110,7 @@ occlusion_predicate_accumulate_result(struct fd_context *ctx,
static void
time_elapsed_enable(struct fd_context *ctx, struct fd_ringbuffer *ring)
assert_dt
{
/* Right now, the assignment of countable to counter register is
* just hard coded. If we start exposing more countables than we
@ -125,6 +126,7 @@ time_elapsed_enable(struct fd_context *ctx, struct fd_ringbuffer *ring)
static struct fd_hw_sample *
time_elapsed_get_sample(struct fd_batch *batch, struct fd_ringbuffer *ring)
assert_dt
{
struct fd_hw_sample *samp = fd_hw_sample_init(batch, sizeof(uint64_t));
@ -279,6 +281,7 @@ static const struct fd_hw_sample_provider timestamp = {
};
void fd4_query_context_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -433,6 +433,7 @@ emit_blit(struct fd_ringbuffer *ring, const struct pipe_blit_info *info)
bool
fd5_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
assert_dt
{
struct fd_batch *batch;

View File

@ -116,6 +116,7 @@ cs_program_emit(struct fd_ringbuffer *ring, struct ir3_shader_variant *v,
static void
fd5_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
assert_dt
{
struct ir3_shader_key key = {};
struct ir3_shader_variant *v;
@ -192,6 +193,7 @@ fd5_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
void
fd5_compute_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->launch_grid = fd5_launch_grid;

View File

@ -41,6 +41,7 @@
static void
fd5_context_destroy(struct pipe_context *pctx)
in_dt
{
struct fd5_context *fd5_ctx = fd5_context(fd_context(pctx));
@ -70,6 +71,7 @@ static const uint8_t primtypes[] = {
struct pipe_context *
fd5_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
disable_thread_safety_analysis
{
struct fd_screen *screen = fd_screen(pscreen);
struct fd5_context *fd5_ctx = CALLOC_STRUCT(fd5_context);

View File

@ -43,6 +43,7 @@
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd5_emit *emit, unsigned index_offset)
assert_dt
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
@ -72,6 +73,7 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
assert_dt
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct ir3_shader_key *last_key = &fd5_ctx->last_key;
@ -96,6 +98,7 @@ fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset)
in_dt
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct fd5_emit emit = {
@ -261,6 +264,7 @@ fd5_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
static bool
fd5_clear(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
assert_dt
{
struct fd_ringbuffer *ring = ctx->batch->draw;
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
@ -372,6 +376,7 @@ fd5_clear(struct fd_context *ctx, unsigned buffers,
void
fd5_draw_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd5_draw_vbo;

View File

@ -309,6 +309,7 @@ setup_border_colors(struct fd_texture_stateobj *tex, struct bcolor_entry *entrie
static void
emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
assert_dt
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct bcolor_entry *entries;
@ -338,6 +339,7 @@ emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
static bool
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
enum a4xx_state_block sb, struct fd_texture_stateobj *tex)
assert_dt
{
bool needs_border = false;
unsigned bcolor_offset = (sb == SB4_FS_TEX) ? ctx->tex[PIPE_SHADER_VERTEX].num_samplers : 0;

View File

@ -106,6 +106,7 @@ fd5_emit_get_fp(struct fd5_emit *emit)
static inline void
fd5_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
assert_dt
{
fd_reset_wfi(batch);
OUT_PKT4(ring, REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO, 5);
@ -150,6 +151,7 @@ fd5_emit_blit(struct fd_context *ctx, struct fd_ringbuffer *ring)
static inline void
fd5_emit_render_cntl(struct fd_context *ctx, bool blit, bool binning)
assert_dt
{
struct fd_ringbuffer *ring = binning ? ctx->batch->binning : ctx->batch->draw;
@ -191,17 +193,17 @@ fd5_emit_lrz_flush(struct fd_ringbuffer *ring)
OUT_RING(ring, 0x0);
}
void fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit);
void fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit) assert_dt;
void fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd5_emit *emit);
struct fd5_emit *emit) assert_dt;
void fd5_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct ir3_shader_variant *cp);
struct ir3_shader_variant *cp) assert_dt;
void fd5_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx, const struct pipe_grid_info *info);
struct fd_context *ctx, const struct pipe_grid_info *info) assert_dt;
void fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);
void fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
void fd5_emit_init_screen(struct pipe_screen *pscreen);
void fd5_emit_init(struct pipe_context *pctx);

View File

@ -257,6 +257,7 @@ patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
static void
update_vsc_pipe(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd5_context *fd5_ctx = fd5_context(ctx);
@ -299,6 +300,7 @@ update_vsc_pipe(struct fd_batch *batch)
static void
emit_binning_pass(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
@ -363,6 +365,7 @@ emit_binning_pass(struct fd_batch *batch)
/* before first tile */
static void
fd5_emit_tile_init(struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -408,6 +411,7 @@ fd5_emit_tile_init(struct fd_batch *batch)
/* before mem2gmem */
static void
fd5_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -678,6 +682,7 @@ fd5_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
static void
fd5_emit_tile_fini(struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
@ -692,6 +697,7 @@ fd5_emit_tile_fini(struct fd_batch *batch)
static void
fd5_emit_sysmem_prep(struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
@ -789,6 +795,7 @@ fd5_emit_sysmem_fini(struct fd_batch *batch)
void
fd5_gmem_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -162,6 +162,7 @@ static const struct fd_acc_sample_provider occlusion_predicate_conservative = {
static void
timestamp_resume(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
@ -176,6 +177,7 @@ timestamp_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
timestamp_pause(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
@ -271,6 +273,7 @@ struct fd_batch_query_data {
static void
perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
@ -311,6 +314,7 @@ perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
perfcntr_pause(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
@ -446,6 +450,7 @@ error:
void
fd5_query_context_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -432,6 +432,7 @@ emit_blit_buffer(struct fd_context *ctx, struct fd_ringbuffer *ring,
static void
fd6_clear_ubwc(struct fd_batch *batch, struct fd_resource *rsc)
assert_dt
{
struct fd_ringbuffer *ring = fd_batch_get_prologue(batch);
union pipe_color_union color = {};
@ -859,6 +860,7 @@ fd6_resolve_tile(struct fd_batch *batch, struct fd_ringbuffer *ring,
static bool
handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
assert_dt
{
struct fd_batch *batch;
@ -934,6 +936,7 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
*/
static bool
do_rewritten_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
assert_dt
{
bool success = handle_rgba_blit(ctx, info);
if (!success)
@ -948,6 +951,7 @@ do_rewritten_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
*/
static bool
handle_zs_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
assert_dt
{
struct pipe_blit_info blit = *info;
@ -1027,6 +1031,7 @@ handle_zs_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
static bool
handle_compressed_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
assert_dt
{
struct pipe_blit_info blit = *info;
@ -1074,6 +1079,7 @@ handle_compressed_blit(struct fd_context *ctx, const struct pipe_blit_info *info
static bool
fd6_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
assert_dt
{
if (info->mask & PIPE_MASK_ZS)
return handle_zs_blit(ctx, info);
@ -1086,6 +1092,7 @@ fd6_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
void
fd6_blitter_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
fd_context(pctx)->clear_ubwc = fd6_clear_ubwc;

View File

@ -42,8 +42,8 @@ unsigned fd6_tile_mode(const struct pipe_resource *tmpl);
void fd6_clear_surface(struct fd_context *ctx,
struct fd_ringbuffer *ring, struct pipe_surface *psurf,
uint32_t width, uint32_t height, union pipe_color_union *color);
uint32_t width, uint32_t height, union pipe_color_union *color) assert_dt;
void fd6_resolve_tile(struct fd_batch *batch, struct fd_ringbuffer *ring,
uint32_t base, struct pipe_surface *psurf);
uint32_t base, struct pipe_surface *psurf) assert_dt;
#endif /* FD6_BLIT_H_ */

View File

@ -41,7 +41,7 @@
/* maybe move to fd6_program? */
static void
cs_program_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct ir3_shader_variant *v)
struct ir3_shader_variant *v) assert_dt
{
const struct ir3_info *i = &v->info;
enum a3xx_threadsize thrsz = FOUR_QUADS;
@ -100,6 +100,7 @@ cs_program_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
static void
fd6_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
in_dt
{
struct ir3_shader_key key = {};
struct ir3_shader_variant *v;
@ -186,6 +187,7 @@ fd6_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
void
fd6_compute_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->launch_grid = fd6_launch_grid;

View File

@ -131,6 +131,7 @@ emit_const_ptrs(struct fd_ringbuffer *ring,
static void
emit_tess_bos(struct fd_ringbuffer *ring, struct fd6_emit *emit, struct ir3_shader_variant *s)
assert_dt
{
struct fd_context *ctx = emit->ctx;
const struct ir3_const_state *const_state = ir3_const_state(s);
@ -159,6 +160,7 @@ emit_stage_tess_consts(struct fd_ringbuffer *ring, struct ir3_shader_variant *v,
static void
emit_tess_consts(struct fd6_emit *emit)
assert_dt
{
struct fd_context *ctx = emit->ctx;
@ -312,6 +314,7 @@ user_consts_cmdstream_size(struct ir3_shader_variant *v)
static void
emit_user_consts(struct fd6_emit *emit)
assert_dt
{
static const enum pipe_shader_type types[] = {
PIPE_SHADER_VERTEX, PIPE_SHADER_TESS_CTRL, PIPE_SHADER_TESS_EVAL,

View File

@ -28,15 +28,15 @@
#include "fd6_emit.h"
void fd6_emit_consts(struct fd6_emit *emit);
void fd6_emit_consts(struct fd6_emit *emit) assert_dt;
void fd6_emit_ibo_consts(struct fd6_emit *emit, const struct ir3_shader_variant *v,
enum pipe_shader_type stage, struct fd_ringbuffer *ring);
enum pipe_shader_type stage, struct fd_ringbuffer *ring) assert_dt;
void fd6_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx, const struct pipe_grid_info *info);
struct fd_context *ctx, const struct pipe_grid_info *info) assert_dt;
void fd6_emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
struct fd_ringbuffer *ring);
struct fd_ringbuffer *ring) assert_dt;
void fd6_emit_link_map(struct fd_screen *screen,
const struct ir3_shader_variant *producer,
const struct ir3_shader_variant *v, struct fd_ringbuffer *ring);
const struct ir3_shader_variant *v, struct fd_ringbuffer *ring) assert_dt;
#endif /* FD6_CONST_H */

View File

@ -43,6 +43,7 @@
static void
fd6_context_destroy(struct pipe_context *pctx)
in_dt
{
struct fd6_context *fd6_ctx = fd6_context(fd_context(pctx));
@ -127,6 +128,7 @@ fd6_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
struct pipe_context *
fd6_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
disable_thread_safety_analysis
{
struct fd_screen *screen = fd_screen(pscreen);
struct fd6_context *fd6_ctx = CALLOC_STRUCT(fd6_context);

View File

@ -134,6 +134,7 @@ draw_emit(struct fd_ringbuffer *ring,
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
assert_dt
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct ir3_shader_key *last_key = &fd6_ctx->last_key;
@ -155,6 +156,7 @@ fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
static void
fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit)
assert_dt
{
if (ctx->last.dirty ||
(ctx->last.primitive_restart != emit->primitive_restart)) {
@ -169,6 +171,7 @@ fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset)
assert_dt
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct shader_info *gs_info = ir3_get_shader_info(ctx->prog.gs);
@ -511,6 +514,7 @@ static bool is_z32(enum pipe_format format)
static bool
fd6_clear(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
assert_dt
{
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
const bool has_depth = pfb->zsbuf;
@ -550,6 +554,7 @@ fd6_clear(struct fd_context *ctx, unsigned buffers,
void
fd6_draw_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd6_draw_vbo;

View File

@ -216,6 +216,7 @@ setup_border_colors(struct fd_texture_stateobj *tex, struct bcolor_entry *entrie
static void
emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
assert_dt
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct bcolor_entry *entries;
@ -244,6 +245,7 @@ emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
static void
fd6_emit_fb_tex(struct fd_ringbuffer *state, struct fd_context *ctx)
assert_dt
{
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
struct pipe_surface *psurf = pfb->cbufs[0];
@ -478,6 +480,7 @@ fd6_emit_textures(struct fd_pipe *pipe, struct fd_ringbuffer *ring,
static bool
fd6_emit_combined_textures(struct fd_ringbuffer *ring, struct fd6_emit *emit,
enum pipe_shader_type type, const struct ir3_shader_variant *v)
assert_dt
{
struct fd_context *ctx = emit->ctx;
bool needs_border = false;
@ -550,6 +553,7 @@ fd6_emit_combined_textures(struct fd_ringbuffer *ring, struct fd6_emit *emit,
static struct fd_ringbuffer *
build_vbo_state(struct fd6_emit *emit)
assert_dt
{
const struct fd_vertex_state *vtx = emit->vtx;
@ -580,6 +584,7 @@ build_vbo_state(struct fd6_emit *emit)
static enum a6xx_ztest_mode
compute_ztest_mode(struct fd6_emit *emit, bool lrz_valid)
assert_dt
{
struct fd_context *ctx = emit->ctx;
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
@ -613,6 +618,7 @@ compute_ztest_mode(struct fd6_emit *emit, bool lrz_valid)
*/
static struct fd6_lrz_state
compute_lrz_state(struct fd6_emit *emit, bool binning_pass)
assert_dt
{
struct fd_context *ctx = emit->ctx;
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
@ -685,6 +691,7 @@ compute_lrz_state(struct fd6_emit *emit, bool binning_pass)
static struct fd_ringbuffer *
build_lrz(struct fd6_emit *emit, bool binning_pass)
assert_dt
{
struct fd_context *ctx = emit->ctx;
struct fd6_context *fd6_ctx = fd6_context(ctx);
@ -723,7 +730,9 @@ build_lrz(struct fd6_emit *emit, bool binning_pass)
}
static void
fd6_emit_streamout(struct fd_ringbuffer *ring, struct fd6_emit *emit, struct ir3_stream_output_info *info)
fd6_emit_streamout(struct fd_ringbuffer *ring, struct fd6_emit *emit,
struct ir3_stream_output_info *info)
assert_dt
{
struct fd_context *ctx = emit->ctx;
const struct fd6_program_state *prog = fd6_emit_get_prog(emit);
@ -1336,6 +1345,7 @@ fd6_mem_to_mem(struct fd_ringbuffer *ring, struct pipe_resource *dst,
*/
static void
fd6_framebuffer_barrier(struct fd_context *ctx)
assert_dt
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct fd_batch *batch = ctx->batch;
@ -1375,6 +1385,7 @@ fd6_emit_init_screen(struct pipe_screen *pscreen)
void
fd6_emit_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->framebuffer_barrier = fd6_framebuffer_barrier;

View File

@ -274,12 +274,12 @@ fd6_gl2spacing(enum gl_tess_spacing spacing)
bool fd6_emit_textures(struct fd_pipe *pipe, struct fd_ringbuffer *ring,
enum pipe_shader_type type, struct fd_texture_stateobj *tex,
unsigned bcolor_offset,
const struct ir3_shader_variant *v, struct fd_context *ctx);
const struct ir3_shader_variant *v, struct fd_context *ctx) assert_dt;
void fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit);
void fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit) assert_dt;
void fd6_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct ir3_shader_variant *cp);
struct ir3_shader_variant *cp) assert_dt;
void fd6_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);

View File

@ -534,6 +534,7 @@ set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
static void
emit_binning_pass(struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
@ -648,6 +649,7 @@ static void prepare_tile_fini_ib(struct fd_batch *batch);
/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -1176,6 +1178,7 @@ emit_resolve_blit(struct fd_batch *batch,
uint32_t base,
struct pipe_surface *psurf,
unsigned buffer)
assert_dt
{
uint32_t info = 0;
bool stencil = false;
@ -1222,6 +1225,7 @@ emit_resolve_blit(struct fd_batch *batch,
static void
prepare_tile_fini_ib(struct fd_batch *batch)
assert_dt
{
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -1327,6 +1331,7 @@ fd6_emit_tile_fini(struct fd_batch *batch)
static void
emit_sysmem_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -1408,6 +1413,7 @@ setup_tess_buffers(struct fd_batch *batch, struct fd_ringbuffer *ring)
static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct fd_screen *screen = batch->ctx->screen;
@ -1495,6 +1501,7 @@ fd6_emit_sysmem_fini(struct fd_batch *batch)
void
fd6_gmem_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -30,12 +30,12 @@
#include "freedreno_context.h"
void fd6_emit_image_tex(struct fd_ringbuffer *ring, const struct pipe_image_view *pimg);
void fd6_emit_ssbo_tex(struct fd_ringbuffer *ring, const struct pipe_shader_buffer *pbuf);
void fd6_emit_image_tex(struct fd_ringbuffer *ring, const struct pipe_image_view *pimg) assert_dt;
void fd6_emit_ssbo_tex(struct fd_ringbuffer *ring, const struct pipe_shader_buffer *pbuf) assert_dt;
struct ir3_shader_variant;
struct fd_ringbuffer * fd6_build_ibo_state(struct fd_context *ctx,
const struct ir3_shader_variant *v, enum pipe_shader_type shader);
const struct ir3_shader_variant *v, enum pipe_shader_type shader) assert_dt;
void fd6_image_init(struct pipe_context *pctx);

View File

@ -300,6 +300,7 @@ static void
setup_stateobj(struct fd_ringbuffer *ring, struct fd_context *ctx,
struct fd6_program_state *state, const struct ir3_shader_key *key,
bool binning_pass)
assert_dt
{
uint32_t pos_regid, psize_regid, color_regid[8], posz_regid;
uint32_t clip0_regid, clip1_regid;
@ -1065,8 +1066,9 @@ fd6_program_create(void *data, struct ir3_shader_variant *bs,
struct ir3_shader_variant *gs,
struct ir3_shader_variant *fs,
const struct ir3_shader_key *key)
in_dt
{
struct fd_context *ctx = data;
struct fd_context *ctx = fd_context(data);
struct fd6_program_state *state = CALLOC_STRUCT(fd6_program_state);
/* if we have streamout, use full VS in binning pass, as the

View File

@ -76,9 +76,9 @@ fd6_last_shader(const struct fd6_program_state *state)
}
void fd6_emit_shader(struct fd_context *ctx, struct fd_ringbuffer *ring,
const struct ir3_shader_variant *so);
const struct ir3_shader_variant *so) assert_dt;
struct fd_ringbuffer * fd6_program_interp_state(struct fd6_emit *emit);
struct fd_ringbuffer * fd6_program_interp_state(struct fd6_emit *emit) assert_dt;
void fd6_prog_init(struct pipe_context *pctx);

View File

@ -77,6 +77,7 @@ occlusion_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
occlusion_pause(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
@ -173,6 +174,7 @@ timestamp_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
time_elapsed_pause(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
@ -329,6 +331,7 @@ log_counters(struct fd6_primitives_sample *ps)
static void
primitives_generated_resume(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
@ -345,6 +348,7 @@ primitives_generated_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
primitives_generated_pause(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
@ -390,6 +394,7 @@ static const struct fd_acc_sample_provider primitives_generated = {
static void
primitives_emitted_resume(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
@ -402,6 +407,7 @@ primitives_emitted_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
primitives_emitted_pause(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
@ -464,6 +470,7 @@ struct fd_batch_query_data {
static void
perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
@ -504,6 +511,7 @@ perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
perfcntr_pause(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
@ -639,6 +647,7 @@ error:
void
fd6_query_context_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -54,6 +54,7 @@ struct fd_ringbuffer * __fd6_setup_rasterizer_stateobj(struct fd_context *ctx,
static inline struct fd_ringbuffer *
fd6_rasterizer_state(struct fd_context *ctx, bool primitive_restart)
assert_dt
{
struct fd6_rasterizer_stateobj *rasterizer = fd6_rasterizer_stateobj(ctx->rasterizer);
unsigned variant = primitive_restart;

View File

@ -111,6 +111,7 @@ can_do_ubwc(struct pipe_resource *prsc)
void
fd6_validate_format(struct fd_context *ctx, struct fd_resource *rsc,
enum pipe_format format)
in_dt /* TODO this will be re-worked with threaded-ctx, this is just temporary */
{
if (!rsc->layout.ubwc)
return;

View File

@ -498,6 +498,7 @@ __fd6_texture_state_destroy(struct fd6_texture_state *state)
static void
fd6_rebind_resource(struct fd_context *ctx, struct fd_resource *rsc)
assert_dt
{
fd_screen_assert_locked(ctx->screen);
@ -520,6 +521,7 @@ fd6_rebind_resource(struct fd_context *ctx, struct fd_resource *rsc)
void
fd6_texture_init(struct pipe_context *pctx)
disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
struct fd6_context *fd6_ctx = fd6_context(ctx);

View File

@ -93,6 +93,7 @@ fd6_tex_type(unsigned target)
static inline unsigned
fd6_border_color_offset(struct fd_context *ctx, enum pipe_shader_type type,
struct fd_texture_stateobj *tex)
assert_dt
{
/* Currently we put the FS border-color state after VS. Possibly
* we could swap the order.
@ -154,7 +155,7 @@ struct fd6_texture_state {
};
struct fd6_texture_state * fd6_texture_state(struct fd_context *ctx,
enum pipe_shader_type type, struct fd_texture_stateobj *tex);
enum pipe_shader_type type, struct fd_texture_stateobj *tex) assert_dt;
/* not called directly: */
void __fd6_texture_state_describe(char* buf, const struct fd6_texture_state *tex);

View File

@ -64,6 +64,7 @@ fd6_zsa_stateobj(struct pipe_depth_stencil_alpha_state *zsa)
static inline struct fd_ringbuffer *
fd6_zsa_state(struct fd_context *ctx, bool no_alpha, bool depth_clamp)
assert_dt
{
int variant = 0;
if (no_alpha)

View File

@ -226,6 +226,7 @@ batch_fini(struct fd_batch *batch)
static void
batch_flush_dependencies(struct fd_batch *batch)
assert_dt
{
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct fd_batch *dep;
@ -268,6 +269,7 @@ batch_reset_resources_locked(struct fd_batch *batch)
static void
batch_reset_resources(struct fd_batch *batch)
assert_dt
{
fd_screen_lock(batch->ctx->screen);
batch_reset_resources_locked(batch);
@ -276,6 +278,7 @@ batch_reset_resources(struct fd_batch *batch)
static void
batch_reset(struct fd_batch *batch)
assert_dt
{
DBG("%p", batch);
@ -343,6 +346,7 @@ fd_batch_get_prologue(struct fd_batch *batch)
/* Only called from fd_batch_flush() */
static void
batch_flush(struct fd_batch *batch)
assert_dt
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
@ -430,6 +434,7 @@ fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
static void
flush_write_batch(struct fd_resource *rsc)
assert_dt
{
struct fd_batch *b = NULL;
fd_batch_reference_locked(&b, rsc->write_batch);

View File

@ -249,15 +249,15 @@ struct fd_batch {
struct fd_batch * fd_batch_create(struct fd_context *ctx, bool nondraw);
void fd_batch_reset(struct fd_batch *batch);
void fd_batch_flush(struct fd_batch *batch);
void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
void fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc);
void fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc);
void fd_batch_check_size(struct fd_batch *batch);
void fd_batch_reset(struct fd_batch *batch) assert_dt;
void fd_batch_flush(struct fd_batch *batch) assert_dt;
void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep) assert_dt;
void fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc) assert_dt;
void fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc) assert_dt;
void fd_batch_check_size(struct fd_batch *batch) assert_dt;
/* not called directly: */
void __fd_batch_describe(char* buf, const struct fd_batch *batch);
void __fd_batch_describe(char* buf, const struct fd_batch *batch) assert_dt;
void __fd_batch_destroy(struct fd_batch *batch);
/*
@ -331,6 +331,7 @@ fd_batch_lock_submit(struct fd_batch *batch)
* the batch before each draw.
*/
static inline void fd_batch_update_queries(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
@ -339,6 +340,7 @@ static inline void fd_batch_update_queries(struct fd_batch *batch)
}
static inline void fd_batch_finish_queries(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
@ -352,7 +354,7 @@ fd_reset_wfi(struct fd_batch *batch)
batch->needs_wfi = true;
}
void fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring);
void fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
/* emit a CP_EVENT_WRITE:
*/

View File

@ -132,6 +132,7 @@ fd_bc_fini(struct fd_batch_cache *cache)
static void
bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
assert_dt
{
/* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
* can cause batches to be unref'd and freed under our feet, so grab
@ -318,6 +319,7 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
static struct fd_batch *
alloc_batch_locked(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw)
assert_dt
{
struct fd_batch *batch;
uint32_t idx;
@ -415,6 +417,7 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool non
static struct fd_batch *
batch_from_key(struct fd_batch_cache *cache, struct fd_batch_key *key,
struct fd_context *ctx)
assert_dt
{
struct fd_batch *batch = NULL;
uint32_t hash = key_hash(key);

View File

@ -29,6 +29,8 @@
#include "pipe/p_state.h"
#include "freedreno_util.h"
struct fd_resource;
struct fd_batch;
struct fd_context;
@ -63,16 +65,16 @@ struct fd_batch_cache {
void fd_bc_init(struct fd_batch_cache *cache);
void fd_bc_fini(struct fd_batch_cache *cache);
void fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx);
void fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx);
void fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx) assert_dt;
void fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx) assert_dt;
void fd_bc_dump(struct fd_screen *screen, const char *fmt, ...) _util_printf_format(2, 3);
void fd_bc_invalidate_context(struct fd_context *ctx);
void fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy);
void fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy);
struct fd_batch * fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw);
struct fd_batch * fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw) assert_dt;
struct fd_batch * fd_batch_from_fb(struct fd_batch_cache *cache,
struct fd_context *ctx, const struct pipe_framebuffer_state *pfb);
struct fd_context *ctx, const struct pipe_framebuffer_state *pfb) assert_dt;
#endif /* FREEDRENO_BATCH_CACHE_H_ */

View File

@ -78,6 +78,7 @@ default_src_texture(struct pipe_sampler_view *src_templ,
static void
fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard)
assert_dt
{
fd_fence_ref(&ctx->last_fence, NULL);
@ -118,6 +119,7 @@ fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard)
static void
fd_blitter_pipe_end(struct fd_context *ctx)
assert_dt
{
ctx->in_discard_blit = false;
}
@ -303,6 +305,7 @@ fd_blitter_pipe_copy_region(struct fd_context *ctx,
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box)
assert_dt
{
/* not until we allow rendertargets to be buffers */
if (dst->target == PIPE_BUFFER || src->target == PIPE_BUFFER)

View File

@ -31,11 +31,11 @@
#include "freedreno_context.h"
bool fd_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info);
bool fd_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info) assert_dt;
void
fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil);
const union pipe_color_union *color, double depth, unsigned stencil) assert_dt;
void fd_resource_copy_region(struct pipe_context *pctx,
struct pipe_resource *dst,
@ -43,8 +43,8 @@ void fd_resource_copy_region(struct pipe_context *pctx,
unsigned dstx, unsigned dsty, unsigned dstz,
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box);
const struct pipe_box *src_box) assert_dt;
bool fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info);
bool fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info) assert_dt;
#endif /* FREEDRENO_BLIT_H_ */

View File

@ -41,6 +41,7 @@
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
unsigned flags)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_fence_handle *fence = NULL;
@ -112,6 +113,7 @@ out:
static void
fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
in_dt
{
if (flags == PIPE_TEXTURE_BARRIER_FRAMEBUFFER) {
struct fd_context *ctx = fd_context(pctx);
@ -189,6 +191,7 @@ fd_emit_string5(struct fd_ringbuffer *ring,
*/
static void
fd_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
@ -375,6 +378,11 @@ fd_get_device_reset_status(struct pipe_context *pctx)
int global_faults = fd_get_reset_count(ctx, false);
enum pipe_reset_status status;
/* Not called in driver thread, but threaded_context syncs
* before calling this:
*/
fd_context_access_begin(ctx);
if (context_faults != ctx->context_reset_count) {
status = PIPE_GUILTY_CONTEXT_RESET;
} else if (global_faults != ctx->global_reset_count) {
@ -386,6 +394,8 @@ fd_get_device_reset_status(struct pipe_context *pctx)
ctx->context_reset_count = context_faults;
ctx->global_reset_count = global_faults;
fd_context_access_end(ctx);
return status;
}
@ -509,6 +519,7 @@ fd_context_cleanup_common_vbos(struct fd_context *ctx)
struct pipe_context *
fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
const uint8_t *primtypes, void *priv, unsigned flags)
disable_thread_safety_analysis
{
struct fd_screen *screen = fd_screen(pscreen);
struct pipe_context *pctx;

View File

@ -195,43 +195,43 @@ struct fd_context {
struct fd_screen *screen;
struct fd_pipe *pipe;
struct blitter_context *blitter;
void *clear_rs_state[2];
struct primconvert_context *primconvert;
struct blitter_context *blitter dt;
void *clear_rs_state[2] dt;
struct primconvert_context *primconvert dt;
/* slab for pipe_transfer allocations: */
struct slab_child_pool transfer_pool;
struct slab_child_pool transfer_pool dt;
/**
* query related state:
*/
/*@{*/
/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
struct slab_mempool sample_pool;
struct slab_mempool sample_period_pool;
struct slab_mempool sample_pool dt;
struct slab_mempool sample_period_pool dt;
/* sample-providers for hw queries: */
const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];
/* list of active queries: */
struct list_head hw_active_queries;
struct list_head hw_active_queries dt;
/* sample-providers for accumulating hw queries: */
const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];
/* list of active accumulating queries: */
struct list_head acc_active_queries;
struct list_head acc_active_queries dt;
/*@}*/
/* Whether we need to recheck the active_queries list next
* fd_batch_update_queries().
*/
bool update_active_queries;
bool update_active_queries dt;
/* Current state of pctx->set_active_query_state() (i.e. "should drawing
* be counted against non-perfcounter queries")
*/
bool active_queries;
bool active_queries dt;
/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
* DI_PT_x value to use for draw initiator. There are some
@ -257,7 +257,7 @@ struct fd_context {
uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
uint64_t staging_uploads, shadow_uploads;
uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;
} stats;
} stats dt;
/* Current batch.. the rule here is that you can deref ctx->batch
* in codepaths from pipe_context entrypoints. But not in code-
@ -265,13 +265,13 @@ struct fd_context {
* called from GMEM code), since in those code-paths the batch
* you care about is not necessarily the same as ctx->batch.
*/
struct fd_batch *batch;
struct fd_batch *batch dt;
/* NULL if there has been rendering since last flush. Otherwise
* keeps a reference to the last fence so we can re-use it rather
* than having to flush no-op batch.
*/
struct pipe_fence_handle *last_fence;
struct pipe_fence_handle *last_fence dt;
/* Fence fd we are told to wait on via ->fence_server_sync() (or -1
* if none). The in-fence is transferred over to the batch on the
@ -286,7 +286,7 @@ struct fd_context {
* maturely, causing us to stall early in the frame where we could
* be building up cmdstream.
*/
int in_fence_fd;
int in_fence_fd dt;
/* track last known reset status globally and per-context to
* determine if more resets occurred since then. If global reset
@ -294,7 +294,8 @@ struct fd_context {
* per-context reset count increases, it means we crashed the
* gpu.
*/
uint32_t context_reset_count, global_reset_count;
uint32_t context_reset_count dt;
uint32_t global_reset_count dt;
/* Context sequence #, used for batch-cache key: */
uint16_t seqno;
@ -302,64 +303,64 @@ struct fd_context {
/* Are we in process of shadowing a resource? Used to detect recursion
* in transfer_map, and skip unneeded synchronization.
*/
bool in_shadow : 1;
bool in_shadow : 1 dt;
/* Ie. in blit situation where we no longer care about previous framebuffer
* contents. Main point is to eliminate blits from fd_try_shadow_resource().
* For example, in case of texture upload + gen-mipmaps.
*/
bool in_discard_blit : 1;
bool in_discard_blit : 1 dt;
/* points to either scissor or disabled_scissor depending on rast state: */
struct pipe_scissor_state *current_scissor;
struct pipe_scissor_state *current_scissor dt;
struct pipe_scissor_state scissor;
struct pipe_scissor_state scissor dt;
/* we don't have a disable/enable bit for scissor, so instead we keep
* a disabled-scissor state which matches the entire bound framebuffer
* and use that when scissor is not enabled.
*/
struct pipe_scissor_state disabled_scissor;
struct pipe_scissor_state disabled_scissor dt;
/* Per vsc pipe bo's (a2xx-a5xx): */
struct fd_bo *vsc_pipe_bo[32];
struct fd_bo *vsc_pipe_bo[32] dt;
/* which state objects need to be re-emit'd: */
enum fd_dirty_3d_state dirty;
enum fd_dirty_3d_state dirty dt;
/* per shader-stage dirty status: */
enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];
enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES] dt;
void *compute;
struct pipe_blend_state *blend;
struct pipe_rasterizer_state *rasterizer;
struct pipe_depth_stencil_alpha_state *zsa;
void *compute dt;
struct pipe_blend_state *blend dt;
struct pipe_rasterizer_state *rasterizer dt;
struct pipe_depth_stencil_alpha_state *zsa dt;
struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];
struct fd_texture_stateobj tex[PIPE_SHADER_TYPES] dt;
struct fd_program_stateobj prog;
struct fd_program_stateobj prog dt;
struct fd_vertex_state vtx;
struct fd_vertex_state vtx dt;
struct pipe_blend_color blend_color;
struct pipe_stencil_ref stencil_ref;
unsigned sample_mask;
unsigned min_samples;
struct pipe_blend_color blend_color dt;
struct pipe_stencil_ref stencil_ref dt;
unsigned sample_mask dt;
unsigned min_samples dt;
/* local context fb state, for when ctx->batch is null: */
struct pipe_framebuffer_state framebuffer;
struct pipe_poly_stipple stipple;
struct pipe_viewport_state viewport;
struct pipe_scissor_state viewport_scissor;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
struct fd_streamout_stateobj streamout;
struct fd_global_bindings_stateobj global_bindings;
struct pipe_clip_state ucp;
struct pipe_framebuffer_state framebuffer dt;
struct pipe_poly_stipple stipple dt;
struct pipe_viewport_state viewport dt;
struct pipe_scissor_state viewport_scissor dt;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES] dt;
struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES] dt;
struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES] dt;
struct fd_streamout_stateobj streamout dt;
struct fd_global_bindings_stateobj global_bindings dt;
struct pipe_clip_state ucp dt;
struct pipe_query *cond_query;
bool cond_cond; /* inverted rendering condition */
uint cond_mode;
struct pipe_query *cond_query dt;
bool cond_cond dt; /* inverted rendering condition */
uint cond_mode dt;
/* Private memory is a memory space where each fiber gets its own piece of
* memory, in addition to registers. It is backed by a buffer which needs
@ -380,52 +381,52 @@ struct fd_context {
struct {
struct fd_bo *bo;
uint32_t per_fiber_size;
} pvtmem[2];
} pvtmem[2] dt;
struct pipe_debug_callback debug;
struct u_trace_context trace_context;
struct u_trace_context trace_context dt;
/* Called on rebind_resource() for any per-gen cleanup required: */
void (*rebind_resource)(struct fd_context *ctx, struct fd_resource *rsc);
void (*rebind_resource)(struct fd_context *ctx, struct fd_resource *rsc) dt;
/* GMEM/tile handling fxns: */
void (*emit_tile_init)(struct fd_batch *batch);
void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
void (*emit_tile_init)(struct fd_batch *batch) dt;
void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile) dt;
void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile) dt;
void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile) dt;
void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile) dt;
void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile) dt;
void (*emit_tile_fini)(struct fd_batch *batch) dt; /* optional */
/* optional, for GMEM bypass: */
void (*emit_sysmem_prep)(struct fd_batch *batch);
void (*emit_sysmem_fini)(struct fd_batch *batch);
void (*emit_sysmem_prep)(struct fd_batch *batch) dt;
void (*emit_sysmem_fini)(struct fd_batch *batch) dt;
/* draw: */
bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset);
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset) dt;
bool (*clear)(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil);
const union pipe_color_union *color, double depth, unsigned stencil) dt;
/* compute: */
void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);
void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info) dt;
/* query: */
struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type, unsigned index);
void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles) dt;
void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
struct fd_ringbuffer *ring);
void (*query_update_batch)(struct fd_batch *batch, bool disable_all);
struct fd_ringbuffer *ring) dt;
void (*query_update_batch)(struct fd_batch *batch, bool disable_all) dt;
/* blitter: */
bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
void (*clear_ubwc)(struct fd_batch *batch, struct fd_resource *rsc);
bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info) dt;
void (*clear_ubwc)(struct fd_batch *batch, struct fd_resource *rsc) dt;
/* handling for barriers: */
void (*framebuffer_barrier)(struct fd_context *ctx);
void (*framebuffer_barrier)(struct fd_context *ctx) dt;
/* logger: */
void (*record_timestamp)(struct fd_ringbuffer *ring, struct fd_bo *bo, unsigned offset);
@ -465,7 +466,7 @@ struct fd_context {
uint32_t instance_start;
uint32_t restart_index;
uint32_t streamout_mask;
} last;
} last dt;
};
static inline struct fd_context *
@ -483,6 +484,7 @@ fd_stream_output_target(struct pipe_stream_output_target *target)
/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
assert_dt
{
ctx->last.dirty = true;
ctx->dirty = ~0;
@ -492,6 +494,7 @@ fd_context_all_dirty(struct fd_context *ctx)
static inline void
fd_context_all_clean(struct fd_context *ctx)
assert_dt
{
ctx->last.dirty = false;
ctx->dirty = 0;
@ -509,6 +512,7 @@ fd_context_all_clean(struct fd_context *ctx)
static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
assert_dt
{
return ctx->current_scissor;
}
@ -519,10 +523,10 @@ fd_supported_prim(struct fd_context *ctx, unsigned prim)
return (1 << prim) & ctx->primtype_mask;
}
void fd_context_switch_from(struct fd_context *ctx);
void fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch);
struct fd_batch * fd_context_batch(struct fd_context *ctx);
struct fd_batch * fd_context_batch_locked(struct fd_context *ctx);
void fd_context_switch_from(struct fd_context *ctx) assert_dt;
void fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch) assert_dt;
struct fd_batch * fd_context_batch(struct fd_context *ctx) assert_dt;
struct fd_batch * fd_context_batch_locked(struct fd_context *ctx) assert_dt;
void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);
@ -533,6 +537,6 @@ struct pipe_context * fd_context_init(struct fd_context *ctx,
struct pipe_screen *pscreen, const uint8_t *primtypes,
void *priv, unsigned flags);
void fd_context_destroy(struct pipe_context *pctx);
void fd_context_destroy(struct pipe_context *pctx) assert_dt;
#endif /* FREEDRENO_CONTEXT_H_ */

View File

@ -44,6 +44,7 @@
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
assert_dt
{
if (!prsc)
return;
@ -52,6 +53,7 @@ resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
assert_dt
{
if (!prsc)
return;
@ -60,6 +62,7 @@ resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
static void
batch_draw_tracking_for_dirty_bits(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -191,6 +194,7 @@ batch_draw_tracking_for_dirty_bits(struct fd_batch *batch)
static void
batch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect)
assert_dt
{
struct fd_context *ctx = batch->ctx;
@ -233,6 +237,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draws,
unsigned num_draws)
in_dt
{
if (num_draws > 1) {
struct pipe_draw_info tmp_info = *info;
@ -376,6 +381,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
static void
batch_clear_tracking(struct fd_batch *batch, unsigned buffers)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
@ -428,6 +434,7 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *color, double depth,
unsigned stencil)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
@ -514,6 +521,7 @@ fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_COMPUTE];

View File

@ -49,6 +49,8 @@ struct pipe_fence_handle {
};
static void fence_flush(struct pipe_fence_handle *fence)
/* TODO this will change w/ threaded-ctx where we need to use threaded_context_flush().. */
in_dt
{
if (fence->batch)
fd_batch_flush(fence->batch);

View File

@ -574,6 +574,7 @@ found:
static void
render_tiles(struct fd_batch *batch, struct fd_gmem_stateobj *gmem)
assert_dt
{
struct fd_context *ctx = batch->ctx;
int i;
@ -624,6 +625,7 @@ render_tiles(struct fd_batch *batch, struct fd_gmem_stateobj *gmem)
static void
render_sysmem(struct fd_batch *batch)
assert_dt
{
struct fd_context *ctx = batch->ctx;

View File

@ -87,7 +87,7 @@ struct fd_gmem_cache {
struct fd_batch;
void fd_gmem_render_tiles(struct fd_batch *batch);
void fd_gmem_render_tiles(struct fd_batch *batch) assert_dt;
unsigned fd_gmem_estimate_bins_per_pipe(struct fd_batch *batch);
bool fd_gmem_needs_restore(struct fd_batch *batch, const struct fd_tile *tile,
uint32_t buffers);

View File

@ -34,6 +34,7 @@
static void
fd_vs_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.vs = hwcso;
@ -43,6 +44,7 @@ fd_vs_state_bind(struct pipe_context *pctx, void *hwcso)
static void
fd_tcs_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.hs = hwcso;
@ -52,6 +54,7 @@ fd_tcs_state_bind(struct pipe_context *pctx, void *hwcso)
static void
fd_tes_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.ds = hwcso;
@ -61,6 +64,7 @@ fd_tes_state_bind(struct pipe_context *pctx, void *hwcso)
static void
fd_gs_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.gs = hwcso;
@ -70,6 +74,7 @@ fd_gs_state_bind(struct pipe_context *pctx, void *hwcso)
static void
fd_fs_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.fs = hwcso;

View File

@ -53,6 +53,7 @@ fd_create_query(struct pipe_context *pctx, unsigned query_type, unsigned index)
static void
fd_destroy_query(struct pipe_context *pctx, struct pipe_query *pq)
in_dt
{
struct fd_query *q = fd_query(pq);
q->funcs->destroy_query(fd_context(pctx), q);
@ -60,6 +61,7 @@ fd_destroy_query(struct pipe_context *pctx, struct pipe_query *pq)
static bool
fd_begin_query(struct pipe_context *pctx, struct pipe_query *pq)
in_dt
{
struct fd_query *q = fd_query(pq);
@ -70,6 +72,7 @@ fd_begin_query(struct pipe_context *pctx, struct pipe_query *pq)
static bool
fd_end_query(struct pipe_context *pctx, struct pipe_query *pq)
in_dt
{
struct fd_query *q = fd_query(pq);
@ -98,6 +101,7 @@ fd_get_query_result(struct pipe_context *pctx, struct pipe_query *pq,
static void
fd_render_condition(struct pipe_context *pctx, struct pipe_query *pq,
bool condition, enum pipe_render_cond_flag mode)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->cond_query = pq;
@ -177,6 +181,7 @@ fd_get_driver_query_group_info(struct pipe_screen *pscreen, unsigned index,
static void
fd_set_active_query_state(struct pipe_context *pctx, bool enable)
assert_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->active_queries = enable;

View File

@ -29,14 +29,16 @@
#include "pipe/p_context.h"
#include "freedreno_util.h"
struct fd_context;
struct fd_query;
struct fd_query_funcs {
void (*destroy_query)(struct fd_context *ctx,
struct fd_query *q);
void (*begin_query)(struct fd_context *ctx, struct fd_query *q);
void (*end_query)(struct fd_context *ctx, struct fd_query *q);
struct fd_query *q) dt;
void (*begin_query)(struct fd_context *ctx, struct fd_query *q) dt;
void (*end_query)(struct fd_context *ctx, struct fd_query *q) dt;
bool (*get_query_result)(struct fd_context *ctx,
struct fd_query *q, bool wait,
union pipe_query_result *result);

View File

@ -34,6 +34,7 @@
static void
fd_acc_destroy_query(struct fd_context *ctx, struct fd_query *q)
assert_dt
{
struct fd_acc_query *aq = fd_acc_query(q);
@ -69,6 +70,7 @@ realloc_query_bo(struct fd_context *ctx, struct fd_acc_query *aq)
static void
fd_acc_query_pause(struct fd_acc_query *aq)
assert_dt
{
const struct fd_acc_sample_provider *p = aq->provider;
@ -81,6 +83,7 @@ fd_acc_query_pause(struct fd_acc_query *aq)
static void
fd_acc_query_resume(struct fd_acc_query *aq, struct fd_batch *batch)
assert_dt
{
const struct fd_acc_sample_provider *p = aq->provider;
@ -94,6 +97,7 @@ fd_acc_query_resume(struct fd_acc_query *aq, struct fd_batch *batch)
static void
fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
assert_dt
{
struct fd_acc_query *aq = fd_acc_query(q);
@ -122,6 +126,7 @@ fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
static void
fd_acc_end_query(struct fd_context *ctx, struct fd_query *q)
assert_dt
{
struct fd_acc_query *aq = fd_acc_query(q);
@ -158,8 +163,11 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
* wait to flush unnecessarily but we also don't want to
* spin forever:
*/
if (aq->no_wait_cnt++ > 5)
if (aq->no_wait_cnt++ > 5) {
fd_context_access_begin(ctx);
fd_batch_flush(rsc->write_batch);
fd_context_access_end(ctx);
}
return false;
}
@ -171,8 +179,11 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
fd_bo_cpu_fini(rsc->bo);
}
if (rsc->write_batch)
if (rsc->write_batch) {
fd_context_access_begin(ctx);
fd_batch_flush(rsc->write_batch);
fd_context_access_end(ctx);
}
/* get the result: */
fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);

View File

@ -64,8 +64,8 @@ struct fd_acc_sample_provider {
unsigned size;
void (*resume)(struct fd_acc_query *aq, struct fd_batch *batch);
void (*pause)(struct fd_acc_query *aq, struct fd_batch *batch);
void (*resume)(struct fd_acc_query *aq, struct fd_batch *batch) dt;
void (*pause)(struct fd_acc_query *aq, struct fd_batch *batch) dt;
void (*result)(struct fd_acc_query *aq, void *buf,
union pipe_query_result *result);
@ -106,7 +106,7 @@ struct fd_query * fd_acc_create_query(struct fd_context *ctx, unsigned query_typ
unsigned index);
struct fd_query * fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
unsigned index, const struct fd_acc_sample_provider *provider);
void fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all);
void fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all) assert_dt;
void fd_acc_query_register_provider(struct pipe_context *pctx,
const struct fd_acc_sample_provider *provider);

View File

@ -41,6 +41,7 @@ struct fd_hw_sample_period {
static struct fd_hw_sample *
get_sample(struct fd_batch *batch, struct fd_ringbuffer *ring,
unsigned query_type)
assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_hw_sample *samp = NULL;
@ -80,6 +81,7 @@ query_active_in_batch(struct fd_batch *batch, struct fd_hw_query *hq)
static void
resume_query(struct fd_batch *batch, struct fd_hw_query *hq,
struct fd_ringbuffer *ring)
assert_dt
{
int idx = pidx(hq->provider->query_type);
DBG("%p", hq);
@ -97,6 +99,7 @@ resume_query(struct fd_batch *batch, struct fd_hw_query *hq,
static void
pause_query(struct fd_batch *batch, struct fd_hw_query *hq,
struct fd_ringbuffer *ring)
assert_dt
{
ASSERTED int idx = pidx(hq->provider->query_type);
DBG("%p", hq);
@ -136,6 +139,7 @@ fd_hw_destroy_query(struct fd_context *ctx, struct fd_query *q)
static void
fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q)
assert_dt
{
struct fd_batch *batch = fd_context_batch_locked(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
@ -158,6 +162,7 @@ fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q)
static void
fd_hw_end_query(struct fd_context *ctx, struct fd_query *q)
assert_dt
{
struct fd_batch *batch = fd_context_batch_locked(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
@ -214,8 +219,11 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
* wait to flush unnecessarily but we also don't want to
* spin forever:
*/
if (hq->no_wait_cnt++ > 5)
if (hq->no_wait_cnt++ > 5) {
fd_context_access_begin(ctx);
fd_batch_flush(rsc->write_batch);
fd_context_access_end(ctx);
}
return false;
}
@ -242,8 +250,11 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
struct fd_resource *rsc = fd_resource(start->prsc);
if (rsc->write_batch)
if (rsc->write_batch) {
fd_context_access_begin(ctx);
fd_batch_flush(rsc->write_batch);
fd_context_access_end(ctx);
}
/* some piglit tests at least do query with no draws, I guess: */
if (!rsc->bo)

View File

@ -77,13 +77,13 @@ struct fd_hw_sample_provider {
/* Optional hook for enabling a counter. Guaranteed to happen
* at least once before the first ->get_sample() in a batch.
*/
void (*enable)(struct fd_context *ctx, struct fd_ringbuffer *ring);
void (*enable)(struct fd_context *ctx, struct fd_ringbuffer *ring) dt;
/* when a new sample is required, emit appropriate cmdstream
* and return a sample object:
*/
struct fd_hw_sample *(*get_sample)(struct fd_batch *batch,
struct fd_ringbuffer *ring);
struct fd_ringbuffer *ring) dt;
/* accumulate the results from specified sample period: */
void (*accumulate_result)(struct fd_context *ctx,
@ -141,11 +141,11 @@ struct fd_query * fd_hw_create_query(struct fd_context *ctx, unsigned query_type
struct fd_hw_sample * fd_hw_sample_init(struct fd_batch *batch, uint32_t size);
/* don't call directly, use fd_hw_sample_reference() */
void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles);
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles) assert_dt;
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
struct fd_ringbuffer *ring);
void fd_hw_query_update_batch(struct fd_batch *batch, bool end_batch);
void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring);
struct fd_ringbuffer *ring) assert_dt;
void fd_hw_query_update_batch(struct fd_batch *batch, bool end_batch) assert_dt;
void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
void fd_hw_query_register_provider(struct pipe_context *pctx,
const struct fd_hw_sample_provider *provider);
void fd_hw_query_init(struct pipe_context *pctx);

View File

@ -49,6 +49,7 @@ fd_sw_destroy_query(struct fd_context *ctx, struct fd_query *q)
static uint64_t
read_counter(struct fd_context *ctx, int type)
assert_dt
{
switch (type) {
case PIPE_QUERY_PRIMITIVES_GENERATED:
@ -110,6 +111,7 @@ is_draw_rate_query(struct fd_query *q)
static void
fd_sw_begin_query(struct fd_context *ctx, struct fd_query *q)
assert_dt
{
struct fd_sw_query *sq = fd_sw_query(q);
sq->begin_value = read_counter(ctx, q->type);
@ -122,6 +124,7 @@ fd_sw_begin_query(struct fd_context *ctx, struct fd_query *q)
static void
fd_sw_end_query(struct fd_context *ctx, struct fd_query *q)
assert_dt
{
struct fd_sw_query *sq = fd_sw_query(q);
sq->end_value = read_counter(ctx, q->type);

View File

@ -68,6 +68,7 @@
*/
static void
rebind_resource_in_ctx(struct fd_context *ctx, struct fd_resource *rsc)
assert_dt
{
struct pipe_resource *prsc = &rsc->base;
@ -152,6 +153,7 @@ rebind_resource_in_ctx(struct fd_context *ctx, struct fd_resource *rsc)
static void
rebind_resource(struct fd_resource *rsc)
assert_dt
{
struct fd_screen *screen = fd_screen(rsc->base.screen);
@ -213,6 +215,7 @@ realloc_bo(struct fd_resource *rsc, uint32_t size)
static void
do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
assert_dt
{
struct pipe_context *pctx = &ctx->base;
@ -238,6 +241,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage);
static bool
fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
unsigned level, const struct pipe_box *box, uint64_t modifier)
assert_dt
{
struct pipe_context *pctx = &ctx->base;
struct pipe_resource *prsc = &rsc->base;
@ -472,6 +476,7 @@ fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
static void
fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
assert_dt
{
struct pipe_resource *dst = trans->base.resource;
struct pipe_blit_info blit = {};
@ -492,6 +497,7 @@ fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
static void
fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
assert_dt
{
struct pipe_resource *src = trans->base.resource;
struct pipe_blit_info blit = {};
@ -524,6 +530,7 @@ static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
static void
flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
assert_dt
{
struct fd_batch *write_batch = NULL;
@ -564,6 +571,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
in_dt
{
flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_MAP_READ);
}
@ -571,6 +579,7 @@ fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
struct pipe_transfer *ptrans)
in_dt /* TODO for threaded-ctx we'll need to split out unsynchronized path */
{
struct fd_context *ctx = fd_context(pctx);
struct fd_resource *rsc = fd_resource(ptrans->resource);
@ -600,6 +609,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
unsigned level, unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **pptrans)
in_dt /* TODO for threaded-ctx we'll need to split out unsynchronized path */
{
struct fd_context *ctx = fd_context(pctx);
struct fd_resource *rsc = fd_resource(prsc);
@ -1180,6 +1190,7 @@ fd_render_condition_check(struct pipe_context *pctx)
static void
fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_resource *rsc = fd_resource(prsc);
@ -1419,6 +1430,7 @@ fd_get_sample_position(struct pipe_context *context,
static void
fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
in_dt
{
/* wrap fd_blit to return void */
fd_blit(pctx, blit_info);

View File

@ -289,10 +289,10 @@ void fd_resource_context_init(struct pipe_context *pctx);
uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);
void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc) assert_dt;
void fd_resource_dump(struct fd_resource *rsc, const char *name);
bool fd_render_condition_check(struct pipe_context *pctx);
bool fd_render_condition_check(struct pipe_context *pctx) assert_dt;
static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
@ -302,6 +302,7 @@ fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
static inline void
fd_batch_write_prep(struct fd_batch *batch, struct fd_resource *rsc)
assert_dt
{
if (unlikely(rsc->needs_ubwc_clear)) {
batch->ctx->clear_ubwc(batch, rsc);
@ -312,6 +313,7 @@ fd_batch_write_prep(struct fd_batch *batch, struct fd_resource *rsc)
static inline void
fd_batch_resource_read(struct fd_batch *batch,
struct fd_resource *rsc)
assert_dt
{
/* Fast path: if we hit this then we know we don't have anyone else
* writing to it (since both _write and _read flush other writers), and

View File

@ -46,6 +46,7 @@
static void
fd_set_blend_color(struct pipe_context *pctx,
const struct pipe_blend_color *blend_color)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->blend_color = *blend_color;
@ -55,6 +56,7 @@ fd_set_blend_color(struct pipe_context *pctx,
static void
fd_set_stencil_ref(struct pipe_context *pctx,
const struct pipe_stencil_ref stencil_ref)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->stencil_ref = stencil_ref;
@ -64,6 +66,7 @@ fd_set_stencil_ref(struct pipe_context *pctx,
static void
fd_set_clip_state(struct pipe_context *pctx,
const struct pipe_clip_state *clip)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->ucp = *clip;
@ -72,6 +75,7 @@ fd_set_clip_state(struct pipe_context *pctx,
static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->sample_mask = (uint16_t)sample_mask;
@ -80,6 +84,7 @@ fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->min_samples = min_samples;
@ -99,6 +104,7 @@ fd_set_constant_buffer(struct pipe_context *pctx,
enum pipe_shader_type shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *cb)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];
@ -126,6 +132,7 @@ fd_set_shader_buffers(struct pipe_context *pctx,
unsigned start, unsigned count,
const struct pipe_shader_buffer *buffers,
unsigned writable_bitmask)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
@ -167,6 +174,7 @@ fd_set_shader_images(struct pipe_context *pctx,
unsigned start, unsigned count,
unsigned unbind_num_trailing_slots,
const struct pipe_image_view *images)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];
@ -219,6 +227,7 @@ fd_set_shader_images(struct pipe_context *pctx,
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
const struct pipe_framebuffer_state *framebuffer)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_framebuffer_state *cso;
@ -286,6 +295,7 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
static void
fd_set_polygon_stipple(struct pipe_context *pctx,
const struct pipe_poly_stipple *stipple)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->stipple = *stipple;
@ -297,6 +307,7 @@ fd_set_scissor_states(struct pipe_context *pctx,
unsigned start_slot,
unsigned num_scissors,
const struct pipe_scissor_state *scissor)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
@ -309,6 +320,7 @@ fd_set_viewport_states(struct pipe_context *pctx,
unsigned start_slot,
unsigned num_viewports,
const struct pipe_viewport_state *viewport)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
@ -349,6 +361,7 @@ fd_set_vertex_buffers(struct pipe_context *pctx,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
@ -389,6 +402,7 @@ fd_set_vertex_buffers(struct pipe_context *pctx,
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_blend_state *cso = hwcso;
@ -406,12 +420,14 @@ fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
in_dt
{
FREE(hwcso);
}
static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
@ -440,12 +456,14 @@ fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
in_dt
{
FREE(hwcso);
}
static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->zsa = hwcso;
@ -454,6 +472,7 @@ fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
in_dt
{
FREE(hwcso);
}
@ -475,12 +494,14 @@ fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
in_dt
{
FREE(hwcso);
}
static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->vtx.vtx = hwcso;
@ -532,6 +553,7 @@ static void
fd_set_stream_output_targets(struct pipe_context *pctx,
unsigned num_targets, struct pipe_stream_output_target **targets,
const unsigned *offsets)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_streamout_stateobj *so = &ctx->streamout;
@ -565,6 +587,7 @@ fd_set_stream_output_targets(struct pipe_context *pctx,
static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->compute = state;
@ -574,6 +597,7 @@ fd_bind_compute_state(struct pipe_context *pctx, void *state)
static void
fd_set_compute_resources(struct pipe_context *pctx,
unsigned start, unsigned count, struct pipe_surface **prscs)
in_dt
{
// TODO
}
@ -585,6 +609,7 @@ static void
fd_set_global_binding(struct pipe_context *pctx,
unsigned first, unsigned count, struct pipe_resource **prscs,
uint32_t **handles)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_global_bindings_stateobj *so = &ctx->global_bindings;

View File

@ -31,21 +31,25 @@
#include "freedreno_context.h"
/* Returns true if the currently-bound ZSA state object enables depth testing.
 * Reads ctx->zsa, a driver-thread-only field, hence the assert_dt annotation
 * (requires_cap(fd_context_access_cap)) — callers must hold the virtual
 * driver-thread "lock".
 */
static inline bool fd_depth_enabled(struct fd_context *ctx)
assert_dt
{
return ctx->zsa && ctx->zsa->depth_enabled;
}
/* Returns true if the currently-bound ZSA state object enables depth writes.
 * Accesses driver-thread-only ctx->zsa (assert_dt).
 */
static inline bool fd_depth_write_enabled(struct fd_context *ctx)
assert_dt
{
return ctx->zsa && ctx->zsa->depth_writemask;
}
/* Returns true if the currently-bound ZSA state object enables stencil
 * (front face, stencil[0]).  Accesses driver-thread-only ctx->zsa (assert_dt).
 */
static inline bool fd_stencil_enabled(struct fd_context *ctx)
assert_dt
{
return ctx->zsa && ctx->zsa->stencil[0].enabled;
}
/* Returns true if depth clamping is in effect: clamping applies when near
 * and far depth clipping are not both enabled in the bound rasterizer state.
 * NOTE(review): assumes ctx->rasterizer is non-NULL when this is called —
 * unlike the zsa helpers above there is no NULL guard; confirm at call sites.
 * Accesses driver-thread-only ctx->rasterizer (assert_dt).
 */
static inline bool fd_depth_clamp_enabled(struct fd_context *ctx)
assert_dt
{
return !(ctx->rasterizer->depth_clip_near && ctx->rasterizer->depth_clip_far);
}

View File

@ -104,6 +104,7 @@ void
fd_sampler_states_bind(struct pipe_context *pctx,
enum pipe_shader_type shader, unsigned start,
unsigned nr, void **hwcso)
in_dt
{
struct fd_context *ctx = fd_context(pctx);
@ -116,6 +117,7 @@ void
fd_set_sampler_views(struct pipe_context *pctx, enum pipe_shader_type shader,
unsigned start, unsigned nr, unsigned unbind_num_trailing_slots,
struct pipe_sampler_view **views)
in_dt
{
struct fd_context *ctx = fd_context(pctx);

View File

@ -111,6 +111,66 @@ extern bool fd_binning_enabled;
mesa_logw(__VA_ARGS__); \
} while(0)
struct fd_context;
/**
* A pseudo-variable for defining where various parts of the fd_context
* can be safely accessed.
*
* With threaded_context, certain pctx funcs are called from gallium
* front-end/state-tracker (eg. CSO creation), while others are called
* from the driver thread. Things called from driver thread can safely
* access anything in the ctx, while things called from the fe/st thread
* must limit themselves to "safe" things (ie. ctx->screen is safe as it
* is immutable, but the blitter_context is not).
*/
extern lock_cap_t fd_context_access_cap;
/**
* Make the annotation a bit less verbose.. mark fields which should only
* be accessed by driver-thread with 'dt'
*/
#define dt guarded_by(fd_context_access_cap)
/**
* Annotation for entry-point functions only called in driver thread.
*
* For static functions, apply the annotation to the function declaration.
* Otherwise apply to the function prototype.
*/
#define in_dt assert_cap(fd_context_access_cap)
/**
* Annotation for internal functions which are only called from entry-
* point functions (with 'in_dt' annotation) or other internal functions
* with the 'assert_dt' annotation.
*
* For static functions, apply the annotation to the function declaration.
* Otherwise apply to the function prototype.
*/
#define assert_dt requires_cap(fd_context_access_cap)
/**
* Special helpers for context access outside of driver thread. For ex,
* pctx->get_query_result() is not called on driver thread, but the
* query is guaranteed to be flushed, or the driver thread queue is
* guaranteed to be flushed.
*
* Use with caution!
*/
/* Acquire the virtual fd_context_access_cap capability for code running
 * outside the driver thread.  The body is intentionally empty: no real lock
 * exists — this only satisfies clang's thread-safety analysis (acquire_cap)
 * so 'dt'-guarded fields may be touched in the bracketed region.
 * Must be paired with fd_context_access_end().  Use with caution (see the
 * comment block above).
 */
static inline void
fd_context_access_begin(struct fd_context *ctx)
acquire_cap(fd_context_access_cap)
{
}
/* Release the virtual fd_context_access_cap capability.  Empty body — purely
 * an annotation (release_cap) for clang's thread-safety analysis; pairs with
 * fd_context_access_begin().
 */
static inline void
fd_context_access_end(struct fd_context *ctx)
release_cap(fd_context_access_cap)
{
}
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)

View File

@ -73,6 +73,7 @@ emit_const_asserts(struct fd_ringbuffer *ring,
static void
ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
assert_dt
{
/* when we emit const state via ring (IB2) we need a WFI, but when
* it is emit'd via stateobj, we don't
@ -458,6 +459,7 @@ max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v)
static inline void
emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx, enum pipe_shader_type t)
assert_dt
{
enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];
@ -600,6 +602,7 @@ ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *rin
struct fd_context *ctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw)
assert_dt
{
debug_assert(v->type == MESA_SHADER_VERTEX);
@ -615,6 +618,7 @@ ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *rin
static inline void
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx)
assert_dt
{
debug_assert(v->type == MESA_SHADER_FRAGMENT);
@ -625,6 +629,7 @@ ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *rin
static inline void
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx, const struct pipe_grid_info *info)
assert_dt
{
debug_assert(gl_shader_stage_is_compute(v->type));