gallium/util: replace pipe_mutex with mtx_t

pipe_mutex was made unnecessary with fd33a6bcd7.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Author: Timothy Arceri
Date: 2017-03-05 12:32:01 +11:00
Parent: 464d4806c1
Commit: 2efddc63ee

45 changed files with 67 additions and 71 deletions
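For readers skimming the diff: pipe_mutex was already just a typedef of the C11 mtx_t, so the patch only changes declarations; locking still goes through the existing wrapper macros, which expand to c11/threads calls. A minimal sketch of the pattern (the struct and function names below are invented for illustration, not taken from the tree):

#include "c11/threads.h"   /* Mesa's C11 threads shim */

struct example_screen {
   mtx_t cache_mutex;                            /* was: pipe_mutex cache_mutex; */
};

static void example_init_and_use(struct example_screen *s)
{
   (void) mtx_init(&s->cache_mutex, mtx_plain);  /* pipe_mutex_init()    */
   mtx_lock(&s->cache_mutex);                    /* pipe_mutex_lock()    */
   /* ... touch data guarded by cache_mutex ... */
   mtx_unlock(&s->cache_mutex);                  /* pipe_mutex_unlock()  */
   mtx_destroy(&s->cache_mutex);                 /* pipe_mutex_destroy() */
}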


@@ -108,12 +108,8 @@ static inline int pipe_thread_is_self( pipe_thread thread )
 return 0;
 }
-/* pipe_mutex
- */
-typedef mtx_t pipe_mutex;
 #define pipe_static_mutex(mutex) \
-static pipe_mutex mutex = _MTX_INITIALIZER_NP
+static mtx_t mutex = _MTX_INITIALIZER_NP
 #define pipe_mutex_init(mutex) \
 (void) mtx_init(&(mutex), mtx_plain)
@@ -131,11 +127,11 @@ typedef mtx_t pipe_mutex;
 __pipe_mutex_assert_locked(&(mutex))
 static inline void
-__pipe_mutex_assert_locked(pipe_mutex *mutex)
+__pipe_mutex_assert_locked(mtx_t *mutex)
 {
 #ifdef DEBUG
 /* NOTE: this would not work for recursive mutexes, but
-* pipe_mutex doesn't support those
+* mtx_t doesn't support those
 */
 int ret = mtx_trylock(mutex);
 assert(ret == thrd_busy);
@@ -179,7 +175,7 @@ typedef struct {
 unsigned count;
 unsigned waiters;
 uint64_t sequence;
-pipe_mutex mutex;
+mtx_t mutex;
 pipe_condvar condvar;
 } pipe_barrier;
@@ -231,7 +227,7 @@ static inline void pipe_barrier_wait(pipe_barrier *barrier)
 typedef struct
 {
-pipe_mutex mutex;
+mtx_t mutex;
 pipe_condvar cond;
 int counter;
 } pipe_semaphore;
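The __pipe_mutex_assert_locked() helper above only changes its parameter type; the trick it relies on is worth spelling out: mtx_plain mutexes are non-recursive, so mtx_trylock() is expected to fail with thrd_busy when the mutex is already held. A standalone sketch of the idiom (hypothetical name, not the exact body from the header):

#include <assert.h>
#include "c11/threads.h"

static inline void
assert_mutex_is_locked(mtx_t *mutex)
{
#ifdef DEBUG
   /* Non-recursive mutex: trylock fails with thrd_busy while it is held. */
   int ret = mtx_trylock(mutex);
   assert(ret == thrd_busy);
   if (ret == thrd_success)        /* not locked after all; undo the trylock */
      mtx_unlock(mutex);
#else
   (void) mutex;
#endif
}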


@@ -81,7 +81,7 @@ struct fenced_manager
 /**
 * Following members are mutable and protected by this mutex.
 */
-pipe_mutex mutex;
+mtx_t mutex;
 /**
 * Fenced buffer list.


@@ -78,7 +78,7 @@ struct pb_debug_buffer
 struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];
-pipe_mutex mutex;
+mtx_t mutex;
 unsigned map_count;
 struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];
@@ -95,7 +95,7 @@ struct pb_debug_manager
 pb_size underflow_size;
 pb_size overflow_size;
-pipe_mutex mutex;
+mtx_t mutex;
 struct list_head list;
 };


@@ -53,7 +53,7 @@ struct mm_pb_manager
 {
 struct pb_manager base;
-pipe_mutex mutex;
+mtx_t mutex;
 pb_size size;
 struct mem_block *heap;


@@ -56,7 +56,7 @@ struct pool_pb_manager
 {
 struct pb_manager base;
-pipe_mutex mutex;
+mtx_t mutex;
 pb_size bufSize;
 pb_size bufAlign;


@@ -128,7 +128,7 @@ struct pb_slab_manager
 */
 struct list_head slabs;
-pipe_mutex mutex;
+mtx_t mutex;
 };


@@ -52,7 +52,7 @@ struct pb_cache
 */
 struct list_head buckets[4];
-pipe_mutex mutex;
+mtx_t mutex;
 uint64_t cache_size;
 uint64_t max_cache_size;
 unsigned usecs;


@@ -110,7 +110,7 @@ typedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);
 */
 struct pb_slabs
 {
-pipe_mutex mutex;
+mtx_t mutex;
 unsigned min_order;
 unsigned num_orders;


@@ -53,7 +53,7 @@
 struct debug_flush_buf {
 /* Atomic */
 struct pipe_reference reference; /* Must be the first member. */
-pipe_mutex mutex;
+mtx_t mutex;
 /* Immutable */
 boolean supports_unsync;
 unsigned bt_depth;


@@ -40,7 +40,7 @@
 * Put this into your job structure.
 */
 struct util_queue_fence {
-pipe_mutex mutex;
+mtx_t mutex;
 pipe_condvar cond;
 int signalled;
 };
@@ -57,7 +57,7 @@ struct util_queue_job {
 /* Put this into your context. */
 struct util_queue {
 const char *name;
-pipe_mutex lock;
+mtx_t lock;
 pipe_condvar has_queued_cond;
 pipe_condvar has_space_cond;
 pipe_thread *threads;
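util_queue_fence above pairs the mutex with a condition variable and a flag; the usual wait/signal shape for such a fence looks roughly like this (illustrative sketch with local names, not copied from Mesa's queue code):

#include "c11/threads.h"

struct fence {
   mtx_t mutex;
   cnd_t cond;
   int signalled;
};

static void fence_wait(struct fence *f)
{
   mtx_lock(&f->mutex);
   while (!f->signalled)               /* loop guards against spurious wakeups */
      cnd_wait(&f->cond, &f->mutex);
   mtx_unlock(&f->mutex);
}

static void fence_signal(struct fence *f)
{
   mtx_lock(&f->mutex);
   f->signalled = 1;
   cnd_broadcast(&f->cond);            /* wake every waiter */
   mtx_unlock(&f->mutex);
}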


@@ -43,7 +43,7 @@ struct util_range {
 unsigned end; /* exclusive */
 /* for the range to be consistent with multiple contexts: */
-pipe_mutex write_mutex;
+mtx_t write_mutex;
 };


@@ -17,7 +17,7 @@ struct util_ringbuffer
 unsigned head;
 unsigned tail;
 pipe_condvar change;
-pipe_mutex mutex;
+mtx_t mutex;
 };


@@ -235,7 +235,7 @@ struct dd_context
 * the thread dumps the record of the oldest unsignalled fence.
 */
 pipe_thread thread;
-pipe_mutex mutex;
+mtx_t mutex;
 int kill_thread;
 struct pipe_resource *fence;
 struct pipe_transfer *fence_transfer;


@@ -44,7 +44,7 @@ struct fd_bo;
 struct fd_screen {
 struct pipe_screen base;
-pipe_mutex lock;
+mtx_t lock;
 /* it would be tempting to use pipe_reference here, but that
 * really doesn't work well if it isn't the first member of


@@ -43,7 +43,7 @@ struct lp_fence
 struct pipe_reference reference;
 unsigned id;
-pipe_mutex mutex;
+mtx_t mutex;
 pipe_condvar signalled;
 boolean issued;


@@ -174,7 +174,7 @@ struct lp_scene {
 unsigned tiles_x, tiles_y;
 int curr_x, curr_y; /**< for iterating over bins */
-pipe_mutex mutex;
+mtx_t mutex;
 struct cmd_bin tile[TILES_X][TILES_Y];
 struct data_block_list data;


@@ -56,7 +56,7 @@ struct llvmpipe_screen
 unsigned timestamp;
 struct lp_rasterizer *rast;
-pipe_mutex rast_mutex;
+mtx_t rast_mutex;
 };


@@ -821,7 +821,7 @@ struct nv50_blitter
 struct nv50_tsc_entry sampler[2]; /* nearest, bilinear */
-pipe_mutex mutex;
+mtx_t mutex;
 };
 struct nv50_blitctx


@@ -772,7 +772,7 @@ struct nvc0_blitter
 struct nv50_tsc_entry sampler[2]; /* nearest, bilinear */
-pipe_mutex mutex;
+mtx_t mutex;
 struct nvc0_screen *screen;
 };


@@ -48,7 +48,7 @@ struct r300_screen {
 /* The MSAA texture with CMASK access; */
 struct pipe_resource *cmask_resource;
-pipe_mutex cmask_mutex;
+mtx_t cmask_mutex;
 };


@@ -381,7 +381,7 @@ struct r600_common_screen {
 /* Auxiliary context. Mainly used to initialize resources.
 * It must be locked prior to using and flushed before unlocking. */
 struct pipe_context *aux_context;
-pipe_mutex aux_context_lock;
+mtx_t aux_context_lock;
 /* This must be in the screen, because UE4 uses one context for
 * compilation and another one for rendering.
@@ -394,7 +394,7 @@ struct r600_common_screen {
 unsigned num_shader_cache_hits;
 /* GPU load thread. */
-pipe_mutex gpu_load_mutex;
+mtx_t gpu_load_mutex;
 pipe_thread gpu_load_thread;
 union r600_mmio_counters mmio_counters;
 volatile unsigned gpu_load_stop_thread; /* bool */


@@ -84,7 +84,7 @@ struct si_screen {
 bool use_monolithic_shaders;
 bool record_llvm_ir;
-pipe_mutex shader_parts_mutex;
+mtx_t shader_parts_mutex;
 struct si_shader_part *vs_prologs;
 struct si_shader_part *vs_epilogs;
 struct si_shader_part *tcs_epilogs;
@@ -104,7 +104,7 @@ struct si_screen {
 * - GS and CS aren't cached, but it's certainly possible to cache
 * those as well.
 */
-pipe_mutex shader_cache_mutex;
+mtx_t shader_cache_mutex;
 struct hash_table *shader_cache;
 /* Shader compiler queue for multithreaded compilation. */


@@ -278,7 +278,7 @@ struct si_shader_selector {
 struct util_queue_fence ready;
 struct si_compiler_ctx_state compiler_ctx_state;
-pipe_mutex mutex;
+mtx_t mutex;
 struct si_shader *first_variant; /* immutable after the first variant */
 struct si_shader *last_variant; /* mutable */


@@ -42,7 +42,7 @@ struct rbug_context {
 struct rbug_list list;
 /* call locking */
-pipe_mutex call_mutex;
+mtx_t call_mutex;
 /* current state */
 struct {
@@ -58,7 +58,7 @@ struct rbug_context {
 } curr;
 /* draw locking */
-pipe_mutex draw_mutex;
+mtx_t draw_mutex;
 pipe_condvar draw_cond;
 unsigned draw_num_rules;
 int draw_blocker;
@@ -74,7 +74,7 @@ struct rbug_context {
 } draw_rule;
 /* list of state objects */
-pipe_mutex list_mutex;
+mtx_t list_mutex;
 unsigned num_shaders;
 struct rbug_list shaders;
 };


@@ -49,7 +49,7 @@ struct rbug_screen
 /* remote debugger */
 struct rbug_rbug *rbug;
-pipe_mutex list_mutex;
+mtx_t list_mutex;
 int num_contexts;
 int num_resources;
 int num_surfaces;


@@ -66,9 +66,9 @@ struct svga_screen
 } debug;
 unsigned texture_timestamp;
-pipe_mutex tex_mutex;
+mtx_t tex_mutex;
-pipe_mutex swc_mutex; /* Used for buffer uploads */
+mtx_t swc_mutex; /* Used for buffer uploads */
 /* which formats to translate depth formats into */
 struct {


@@ -105,7 +105,7 @@ struct svga_host_surface_cache_entry
 */
 struct svga_host_surface_cache
 {
-pipe_mutex mutex;
+mtx_t mutex;
 /* Unused buffers are put in buckets to speed up lookups */
 struct list_head bucket[SVGA_HOST_SURFACE_CACHE_BUCKETS];


@@ -77,14 +77,14 @@ struct vc4_screen {
 struct list_head *size_list;
 uint32_t size_list_size;
-pipe_mutex lock;
+mtx_t lock;
 uint32_t bo_size;
 uint32_t bo_count;
 } bo_cache;
 struct util_hash_table *bo_handles;
-pipe_mutex bo_handles_mutex;
+mtx_t bo_handles_mutex;
 uint32_t bo_size;
 uint32_t bo_count;


@@ -89,7 +89,7 @@ struct dri_screen
 __DRIimage * (*lookup_egl_image)(struct dri_screen *ctx, void *handle);
 /* OpenCL interop */
-pipe_mutex opencl_func_mutex;
+mtx_t opencl_func_mutex;
 opencl_dri_event_add_ref_t opencl_dri_event_add_ref;
 opencl_dri_event_release_t opencl_dri_event_release;
 opencl_dri_event_wait_t opencl_dri_event_wait;


@@ -76,7 +76,7 @@ typedef struct xmesa_visual *XMesaVisual;
 struct xmesa_display {
-pipe_mutex mutex;
+mtx_t mutex;
 Display *display;
 struct pipe_screen *screen;


@@ -74,7 +74,7 @@ struct hgl_context
 Bitmap* bitmap;
 color_space colorSpace;
-pipe_mutex fbMutex;
+mtx_t fbMutex;
 struct hgl_buffer* draw;
 struct hgl_buffer* read;


@@ -74,8 +74,8 @@ struct nine_queue_pool {
 BOOL worker_wait;
 pipe_condvar event_pop;
 pipe_condvar event_push;
-pipe_mutex mutex_pop;
-pipe_mutex mutex_push;
+mtx_t mutex_pop;
+mtx_t mutex_push;
 };
 /* Consumer functions: */


@@ -64,13 +64,13 @@ struct csmt_context {
 struct nine_queue_pool* pool;
 BOOL terminate;
 pipe_condvar event_processed;
-pipe_mutex mutex_processed;
+mtx_t mutex_processed;
 struct NineDevice9 *device;
 BOOL processed;
 BOOL toPause;
 BOOL hasPaused;
-pipe_mutex thread_running;
-pipe_mutex thread_resume;
+mtx_t thread_running;
+mtx_t thread_resume;
 };
 /* Wait for instruction to be processed.


@@ -209,7 +209,7 @@ typedef struct {
 struct vl_compositor compositor;
 struct vl_compositor_state cstate;
 vl_csc_matrix csc;
-pipe_mutex mutex;
+mtx_t mutex;
 } vlVaDriver;
 typedef struct {


@@ -354,7 +354,7 @@ typedef struct
 struct pipe_context *context;
 struct vl_compositor compositor;
 struct pipe_sampler_view *dummy_sv;
-pipe_mutex mutex;
+mtx_t mutex;
 } vlVdpDevice;
 typedef struct
@@ -439,7 +439,7 @@ typedef struct
 typedef struct
 {
 vlVdpDevice *device;
-pipe_mutex mutex;
+mtx_t mutex;
 struct pipe_video_codec *decoder;
 } vlVdpDecoder;


@@ -50,7 +50,7 @@ private:
 // Context Management
 struct hgl_context* fContext[CONTEXT_MAX];
 context_id fCurrentContext;
-pipe_mutex fMutex;
+mtx_t fMutex;
 };


@@ -52,7 +52,7 @@ struct amdgpu_winsys {
 amdgpu_device_handle dev;
-pipe_mutex bo_fence_lock;
+mtx_t bo_fence_lock;
 int num_cs; /* The number of command streams created. */
 unsigned num_total_rejected_cs;
@@ -79,7 +79,7 @@ struct amdgpu_winsys {
 bool check_vm;
 /* List of all allocated buffers */
-pipe_mutex global_bo_list_lock;
+mtx_t global_bo_list_lock;
 struct list_head global_bo_list;
 unsigned num_buffers;
 };


@@ -43,7 +43,7 @@ struct radeon_bo {
 struct pb_cache_entry cache_entry;
 void *ptr;
-pipe_mutex map_mutex;
+mtx_t map_mutex;
 unsigned map_count;
 bool use_reusable_pool;
 } real;


@@ -57,7 +57,7 @@ pipe_static_mutex(fd_tab_mutex);
 * with multiple contexts (here command streams) backed by one winsys. */
 static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
 struct radeon_drm_cs **owner,
-pipe_mutex *mutex,
+mtx_t *mutex,
 unsigned request, const char *request_name,
 bool enable)
 {


@@ -78,9 +78,9 @@ struct radeon_drm_winsys {
 struct util_hash_table *bo_handles;
 /* List of buffer virtual memory ranges. Protectded by bo_handles_mutex. */
 struct util_hash_table *bo_vas;
-pipe_mutex bo_handles_mutex;
-pipe_mutex bo_va_mutex;
-pipe_mutex bo_fence_lock;
+mtx_t bo_handles_mutex;
+mtx_t bo_va_mutex;
+mtx_t bo_fence_lock;
 uint64_t va_offset;
 struct list_head va_holes;
@@ -91,9 +91,9 @@ struct radeon_drm_winsys {
 uint32_t num_cpus; /* Number of CPUs. */
 struct radeon_drm_cs *hyperz_owner;
-pipe_mutex hyperz_owner_mutex;
+mtx_t hyperz_owner_mutex;
 struct radeon_drm_cs *cmask_owner;
-pipe_mutex cmask_owner_mutex;
+mtx_t cmask_owner_mutex;
 /* multithreaded command submission */
 struct util_queue cs_queue;


@@ -70,7 +70,7 @@ struct fenced_manager
 /**
 * Following members are mutable and protected by this mutex.
 */
-pipe_mutex mutex;
+mtx_t mutex;
 /**
 * Fenced buffer list.


@@ -40,7 +40,7 @@ struct vmw_fence_ops
 struct pb_fence_ops base;
 struct vmw_winsys_screen *vws;
-pipe_mutex mutex;
+mtx_t mutex;
 /*
 * Protected by mutex;


@@ -57,7 +57,7 @@ struct vmw_svga_winsys_surface
 unsigned next_present_no;
 uint32_t present_fences[VMW_MAX_PRESENTS];
-pipe_mutex mutex;
+mtx_t mutex;
 struct svga_winsys_buffer *buf; /* Current backing guest buffer */
 uint32_t mapcount; /* Number of mappers */
 uint32_t map_mode; /* PIPE_TRANSFER_[READ|WRITE] */


@@ -59,11 +59,11 @@ struct virgl_drm_winsys
 struct list_head delayed;
 int num_delayed;
 unsigned usecs;
-pipe_mutex mutex;
+mtx_t mutex;
 struct util_hash_table *bo_handles;
 struct util_hash_table *bo_names;
-pipe_mutex bo_handles_mutex;
+mtx_t bo_handles_mutex;
 };
 struct virgl_drm_cmd_buf {


@@ -48,7 +48,7 @@ struct virgl_vtest_winsys {
 struct list_head delayed;
 int num_delayed;
 unsigned usecs;
-pipe_mutex mutex;
+mtx_t mutex;
 };
 struct virgl_hw_res {