util: remove LIST_DEL macro
Just use the inlined function directly. The macro was replaced with
the function in ebe304fa54.
Reviewed-by: Eric Engestrom <eric@engestrom.ch>
This commit is contained in:
parent
c976b427c4
commit
c578600489
|
@ -262,7 +262,7 @@ static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys_bo *_bo)
|
|||
} else {
|
||||
if (bo->ws->debug_all_bos) {
|
||||
pthread_mutex_lock(&bo->ws->global_bo_list_lock);
|
||||
LIST_DEL(&bo->global_list_item);
|
||||
list_del(&bo->global_list_item);
|
||||
bo->ws->num_buffers--;
|
||||
pthread_mutex_unlock(&bo->ws->global_bo_list_lock);
|
||||
}
|
||||
|
|
|
@ -696,7 +696,7 @@ hud_stop_queries(struct hud_context *hud, struct pipe_context *pipe)
|
|||
*/
|
||||
if (gr->current_value <
|
||||
LIST_ENTRY(struct hud_graph, next, head)->current_value) {
|
||||
LIST_DEL(&gr->head);
|
||||
list_del(&gr->head);
|
||||
list_add(&gr->head, &next->head);
|
||||
}
|
||||
}
|
||||
|
@ -1752,10 +1752,10 @@ hud_unset_record_context(struct hud_context *hud)
|
|||
|
||||
LIST_FOR_EACH_ENTRY_SAFE(pane, pane_tmp, &hud->pane_list, head) {
|
||||
LIST_FOR_EACH_ENTRY_SAFE(graph, graph_tmp, &pane->graph_list, head) {
|
||||
LIST_DEL(&graph->head);
|
||||
list_del(&graph->head);
|
||||
hud_graph_destroy(graph, pipe);
|
||||
}
|
||||
LIST_DEL(&pane->head);
|
||||
list_del(&pane->head);
|
||||
FREE(pane);
|
||||
}
|
||||
|
||||
|
|
|
@ -250,7 +250,7 @@ fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
|
|||
assert(!fenced_buf->fence);
|
||||
assert(fenced_buf->head.prev);
|
||||
assert(fenced_buf->head.next);
|
||||
LIST_DEL(&fenced_buf->head);
|
||||
list_del(&fenced_buf->head);
|
||||
assert(fenced_mgr->num_unfenced);
|
||||
--fenced_mgr->num_unfenced;
|
||||
|
||||
|
@ -276,7 +276,7 @@ fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
|
|||
|
||||
p_atomic_inc(&fenced_buf->base.reference.count);
|
||||
|
||||
LIST_DEL(&fenced_buf->head);
|
||||
list_del(&fenced_buf->head);
|
||||
assert(fenced_mgr->num_unfenced);
|
||||
--fenced_mgr->num_unfenced;
|
||||
list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
|
||||
|
@ -305,7 +305,7 @@ fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
|
|||
assert(fenced_buf->head.prev);
|
||||
assert(fenced_buf->head.next);
|
||||
|
||||
LIST_DEL(&fenced_buf->head);
|
||||
list_del(&fenced_buf->head);
|
||||
assert(fenced_mgr->num_fenced);
|
||||
--fenced_mgr->num_fenced;
|
||||
|
||||
|
|
|
@ -236,7 +236,7 @@ pb_debug_buffer_destroy(struct pb_buffer *_buf)
|
|||
pb_debug_buffer_check(buf);
|
||||
|
||||
mtx_lock(&mgr->mutex);
|
||||
LIST_DEL(&buf->head);
|
||||
list_del(&buf->head);
|
||||
mtx_unlock(&mgr->mutex);
|
||||
|
||||
mtx_destroy(&buf->mutex);
|
||||
|
|
|
@ -204,7 +204,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
|
|||
|
||||
buf->mapCount = 0;
|
||||
|
||||
LIST_DEL(list);
|
||||
list_del(list);
|
||||
list_addtail(list, &slab->freeBuffers);
|
||||
slab->numFree++;
|
||||
|
||||
|
|
|
@ -42,7 +42,7 @@ destroy_buffer_locked(struct pb_cache_entry *entry)
|
|||
|
||||
assert(!pipe_is_referenced(&buf->reference));
|
||||
if (entry->head.next) {
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
assert(mgr->num_buffers);
|
||||
--mgr->num_buffers;
|
||||
mgr->cache_size -= buf->size;
|
||||
|
@ -208,7 +208,7 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
|
|||
struct pb_buffer *buf = entry->buffer;
|
||||
|
||||
mgr->cache_size -= buf->size;
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
--mgr->num_buffers;
|
||||
mtx_unlock(&mgr->mutex);
|
||||
/* Increase refcount */
|
||||
|
|
|
@ -55,7 +55,7 @@ pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
|
|||
{
|
||||
struct pb_slab *slab = entry->slab;
|
||||
|
||||
LIST_DEL(&entry->head); /* remove from reclaim list */
|
||||
list_del(&entry->head); /* remove from reclaim list */
|
||||
list_add(&entry->head, &slab->free);
|
||||
slab->num_free++;
|
||||
|
||||
|
@ -66,7 +66,7 @@ pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
|
|||
}
|
||||
|
||||
if (slab->num_free >= slab->num_entries) {
|
||||
LIST_DEL(&slab->head);
|
||||
list_del(&slab->head);
|
||||
slabs->slab_free(slabs->priv, slab);
|
||||
}
|
||||
}
|
||||
|
@ -124,7 +124,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
|
|||
if (!LIST_IS_EMPTY(&slab->free))
|
||||
break;
|
||||
|
||||
LIST_DEL(&slab->head);
|
||||
list_del(&slab->head);
|
||||
}
|
||||
|
||||
if (LIST_IS_EMPTY(&group->slabs)) {
|
||||
|
@ -145,7 +145,7 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
|
|||
}
|
||||
|
||||
entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
slab->num_free--;
|
||||
|
||||
mtx_unlock(&slabs->mutex);
|
||||
|
|
|
@ -200,7 +200,7 @@ debug_free(const char *file, unsigned line, const char *function,
|
|||
memset(ptr, DEBUG_FREED_BYTE, hdr->size);
|
||||
#else
|
||||
mtx_lock(&list_mutex);
|
||||
LIST_DEL(&hdr->head);
|
||||
list_del(&hdr->head);
|
||||
mtx_unlock(&list_mutex);
|
||||
hdr->magic = 0;
|
||||
ftr->magic = 0;
|
||||
|
|
|
@ -352,7 +352,7 @@ tc_call_destroy_query(struct pipe_context *pipe, union tc_payload *payload)
|
|||
struct threaded_query *tq = threaded_query(payload->query);
|
||||
|
||||
if (tq->head_unflushed.next)
|
||||
LIST_DEL(&tq->head_unflushed);
|
||||
list_del(&tq->head_unflushed);
|
||||
|
||||
pipe->destroy_query(pipe, payload->query);
|
||||
}
|
||||
|
@ -432,7 +432,7 @@ tc_get_query_result(struct pipe_context *_pipe,
|
|||
tq->flushed = true;
|
||||
if (tq->head_unflushed.next) {
|
||||
/* This is safe because it can only happen after we sync'd. */
|
||||
LIST_DEL(&tq->head_unflushed);
|
||||
list_del(&tq->head_unflushed);
|
||||
}
|
||||
}
|
||||
return success;
|
||||
|
@ -1965,7 +1965,7 @@ tc_flush_queries(struct threaded_context *tc)
|
|||
{
|
||||
struct threaded_query *tq, *tmp;
|
||||
LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) {
|
||||
LIST_DEL(&tq->head_unflushed);
|
||||
list_del(&tq->head_unflushed);
|
||||
|
||||
/* Memory release semantics: due to a possible race with
|
||||
* tc_get_query_result, we must ensure that the linked list changes
|
||||
|
|
|
@ -50,7 +50,7 @@ nouveau_fence_trigger_work(struct nouveau_fence *fence)
|
|||
|
||||
LIST_FOR_EACH_ENTRY_SAFE(work, tmp, &fence->work, list) {
|
||||
work->func(work->data);
|
||||
LIST_DEL(&work->list);
|
||||
list_del(&work->list);
|
||||
FREE(work);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -189,7 +189,7 @@ nouveau_mm_allocate(struct nouveau_mman *cache,
|
|||
}
|
||||
slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
|
||||
|
||||
LIST_DEL(&slab->head);
|
||||
list_del(&slab->head);
|
||||
list_add(&slab->head, &bucket->used);
|
||||
}
|
||||
|
||||
|
@ -202,7 +202,7 @@ nouveau_mm_allocate(struct nouveau_mman *cache,
|
|||
nouveau_bo_ref(slab->bo, bo);
|
||||
|
||||
if (slab->free == 0) {
|
||||
LIST_DEL(&slab->head);
|
||||
list_del(&slab->head);
|
||||
list_add(&slab->head, &bucket->full);
|
||||
}
|
||||
|
||||
|
@ -222,11 +222,11 @@ nouveau_mm_free(struct nouveau_mm_allocation *alloc)
|
|||
mm_slab_free(slab, alloc->offset >> slab->order);
|
||||
|
||||
if (slab->free == slab->count) {
|
||||
LIST_DEL(&slab->head);
|
||||
list_del(&slab->head);
|
||||
list_addtail(&slab->head, &bucket->free);
|
||||
} else
|
||||
if (slab->free == 1) {
|
||||
LIST_DEL(&slab->head);
|
||||
list_del(&slab->head);
|
||||
list_addtail(&slab->head, &bucket->used);
|
||||
}
|
||||
|
||||
|
@ -269,7 +269,7 @@ nouveau_mm_free_slabs(struct list_head *head)
|
|||
struct mm_slab *slab, *next;
|
||||
|
||||
LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
|
||||
LIST_DEL(&slab->head);
|
||||
list_del(&slab->head);
|
||||
nouveau_bo_ref(NULL, &slab->bo);
|
||||
FREE(slab);
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ nv30_query_object_del(struct nv30_screen *screen, struct nv30_query_object **po)
|
|||
while (ntfy[3] & 0xff000000) {
|
||||
}
|
||||
nouveau_heap_free(&qo->hw);
|
||||
LIST_DEL(&qo->list);
|
||||
list_del(&qo->list);
|
||||
FREE(qo);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -928,7 +928,7 @@ static int merge_inst_groups(struct r600_bytecode *bc, struct r600_bytecode_alu
|
|||
for (i = 0; i < max_slots; ++i) {
|
||||
slots[i] = result[i];
|
||||
if (result[i]) {
|
||||
LIST_DEL(&result[i]->list);
|
||||
list_del(&result[i]->list);
|
||||
result[i]->last = 0;
|
||||
list_addtail(&result[i]->list, &bc->cf_last->alu);
|
||||
}
|
||||
|
|
|
@ -131,12 +131,12 @@ static void sort_cpb(struct rvce_encoder *enc)
|
|||
}
|
||||
|
||||
if (l1) {
|
||||
LIST_DEL(&l1->list);
|
||||
list_del(&l1->list);
|
||||
list_add(&l1->list, &enc->cpb_slots);
|
||||
}
|
||||
|
||||
if (l0) {
|
||||
LIST_DEL(&l0->list);
|
||||
list_del(&l0->list);
|
||||
list_add(&l0->list, &enc->cpb_slots);
|
||||
}
|
||||
}
|
||||
|
@ -341,7 +341,7 @@ static void rvce_end_frame(struct pipe_video_codec *encoder,
|
|||
slot->frame_num = enc->pic.frame_num;
|
||||
slot->pic_order_cnt = enc->pic.pic_order_cnt;
|
||||
if (!enc->pic.not_referenced) {
|
||||
LIST_DEL(&slot->list);
|
||||
list_del(&slot->list);
|
||||
list_add(&slot->list, &enc->cpb_slots);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -125,12 +125,12 @@ static void sort_cpb(struct rvce_encoder *enc)
|
|||
}
|
||||
|
||||
if (l1) {
|
||||
LIST_DEL(&l1->list);
|
||||
list_del(&l1->list);
|
||||
list_add(&l1->list, &enc->cpb_slots);
|
||||
}
|
||||
|
||||
if (l0) {
|
||||
LIST_DEL(&l0->list);
|
||||
list_del(&l0->list);
|
||||
list_add(&l0->list, &enc->cpb_slots);
|
||||
}
|
||||
}
|
||||
|
@ -340,7 +340,7 @@ static void rvce_end_frame(struct pipe_video_codec *encoder,
|
|||
slot->frame_num = enc->pic.frame_num;
|
||||
slot->pic_order_cnt = enc->pic.pic_order_cnt;
|
||||
if (!enc->pic.not_referenced) {
|
||||
LIST_DEL(&slot->list);
|
||||
list_del(&slot->list);
|
||||
list_add(&slot->list, &enc->cpb_slots);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -106,7 +106,7 @@ static void gfx10_release_query_buffers(struct si_context *sctx,
|
|||
if (qbuf->list.prev == &sctx->shader_query_buffers)
|
||||
continue; /* keep the oldest buffer for recycling */
|
||||
|
||||
LIST_DEL(&qbuf->list);
|
||||
list_del(&qbuf->list);
|
||||
si_resource_reference(&qbuf->buf, NULL);
|
||||
FREE(qbuf);
|
||||
}
|
||||
|
@ -131,7 +131,7 @@ static bool gfx10_alloc_query_buffer(struct si_context *sctx)
|
|||
!si_rings_is_buffer_referenced(sctx, qbuf->buf->buf, RADEON_USAGE_READWRITE) &&
|
||||
sctx->ws->buffer_wait(qbuf->buf->buf, 0, RADEON_USAGE_READWRITE)) {
|
||||
/* Can immediately re-use the oldest buffer */
|
||||
LIST_DEL(&qbuf->list);
|
||||
list_del(&qbuf->list);
|
||||
} else {
|
||||
qbuf = NULL;
|
||||
}
|
||||
|
@ -514,7 +514,7 @@ void gfx10_destroy_query(struct si_context *sctx)
|
|||
struct gfx10_sh_query_buffer *qbuf =
|
||||
list_first_entry(&sctx->shader_query_buffers,
|
||||
struct gfx10_sh_query_buffer, list);
|
||||
LIST_DEL(&qbuf->list);
|
||||
list_del(&qbuf->list);
|
||||
|
||||
assert(!qbuf->refcount);
|
||||
si_resource_reference(&qbuf->buf, NULL);
|
||||
|
|
|
@ -855,7 +855,7 @@ static bool si_pc_query_end(struct si_context *ctx, struct si_query *squery)
|
|||
|
||||
si_pc_query_suspend(ctx, squery);
|
||||
|
||||
LIST_DEL(&squery->active_list);
|
||||
list_del(&squery->active_list);
|
||||
ctx->num_cs_dw_queries_suspend -= squery->num_cs_dw_suspend;
|
||||
|
||||
return query->buffer.buf != NULL;
|
||||
|
|
|
@ -410,7 +410,7 @@ svga_buffer_validate_host_surface(struct svga_context *svga,
|
|||
svga_screen_surface_destroy(svga_screen(sbuf->b.b.screen),
|
||||
&bufsurf->key, &bufsurf->handle);
|
||||
|
||||
LIST_DEL(&bufsurf->list);
|
||||
list_del(&bufsurf->list);
|
||||
FREE(bufsurf);
|
||||
}
|
||||
} else {
|
||||
|
@ -728,7 +728,7 @@ svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
|
|||
sbuf->map.num_ranges = 0;
|
||||
|
||||
assert(sbuf->head.prev && sbuf->head.next);
|
||||
LIST_DEL(&sbuf->head); /* remove from svga->dirty_buffers list */
|
||||
list_del(&sbuf->head); /* remove from svga->dirty_buffers list */
|
||||
#ifdef DEBUG
|
||||
sbuf->head.next = sbuf->head.prev = NULL;
|
||||
#endif
|
||||
|
|
|
@ -133,10 +133,10 @@ svga_screen_cache_lookup(struct svga_screen *svgascreen,
|
|||
entry->handle = NULL;
|
||||
|
||||
/* Remove from hash table */
|
||||
LIST_DEL(&entry->bucket_head);
|
||||
list_del(&entry->bucket_head);
|
||||
|
||||
/* remove from LRU list */
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
|
||||
/* Add the cache entry (but not the surface!) to the empty list */
|
||||
list_add(&entry->head, &cache->empty);
|
||||
|
@ -192,8 +192,8 @@ svga_screen_cache_shrink(struct svga_screen *svgascreen,
|
|||
assert(entry->handle);
|
||||
sws->surface_reference(sws, &entry->handle, NULL);
|
||||
|
||||
LIST_DEL(&entry->bucket_head);
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->bucket_head);
|
||||
list_del(&entry->head);
|
||||
list_add(&entry->head, &cache->empty);
|
||||
|
||||
if (cache->total_size <= target_size) {
|
||||
|
@ -264,7 +264,7 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
|
|||
cache->empty.next, head);
|
||||
|
||||
/* Remove from LRU list */
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
}
|
||||
else if (!LIST_IS_EMPTY(&cache->unused)) {
|
||||
/* free the last used buffer and reuse its entry */
|
||||
|
@ -278,10 +278,10 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
|
|||
sws->surface_reference(sws, &entry->handle, NULL);
|
||||
|
||||
/* Remove from hash table */
|
||||
LIST_DEL(&entry->bucket_head);
|
||||
list_del(&entry->bucket_head);
|
||||
|
||||
/* Remove from LRU list */
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
}
|
||||
|
||||
if (entry) {
|
||||
|
@ -338,7 +338,7 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
|
|||
|
||||
if (sws->surface_is_flushed(sws, entry->handle)) {
|
||||
/* remove entry from the invalidated list */
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
|
||||
sws->fence_reference(sws, &entry->fence, fence);
|
||||
|
||||
|
@ -364,7 +364,7 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
|
|||
|
||||
if (sws->surface_is_flushed(sws, entry->handle)) {
|
||||
/* remove entry from the validated list */
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
|
||||
/* It is now safe to invalidate the surface content.
|
||||
* It will be done using the current context.
|
||||
|
|
|
@ -668,7 +668,7 @@ static struct pipe_video_buffer *vid_dec_h265_Flush(vid_dec_PrivateType *priv,
|
|||
*timestamp = result->timestamp;
|
||||
|
||||
--priv->codec_data.h265.dpb_num;
|
||||
LIST_DEL(&result->list);
|
||||
list_del(&result->list);
|
||||
FREE(result);
|
||||
|
||||
return buf;
|
||||
|
|
|
@ -824,7 +824,7 @@ static void enc_ClearBframes(omx_base_PortType *port, struct input_buf_private *
|
|||
return;
|
||||
|
||||
task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
|
||||
LIST_DEL(&task->list);
|
||||
list_del(&task->list);
|
||||
|
||||
/* promote last from to P frame */
|
||||
priv->ref_idx_l0 = priv->ref_idx_l1;
|
||||
|
@ -912,7 +912,7 @@ static OMX_ERRORTYPE vid_enc_EncodeFrame(omx_base_PortType *port, OMX_BUFFERHEAD
|
|||
if (stacked_num == priv->stacked_frames_num) {
|
||||
struct encode_task *t;
|
||||
t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
|
||||
LIST_DEL(&t->list);
|
||||
list_del(&t->list);
|
||||
list_addtail(&t->list, &inp->tasks);
|
||||
}
|
||||
priv->ref_idx_l1 = priv->frame_num++;
|
||||
|
|
|
@ -272,7 +272,7 @@ static void enc_ClearBframes(vid_enc_PrivateType * priv, struct input_buf_privat
|
|||
return;
|
||||
|
||||
task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
|
||||
LIST_DEL(&task->list);
|
||||
list_del(&task->list);
|
||||
|
||||
/* promote last from to P frame */
|
||||
priv->ref_idx_l0 = priv->ref_idx_l1;
|
||||
|
@ -366,7 +366,7 @@ static OMX_ERRORTYPE encode_frame(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
|
|||
if (stacked_num == priv->stacked_frames_num) {
|
||||
struct encode_task *t;
|
||||
t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
|
||||
LIST_DEL(&t->list);
|
||||
list_del(&t->list);
|
||||
list_addtail(&t->list, &inp->tasks);
|
||||
}
|
||||
priv->ref_idx_l1 = priv->frame_num++;
|
||||
|
|
|
@ -98,7 +98,7 @@ struct pipe_video_buffer *vid_dec_h264_Flush(vid_dec_PrivateType *priv,
|
|||
*timestamp = result->timestamp;
|
||||
|
||||
--priv->codec_data.h264.dpb_num;
|
||||
LIST_DEL(&result->list);
|
||||
list_del(&result->list);
|
||||
FREE(result);
|
||||
|
||||
return buf;
|
||||
|
|
|
@ -138,7 +138,7 @@ void vid_enc_BufferEncoded_common(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
|
|||
#endif
|
||||
|
||||
task = LIST_ENTRY(struct encode_task, inp->tasks.next, list);
|
||||
LIST_DEL(&task->list);
|
||||
list_del(&task->list);
|
||||
list_addtail(&task->list, &priv->used_tasks);
|
||||
|
||||
if (!task->bitstream)
|
||||
|
@ -184,7 +184,7 @@ struct encode_task *enc_NeedTask_common(vid_enc_PrivateType * priv, OMX_VIDEO_PO
|
|||
|
||||
if (!LIST_IS_EMPTY(&priv->free_tasks)) {
|
||||
task = LIST_ENTRY(struct encode_task, priv->free_tasks.next, list);
|
||||
LIST_DEL(&task->list);
|
||||
list_del(&task->list);
|
||||
return task;
|
||||
}
|
||||
|
||||
|
|
|
@ -176,7 +176,7 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
|
|||
|
||||
if (ws->debug_all_bos) {
|
||||
simple_mtx_lock(&ws->global_bo_list_lock);
|
||||
LIST_DEL(&bo->u.real.global_list_item);
|
||||
list_del(&bo->u.real.global_list_item);
|
||||
ws->num_buffers--;
|
||||
simple_mtx_unlock(&ws->global_bo_list_lock);
|
||||
}
|
||||
|
|
|
@ -214,7 +214,7 @@ fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
|
|||
assert(!fenced_buf->fence);
|
||||
assert(fenced_buf->head.prev);
|
||||
assert(fenced_buf->head.next);
|
||||
LIST_DEL(&fenced_buf->head);
|
||||
list_del(&fenced_buf->head);
|
||||
assert(fenced_mgr->num_unfenced);
|
||||
--fenced_mgr->num_unfenced;
|
||||
|
||||
|
@ -239,7 +239,7 @@ fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
|
|||
|
||||
p_atomic_inc(&fenced_buf->base.reference.count);
|
||||
|
||||
LIST_DEL(&fenced_buf->head);
|
||||
list_del(&fenced_buf->head);
|
||||
assert(fenced_mgr->num_unfenced);
|
||||
--fenced_mgr->num_unfenced;
|
||||
list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
|
||||
|
@ -268,7 +268,7 @@ fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
|
|||
assert(fenced_buf->head.prev);
|
||||
assert(fenced_buf->head.next);
|
||||
|
||||
LIST_DEL(&fenced_buf->head);
|
||||
list_del(&fenced_buf->head);
|
||||
assert(fenced_mgr->num_fenced);
|
||||
--fenced_mgr->num_fenced;
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ static void
|
|||
virgl_resource_cache_entry_release(struct virgl_resource_cache *cache,
|
||||
struct virgl_resource_cache_entry *entry)
|
||||
{
|
||||
LIST_DEL(&entry->head);
|
||||
list_del(&entry->head);
|
||||
cache->entry_release_func(entry, cache->user_data);
|
||||
}
|
||||
|
||||
|
@ -135,7 +135,7 @@ virgl_resource_cache_remove_compatible(struct virgl_resource_cache *cache,
|
|||
}
|
||||
|
||||
if (compat_entry)
|
||||
LIST_DEL(&compat_entry->head);
|
||||
list_del(&compat_entry->head);
|
||||
|
||||
return compat_entry;
|
||||
}
|
||||
|
|
|
@ -361,7 +361,7 @@ free_zombie_sampler_views(struct st_context *st)
|
|||
|
||||
LIST_FOR_EACH_ENTRY_SAFE(entry, next,
|
||||
&st->zombie_sampler_views.list.node, node) {
|
||||
LIST_DEL(&entry->node); // remove this entry from the list
|
||||
list_del(&entry->node); // remove this entry from the list
|
||||
|
||||
assert(entry->view->context == st->pipe);
|
||||
pipe_sampler_view_reference(&entry->view, NULL);
|
||||
|
@ -391,7 +391,7 @@ free_zombie_shaders(struct st_context *st)
|
|||
|
||||
LIST_FOR_EACH_ENTRY_SAFE(entry, next,
|
||||
&st->zombie_shaders.list.node, node) {
|
||||
LIST_DEL(&entry->node); // remove this entry from the list
|
||||
list_del(&entry->node); // remove this entry from the list
|
||||
|
||||
switch (entry->type) {
|
||||
case PIPE_SHADER_VERTEX:
|
||||
|
|
|
@ -640,7 +640,7 @@ st_framebuffers_purge(struct st_context *st)
|
|||
* deleted.
|
||||
*/
|
||||
if (!st_framebuffer_iface_lookup(smapi, stfbi)) {
|
||||
LIST_DEL(&stfb->head);
|
||||
list_del(&stfb->head);
|
||||
st_framebuffer_reference(&stfb, NULL);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -158,8 +158,6 @@ static inline void list_validate(const struct list_head *list)
|
|||
assert(node->next->prev == node && node->prev->next == node);
|
||||
}
|
||||
|
||||
#define LIST_DEL(__item) list_del(__item)
|
||||
|
||||
#define LIST_ENTRY(__type, __item, __field) \
|
||||
((__type *)(((char *)(__item)) - offsetof(__type, __field)))
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ remove_from_atexit_list(struct util_queue *queue)
|
|||
mtx_lock(&exit_mutex);
|
||||
LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
|
||||
if (iter == queue) {
|
||||
LIST_DEL(&iter->head);
|
||||
list_del(&iter->head);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue