util/list: rename LIST_ENTRY() to list_entry()

This follows the Linux kernel convention, and avoids collision with
macOS header macro.

Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/6751
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/6840
Cc: mesa-stable
Signed-off-by: Eric Engestrom <eric@igalia.com>
Acked-by: David Heidelberg <david.heidelberg@collabora.com>
Reviewed-by: Yonggang Luo <luoyonggang@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17772>
This commit is contained in:
Eric Engestrom 2022-07-27 16:48:11 +01:00 committed by Marge Bot
parent a9ebf55d02
commit 2c67457e5e
39 changed files with 126 additions and 124 deletions

View File

@ -1516,7 +1516,7 @@ get_hole(struct radv_shader_arena *arena, struct list_head *head)
if (head == &arena->entries)
return NULL;
union radv_shader_arena_block *hole = LIST_ENTRY(union radv_shader_arena_block, head, list);
union radv_shader_arena_block *hole = list_entry(head, union radv_shader_arena_block, list);
return hole->freelist.prev ? hole : NULL;
}

View File

@ -3971,10 +3971,10 @@ nir_before_src(nir_src *src, bool is_if_condition)
}
assert(found);
#endif
/* The LIST_ENTRY macro is a generic container-of macro, it just happens
/* The list_entry() macro is a generic container-of macro, it just happens
* to have a more specific name.
*/
nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
nir_phi_src *phi_src = list_entry(src, nir_phi_src, src);
return nir_after_block_before_jump(phi_src->pred);
} else {
return nir_before_instr(src->parent_instr);

View File

@ -76,7 +76,7 @@ void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time)
struct etna_bo *bo;
while (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);
bo = list_entry(bucket->list.next, struct etna_bo, list);
/* keep things in cache for at least 1 second: */
if (time && ((time - bo->free_time) <= 1))

View File

@ -91,7 +91,7 @@ fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
struct fd_bo *bo;
while (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
bo = list_entry(bucket->list.next, struct fd_bo, list);
/* keep things in cache for at least 1 second: */
if (time && ((time - bo->free_time) <= 1))

View File

@ -2037,7 +2037,7 @@ insert_liveout_copy(struct ir3_block *block, physreg_t dst, physreg_t src,
struct ir3_instruction *old_pcopy = NULL;
if (!list_is_empty(&block->instr_list)) {
struct ir3_instruction *last =
LIST_ENTRY(struct ir3_instruction, block->instr_list.prev, node);
list_entry(block->instr_list.prev, struct ir3_instruction, node);
if (last->opc == OPC_META_PARALLEL_COPY)
old_pcopy = last;
}

View File

@ -97,7 +97,7 @@ delete_block(struct ir3 *ir, struct ir3_block *block)
assert(block->physical_predecessors_count == 1);
struct ir3_block *pred = block->physical_predecessors[0];
assert(block->node.next != &ir->block_list);
struct ir3_block *next = LIST_ENTRY(struct ir3_block, block->node.next, node);
struct ir3_block *next = list_entry(block->node.next, struct ir3_block, node);
if (pred->physical_successors[1] == block)
pred->physical_successors[1] = next;
else

View File

@ -1370,7 +1370,7 @@ add_barrier_deps(struct ir3_block *block, struct ir3_instruction *instr)
*/
while (prev != &block->instr_list) {
struct ir3_instruction *pi =
LIST_ENTRY(struct ir3_instruction, prev, node);
list_entry(prev, struct ir3_instruction, node);
prev = prev->prev;
@ -1391,7 +1391,7 @@ add_barrier_deps(struct ir3_block *block, struct ir3_instruction *instr)
*/
while (next != &block->instr_list) {
struct ir3_instruction *ni =
LIST_ENTRY(struct ir3_instruction, next, node);
list_entry(next, struct ir3_instruction, node);
next = next->next;

View File

@ -694,7 +694,7 @@ hud_stop_queries(struct hud_context *hud, struct pipe_context *pipe)
* per frame. It will eventually reach an equilibrium.
*/
if (gr->current_value <
LIST_ENTRY(struct hud_graph, next, head)->current_value) {
list_entry(next, struct hud_graph, head)->current_value) {
list_del(&gr->head);
list_add(&gr->head, &next->head);
}
@ -1432,7 +1432,7 @@ hud_parse_env_var(struct hud_context *hud, struct pipe_screen *screen,
strip_hyphens(s);
if (added && !list_is_empty(&pane->graph_list)) {
struct hud_graph *graph;
graph = LIST_ENTRY(struct hud_graph, pane->graph_list.prev, head);
graph = list_entry(pane->graph_list.prev, struct hud_graph, head);
strncpy(graph->name, s, sizeof(graph->name)-1);
graph->name[sizeof(graph->name)-1] = 0;
}

View File

@ -207,7 +207,7 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
curr = fenced_mgr->unfenced.next;
next = curr->next;
while (curr != &fenced_mgr->unfenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
fenced_buf = list_entry(curr, struct fenced_buffer, head);
assert(!fenced_buf->fence);
debug_printf("%10p %"PRIu64" %8u %7s\n",
(void *) fenced_buf,
@ -222,7 +222,7 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
next = curr->next;
while (curr != &fenced_mgr->fenced) {
int signaled;
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
fenced_buf = list_entry(curr, struct fenced_buffer, head);
assert(fenced_buf->buffer);
signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
@ -401,7 +401,7 @@ fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
curr = fenced_mgr->fenced.next;
next = curr->next;
while (curr != &fenced_mgr->fenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
fenced_buf = list_entry(curr, struct fenced_buffer, head);
if (fenced_buf->fence != prev_fence) {
int signaled;
@ -455,7 +455,7 @@ fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
curr = fenced_mgr->unfenced.next;
next = curr->next;
while (curr != &fenced_mgr->unfenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
fenced_buf = list_entry(curr, struct fenced_buffer, head);
/* We can only move storage if the buffer is not mapped and not
* validated.

View File

@ -348,7 +348,7 @@ pb_debug_manager_dump_locked(struct pb_debug_manager *mgr)
curr = mgr->list.next;
next = curr->next;
while(curr != &mgr->list) {
buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);
buf = list_entry(curr, struct pb_debug_buffer, head);
debug_printf("buffer = %p\n", (void *) buf);
debug_printf(" .size = 0x%"PRIx64"\n", buf->base.size);

View File

@ -403,7 +403,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
/* Allocate the buffer from a partial (or just created) slab */
list = mgr->slabs.next;
slab = LIST_ENTRY(struct pb_slab, list, head);
slab = list_entry(list, struct pb_slab, head);
/* If totally full remove from the partial slab list */
if (--slab->numFree == 0)
@ -413,7 +413,7 @@ pb_slab_manager_create_buffer(struct pb_manager *_mgr,
list_delinit(list);
mtx_unlock(&mgr->mutex);
buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
buf = list_entry(list, struct pb_slab_buffer, head);
pipe_reference_init(&buf->base.reference, 1);
buf->base.alignment_log2 = util_logbase2(desc->alignment);

View File

@ -63,7 +63,7 @@ release_expired_buffers_locked(struct list_head *cache,
curr = cache->next;
next = curr->next;
while (curr != cache) {
entry = LIST_ENTRY(struct pb_cache_entry, curr, head);
entry = list_entry(curr, struct pb_cache_entry, head);
if (!os_time_timeout(entry->start, entry->end, current_time))
break;
@ -166,7 +166,7 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
/* search in the expired buffers, freeing them in the process */
now = os_time_get();
while (cur != cache) {
cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
cur_entry = list_entry(cur, struct pb_cache_entry, head);
if (!entry && (ret = pb_cache_is_buffer_compat(cur_entry, size,
alignment, usage)) > 0)
@ -188,7 +188,7 @@ pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
/* keep searching in the hot buffers */
if (!entry && ret != -1) {
while (cur != cache) {
cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
cur_entry = list_entry(cur, struct pb_cache_entry, head);
ret = pb_cache_is_buffer_compat(cur_entry, size, alignment, usage);
if (ret > 0) {
@ -237,7 +237,7 @@ pb_cache_release_all_buffers(struct pb_cache *mgr)
curr = cache->next;
next = curr->next;
while (curr != cache) {
buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
buf = list_entry(curr, struct pb_cache_entry, head);
destroy_buffer_locked(buf);
curr = next;
next = curr->next;

View File

@ -147,7 +147,7 @@ pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bo
* entries, try reclaiming entries.
*/
if (list_is_empty(&group->slabs) ||
list_is_empty(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free)) {
list_is_empty(&list_entry(group->slabs.next, struct pb_slab, head)->free)) {
if (reclaim_all)
pb_slabs_reclaim_all_locked(slabs);
else
@ -156,7 +156,7 @@ pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bo
/* Remove slabs without free entries. */
while (!list_is_empty(&group->slabs)) {
slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
slab = list_entry(group->slabs.next, struct pb_slab, head);
if (!list_is_empty(&slab->free))
break;
@ -180,7 +180,7 @@ pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bo
list_add(&slab->head, &group->slabs);
}
entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
entry = list_entry(slab->free.next, struct pb_slab_entry, head);
list_del(&entry->head);
slab->num_free--;
@ -287,7 +287,7 @@ pb_slabs_deinit(struct pb_slabs *slabs)
*/
while (!list_is_empty(&slabs->reclaim)) {
struct pb_slab_entry *entry =
LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
list_entry(slabs->reclaim.next, struct pb_slab_entry, head);
pb_slab_reclaim(slabs, entry);
}

View File

@ -365,13 +365,13 @@ ensure_sanity(const struct util_cache *cache)
}
else {
struct util_cache_entry *header =
LIST_ENTRY(struct util_cache_entry, &cache->lru, list);
list_entry(&cache->lru, struct util_cache_entry, list);
assert (header);
assert (!list_is_empty(&cache->lru.list));
for (i = 0; i < cache->count; i++)
header = LIST_ENTRY(struct util_cache_entry, &header, list);
header = list_entry(&header, struct util_cache_entry, list);
assert(header == &cache->lru);
}

View File

@ -59,7 +59,7 @@ util_dirty_surfaces_use_for_sampling(struct pipe_context *pipe, struct util_dirt
struct list_head *p, *next;
for(p = dss->dirty_list.next; p != &dss->dirty_list; p = next)
{
struct util_dirty_surface *ds = LIST_ENTRY(struct util_dirty_surface, p, dirty_list);
struct util_dirty_surface *ds = list_entry(p, struct util_dirty_surface, dirty_list);
next = p->next;
flush(pipe, &ds->base);
@ -74,7 +74,7 @@ util_dirty_surfaces_use_levels_for_sampling(struct pipe_context *pipe, struct ut
return;
for(p = dss->dirty_list.next; p != &dss->dirty_list; p = next)
{
struct util_dirty_surface *ds = LIST_ENTRY(struct util_dirty_surface, p, dirty_list);
struct util_dirty_surface *ds = list_entry(p, struct util_dirty_surface, dirty_list);
next = p->next;
if(ds->base.u.tex.level >= first && ds->base.u.tex.level <= last)

View File

@ -80,7 +80,7 @@ optimize_branches(gpir_compiler *comp)
if (block->list.prev == &comp->block_list)
continue;
gpir_block *prev_block = LIST_ENTRY(gpir_block, block->list.prev, list);
gpir_block *prev_block = list_entry(block->list.prev, gpir_block, list);
if (list_is_empty(&prev_block->node_list))
continue;
@ -109,7 +109,7 @@ optimize_branches(gpir_compiler *comp)
/* Delete the branch */
list_del(&node->list);
block->successors[0] = LIST_ENTRY(gpir_block, block->list.next, list);
block->successors[0] = list_entry(block->list.next, gpir_block, list);
}
}

View File

@ -1297,9 +1297,9 @@ static bool try_node(sched_ctx *ctx)
* the list at all. We know better here, so we have to open-code
* list_for_each_entry() without the check in order to not assert.
*/
for (gpir_node *node = LIST_ENTRY(gpir_node, ctx->ready_list.next, list);
for (gpir_node *node = list_entry(ctx->ready_list.next, gpir_node, list);
&node->list != &ctx->ready_list;
node = LIST_ENTRY(gpir_node, node->list.next, list)) {
node = list_entry(node->list.next, gpir_node, list)) {
if (best_score != INT_MIN) {
if (node->sched.dist < best_node->sched.dist)
break;

View File

@ -659,7 +659,7 @@ static void ppir_codegen_encode_branch(ppir_node *node, void *code)
while (list_is_empty(&target->instr_list)) {
if (!target->list.next)
break;
target = LIST_ENTRY(ppir_block, target->list.next, list);
target = list_entry(target->list.next, ppir_block, list);
}
assert(!list_is_empty(&target->instr_list));

View File

@ -233,7 +233,7 @@ ppir_liveness_compute_live_sets(ppir_compiler *comp)
}
}
else {
ppir_instr *next_instr = LIST_ENTRY(ppir_instr, instr->list.next, list);
ppir_instr *next_instr = list_entry(instr->list.next, ppir_instr, list);
ppir_liveness_propagate(comp,
instr->live_set, next_instr->live_set,
instr->live_mask, next_instr->live_mask);

View File

@ -183,12 +183,12 @@ nouveau_mm_allocate(struct nouveau_mman *cache,
}
if (!list_is_empty(&bucket->used)) {
slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
slab = list_entry(bucket->used.next, struct mm_slab, head);
} else {
if (list_is_empty(&bucket->free)) {
mm_slab_new(cache, bucket, MAX2(mm_get_order(size), MM_MIN_ORDER));
}
slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
slab = list_entry(bucket->free.next, struct mm_slab, head);
list_del(&slab->head);
list_add(&slab->head, &bucket->used);

View File

@ -28,8 +28,8 @@
#include "nv30/nv30_screen.h"
#include "nv30/nv30_context.h"
#define LIST_FIRST_ENTRY(__type, __item, __field) \
LIST_ENTRY(__type, (__item)->next, __field)
#define LIST_FIRST_ENTRY(__item, __type, __field) \
list_entry((__item)->next, __type, __field)
struct nv30_query_object {
struct list_head list;
@ -76,7 +76,7 @@ nv30_query_object_new(struct nv30_screen *screen)
* spin waiting for one to become free
*/
while (nouveau_heap_alloc(screen->query_heap, 32, NULL, &qo->hw)) {
oq = LIST_FIRST_ENTRY(struct nv30_query_object, &screen->queries, list);
oq = LIST_FIRST_ENTRY(&screen->queries, struct nv30_query_object, list);
nv30_query_object_del(screen, &oq);
}

View File

@ -387,7 +387,7 @@ static int assign_alu_units(struct r600_bytecode *bc, struct r600_bytecode_alu *
for (i = 0; i < max_slots; i++)
assignment[i] = NULL;
for (alu = alu_first; alu; alu = LIST_ENTRY(struct r600_bytecode_alu, alu->list.next, list)) {
for (alu = alu_first; alu; alu = list_entry(alu->list.next, struct r600_bytecode_alu, list)) {
chan = alu->dst.chan;
if (max_slots == 4)
trans = 0;
@ -990,7 +990,7 @@ static int merge_inst_groups(struct r600_bytecode *bc, struct r600_bytecode_alu
}
/* determine new last instruction */
LIST_ENTRY(struct r600_bytecode_alu, bc->cf_last->alu.prev, list)->last = 1;
list_entry(bc->cf_last->alu.prev, struct r600_bytecode_alu, list)->last = 1;
/* determine new first instruction */
for (i = 0; i < max_slots; ++i) {

View File

@ -204,7 +204,7 @@ static unsigned get_cpb_num(struct rvce_encoder *enc)
*/
struct rvce_cpb_slot *current_slot(struct rvce_encoder *enc)
{
return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.prev, list);
return list_entry(enc->cpb_slots.prev, struct rvce_cpb_slot, list);
}
/**
@ -212,7 +212,7 @@ struct rvce_cpb_slot *current_slot(struct rvce_encoder *enc)
*/
struct rvce_cpb_slot *l0_slot(struct rvce_encoder *enc)
{
return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.next, list);
return list_entry(enc->cpb_slots.next, struct rvce_cpb_slot, list);
}
/**
@ -220,7 +220,7 @@ struct rvce_cpb_slot *l0_slot(struct rvce_encoder *enc)
*/
struct rvce_cpb_slot *l1_slot(struct rvce_encoder *enc)
{
return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.next->next, list);
return list_entry(enc->cpb_slots.next->next, struct rvce_cpb_slot, list);
}
/**
@ -334,8 +334,9 @@ static void rvce_end_frame(struct pipe_video_codec *encoder,
struct pipe_picture_desc *picture)
{
struct rvce_encoder *enc = (struct rvce_encoder*)encoder;
struct rvce_cpb_slot *slot = LIST_ENTRY(
struct rvce_cpb_slot, enc->cpb_slots.prev, list);
struct rvce_cpb_slot *slot = list_entry(enc->cpb_slots.prev,
struct rvce_cpb_slot,
list);
if (!enc->dual_inst || enc->bs_idx > 1)
flush(enc);

View File

@ -46,7 +46,7 @@ static void gfx10_release_query_buffers(struct si_context *sctx,
while (first) {
struct gfx10_sh_query_buffer *qbuf = first;
if (first != last)
first = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.next, list);
first = list_entry(qbuf->list.next, struct gfx10_sh_query_buffer, list);
else
first = NULL;
@ -242,7 +242,7 @@ static bool gfx10_sh_query_get_result(struct si_context *sctx, struct si_query *
assert(query->last);
for (struct gfx10_sh_query_buffer *qbuf = query->last;;
qbuf = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.prev, list)) {
qbuf = list_entry(qbuf->list.prev, struct gfx10_sh_query_buffer, list)) {
unsigned usage = PIPE_MAP_READ | (wait ? 0 : PIPE_MAP_DONTBLOCK);
void *map;
@ -411,7 +411,7 @@ static void gfx10_sh_query_get_result_resource(struct si_context *sctx, struct s
if (qbuf == query->last)
break;
qbuf = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.next, list);
qbuf = list_entry(qbuf->list.next, struct gfx10_sh_query_buffer, list);
}
si_restore_qbo_state(sctx, &saved_state);

View File

@ -191,7 +191,7 @@ static unsigned get_cpb_num(struct rvce_encoder *enc)
*/
struct rvce_cpb_slot *si_current_slot(struct rvce_encoder *enc)
{
return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.prev, list);
return list_entry(enc->cpb_slots.prev, struct rvce_cpb_slot, list);
}
/**
@ -199,7 +199,7 @@ struct rvce_cpb_slot *si_current_slot(struct rvce_encoder *enc)
*/
struct rvce_cpb_slot *si_l0_slot(struct rvce_encoder *enc)
{
return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.next, list);
return list_entry(enc->cpb_slots.next, struct rvce_cpb_slot, list);
}
/**
@ -207,7 +207,7 @@ struct rvce_cpb_slot *si_l0_slot(struct rvce_encoder *enc)
*/
struct rvce_cpb_slot *si_l1_slot(struct rvce_encoder *enc)
{
return LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.next->next, list);
return list_entry(enc->cpb_slots.next->next, struct rvce_cpb_slot, list);
}
/**
@ -326,7 +326,7 @@ static void rvce_end_frame(struct pipe_video_codec *encoder, struct pipe_video_b
struct pipe_picture_desc *picture)
{
struct rvce_encoder *enc = (struct rvce_encoder *)encoder;
struct rvce_cpb_slot *slot = LIST_ENTRY(struct rvce_cpb_slot, enc->cpb_slots.prev, list);
struct rvce_cpb_slot *slot = list_entry(enc->cpb_slots.prev, struct rvce_cpb_slot, list);
if (!enc->dual_inst || enc->bs_idx > 1)
flush(enc);

View File

@ -1090,7 +1090,7 @@ static void si_emit_query_predication(struct si_context *ctx)
while (first) {
qbuf = first;
if (first != last)
first = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.next, list);
first = list_entry(qbuf->list.next, struct gfx10_sh_query_buffer, list);
else
first = NULL;

View File

@ -1116,7 +1116,7 @@ svga_context_flush_buffers(struct svga_context *svga)
curr = svga->dirty_buffers.next;
next = curr->next;
while (curr != &svga->dirty_buffers) {
struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);
struct svga_buffer *sbuf = list_entry(curr, struct svga_buffer, head);
assert(p_atomic_read(&sbuf->b.reference.count) != 0);
assert(sbuf->dma.pending);

View File

@ -113,7 +113,7 @@ svga_screen_cache_lookup(struct svga_screen *svgascreen,
while (curr != &cache->bucket[bucket]) {
++tries;
entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, bucket_head);
entry = list_entry(curr, struct svga_host_surface_cache_entry, bucket_head);
assert(entry->handle);
@ -261,16 +261,18 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
/* An empty entry has no surface associated with it.
* Use the first empty entry.
*/
entry = LIST_ENTRY(struct svga_host_surface_cache_entry,
cache->empty.next, head);
entry = list_entry(cache->empty.next,
struct svga_host_surface_cache_entry,
head);
/* Remove from LRU list */
list_del(&entry->head);
}
else if (!list_is_empty(&cache->unused)) {
/* free the last used buffer and reuse its entry */
entry = LIST_ENTRY(struct svga_host_surface_cache_entry,
cache->unused.prev, head);
entry = list_entry(cache->unused.prev,
struct svga_host_surface_cache_entry,
head);
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
"unref sid %p (make space)\n", entry->handle);
@ -340,7 +342,7 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
curr = cache->invalidated.next;
next = curr->next;
while (curr != &cache->invalidated) {
entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, head);
entry = list_entry(curr, struct svga_host_surface_cache_entry, head);
assert(entry->handle);
@ -366,7 +368,7 @@ svga_screen_cache_flush(struct svga_screen *svgascreen,
curr = cache->validated.next;
next = curr->next;
while (curr != &cache->validated) {
entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, head);
entry = list_entry(curr, struct svga_host_surface_cache_entry, head);
assert(entry->handle);
assert(svga_have_gb_objects(svga));
@ -644,8 +646,7 @@ svga_screen_cache_dump(const struct svga_screen *svgascreen)
curr = cache->bucket[bucket].next;
while (curr && curr != &cache->bucket[bucket]) {
struct svga_host_surface_cache_entry *entry =
LIST_ENTRY(struct svga_host_surface_cache_entry,
curr, bucket_head);
list_entry(curr, struct svga_host_surface_cache_entry, bucket_head);
if (entry->key.format == SVGA3D_BUFFER) {
debug_printf(" %p: buffer %u bytes\n",
entry->handle,

View File

@ -85,12 +85,12 @@ vc4_bo_dump_stats(struct vc4_screen *screen)
fprintf(stderr, " BOs cached size: %dkb\n", cache->bo_size / 1024);
if (!list_is_empty(&cache->time_list)) {
struct vc4_bo *first = LIST_ENTRY(struct vc4_bo,
cache->time_list.next,
time_list);
struct vc4_bo *last = LIST_ENTRY(struct vc4_bo,
cache->time_list.prev,
struct vc4_bo *first = list_entry(cache->time_list.next,
struct vc4_bo,
time_list);
struct vc4_bo *last = list_entry(cache->time_list.prev,
struct vc4_bo,
time_list);
fprintf(stderr, " oldest cache time: %ld\n",
(long)first->free_time);

View File

@ -1921,8 +1921,8 @@ static struct dec_av1_task *dec_av1_NeedTask(vid_dec_PrivateType *priv)
assert(pscreen);
if (!list_is_empty(&priv->codec_data.av1.free_tasks)) {
task = LIST_ENTRY(struct dec_av1_task,
priv->codec_data.av1.free_tasks.next, list);
task = list_entry(priv->codec_data.av1.free_tasks.next,
struct dec_av1_task, list);
task->buf_ref_count = 1;
list_del(&task->list);
return task;
@ -2020,8 +2020,8 @@ static bool dec_av1_GetStartedTask(vid_dec_PrivateType *priv,
if (priv->codec_data.av1.que_num <= 16)
return false;
started_task = LIST_ENTRY(struct dec_av1_task,
priv->codec_data.av1.started_tasks.next, list);
started_task = list_entry(priv->codec_data.av1.started_tasks.next,
struct dec_av1_task, list);
list_del(&started_task->list);
list_addtail(&started_task->list, tasks);
--priv->codec_data.av1.que_num;
@ -2402,14 +2402,14 @@ void vid_dec_av1_FrameDecoded(OMX_COMPONENTTYPE *comp,
stacked = true;
if (list_is_empty(&inp->tasks)) {
task = LIST_ENTRY(struct dec_av1_task,
priv->codec_data.av1.started_tasks.next, list);
task = list_entry(priv->codec_data.av1.started_tasks.next,
struct dec_av1_task, list);
list_del(&task->list);
list_addtail(&task->list, &inp->tasks);
--priv->codec_data.av1.que_num;
}
task = LIST_ENTRY(struct dec_av1_task, inp->tasks.next, list);
task = list_entry(inp->tasks.next, struct dec_av1_task, list);
if (!task->no_show_frame) {
vid_dec_FillOutput(priv, task->buf, output);

View File

@ -823,7 +823,7 @@ static void enc_ClearBframes(omx_base_PortType *port, struct input_buf_private *
if (list_is_empty(&priv->b_frames))
return;
task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
task = list_entry(priv->b_frames.prev, struct encode_task, list);
list_del(&task->list);
/* promote last frame to P frame */
@ -912,7 +912,7 @@ static OMX_ERRORTYPE vid_enc_EncodeFrame(omx_base_PortType *port, OMX_BUFFERHEAD
}
if (stacked_num == priv->stacked_frames_num) {
struct encode_task *t;
t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
t = list_entry(priv->stacked_tasks.next, struct encode_task, list);
list_del(&t->list);
list_addtail(&t->list, &inp->tasks);
}

View File

@ -273,7 +273,7 @@ static void enc_ClearBframes(vid_enc_PrivateType * priv, struct input_buf_privat
if (list_is_empty(&priv->b_frames))
return;
task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
task = list_entry(priv->b_frames.prev, struct encode_task, list);
list_del(&task->list);
/* promote last frame to P frame */
@ -367,7 +367,7 @@ static OMX_ERRORTYPE encode_frame(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
}
if (stacked_num == priv->stacked_frames_num) {
struct encode_task *t;
t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
t = list_entry(priv->stacked_tasks.next, struct encode_task, list);
list_del(&t->list);
list_addtail(&t->list, &inp->tasks);
}

View File

@ -137,7 +137,7 @@ void vid_enc_BufferEncoded_common(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
}
#endif
task = LIST_ENTRY(struct encode_task, inp->tasks.next, list);
task = list_entry(inp->tasks.next, struct encode_task, list);
list_del(&task->list);
list_addtail(&task->list, &priv->used_tasks);
@ -183,7 +183,7 @@ struct encode_task *enc_NeedTask_common(vid_enc_PrivateType * priv, OMX_VIDEO_PO
struct encode_task *task;
if (!list_is_empty(&priv->free_tasks)) {
task = LIST_ENTRY(struct encode_task, priv->free_tasks.next, list);
task = list_entry(priv->free_tasks.next, struct encode_task, list);
list_del(&task->list);
return task;
}

View File

@ -171,7 +171,7 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
curr = fenced_mgr->unfenced.next;
next = curr->next;
while(curr != &fenced_mgr->unfenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
fenced_buf = list_entry(curr, struct fenced_buffer, head);
assert(!fenced_buf->fence);
debug_printf("%10p %"PRIu64" %8u %7s\n",
(void *) fenced_buf,
@ -186,7 +186,7 @@ fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
next = curr->next;
while(curr != &fenced_mgr->fenced) {
int signaled;
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
fenced_buf = list_entry(curr, struct fenced_buffer, head);
assert(fenced_buf->buffer);
signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
@ -370,7 +370,7 @@ fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
curr = fenced_mgr->fenced.next;
next = curr->next;
while(curr != &fenced_mgr->fenced) {
fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
fenced_buf = list_entry(curr, struct fenced_buffer, head);
if(fenced_buf->fence != prev_fence) {
int signaled;

View File

@ -553,7 +553,7 @@ anv_batch_bo_list_clone(const struct list_head *list,
static struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
return list_entry(cmd_buffer->batch_bos.prev, struct anv_batch_bo, link);
}
struct anv_address

View File

@ -1301,16 +1301,16 @@ bi_last_instr_in_clause(bi_clause *clause)
* (end) of the clause and adding a condition for the clause boundary */
#define bi_foreach_instr_in_clause(block, clause, pos) \
for (bi_instr *pos = LIST_ENTRY(bi_instr, bi_first_instr_in_clause(clause), link); \
for (bi_instr *pos = list_entry(bi_first_instr_in_clause(clause), bi_instr, link); \
(&pos->link != &(block)->instructions) \
&& (pos != bi_next_op(bi_last_instr_in_clause(clause))); \
pos = LIST_ENTRY(bi_instr, pos->link.next, link))
pos = list_entry(pos->link.next, bi_instr, link))
#define bi_foreach_instr_in_clause_rev(block, clause, pos) \
for (bi_instr *pos = LIST_ENTRY(bi_instr, bi_last_instr_in_clause(clause), link); \
for (bi_instr *pos = list_entry(bi_last_instr_in_clause(clause), bi_instr, link); \
(&pos->link != &(block)->instructions) \
&& pos != bi_prev_op(bi_first_instr_in_clause(clause)); \
pos = LIST_ENTRY(bi_instr, pos->link.prev, link))
pos = list_entry(pos->link.prev, bi_instr, link))
static inline bi_cursor
bi_before_clause(bi_clause *clause)

View File

@ -187,7 +187,7 @@ static inline void list_move_to(struct list_head *item, struct list_head *loc) {
list_add(item, loc);
}
#define LIST_ENTRY(__type, __item, __field) \
#define list_entry(__item, __type, __field) \
((__type *)(((char *)(__item)) - offsetof(__type, __field)))
/**
@ -200,10 +200,10 @@ static inline void list_move_to(struct list_head *item, struct list_head *loc) {
- ((char *)&(sample)->member - (char *)(sample)))
#define list_first_entry(ptr, type, member) \
LIST_ENTRY(type, (ptr)->next, member)
list_entry((ptr)->next, type, member)
#define list_last_entry(ptr, type, member) \
LIST_ENTRY(type, (ptr)->prev, member)
list_entry((ptr)->prev, type, member)
#define LIST_FOR_EACH_ENTRY(pos, head, member) \
@ -234,57 +234,57 @@ static inline void list_move_to(struct list_head *item, struct list_head *loc) {
pos = list_container_of(pos->member.prev, pos, member))
#define list_for_each_entry(type, pos, head, member) \
for (type *pos = LIST_ENTRY(type, (head)->next, member), \
*__next = LIST_ENTRY(type, pos->member.next, member); \
for (type *pos = list_entry((head)->next, type, member), \
*__next = list_entry(pos->member.next, type, member); \
&pos->member != (head); \
pos = LIST_ENTRY(type, pos->member.next, member), \
pos = list_entry(pos->member.next, type, member), \
list_assert(pos == __next, "use _safe iterator"), \
__next = LIST_ENTRY(type, __next->member.next, member))
__next = list_entry(__next->member.next, type, member))
#define list_for_each_entry_safe(type, pos, head, member) \
for (type *pos = LIST_ENTRY(type, (head)->next, member), \
*__next = LIST_ENTRY(type, pos->member.next, member); \
for (type *pos = list_entry((head)->next, type, member), \
*__next = list_entry(pos->member.next, type, member); \
&pos->member != (head); \
pos = __next, \
__next = LIST_ENTRY(type, __next->member.next, member))
__next = list_entry(__next->member.next, type, member))
#define list_for_each_entry_rev(type, pos, head, member) \
for (type *pos = LIST_ENTRY(type, (head)->prev, member), \
*__prev = LIST_ENTRY(type, pos->member.prev, member); \
for (type *pos = list_entry((head)->prev, type, member), \
*__prev = list_entry(pos->member.prev, type, member); \
&pos->member != (head); \
pos = LIST_ENTRY(type, pos->member.prev, member), \
pos = list_entry(pos->member.prev, type, member), \
list_assert(pos == __prev, "use _safe iterator"), \
__prev = LIST_ENTRY(type, __prev->member.prev, member))
__prev = list_entry(__prev->member.prev, type, member))
#define list_for_each_entry_safe_rev(type, pos, head, member) \
for (type *pos = LIST_ENTRY(type, (head)->prev, member), \
*__prev = LIST_ENTRY(type, pos->member.prev, member); \
for (type *pos = list_entry((head)->prev, type, member), \
*__prev = list_entry(pos->member.prev, type, member); \
&pos->member != (head); \
pos = __prev, \
__prev = LIST_ENTRY(type, __prev->member.prev, member))
__prev = list_entry(__prev->member.prev, type, member))
#define list_for_each_entry_from(type, pos, start, head, member) \
for (type *pos = LIST_ENTRY(type, (start), member); \
for (type *pos = list_entry((start), type, member); \
&pos->member != (head); \
pos = LIST_ENTRY(type, pos->member.next, member))
pos = list_entry(pos->member.next, type, member))
#define list_for_each_entry_from_safe(type, pos, start, head, member) \
for (type *pos = LIST_ENTRY(type, (start), member), \
*__next = LIST_ENTRY(type, pos->member.next, member); \
for (type *pos = list_entry((start), type, member), \
*__next = list_entry(pos->member.next, type, member); \
&pos->member != (head); \
pos = __next, \
__next = LIST_ENTRY(type, __next->member.next, member))
__next = list_entry(__next->member.next, type, member))
#define list_for_each_entry_from_rev(type, pos, start, head, member) \
for (type *pos = LIST_ENTRY(type, (start), member); \
for (type *pos = list_entry((start), type, member); \
&pos->member != (head); \
pos = LIST_ENTRY(type, pos->member.prev, member))
pos = list_entry(pos->member.prev, type, member))
#define list_pair_for_each_entry(type, pos1, pos2, head1, head2, member) \
for (type *pos1 = LIST_ENTRY(type, (head1)->next, member), \
*pos2 = LIST_ENTRY(type, (head2)->next, member); \
for (type *pos1 = list_entry((head1)->next, type, member), \
*pos2 = list_entry((head2)->next, type, member); \
&pos1->member != (head1) && &pos2->member != (head2); \
pos1 = LIST_ENTRY(type, pos1->member.next, member), \
pos2 = LIST_ENTRY(type, pos2->member.next, member))
pos1 = list_entry(pos1->member.next, type, member), \
pos2 = list_entry(pos2->member.next, type, member))
#endif /*_UTIL_LIST_H_*/

View File

@ -718,7 +718,7 @@ u_trace_clone_append(struct u_trace_iterator begin_it,
break;
from_idx = 0;
from_chunk = LIST_ENTRY(struct u_trace_chunk, from_chunk->node.next, node);
from_chunk = list_entry(from_chunk->node.next, struct u_trace_chunk, node);
}
}
}
@ -737,7 +737,7 @@ u_trace_disable_event_range(struct u_trace_iterator begin_it,
memset(&current_chunk->traces[start_idx], 0,
(current_chunk->num_traces - start_idx) * sizeof(struct u_trace_event));
start_idx = 0;
current_chunk = LIST_ENTRY(struct u_trace_chunk, current_chunk->node.next, node);
current_chunk = list_entry(current_chunk->node.next, struct u_trace_chunk, node);
}
memset(&current_chunk->traces[start_idx], 0,

View File

@ -310,7 +310,7 @@ debug_memory_end(unsigned long start_no)
void *ptr;
struct debug_memory_footer *ftr;
hdr = LIST_ENTRY(struct debug_memory_header, entry, head);
hdr = list_entry(entry, struct debug_memory_header, head);
ptr = data_from_header(hdr);
ftr = footer_from_header(hdr);
@ -417,7 +417,7 @@ debug_memory_check(void)
struct debug_memory_footer *ftr;
const char *ptr;
hdr = LIST_ENTRY(struct debug_memory_header, entry, head);
hdr = list_entry(entry, struct debug_memory_header, head);
ftr = footer_from_header(hdr);
ptr = (const char *) data_from_header(hdr);