intel: Rename gen_{mapped, clflush, invalidate, shader} prefix to intel_{..}

export SEARCH_PATH="src/intel src/gallium/drivers/iris src/mesa/drivers/dri/i965"
grep -E "gen_" -rIl $SEARCH_PATH | xargs sed -i -e "s/gen_\(mapped\|clflush\|invalidate\|shader\)/intel_\1/g"

Signed-off-by: Anuj Phogat <anuj.phogat@gmail.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10241>
commit 4f42b28cc3 (parent 07eec673fc)
Author: Anuj Phogat, 2021-04-07 13:22:19 -07:00 (committed by Marge Bot)
10 changed files with 19 additions and 19 deletions


@@ -1070,7 +1070,7 @@ iris_bo_map_cpu(struct pipe_debug_callback *dbg,
        * LLC entirely requiring us to keep dirty pixels for the scanout
        * out of any cache.)
        */
-      gen_invalidate_range(bo->map_cpu, bo->size);
+      intel_invalidate_range(bo->map_cpu, bo->size);
    }
 
    return bo->map_cpu;
@@ -1858,7 +1858,7 @@ intel_aux_map_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
    free(buffer);
 }
 
-static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
+static struct intel_mapped_pinned_buffer_alloc aux_map_allocator = {
    .alloc = intel_aux_map_buffer_alloc,
    .free = intel_aux_map_buffer_free,
 };


@@ -101,7 +101,7 @@ struct aux_map_buffer {
 struct intel_aux_map_context {
    void *driver_ctx;
    pthread_mutex_t mutex;
-   struct gen_mapped_pinned_buffer_alloc *buffer_alloc;
+   struct intel_mapped_pinned_buffer_alloc *buffer_alloc;
    uint32_t num_buffers;
    struct list_head buffers;
    uint64_t level3_base_addr;
@@ -199,7 +199,7 @@ intel_aux_map_get_state_num(struct intel_aux_map_context *ctx)
 
 struct intel_aux_map_context *
 intel_aux_map_init(void *driver_ctx,
-                   struct gen_mapped_pinned_buffer_alloc *buffer_alloc,
+                   struct intel_mapped_pinned_buffer_alloc *buffer_alloc,
                    const struct intel_device_info *devinfo)
 {
    struct intel_aux_map_context *ctx;
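[Editor's note: for orientation only, not part of the diff. After the rename, a driver brings up the aux map roughly as sketched below, assuming the aux_map_allocator vtable from the hunk above and driver-provided driver_ctx/devinfo values; setup_aux_map is a hypothetical name.]

/* Minimal usage sketch for the renamed API (assumptions noted above). */
static struct intel_aux_map_context *
setup_aux_map(void *driver_ctx, const struct intel_device_info *devinfo)
{
   /* aux_map_allocator is the vtable shown in the preceding hunks;
    * intel_aux_map_init is assumed to return NULL on failure. */
   return intel_aux_map_init(driver_ctx, &aux_map_allocator, devinfo);
}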


@@ -53,7 +53,7 @@ struct intel_device_info;
 
 struct intel_aux_map_context *
 intel_aux_map_init(void *driver_ctx,
-                   struct gen_mapped_pinned_buffer_alloc *buffer_alloc,
+                   struct intel_mapped_pinned_buffer_alloc *buffer_alloc,
                    const struct intel_device_info *devinfo);
 void
void


@@ -37,7 +37,7 @@ struct intel_buffer {
    void *driver_bo;
 };
 
-struct gen_mapped_pinned_buffer_alloc {
+struct intel_mapped_pinned_buffer_alloc {
    struct intel_buffer * (*alloc)(void *driver_ctx, uint32_t size);
    void (*free)(void *driver_ctx, struct intel_buffer *buffer);
 };
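[Editor's note: to make the vtable concrete, a malloc-backed implementation one might use in a standalone test. This is a sketch, not part of the change: real drivers back struct intel_buffer with GEM buffer objects and fill in more fields than the driver_bo member shown above; test_buffer_alloc, test_buffer_free, and test_allocator are hypothetical names.]

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical malloc-backed allocator; driver_ctx is unused here. */
static struct intel_buffer *
test_buffer_alloc(void *driver_ctx, uint32_t size)
{
   (void)driver_ctx;
   struct intel_buffer *buf = calloc(1, sizeof(*buf));
   if (buf)
      buf->driver_bo = malloc(size);   /* stands in for a real GEM BO */
   return buf;
}

static void
test_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
{
   (void)driver_ctx;
   free(buffer->driver_bo);
   free(buffer);
}

static struct intel_mapped_pinned_buffer_alloc test_allocator = {
   .alloc = test_buffer_alloc,
   .free = test_buffer_free,
};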


@@ -28,7 +28,7 @@
 #define CACHELINE_MASK 63
 
 static inline void
-gen_clflush_range(void *start, size_t size)
+intel_clflush_range(void *start, size_t size)
 {
    void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
    void *end = start + size;
@@ -43,13 +43,13 @@ static inline void
 intel_flush_range(void *start, size_t size)
 {
    __builtin_ia32_mfence();
-   gen_clflush_range(start, size);
+   intel_clflush_range(start, size);
 }
 
 static inline void
-gen_invalidate_range(void *start, size_t size)
+intel_invalidate_range(void *start, size_t size)
 {
-   gen_clflush_range(start, size);
+   intel_clflush_range(start, size);
 
    /* Modern Atom CPUs (Baytrail+) have issues with clflush serialization,
     * where mfence is not a sufficient synchronization barrier.  We must
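[Editor's note: the intended usage pattern for these helpers, as a sketch rather than code from this change. On a CPU mapping that is not cache coherent, flush after CPU writes and invalidate before CPU reads, mirroring the comments in the surrounding hunks; cpu_write_then_read is a hypothetical name.]

#include <stddef.h>
#include <string.h>

/* Hypothetical round trip over a non-coherent CPU mapping. */
static void
cpu_write_then_read(void *map, size_t size)
{
   memset(map, 0, size);            /* CPU writes the buffer...       */
   intel_flush_range(map, size);    /* ...then pushes the dirty lines */

   /* ... the GPU reads and rewrites the buffer here ... */

   intel_invalidate_range(map, size); /* drop possibly stale lines    */
   /* CPU reads of map now observe the GPU's writes. */
}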


@@ -189,7 +189,7 @@ padding_is_good(int fd, uint32_t handle)
     * if the bo is not cache coherent we likely need to
     * invalidate the cache lines to get it.
     */
-   gen_invalidate_range(mapped, PADDING_SIZE);
+   intel_invalidate_range(mapped, PADDING_SIZE);
 
    expected_value = handle & 0xFF;
    for (uint32_t i = 0; i < PADDING_SIZE; ++i) {


@@ -2750,7 +2750,7 @@ anv_device_init_trivial_batch(struct anv_device *device)
    anv_batch_emit(&batch, GFX7_MI_NOOP, noop);
 
    if (!device->info.has_llc)
-      gen_clflush_range(batch.start, batch.next - batch.start);
+      intel_clflush_range(batch.start, batch.next - batch.start);
 
    return VK_SUCCESS;
 }
@@ -2866,7 +2866,7 @@ intel_aux_map_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
    free(buf);
 }
 
-static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
+static struct intel_mapped_pinned_buffer_alloc aux_map_allocator = {
    .alloc = intel_aux_map_buffer_alloc,
    .free = intel_aux_map_buffer_free,
 };
@@ -4101,7 +4101,7 @@ clflush_mapped_ranges(struct anv_device *device,
       if (ranges[i].offset >= mem->map_size)
          continue;
 
-      gen_clflush_range(mem->map + ranges[i].offset,
+      intel_clflush_range(mem->map + ranges[i].offset,
                         MIN2(ranges[i].size, mem->map_size - ranges[i].offset));
    }
 }


@@ -295,7 +295,7 @@ VkResult anv_QueuePresentKHR(
    if (device->debug_frame_desc) {
       device->debug_frame_desc->frame_id++;
       if (!device->info.has_llc) {
-         gen_clflush_range(device->debug_frame_desc,
+         intel_clflush_range(device->debug_frame_desc,
                            sizeof(*device->debug_frame_desc));
       }
    }


@@ -1113,7 +1113,7 @@ brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
        * LLC entirely requiring us to keep dirty pixels for the scanout
        * out of any cache.)
        */
-      gen_invalidate_range(bo->map_cpu, bo->size);
+      intel_invalidate_range(bo->map_cpu, bo->size);
    }
 
    return bo->map_cpu;


@@ -53,7 +53,7 @@ debug_enabled_for_stage(gl_shader_stage stage)
 }
 
 static void
-gen_shader_sha1(struct gl_program *prog, gl_shader_stage stage,
+intel_shader_sha1(struct gl_program *prog, gl_shader_stage stage,
                 void *key, unsigned char *out_sha1)
 {
    char sha1_buf[41];
@@ -120,7 +120,7 @@ read_and_upload(struct brw_context *brw, struct disk_cache *cache,
    */
   prog_key.base.program_string_id = 0;
 
-   gen_shader_sha1(prog, stage, &prog_key, binary_sha1);
+   intel_shader_sha1(prog, stage, &prog_key, binary_sha1);
 
    size_t buffer_size;
    uint8_t *buffer = disk_cache_get(cache, binary_sha1, &buffer_size);
@@ -280,7 +280,7 @@ write_program_data(struct brw_context *brw, struct gl_program *prog,
    unsigned char sha1[20];
    char buf[41];
 
-   gen_shader_sha1(prog, stage, key, sha1);
+   intel_shader_sha1(prog, stage, key, sha1);
    _mesa_sha1_format(buf, sha1);
 
    if (brw->ctx._Shader->Flags & GLSL_CACHE_INFO) {
       fprintf(stderr, "putting binary in cache: %s\n", buf);
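[Editor's note: for context, a sketch under stated assumptions rather than code from this change. The lookup half of this path boils down to hashing a run-independent program key and asking the disk cache for the blob. _mesa_sha1_compute here stands in for the hashing that intel_shader_sha1 performs internally (which also mixes in the shader stage), and lookup_cached_binary is a hypothetical name; the disk_cache_get call mirrors the hunk above.]

#include "util/mesa-sha1.h"    /* _mesa_sha1_compute, _mesa_sha1_format */
#include "util/disk_cache.h"   /* disk_cache_get */

/* Hypothetical helper: hash a program key and look it up on disk.
 * Returns a malloc'ed binary (caller frees) or NULL on a cache miss. */
static uint8_t *
lookup_cached_binary(struct disk_cache *cache,
                     const void *key, size_t key_size, size_t *size_out)
{
   unsigned char binary_sha1[20];
   char buf[41];

   _mesa_sha1_compute(key, key_size, binary_sha1); /* stand-in for the
                                                    * intel_shader_sha1()
                                                    * hashing above */
   _mesa_sha1_format(buf, binary_sha1);            /* hex, for logging */

   return disk_cache_get(cache, binary_sha1, size_out);
}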