intel: Rename "gen_" prefix used in common code to "intel_"

This patch renames the functions, structures, enums, etc. defined in
common code that carry the "gen_" prefix.

Signed-off-by: Anuj Phogat <anuj.phogat@gmail.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9413>
This commit is contained in:
Anuj Phogat 2021-03-03 13:49:18 -08:00 committed by Marge Bot
parent 733b0ee8cb
commit 692472a376
68 changed files with 955 additions and 955 deletions

View File

@ -119,7 +119,7 @@ dump_validation_list(struct iris_batch *batch)
/**
* Return BO information to the batch decoder (for debugging).
*/
static struct gen_batch_decode_bo
static struct intel_batch_decode_bo
decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
struct iris_batch *batch = v_batch;
@ -132,7 +132,7 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);
if (address >= bo_address && address < bo_address + bo->size) {
return (struct gen_batch_decode_bo) {
return (struct intel_batch_decode_bo) {
.addr = address,
.size = bo->size,
.map = iris_bo_map(batch->dbg, bo, MAP_READ) +
@ -141,7 +141,7 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
}
}
return (struct gen_batch_decode_bo) { };
return (struct intel_batch_decode_bo) { };
}
static unsigned
@ -163,7 +163,7 @@ static void
decode_batch(struct iris_batch *batch)
{
void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
intel_print_batch(&batch->decoder, map, batch->primary_batch_size,
batch->exec_bos[0]->gtt_offset, false);
}
@ -220,7 +220,7 @@ iris_init_batch(struct iris_context *ice,
GEN_BATCH_DECODE_OFFSETS |
GEN_BATCH_DECODE_FLOATS;
gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
intel_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
stderr, decode_flags, NULL,
decode_get_bo, decode_get_state_size, batch);
batch->decoder.dynamic_base = IRIS_MEMZONE_DYNAMIC_START;
@ -453,7 +453,7 @@ iris_batch_free(struct iris_batch *batch)
_mesa_hash_table_destroy(batch->cache.render, NULL);
if (INTEL_DEBUG)
gen_batch_decode_ctx_finish(&batch->decoder);
intel_batch_decode_ctx_finish(&batch->decoder);
}
/**
@ -507,9 +507,9 @@ add_aux_map_bos_to_batch(struct iris_batch *batch)
if (!aux_map_ctx)
return;
uint32_t count = gen_aux_map_get_num_buffers(aux_map_ctx);
uint32_t count = intel_aux_map_get_num_buffers(aux_map_ctx);
ensure_exec_obj_space(batch, count);
gen_aux_map_fill_bos(aux_map_ctx,
intel_aux_map_fill_bos(aux_map_ctx,
(void**)&batch->exec_bos[batch->exec_count], count);
for (uint32_t i = 0; i < count; i++) {
struct iris_bo *bo = batch->exec_bos[batch->exec_count];
@ -654,7 +654,7 @@ submit_batch(struct iris_batch *batch)
int ret = 0;
if (!batch->screen->no_hw &&
gen_ioctl(batch->screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
intel_ioctl(batch->screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
ret = -errno;
for (int i = 0; i < batch->exec_count; i++) {

View File

@ -135,7 +135,7 @@ struct iris_batch {
struct hash_table *render;
} cache;
struct gen_batch_decode_ctx decoder;
struct intel_batch_decode_ctx decoder;
struct hash_table_u64 *state_sizes;
/**

View File

@ -245,7 +245,7 @@ blorp_flush_range(UNUSED struct blorp_batch *blorp_batch,
*/
}
static const struct gen_l3_config *
static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *blorp_batch)
{
struct iris_batch *batch = blorp_batch->driver_batch;

View File

@ -177,7 +177,7 @@ struct iris_bufmgr {
bool has_tiling_uapi:1;
bool bo_reuse:1;
struct gen_aux_map_context *aux_map_ctx;
struct intel_aux_map_context *aux_map_ctx;
};
static mtx_t global_bufmgr_list_mutex = _MTX_INITIALIZER_NP;
@ -310,7 +310,7 @@ vma_alloc(struct iris_bufmgr *bufmgr,
assert((addr >> 48ull) == 0);
assert((addr % alignment) == 0);
return gen_canonical_address(addr);
return intel_canonical_address(addr);
}
static void
@ -322,7 +322,7 @@ vma_free(struct iris_bufmgr *bufmgr,
return;
/* Un-canonicalize the address. */
address = gen_48b_address(address);
address = intel_48b_address(address);
if (address == 0ull)
return;
@ -344,7 +344,7 @@ iris_bo_busy(struct iris_bo *bo)
struct iris_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
if (ret == 0) {
bo->idle = !busy.busy;
return busy.busy;
@ -361,7 +361,7 @@ iris_bo_madvise(struct iris_bo *bo, int state)
.retained = 1,
};
gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
intel_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
return madv.retained;
}
@ -429,7 +429,7 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
* removed from the aux-map.
*/
if (bo->bufmgr->aux_map_ctx)
gen_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
bo->size);
bo->aux_map_address = 0;
}
@ -471,7 +471,7 @@ alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size)
/* All new BOs we get from the kernel are zeroed, so we don't need to
* worry about that here.
*/
if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
free(bo);
return NULL;
}
@ -493,7 +493,7 @@ alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size)
.write_domain = 0,
};
if (gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
if (intel_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
bo_free(bo);
return NULL;
}
@ -572,7 +572,7 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
.handle = bo->gem_handle,
.caching = 1,
};
if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
bo->cache_coherent = true;
bo->reusable = false;
}
@ -624,7 +624,7 @@ iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
.user_ptr = (uintptr_t)ptr,
.user_size = size,
};
if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
goto err_free;
bo->gem_handle = arg.handle;
@ -633,7 +633,7 @@ iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
.handle = bo->gem_handle,
.read_domains = I915_GEM_DOMAIN_CPU,
};
if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
goto err_close;
bo->name = name;
@ -660,7 +660,7 @@ iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
err_close:
close.handle = bo->gem_handle;
gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
err_free:
free(bo);
return NULL;
@ -690,7 +690,7 @@ iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
goto out;
struct drm_gem_open open_arg = { .name = handle };
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
@ -725,7 +725,7 @@ iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
if (ret != 0)
goto err_unref;
@ -762,7 +762,7 @@ bo_close(struct iris_bo *bo)
list_for_each_entry_safe(struct bo_export, export, &bo->exports, link) {
struct drm_gem_close close = { .handle = export->gem_handle };
gen_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);
intel_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);
list_del(&export->link);
free(export);
@ -773,14 +773,14 @@ bo_close(struct iris_bo *bo)
/* Close this object */
struct drm_gem_close close = { .handle = bo->gem_handle };
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo->gem_handle, bo->name, strerror(errno));
}
if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
gen_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
bo->size);
}
@ -950,7 +950,7 @@ iris_bo_gem_mmap_legacy(struct pipe_debug_callback *dbg,
.flags = wc ? I915_MMAP_WC : 0,
};
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
@ -973,7 +973,7 @@ iris_bo_gem_mmap_offset(struct pipe_debug_callback *dbg, struct iris_bo *bo,
};
/* Get the fake offset back */
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error preparing buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
@ -1130,7 +1130,7 @@ iris_bo_map_gtt(struct pipe_debug_callback *dbg,
struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };
/* Get the fake offset back... */
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
if (ret != 0) {
DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
@ -1288,7 +1288,7 @@ iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
.bo_handle = bo->gem_handle,
.timeout_ns = timeout_ns,
};
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
if (ret != 0)
return -errno;
@ -1301,7 +1301,7 @@ static void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
/* Free aux-map buffers */
gen_aux_map_finish(bufmgr->aux_map_ctx);
intel_aux_map_finish(bufmgr->aux_map_ctx);
/* bufmgr will no longer try to free VMA entries in the aux-map */
bufmgr->aux_map_ctx = NULL;
@ -1447,7 +1447,7 @@ iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd,
bo->tiling_mode = isl_tiling_to_i915_tiling(mod_info->tiling);
} else if (bufmgr->has_tiling_uapi) {
struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
goto err;
bo->tiling_mode = get_tiling.tiling_mode;
} else {
@ -1524,7 +1524,7 @@ iris_bo_flink(struct iris_bo *bo, uint32_t *name)
if (!bo->global_name) {
struct drm_gem_flink flink = { .handle = bo->gem_handle };
if (gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
return -errno;
mtx_lock(&bufmgr->lock);
@ -1650,7 +1650,7 @@ uint32_t
iris_create_hw_context(struct iris_bufmgr *bufmgr)
{
struct drm_i915_gem_context_create create = { };
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
return 0;
@ -1705,7 +1705,7 @@ iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
int err;
err = 0;
if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
if (intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
err = -errno;
return err;
@ -1730,7 +1730,7 @@ iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
if (ctx_id != 0 &&
gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
strerror(errno));
}
@ -1740,7 +1740,7 @@ int
iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
{
struct drm_i915_reg_read reg_read = { .offset = offset };
int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
int ret = intel_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
*result = reg_read.val;
return ret;
@ -1755,16 +1755,16 @@ iris_gtt_size(int fd)
struct drm_i915_gem_context_param p = {
.param = I915_CONTEXT_PARAM_GTT_SIZE,
};
if (!gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
if (!intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
return p.value;
return 0;
}
static struct gen_buffer *
gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
static struct intel_buffer *
intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
{
struct gen_buffer *buf = malloc(sizeof(struct gen_buffer));
struct intel_buffer *buf = malloc(sizeof(struct intel_buffer));
if (!buf)
return NULL;
@ -1782,15 +1782,15 @@ gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
}
static void
gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
intel_aux_map_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
{
iris_bo_unreference((struct iris_bo*)buffer->driver_bo);
free(buffer);
}
static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
.alloc = gen_aux_map_buffer_alloc,
.free = gen_aux_map_buffer_free,
.alloc = intel_aux_map_buffer_alloc,
.free = intel_aux_map_buffer_free,
};
static int
@ -1799,7 +1799,7 @@ gem_param(int fd, int name)
int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */
struct drm_i915_getparam gp = { .param = name, .value = &v };
if (gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
if (intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
return -1;
return v;
@ -1886,7 +1886,7 @@ iris_bufmgr_create(struct gen_device_info *devinfo, int fd, bool bo_reuse)
_mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
if (devinfo->has_aux_map) {
bufmgr->aux_map_ctx = gen_aux_map_init(bufmgr, &aux_map_allocator,
bufmgr->aux_map_ctx = intel_aux_map_init(bufmgr, &aux_map_allocator,
devinfo);
assert(bufmgr->aux_map_ctx);
}

View File

@ -718,7 +718,7 @@ struct iris_context {
/** Aux usage of the fb's depth buffer (which may or may not exist). */
enum isl_aux_usage hiz_usage;
enum gen_urb_deref_block_size urb_deref_block_size;
enum intel_urb_deref_block_size urb_deref_block_size;
/** Are depth writes enabled? (Depth buffer may or may not exist.) */
bool depth_writes_enabled;

View File

@ -44,7 +44,7 @@ gem_syncobj_create(int fd, uint32_t flags)
.flags = flags,
};
gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
return args.handle;
}
@ -56,7 +56,7 @@ gem_syncobj_destroy(int fd, uint32_t handle)
.handle = handle,
};
gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}
/**
@ -207,7 +207,7 @@ iris_wait_syncobj(struct pipe_screen *p_screen,
.count_handles = 1,
.timeout_nsec = timeout_nsec,
};
return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
#define CSI "\e["
@ -426,7 +426,7 @@ iris_fence_finish(struct pipe_screen *p_screen,
args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
}
return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}
static int
@ -444,7 +444,7 @@ sync_merge_fd(int sync_fd, int new_fd)
.fence = -1,
};
gen_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
intel_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
close(new_fd);
close(sync_fd);
@ -474,7 +474,7 @@ iris_fence_get_fd(struct pipe_screen *p_screen,
.fd = -1,
};
gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
fd = sync_merge_fd(fd, args.fd);
}
@ -489,7 +489,7 @@ iris_fence_get_fd(struct pipe_screen *p_screen,
};
args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
gem_syncobj_destroy(screen->fd, args.handle);
return args.fd;
}
@ -515,7 +515,7 @@ iris_fence_create_fd(struct pipe_context *ctx,
args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
}
if (gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
if (intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
strerror(errno));
if (type == PIPE_FD_TYPE_NATIVE_SYNC)

View File

@ -495,8 +495,8 @@ map_aux_addresses(struct iris_screen *screen, struct iris_resource *res,
const unsigned aux_offset = res->aux.extra_aux.surf.size_B > 0 ?
res->aux.extra_aux.offset : res->aux.offset;
const uint64_t format_bits =
gen_aux_map_format_bits(res->surf.tiling, format, plane);
gen_aux_map_add_mapping(aux_map_ctx, res->bo->gtt_offset + res->offset,
intel_aux_map_format_bits(res->surf.tiling, format, plane);
intel_aux_map_add_mapping(aux_map_ctx, res->bo->gtt_offset + res->offset,
res->aux.bo->gtt_offset + aux_offset,
res->surf.size_B, format_bits);
res->bo->aux_map_address = res->aux.bo->gtt_offset;

View File

@ -106,7 +106,7 @@ iris_get_device_uuid(struct pipe_screen *pscreen, char *uuid)
struct iris_screen *screen = (struct iris_screen *)pscreen;
const struct isl_device *isldev = &screen->isl_dev;
gen_uuid_compute_device_id((uint8_t *)uuid, isldev, PIPE_UUID_SIZE);
intel_uuid_compute_device_id((uint8_t *)uuid, isldev, PIPE_UUID_SIZE);
}
static void
@ -115,7 +115,7 @@ iris_get_driver_uuid(struct pipe_screen *pscreen, char *uuid)
struct iris_screen *screen = (struct iris_screen *)pscreen;
const struct gen_device_info *devinfo = &screen->devinfo;
gen_uuid_compute_driver_id((uint8_t *)uuid, devinfo, PIPE_UUID_SIZE);
intel_uuid_compute_driver_id((uint8_t *)uuid, devinfo, PIPE_UUID_SIZE);
}
static bool
@ -674,15 +674,15 @@ iris_getparam_integer(int fd, int param)
return -1;
}
static const struct gen_l3_config *
static const struct intel_l3_config *
iris_get_default_l3_config(const struct gen_device_info *devinfo,
bool compute)
{
bool wants_dc_cache = true;
bool has_slm = compute;
const struct gen_l3_weights w =
gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
return gen_get_l3_config(devinfo, w);
const struct intel_l3_weights w =
intel_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
return intel_get_l3_config(devinfo, w);
}
static void
@ -726,7 +726,7 @@ static void
iris_detect_kernel_features(struct iris_screen *screen)
{
/* Kernel 5.2+ */
if (gen_gem_supports_syncobj_wait(screen->fd))
if (intel_gem_supports_syncobj_wait(screen->fd))
screen->kernel_features |= KERNEL_HAS_WAIT_FOR_SUBMIT;
}

View File

@ -35,7 +35,7 @@
#include "iris_measure.h"
#include "iris_resource.h"
struct gen_l3_config;
struct intel_l3_config;
struct brw_vue_map;
struct iris_vs_prog_key;
struct iris_tcs_prog_key;
@ -204,8 +204,8 @@ struct iris_screen {
struct brw_compiler *compiler;
struct gen_perf_config *perf_cfg;
const struct gen_l3_config *l3_config_3d;
const struct gen_l3_config *l3_config_cs;
const struct intel_l3_config *l3_config_3d;
const struct intel_l3_config *l3_config_cs;
/**
* A buffer containing a marker + description of the driver. This buffer is

View File

@ -743,7 +743,7 @@ init_state_base_address(struct iris_batch *batch)
static void
iris_emit_l3_config(struct iris_batch *batch,
const struct gen_l3_config *cfg)
const struct intel_l3_config *cfg)
{
uint32_t reg_val;
assert(cfg || GEN_GEN >= 12);
@ -5392,7 +5392,7 @@ genX(invalidate_aux_map_state)(struct iris_batch *batch)
void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
if (!aux_map_ctx)
return;
uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
uint32_t aux_map_state_num = intel_aux_map_get_state_num(aux_map_ctx);
if (batch->last_aux_map_state != aux_map_state_num) {
/* HSD 1209978178: docs say that before programming the aux table:
*
@ -5424,7 +5424,7 @@ init_aux_map_state(struct iris_batch *batch)
if (!aux_map_ctx)
return;
uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
uint64_t base_addr = intel_aux_map_get_base(aux_map_ctx);
assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
base_addr);
@ -5640,7 +5640,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
float vp_ymin = viewport_extent(state, 1, -1.0f);
float vp_ymax = viewport_extent(state, 1, 1.0f);
gen_calculate_guardband_size(cso_fb->width, cso_fb->height,
intel_calculate_guardband_size(cso_fb->width, cso_fb->height,
state->scale[0], state->scale[1],
state->translate[0], state->translate[1],
&gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
@ -5682,7 +5682,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
assert(ice->shaders.urb.size[i] != 0);
}
gen_get_urb_config(&batch->screen->devinfo,
intel_get_urb_config(&batch->screen->devinfo,
batch->screen->l3_config_3d,
ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,

View File

@ -96,7 +96,7 @@ blorp_get_surface_base_address(struct blorp_batch *batch);
#endif
#if GEN_GEN >= 7
static const struct gen_l3_config *
static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *batch);
# else
static void
@ -194,7 +194,7 @@ _blorp_combine_address(struct blorp_batch *batch, void *location,
static void
emit_urb_config(struct blorp_batch *batch,
const struct blorp_params *params,
UNUSED enum gen_urb_deref_block_size *deref_block_size)
UNUSED enum intel_urb_deref_block_size *deref_block_size)
{
/* Once vertex fetcher has written full VUE entries with complete
* header the space requirement is as follows per vertex (in bytes):
@ -222,7 +222,7 @@ emit_urb_config(struct blorp_batch *batch,
unsigned entries[4], start[4];
bool constrained;
gen_get_urb_config(batch->blorp->compiler->devinfo,
intel_get_urb_config(batch->blorp->compiler->devinfo,
blorp_get_l3_config(batch),
false, false, entry_size,
entries, start, deref_block_size, &constrained);
@ -692,7 +692,7 @@ blorp_emit_vs_config(struct blorp_batch *batch,
static void
blorp_emit_sf_config(struct blorp_batch *batch,
const struct blorp_params *params,
UNUSED enum gen_urb_deref_block_size urb_deref_block_size)
UNUSED enum intel_urb_deref_block_size urb_deref_block_size)
{
const struct brw_wm_prog_data *prog_data = params->wm_prog_data;
@ -1268,7 +1268,7 @@ blorp_emit_pipeline(struct blorp_batch *batch,
uint32_t color_calc_state_offset;
uint32_t depth_stencil_state_offset;
enum gen_urb_deref_block_size urb_deref_block_size;
enum intel_urb_deref_block_size urb_deref_block_size;
emit_urb_config(batch, params, &urb_deref_block_size);
if (params->wm_prog_data) {

View File

@ -95,10 +95,10 @@ static const bool aux_map_debug = false;
struct aux_map_buffer {
struct list_head link;
struct gen_buffer *buffer;
struct intel_buffer *buffer;
};
struct gen_aux_map_context {
struct intel_aux_map_context {
void *driver_ctx;
pthread_mutex_t mutex;
struct gen_mapped_pinned_buffer_alloc *buffer_alloc;
@ -111,7 +111,7 @@ struct gen_aux_map_context {
};
static bool
add_buffer(struct gen_aux_map_context *ctx)
add_buffer(struct intel_aux_map_context *ctx)
{
struct aux_map_buffer *buf = ralloc(ctx, struct aux_map_buffer);
if (!buf)
@ -135,7 +135,7 @@ add_buffer(struct gen_aux_map_context *ctx)
}
static void
advance_current_pos(struct gen_aux_map_context *ctx, uint32_t size)
advance_current_pos(struct intel_aux_map_context *ctx, uint32_t size)
{
assert(ctx->tail_remaining >= size);
ctx->tail_remaining -= size;
@ -143,7 +143,7 @@ advance_current_pos(struct gen_aux_map_context *ctx, uint32_t size)
}
static bool
align_and_verify_space(struct gen_aux_map_context *ctx, uint32_t size,
align_and_verify_space(struct intel_aux_map_context *ctx, uint32_t size,
uint32_t align)
{
if (ctx->tail_remaining < size)
@ -164,7 +164,7 @@ align_and_verify_space(struct gen_aux_map_context *ctx, uint32_t size,
}
static void
get_current_pos(struct gen_aux_map_context *ctx, uint64_t *gpu, uint64_t **map)
get_current_pos(struct intel_aux_map_context *ctx, uint64_t *gpu, uint64_t **map)
{
assert(!list_is_empty(&ctx->buffers));
struct aux_map_buffer *tail =
@ -176,7 +176,7 @@ get_current_pos(struct gen_aux_map_context *ctx, uint64_t *gpu, uint64_t **map)
}
static bool
add_sub_table(struct gen_aux_map_context *ctx, uint32_t size,
add_sub_table(struct intel_aux_map_context *ctx, uint32_t size,
uint32_t align, uint64_t *gpu, uint64_t **map)
{
if (!align_and_verify_space(ctx, size, align)) {
@ -192,21 +192,21 @@ add_sub_table(struct gen_aux_map_context *ctx, uint32_t size,
}
uint32_t
gen_aux_map_get_state_num(struct gen_aux_map_context *ctx)
intel_aux_map_get_state_num(struct intel_aux_map_context *ctx)
{
return p_atomic_read(&ctx->state_num);
}
struct gen_aux_map_context *
gen_aux_map_init(void *driver_ctx,
struct intel_aux_map_context *
intel_aux_map_init(void *driver_ctx,
struct gen_mapped_pinned_buffer_alloc *buffer_alloc,
const struct gen_device_info *devinfo)
{
struct gen_aux_map_context *ctx;
struct intel_aux_map_context *ctx;
if (devinfo->gen < 12)
return NULL;
ctx = ralloc(NULL, struct gen_aux_map_context);
ctx = ralloc(NULL, struct intel_aux_map_context);
if (!ctx)
return NULL;
@ -235,7 +235,7 @@ gen_aux_map_init(void *driver_ctx,
}
void
gen_aux_map_finish(struct gen_aux_map_context *ctx)
intel_aux_map_finish(struct intel_aux_map_context *ctx)
{
if (!ctx)
return;
@ -252,17 +252,17 @@ gen_aux_map_finish(struct gen_aux_map_context *ctx)
}
uint64_t
gen_aux_map_get_base(struct gen_aux_map_context *ctx)
intel_aux_map_get_base(struct intel_aux_map_context *ctx)
{
/**
* This get initialized in gen_aux_map_init, and never changes, so there is
* This gets initialized in intel_aux_map_init, and never changes, so there is
* no need to lock the mutex.
*/
return ctx->level3_base_addr;
}
static struct aux_map_buffer *
find_buffer(struct gen_aux_map_context *ctx, uint64_t addr)
find_buffer(struct intel_aux_map_context *ctx, uint64_t addr)
{
list_for_each_entry(struct aux_map_buffer, buf, &ctx->buffers, link) {
if (buf->buffer->gpu <= addr && buf->buffer->gpu_end > addr) {
@ -273,7 +273,7 @@ find_buffer(struct gen_aux_map_context *ctx, uint64_t addr)
}
static uint64_t *
get_u64_entry_ptr(struct gen_aux_map_context *ctx, uint64_t addr)
get_u64_entry_ptr(struct intel_aux_map_context *ctx, uint64_t addr)
{
struct aux_map_buffer *buf = find_buffer(ctx, addr);
assert(buf);
@ -313,7 +313,7 @@ get_bpp_encoding(enum isl_format format)
#define GEN_AUX_MAP_ENTRY_Y_TILED_BIT (0x1ull << 52)
uint64_t
gen_aux_map_format_bits(enum isl_tiling tiling, enum isl_format format,
intel_aux_map_format_bits(enum isl_tiling tiling, enum isl_format format,
uint8_t plane)
{
if (aux_map_debug)
@ -335,14 +335,14 @@ gen_aux_map_format_bits(enum isl_tiling tiling, enum isl_format format,
}
uint64_t
gen_aux_map_format_bits_for_isl_surf(const struct isl_surf *isl_surf)
intel_aux_map_format_bits_for_isl_surf(const struct isl_surf *isl_surf)
{
assert(!isl_format_is_planar(isl_surf->format));
return gen_aux_map_format_bits(isl_surf->tiling, isl_surf->format, 0);
return intel_aux_map_format_bits(isl_surf->tiling, isl_surf->format, 0);
}
static void
get_aux_entry(struct gen_aux_map_context *ctx, uint64_t address,
get_aux_entry(struct intel_aux_map_context *ctx, uint64_t address,
uint32_t *l1_index_out, uint64_t *l1_entry_addr_out,
uint64_t **l1_entry_map_out)
{
@ -361,7 +361,7 @@ get_aux_entry(struct gen_aux_map_context *ctx, uint64_t address,
}
*l3_entry = (l2_gpu & 0xffffffff8000ULL) | 1;
} else {
uint64_t l2_addr = gen_canonical_address(*l3_entry & ~0x7fffULL);
uint64_t l2_addr = intel_canonical_address(*l3_entry & ~0x7fffULL);
l2_map = get_u64_entry_ptr(ctx, l2_addr);
}
uint32_t l2_index = (address >> 24) & 0xfff;
@ -378,7 +378,7 @@ get_aux_entry(struct gen_aux_map_context *ctx, uint64_t address,
}
*l2_entry = (l1_addr & 0xffffffffe000ULL) | 1;
} else {
l1_addr = gen_canonical_address(*l2_entry & ~0x1fffULL);
l1_addr = intel_canonical_address(*l2_entry & ~0x1fffULL);
l1_map = get_u64_entry_ptr(ctx, l1_addr);
}
uint32_t l1_index = (address >> 16) & 0xff;
@ -391,7 +391,7 @@ get_aux_entry(struct gen_aux_map_context *ctx, uint64_t address,
}
static void
add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
add_mapping(struct intel_aux_map_context *ctx, uint64_t address,
uint64_t aux_address, uint64_t format_bits,
bool *state_changed)
{
@ -433,7 +433,7 @@ add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
}
uint64_t *
gen_aux_map_get_entry(struct gen_aux_map_context *ctx,
intel_aux_map_get_entry(struct intel_aux_map_context *ctx,
uint64_t address,
uint64_t *entry_address)
{
@ -446,7 +446,7 @@ gen_aux_map_get_entry(struct gen_aux_map_context *ctx,
}
void
gen_aux_map_add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
intel_aux_map_add_mapping(struct intel_aux_map_context *ctx, uint64_t address,
uint64_t aux_address, uint64_t main_size_B,
uint64_t format_bits)
{
@ -473,7 +473,7 @@ gen_aux_map_add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
* tables.
*/
static void
remove_mapping(struct gen_aux_map_context *ctx, uint64_t address,
remove_mapping(struct intel_aux_map_context *ctx, uint64_t address,
bool *state_changed)
{
uint32_t l3_index = (address >> 36) & 0xfff;
@ -483,7 +483,7 @@ remove_mapping(struct gen_aux_map_context *ctx, uint64_t address,
if ((*l3_entry & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
return;
} else {
uint64_t l2_addr = gen_canonical_address(*l3_entry & ~0x7fffULL);
uint64_t l2_addr = intel_canonical_address(*l3_entry & ~0x7fffULL);
l2_map = get_u64_entry_ptr(ctx, l2_addr);
}
uint32_t l2_index = (address >> 24) & 0xfff;
@ -493,7 +493,7 @@ remove_mapping(struct gen_aux_map_context *ctx, uint64_t address,
if ((*l2_entry & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
return;
} else {
uint64_t l1_addr = gen_canonical_address(*l2_entry & ~0x1fffULL);
uint64_t l1_addr = intel_canonical_address(*l2_entry & ~0x1fffULL);
l1_map = get_u64_entry_ptr(ctx, l1_addr);
}
uint32_t l1_index = (address >> 16) & 0xff;
@ -520,7 +520,7 @@ remove_mapping(struct gen_aux_map_context *ctx, uint64_t address,
}
void
gen_aux_map_unmap_range(struct gen_aux_map_context *ctx, uint64_t address,
intel_aux_map_unmap_range(struct intel_aux_map_context *ctx, uint64_t address,
uint64_t size)
{
bool state_changed = false;
@ -541,13 +541,13 @@ gen_aux_map_unmap_range(struct gen_aux_map_context *ctx, uint64_t address,
}
uint32_t
gen_aux_map_get_num_buffers(struct gen_aux_map_context *ctx)
intel_aux_map_get_num_buffers(struct intel_aux_map_context *ctx)
{
return p_atomic_read(&ctx->num_buffers);
}
void
gen_aux_map_fill_bos(struct gen_aux_map_context *ctx, void **driver_bos,
intel_aux_map_fill_bos(struct intel_aux_map_context *ctx, void **driver_bos,
uint32_t max_bos)
{
assert(p_atomic_read(&ctx->num_buffers) >= max_bos);

View File

@ -40,7 +40,7 @@ extern "C" {
* These functions are implemented in common code shared by drivers.
*/
struct gen_aux_map_context;
struct intel_aux_map_context;
struct gen_device_info;
#define GEN_AUX_MAP_ADDRESS_MASK 0x0000ffffffffff00ull
@ -51,16 +51,16 @@ struct gen_device_info;
#define GEN_AUX_MAP_AUX_PAGE_SIZE \
(GEN_AUX_MAP_MAIN_PAGE_SIZE / GEN_AUX_MAP_GEN12_CCS_SCALE)
struct gen_aux_map_context *
gen_aux_map_init(void *driver_ctx,
struct intel_aux_map_context *
intel_aux_map_init(void *driver_ctx,
struct gen_mapped_pinned_buffer_alloc *buffer_alloc,
const struct gen_device_info *devinfo);
void
gen_aux_map_finish(struct gen_aux_map_context *ctx);
intel_aux_map_finish(struct intel_aux_map_context *ctx);
uint32_t
gen_aux_map_get_state_num(struct gen_aux_map_context *ctx);
intel_aux_map_get_state_num(struct intel_aux_map_context *ctx);
/** Returns the current number of buffers used by the aux-map tables
*
@ -70,39 +70,39 @@ gen_aux_map_get_state_num(struct gen_aux_map_context *ctx);
* they involve surfaces not used by this batch.
*/
uint32_t
gen_aux_map_get_num_buffers(struct gen_aux_map_context *ctx);
intel_aux_map_get_num_buffers(struct intel_aux_map_context *ctx);
/** Fill an array of exec_object2 with aux-map buffer handles
*
* The gen_aux_map_get_num_buffers call should be made, then the driver can
* The intel_aux_map_get_num_buffers call should be made, then the driver can
* make sure the `obj` array is large enough before calling this function.
*/
void
gen_aux_map_fill_bos(struct gen_aux_map_context *ctx, void **driver_bos,
intel_aux_map_fill_bos(struct intel_aux_map_context *ctx, void **driver_bos,
uint32_t max_bos);
uint64_t
gen_aux_map_get_base(struct gen_aux_map_context *ctx);
intel_aux_map_get_base(struct intel_aux_map_context *ctx);
uint64_t
gen_aux_map_format_bits(enum isl_tiling tiling, enum isl_format format,
intel_aux_map_format_bits(enum isl_tiling tiling, enum isl_format format,
uint8_t plane);
uint64_t
gen_aux_map_format_bits_for_isl_surf(const struct isl_surf *isl_surf);
intel_aux_map_format_bits_for_isl_surf(const struct isl_surf *isl_surf);
uint64_t *
gen_aux_map_get_entry(struct gen_aux_map_context *ctx,
intel_aux_map_get_entry(struct intel_aux_map_context *ctx,
uint64_t address,
uint64_t *entry_address);
void
gen_aux_map_add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
intel_aux_map_add_mapping(struct intel_aux_map_context *ctx, uint64_t address,
uint64_t aux_address, uint64_t main_size_B,
uint64_t format_bits);
void
gen_aux_map_unmap_range(struct gen_aux_map_context *ctx, uint64_t address,
intel_aux_map_unmap_range(struct intel_aux_map_context *ctx, uint64_t address,
uint64_t size);
#ifdef __cplusplus

View File

@ -29,11 +29,11 @@
#include <string.h>
void
gen_batch_decode_ctx_init(struct gen_batch_decode_ctx *ctx,
intel_batch_decode_ctx_init(struct intel_batch_decode_ctx *ctx,
const struct gen_device_info *devinfo,
FILE *fp, enum gen_batch_decode_flags flags,
FILE *fp, enum intel_batch_decode_flags flags,
const char *xml_path,
struct gen_batch_decode_bo (*get_bo)(void *,
struct intel_batch_decode_bo (*get_bo)(void *,
bool,
uint64_t),
unsigned (*get_state_size)(void *, uint64_t,
@ -52,15 +52,15 @@ gen_batch_decode_ctx_init(struct gen_batch_decode_ctx *ctx,
ctx->engine = I915_ENGINE_CLASS_RENDER;
if (xml_path == NULL)
ctx->spec = gen_spec_load(devinfo);
ctx->spec = intel_spec_load(devinfo);
else
ctx->spec = gen_spec_load_from_path(devinfo, xml_path);
ctx->spec = intel_spec_load_from_path(devinfo, xml_path);
}
void
gen_batch_decode_ctx_finish(struct gen_batch_decode_ctx *ctx)
intel_batch_decode_ctx_finish(struct intel_batch_decode_ctx *ctx)
{
gen_spec_destroy(ctx->spec);
intel_spec_destroy(ctx->spec);
}
#define CSI "\e["
@ -70,18 +70,18 @@ gen_batch_decode_ctx_finish(struct gen_batch_decode_ctx *ctx)
#define NORMAL CSI "0m"
static void
ctx_print_group(struct gen_batch_decode_ctx *ctx,
struct gen_group *group,
ctx_print_group(struct intel_batch_decode_ctx *ctx,
struct intel_group *group,
uint64_t address, const void *map)
{
gen_print_group(ctx->fp, group, address, map, 0,
intel_print_group(ctx->fp, group, address, map, 0,
(ctx->flags & GEN_BATCH_DECODE_IN_COLOR) != 0);
}
static struct gen_batch_decode_bo
ctx_get_bo(struct gen_batch_decode_ctx *ctx, bool ppgtt, uint64_t addr)
static struct intel_batch_decode_bo
ctx_get_bo(struct intel_batch_decode_ctx *ctx, bool ppgtt, uint64_t addr)
{
if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0)) {
if (intel_spec_get_gen(ctx->spec) >= intel_make_gen(8,0)) {
/* On Broadwell and above, we have 48-bit addresses which consume two
* dwords. Some packets require that these get stored in a "canonical
* form" which means that bit 47 is sign-extended through the upper
@ -91,9 +91,9 @@ ctx_get_bo(struct gen_batch_decode_ctx *ctx, bool ppgtt, uint64_t addr)
addr &= (~0ull >> 16);
}
struct gen_batch_decode_bo bo = ctx->get_bo(ctx->user_data, ppgtt, addr);
struct intel_batch_decode_bo bo = ctx->get_bo(ctx->user_data, ppgtt, addr);
if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0))
if (intel_spec_get_gen(ctx->spec) >= intel_make_gen(8,0))
bo.addr &= (~0ull >> 16);
/* We may actually have an offset into the bo */
@ -109,7 +109,7 @@ ctx_get_bo(struct gen_batch_decode_ctx *ctx, bool ppgtt, uint64_t addr)
}
static int
update_count(struct gen_batch_decode_ctx *ctx,
update_count(struct intel_batch_decode_ctx *ctx,
uint64_t address,
uint64_t base_address,
unsigned element_dwords,
@ -128,16 +128,16 @@ update_count(struct gen_batch_decode_ctx *ctx,
}
static void
ctx_disassemble_program(struct gen_batch_decode_ctx *ctx,
ctx_disassemble_program(struct intel_batch_decode_ctx *ctx,
uint32_t ksp, const char *type)
{
uint64_t addr = ctx->instruction_base + ksp;
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, addr);
if (!bo.map)
return;
fprintf(ctx->fp, "\nReferenced %s:\n", type);
gen_disassemble(&ctx->devinfo, bo.map, 0, ctx->fp);
intel_disassemble(&ctx->devinfo, bo.map, 0, ctx->fp);
}
/* Heuristic to determine whether a uint32_t is probably actually a float
@ -166,8 +166,8 @@ probably_float(uint32_t bits)
}
static void
ctx_print_buffer(struct gen_batch_decode_ctx *ctx,
struct gen_batch_decode_bo bo,
ctx_print_buffer(struct intel_batch_decode_ctx *ctx,
struct intel_batch_decode_bo bo,
uint32_t read_length,
uint32_t pitch,
int max_lines)
@ -200,24 +200,24 @@ ctx_print_buffer(struct gen_batch_decode_ctx *ctx,
fprintf(ctx->fp, "\n");
}
static struct gen_group *
gen_ctx_find_instruction(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
static struct intel_group *
intel_ctx_find_instruction(struct intel_batch_decode_ctx *ctx, const uint32_t *p)
{
return gen_spec_find_instruction(ctx->spec, ctx->engine, p);
return intel_spec_find_instruction(ctx->spec, ctx->engine, p);
}
static void
handle_state_base_address(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
handle_state_base_address(struct intel_batch_decode_ctx *ctx, const uint32_t *p)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
uint64_t surface_base = 0, dynamic_base = 0, instruction_base = 0;
bool surface_modify = 0, dynamic_modify = 0, instruction_modify = 0;
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Surface State Base Address") == 0) {
surface_base = iter.raw_value;
} else if (strcmp(iter.name, "Dynamic State Base Address") == 0) {
@ -244,10 +244,10 @@ handle_state_base_address(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
}
static void
dump_binding_table(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
dump_binding_table(struct intel_batch_decode_ctx *ctx, uint32_t offset, int count)
{
struct gen_group *strct =
gen_spec_find_struct(ctx->spec, "RENDER_SURFACE_STATE");
struct intel_group *strct =
intel_spec_find_struct(ctx->spec, "RENDER_SURFACE_STATE");
if (strct == NULL) {
fprintf(ctx->fp, "did not find RENDER_SURFACE_STATE info\n");
return;
@ -263,7 +263,7 @@ dump_binding_table(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
return;
}
struct gen_batch_decode_bo bind_bo =
struct intel_batch_decode_bo bind_bo =
ctx_get_bo(ctx, true, ctx->surface_base + offset);
if (bind_bo.map == NULL) {
@ -277,7 +277,7 @@ dump_binding_table(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
continue;
uint64_t addr = ctx->surface_base + pointers[i];
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, addr);
uint32_t size = strct->dw_length * 4;
if (pointers[i] % 32 != 0 ||
@ -292,9 +292,9 @@ dump_binding_table(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
}
static void
dump_samplers(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
dump_samplers(struct intel_batch_decode_ctx *ctx, uint32_t offset, int count)
{
struct gen_group *strct = gen_spec_find_struct(ctx->spec, "SAMPLER_STATE");
struct intel_group *strct = intel_spec_find_struct(ctx->spec, "SAMPLER_STATE");
uint64_t state_addr = ctx->dynamic_base + offset;
if (count < 0) {
@ -302,7 +302,7 @@ dump_samplers(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
strct->dw_length, 4);
}
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, state_addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, state_addr);
const void *state_map = bo.map;
if (state_map == NULL) {
@ -324,16 +324,16 @@ dump_samplers(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
}
static void
handle_interface_descriptor_data(struct gen_batch_decode_ctx *ctx,
struct gen_group *desc, const uint32_t *p)
handle_interface_descriptor_data(struct intel_batch_decode_ctx *ctx,
struct intel_group *desc, const uint32_t *p)
{
uint64_t ksp = 0;
uint32_t sampler_offset = 0, sampler_count = 0;
uint32_t binding_table_offset = 0, binding_entry_count = 0;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, desc, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, desc, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
ksp = strtoll(iter.value, NULL, 16);
} else if (strcmp(iter.name, "Sampler State Pointer") == 0) {
@ -355,18 +355,18 @@ handle_interface_descriptor_data(struct gen_batch_decode_ctx *ctx,
}
static void
handle_media_interface_descriptor_load(struct gen_batch_decode_ctx *ctx,
handle_media_interface_descriptor_load(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct gen_group *desc =
gen_spec_find_struct(ctx->spec, "INTERFACE_DESCRIPTOR_DATA");
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
struct intel_group *desc =
intel_spec_find_struct(ctx->spec, "INTERFACE_DESCRIPTOR_DATA");
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
uint32_t descriptor_offset = 0;
int descriptor_count = 0;
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Interface Descriptor Data Start Address") == 0) {
descriptor_offset = strtol(iter.value, NULL, 16);
} else if (strcmp(iter.name, "Interface Descriptor Total Length") == 0) {
@ -376,7 +376,7 @@ handle_media_interface_descriptor_load(struct gen_batch_decode_ctx *ctx,
}
uint64_t desc_addr = ctx->dynamic_base + descriptor_offset;
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, desc_addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, desc_addr);
const void *desc_map = bo.map;
if (desc_map == NULL) {
@ -397,14 +397,14 @@ handle_media_interface_descriptor_load(struct gen_batch_decode_ctx *ctx,
}
static void
handle_compute_walker(struct gen_batch_decode_ctx *ctx,
handle_compute_walker(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Interface Descriptor") == 0) {
handle_interface_descriptor_data(ctx, iter.struct_desc,
&iter.p[iter.start_bit / 32]);
@ -413,27 +413,27 @@ handle_compute_walker(struct gen_batch_decode_ctx *ctx,
}
static void
handle_3dstate_vertex_buffers(struct gen_batch_decode_ctx *ctx,
handle_3dstate_vertex_buffers(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct gen_group *vbs = gen_spec_find_struct(ctx->spec, "VERTEX_BUFFER_STATE");
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
struct intel_group *vbs = intel_spec_find_struct(ctx->spec, "VERTEX_BUFFER_STATE");
struct gen_batch_decode_bo vb = {};
struct intel_batch_decode_bo vb = {};
uint32_t vb_size = 0;
int index = -1;
int pitch = -1;
bool ready = false;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (iter.struct_desc != vbs)
continue;
struct gen_field_iterator vbs_iter;
gen_field_iterator_init(&vbs_iter, vbs, &iter.p[iter.start_bit / 32], 0, false);
while (gen_field_iterator_next(&vbs_iter)) {
struct intel_field_iterator vbs_iter;
intel_field_iterator_init(&vbs_iter, vbs, &iter.p[iter.start_bit / 32], 0, false);
while (intel_field_iterator_next(&vbs_iter)) {
if (strcmp(vbs_iter.name, "Vertex Buffer Index") == 0) {
index = vbs_iter.raw_value;
} else if (strcmp(vbs_iter.name, "Buffer Pitch") == 0) {
@ -476,18 +476,18 @@ handle_3dstate_vertex_buffers(struct gen_batch_decode_ctx *ctx,
}
static void
handle_3dstate_index_buffer(struct gen_batch_decode_ctx *ctx,
handle_3dstate_index_buffer(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
struct gen_batch_decode_bo ib = {};
struct intel_batch_decode_bo ib = {};
uint32_t ib_size = 0;
uint32_t format = 0;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Index Format") == 0) {
format = iter.raw_value;
} else if (strcmp(iter.name, "Buffer Starting Address") == 0) {
@ -527,17 +527,17 @@ handle_3dstate_index_buffer(struct gen_batch_decode_ctx *ctx,
}
static void
decode_single_ksp(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
decode_single_ksp(struct intel_batch_decode_ctx *ctx, const uint32_t *p)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
uint64_t ksp = 0;
bool is_simd8 = ctx->devinfo.gen >= 11; /* vertex shaders on Gen8+ only */
bool is_enabled = true;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
ksp = iter.raw_value;
} else if (strcmp(iter.name, "SIMD8 Dispatch Enable") == 0) {
@ -569,16 +569,16 @@ decode_single_ksp(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
}
static void
decode_ps_kernels(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
decode_ps_kernels(struct intel_batch_decode_ctx *ctx, const uint32_t *p)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
uint64_t ksp[3] = {0, 0, 0};
bool enabled[3] = {false, false, false};
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strncmp(iter.name, "Kernel Start Pointer ",
strlen("Kernel Start Pointer ")) == 0) {
int idx = iter.name[strlen("Kernel Start Pointer ")] - '0';
@ -619,28 +619,28 @@ decode_ps_kernels(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
}
static void
decode_3dstate_constant_all(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
decode_3dstate_constant_all(struct intel_batch_decode_ctx *ctx, const uint32_t *p)
{
struct gen_group *inst =
gen_spec_find_instruction(ctx->spec, ctx->engine, p);
struct gen_group *body =
gen_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_ALL_DATA");
struct intel_group *inst =
intel_spec_find_instruction(ctx->spec, ctx->engine, p);
struct intel_group *body =
intel_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_ALL_DATA");
uint32_t read_length[4];
struct gen_batch_decode_bo buffer[4];
struct intel_batch_decode_bo buffer[4];
memset(buffer, 0, sizeof(buffer));
struct gen_field_iterator outer;
gen_field_iterator_init(&outer, inst, p, 0, false);
struct intel_field_iterator outer;
intel_field_iterator_init(&outer, inst, p, 0, false);
int idx = 0;
while (gen_field_iterator_next(&outer)) {
while (intel_field_iterator_next(&outer)) {
if (outer.struct_desc != body)
continue;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
0, false);
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
if (!strcmp(iter.name, "Pointer To Constant Buffer")) {
buffer[idx] = ctx_get_bo(ctx, true, iter.raw_value);
} else if (!strcmp(iter.name, "Constant Buffer Read Length")) {
@ -662,26 +662,26 @@ decode_3dstate_constant_all(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
}
static void
decode_3dstate_constant(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
decode_3dstate_constant(struct intel_batch_decode_ctx *ctx, const uint32_t *p)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct gen_group *body =
gen_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
struct intel_group *body =
intel_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
uint32_t read_length[4] = {0};
uint64_t read_addr[4];
struct gen_field_iterator outer;
gen_field_iterator_init(&outer, inst, p, 0, false);
while (gen_field_iterator_next(&outer)) {
struct intel_field_iterator outer;
intel_field_iterator_init(&outer, inst, p, 0, false);
while (intel_field_iterator_next(&outer)) {
if (outer.struct_desc != body)
continue;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
0, false);
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
int idx;
if (sscanf(iter.name, "Read Length[%d]", &idx) == 1) {
read_length[idx] = iter.raw_value;
@ -694,7 +694,7 @@ decode_3dstate_constant(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
if (read_length[i] == 0)
continue;
struct gen_batch_decode_bo buffer = ctx_get_bo(ctx, true, read_addr[i]);
struct intel_batch_decode_bo buffer = ctx_get_bo(ctx, true, read_addr[i]);
if (!buffer.map) {
fprintf(ctx->fp, "constant buffer %d unavailable\n", i);
continue;
@ -709,7 +709,7 @@ decode_3dstate_constant(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
}
static void
decode_gen6_3dstate_binding_table_pointers(struct gen_batch_decode_ctx *ctx,
decode_gen6_3dstate_binding_table_pointers(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
fprintf(ctx->fp, "VS Binding Table:\n");
@ -723,21 +723,21 @@ decode_gen6_3dstate_binding_table_pointers(struct gen_batch_decode_ctx *ctx,
}
static void
decode_3dstate_binding_table_pointers(struct gen_batch_decode_ctx *ctx,
decode_3dstate_binding_table_pointers(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
dump_binding_table(ctx, p[1], -1);
}
static void
decode_3dstate_sampler_state_pointers(struct gen_batch_decode_ctx *ctx,
decode_3dstate_sampler_state_pointers(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
dump_samplers(ctx, p[1], -1);
}
static void
decode_3dstate_sampler_state_pointers_gen6(struct gen_batch_decode_ctx *ctx,
decode_3dstate_sampler_state_pointers_gen6(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
dump_samplers(ctx, p[1], -1);
@ -756,17 +756,17 @@ str_ends_with(const char *str, const char *end)
}
static void
decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
decode_dynamic_state_pointers(struct intel_batch_decode_ctx *ctx,
const char *struct_type, const uint32_t *p,
int count)
{
struct gen_group *inst = gen_ctx_find_instruction(ctx, p);
struct intel_group *inst = intel_ctx_find_instruction(ctx, p);
uint32_t state_offset = 0;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (str_ends_with(iter.name, "Pointer")) {
state_offset = iter.raw_value;
break;
@ -774,7 +774,7 @@ decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
}
uint64_t state_addr = ctx->dynamic_base + state_offset;
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, state_addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, state_addr);
const void *state_map = bo.map;
if (state_map == NULL) {
@ -782,7 +782,7 @@ decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
return;
}
struct gen_group *state = gen_spec_find_struct(ctx->spec, struct_type);
struct intel_group *state = intel_spec_find_struct(ctx->spec, struct_type);
if (strcmp(struct_type, "BLEND_STATE") == 0) {
/* Blend states are different from the others because they have a header
* struct called BLEND_STATE which is followed by a variable number of
@ -795,7 +795,7 @@ decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
state_map += state->dw_length * 4;
struct_type = "BLEND_STATE_ENTRY";
state = gen_spec_find_struct(ctx->spec, struct_type);
state = intel_spec_find_struct(ctx->spec, struct_type);
}
count = update_count(ctx, ctx->dynamic_base + state_offset,
@ -811,51 +811,51 @@ decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
}
static void
decode_3dstate_viewport_state_pointers_cc(struct gen_batch_decode_ctx *ctx,
decode_3dstate_viewport_state_pointers_cc(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, "CC_VIEWPORT", p, 4);
}
static void
decode_3dstate_viewport_state_pointers_sf_clip(struct gen_batch_decode_ctx *ctx,
decode_3dstate_viewport_state_pointers_sf_clip(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, "SF_CLIP_VIEWPORT", p, 4);
}
static void
decode_3dstate_blend_state_pointers(struct gen_batch_decode_ctx *ctx,
decode_3dstate_blend_state_pointers(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, "BLEND_STATE", p, 1);
}
static void
decode_3dstate_cc_state_pointers(struct gen_batch_decode_ctx *ctx,
decode_3dstate_cc_state_pointers(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, "COLOR_CALC_STATE", p, 1);
}
static void
decode_3dstate_scissor_state_pointers(struct gen_batch_decode_ctx *ctx,
decode_3dstate_scissor_state_pointers(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, "SCISSOR_RECT", p, 1);
}
static void
decode_3dstate_slice_table_state_pointers(struct gen_batch_decode_ctx *ctx,
decode_3dstate_slice_table_state_pointers(struct intel_batch_decode_ctx *ctx,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, "SLICE_HASH_TABLE", p, 1);
}
static void
decode_load_register_imm(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
decode_load_register_imm(struct intel_batch_decode_ctx *ctx, const uint32_t *p)
{
struct gen_group *reg = gen_spec_find_register(ctx->spec, p[1]);
struct intel_group *reg = intel_spec_find_register(ctx->spec, p[1]);
if (reg != NULL) {
fprintf(ctx->fp, "register %s (0x%x): 0x%x\n",
@ -865,16 +865,16 @@ decode_load_register_imm(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
}
static void
decode_vs_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
decode_vs_state(struct intel_batch_decode_ctx *ctx, uint32_t offset)
{
struct gen_group *strct =
gen_spec_find_struct(ctx->spec, "VS_STATE");
struct intel_group *strct =
intel_spec_find_struct(ctx->spec, "VS_STATE");
if (strct == NULL) {
fprintf(ctx->fp, "did not find VS_STATE info\n");
return;
}
struct gen_batch_decode_bo bind_bo =
struct intel_batch_decode_bo bind_bo =
ctx_get_bo(ctx, true, offset);
if (bind_bo.map == NULL) {
@ -887,16 +887,16 @@ decode_vs_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
static void
decode_clip_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
decode_clip_state(struct intel_batch_decode_ctx *ctx, uint32_t offset)
{
struct gen_group *strct =
gen_spec_find_struct(ctx->spec, "CLIP_STATE");
struct intel_group *strct =
intel_spec_find_struct(ctx->spec, "CLIP_STATE");
if (strct == NULL) {
fprintf(ctx->fp, "did not find CLIP_STATE info\n");
return;
}
struct gen_batch_decode_bo bind_bo =
struct intel_batch_decode_bo bind_bo =
ctx_get_bo(ctx, true, offset);
if (bind_bo.map == NULL) {
@ -906,14 +906,14 @@ decode_clip_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
ctx_print_group(ctx, strct, offset, bind_bo.map);
struct gen_group *vp_strct =
gen_spec_find_struct(ctx->spec, "CLIP_VIEWPORT");
struct intel_group *vp_strct =
intel_spec_find_struct(ctx->spec, "CLIP_VIEWPORT");
if (vp_strct == NULL) {
fprintf(ctx->fp, "did not find CLIP_VIEWPORT info\n");
return;
}
uint32_t clip_vp_offset = ((uint32_t *)bind_bo.map)[6] & ~0x3;
struct gen_batch_decode_bo vp_bo =
struct intel_batch_decode_bo vp_bo =
ctx_get_bo(ctx, true, clip_vp_offset);
if (vp_bo.map == NULL) {
fprintf(ctx->fp, " clip vp state unavailable\n");
@ -923,16 +923,16 @@ decode_clip_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
}
static void
decode_sf_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
decode_sf_state(struct intel_batch_decode_ctx *ctx, uint32_t offset)
{
struct gen_group *strct =
gen_spec_find_struct(ctx->spec, "SF_STATE");
struct intel_group *strct =
intel_spec_find_struct(ctx->spec, "SF_STATE");
if (strct == NULL) {
fprintf(ctx->fp, "did not find SF_STATE info\n");
return;
}
struct gen_batch_decode_bo bind_bo =
struct intel_batch_decode_bo bind_bo =
ctx_get_bo(ctx, true, offset);
if (bind_bo.map == NULL) {
@ -942,15 +942,15 @@ decode_sf_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
ctx_print_group(ctx, strct, offset, bind_bo.map);
struct gen_group *vp_strct =
gen_spec_find_struct(ctx->spec, "SF_VIEWPORT");
struct intel_group *vp_strct =
intel_spec_find_struct(ctx->spec, "SF_VIEWPORT");
if (vp_strct == NULL) {
fprintf(ctx->fp, "did not find SF_VIEWPORT info\n");
return;
}
uint32_t sf_vp_offset = ((uint32_t *)bind_bo.map)[5] & ~0x3;
struct gen_batch_decode_bo vp_bo =
struct intel_batch_decode_bo vp_bo =
ctx_get_bo(ctx, true, sf_vp_offset);
if (vp_bo.map == NULL) {
fprintf(ctx->fp, " sf vp state unavailable\n");
@ -960,16 +960,16 @@ decode_sf_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
}
static void
decode_wm_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
decode_wm_state(struct intel_batch_decode_ctx *ctx, uint32_t offset)
{
struct gen_group *strct =
gen_spec_find_struct(ctx->spec, "WM_STATE");
struct intel_group *strct =
intel_spec_find_struct(ctx->spec, "WM_STATE");
if (strct == NULL) {
fprintf(ctx->fp, "did not find WM_STATE info\n");
return;
}
struct gen_batch_decode_bo bind_bo =
struct intel_batch_decode_bo bind_bo =
ctx_get_bo(ctx, true, offset);
if (bind_bo.map == NULL) {
@ -981,16 +981,16 @@ decode_wm_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
}
static void
decode_cc_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
decode_cc_state(struct intel_batch_decode_ctx *ctx, uint32_t offset)
{
struct gen_group *strct =
gen_spec_find_struct(ctx->spec, "COLOR_CALC_STATE");
struct intel_group *strct =
intel_spec_find_struct(ctx->spec, "COLOR_CALC_STATE");
if (strct == NULL) {
fprintf(ctx->fp, "did not find COLOR_CALC_STATE info\n");
return;
}
struct gen_batch_decode_bo bind_bo =
struct intel_batch_decode_bo bind_bo =
ctx_get_bo(ctx, true, offset);
if (bind_bo.map == NULL) {
@ -1000,14 +1000,14 @@ decode_cc_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
ctx_print_group(ctx, strct, offset, bind_bo.map);
struct gen_group *vp_strct =
gen_spec_find_struct(ctx->spec, "CC_VIEWPORT");
struct intel_group *vp_strct =
intel_spec_find_struct(ctx->spec, "CC_VIEWPORT");
if (vp_strct == NULL) {
fprintf(ctx->fp, "did not find CC_VIEWPORT info\n");
return;
}
uint32_t cc_vp_offset = ((uint32_t *)bind_bo.map)[4] & ~0x3;
struct gen_batch_decode_bo vp_bo =
struct intel_batch_decode_bo vp_bo =
ctx_get_bo(ctx, true, cc_vp_offset);
if (vp_bo.map == NULL) {
fprintf(ctx->fp, " cc vp state unavailable\n");
@ -1016,7 +1016,7 @@ decode_cc_state(struct gen_batch_decode_ctx *ctx, uint32_t offset)
ctx_print_group(ctx, vp_strct, cc_vp_offset, vp_bo.map);
}
static void
decode_pipelined_pointers(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
decode_pipelined_pointers(struct intel_batch_decode_ctx *ctx, const uint32_t *p)
{
fprintf(ctx->fp, "VS State Table:\n");
decode_vs_state(ctx, p[1]);
@ -1032,7 +1032,7 @@ decode_pipelined_pointers(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
struct custom_decoder {
const char *cmd_name;
void (*decode)(struct gen_batch_decode_ctx *ctx, const uint32_t *p);
void (*decode)(struct intel_batch_decode_ctx *ctx, const uint32_t *p);
} custom_decoders[] = {
{ "STATE_BASE_ADDRESS", handle_state_base_address },
{ "MEDIA_INTERFACE_DESCRIPTOR_LOAD", handle_media_interface_descriptor_load },
@ -1077,13 +1077,13 @@ struct custom_decoder {
};
void
gen_print_batch(struct gen_batch_decode_ctx *ctx,
intel_print_batch(struct intel_batch_decode_ctx *ctx,
const uint32_t *batch, uint32_t batch_size,
uint64_t batch_addr, bool from_ring)
{
const uint32_t *p, *end = batch + batch_size / sizeof(uint32_t);
int length;
struct gen_group *inst;
struct intel_group *inst;
const char *reset_color = ctx->flags & GEN_BATCH_DECODE_IN_COLOR ? NORMAL : "";
if (ctx->n_batch_buffer_start >= 100) {
@ -1097,8 +1097,8 @@ gen_print_batch(struct gen_batch_decode_ctx *ctx,
ctx->n_batch_buffer_start++;
for (p = batch; p < end; p += length) {
inst = gen_ctx_find_instruction(ctx, p);
length = gen_group_get_length(inst, p);
inst = intel_ctx_find_instruction(ctx, p);
length = intel_group_get_length(inst, p);
assert(inst == NULL || length > 0);
length = MAX2(1, length);
@ -1123,7 +1123,7 @@ gen_print_batch(struct gen_batch_decode_ctx *ctx,
}
const char *color;
const char *inst_name = gen_group_get_name(inst);
const char *inst_name = intel_group_get_name(inst);
if (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) {
reset_color = NORMAL;
if (ctx->flags & GEN_BATCH_DECODE_FULL) {
@ -1159,9 +1159,9 @@ gen_print_batch(struct gen_batch_decode_ctx *ctx,
bool ppgtt = false;
bool second_level = false;
bool predicate = false;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Batch Buffer Start Address") == 0) {
next_batch_addr = iter.raw_value;
} else if (strcmp(iter.name, "Second Level Batch Buffer") == 0) {
@ -1174,13 +1174,13 @@ gen_print_batch(struct gen_batch_decode_ctx *ctx,
}
if (!predicate) {
struct gen_batch_decode_bo next_batch = ctx_get_bo(ctx, ppgtt, next_batch_addr);
struct intel_batch_decode_bo next_batch = ctx_get_bo(ctx, ppgtt, next_batch_addr);
if (next_batch.map == NULL) {
fprintf(ctx->fp, "Secondary batch at 0x%08"PRIx64" unavailable\n",
next_batch_addr);
} else {
gen_print_batch(ctx, next_batch.map, next_batch.size,
intel_print_batch(ctx, next_batch.map, next_batch.size,
next_batch.addr, false);
}
if (second_level) {

View File

@ -30,7 +30,7 @@
extern "C" {
#endif
struct gen_buffer {
struct intel_buffer {
uint64_t gpu;
uint64_t gpu_end;
void *map;
@ -38,8 +38,8 @@ struct gen_buffer {
};
struct gen_mapped_pinned_buffer_alloc {
struct gen_buffer * (*alloc)(void *driver_ctx, uint32_t size);
void (*free)(void *driver_ctx, struct gen_buffer *buffer);
struct intel_buffer * (*alloc)(void *driver_ctx, uint32_t size);
void (*free)(void *driver_ctx, struct intel_buffer *buffer);
};
#ifdef __cplusplus

View File

@ -40,7 +40,7 @@ gen_clflush_range(void *start, size_t size)
}
static inline void
gen_flush_range(void *start, size_t size)
intel_flush_range(void *start, size_t size)
{
__builtin_ia32_mfence();
gen_clflush_range(start, size);

View File

@ -51,39 +51,39 @@ struct parser_context {
int foo;
struct location loc;
struct gen_group *group;
struct gen_enum *enoom;
struct intel_group *group;
struct intel_enum *enoom;
int n_values, n_allocated_values;
struct gen_value **values;
struct intel_value **values;
struct gen_field *last_field;
struct intel_field *last_field;
struct gen_spec *spec;
struct intel_spec *spec;
};
const char *
gen_group_get_name(struct gen_group *group)
intel_group_get_name(struct intel_group *group)
{
return group->name;
}
uint32_t
gen_group_get_opcode(struct gen_group *group)
intel_group_get_opcode(struct intel_group *group)
{
return group->opcode;
}
struct gen_group *
gen_spec_find_struct(struct gen_spec *spec, const char *name)
struct intel_group *
intel_spec_find_struct(struct intel_spec *spec, const char *name)
{
struct hash_entry *entry = _mesa_hash_table_search(spec->structs,
name);
return entry ? entry->data : NULL;
}
struct gen_group *
gen_spec_find_register(struct gen_spec *spec, uint32_t offset)
struct intel_group *
intel_spec_find_register(struct intel_spec *spec, uint32_t offset)
{
struct hash_entry *entry =
_mesa_hash_table_search(spec->registers_by_offset,
@ -91,16 +91,16 @@ gen_spec_find_register(struct gen_spec *spec, uint32_t offset)
return entry ? entry->data : NULL;
}
struct gen_group *
gen_spec_find_register_by_name(struct gen_spec *spec, const char *name)
struct intel_group *
intel_spec_find_register_by_name(struct intel_spec *spec, const char *name)
{
struct hash_entry *entry =
_mesa_hash_table_search(spec->registers_by_name, name);
return entry ? entry->data : NULL;
}
struct gen_enum *
gen_spec_find_enum(struct gen_spec *spec, const char *name)
struct intel_enum *
intel_spec_find_enum(struct intel_spec *spec, const char *name)
{
struct hash_entry *entry = _mesa_hash_table_search(spec->enums,
name);
@ -108,7 +108,7 @@ gen_spec_find_enum(struct gen_spec *spec, const char *name)
}
uint32_t
gen_spec_get_gen(struct gen_spec *spec)
intel_spec_get_gen(struct intel_spec *spec)
{
return spec->gen;
}
@ -147,16 +147,16 @@ get_array_offset_count(const char **atts, uint32_t *offset, uint32_t *count,
return;
}
static struct gen_group *
static struct intel_group *
create_group(struct parser_context *ctx,
const char *name,
const char **atts,
struct gen_group *parent,
struct intel_group *parent,
bool fixed_length)
{
struct gen_group *group;
struct intel_group *group;
group = rzalloc(ctx->spec, struct gen_group);
group = rzalloc(ctx->spec, struct intel_group);
if (name)
group->name = ralloc_strdup(group, name);
@ -213,12 +213,12 @@ create_group(struct parser_context *ctx,
return group;
}
static struct gen_enum *
static struct intel_enum *
create_enum(struct parser_context *ctx, const char *name, const char **atts)
{
struct gen_enum *e;
struct intel_enum *e;
e = rzalloc(ctx->spec, struct gen_enum);
e = rzalloc(ctx->spec, struct intel_enum);
if (name)
e->name = ralloc_strdup(e, name);
@ -274,45 +274,45 @@ field_value(uint64_t value, int start, int end)
return (value & mask(start, end)) >> (start);
}
static struct gen_type
static struct intel_type
string_to_type(struct parser_context *ctx, const char *s)
{
int i, f;
struct gen_group *g;
struct gen_enum *e;
struct intel_group *g;
struct intel_enum *e;
if (strcmp(s, "int") == 0)
return (struct gen_type) { .kind = GEN_TYPE_INT };
return (struct intel_type) { .kind = GEN_TYPE_INT };
else if (strcmp(s, "uint") == 0)
return (struct gen_type) { .kind = GEN_TYPE_UINT };
return (struct intel_type) { .kind = GEN_TYPE_UINT };
else if (strcmp(s, "bool") == 0)
return (struct gen_type) { .kind = GEN_TYPE_BOOL };
return (struct intel_type) { .kind = GEN_TYPE_BOOL };
else if (strcmp(s, "float") == 0)
return (struct gen_type) { .kind = GEN_TYPE_FLOAT };
return (struct intel_type) { .kind = GEN_TYPE_FLOAT };
else if (strcmp(s, "address") == 0)
return (struct gen_type) { .kind = GEN_TYPE_ADDRESS };
return (struct intel_type) { .kind = GEN_TYPE_ADDRESS };
else if (strcmp(s, "offset") == 0)
return (struct gen_type) { .kind = GEN_TYPE_OFFSET };
return (struct intel_type) { .kind = GEN_TYPE_OFFSET };
else if (sscanf(s, "u%d.%d", &i, &f) == 2)
return (struct gen_type) { .kind = GEN_TYPE_UFIXED, .i = i, .f = f };
return (struct intel_type) { .kind = GEN_TYPE_UFIXED, .i = i, .f = f };
else if (sscanf(s, "s%d.%d", &i, &f) == 2)
return (struct gen_type) { .kind = GEN_TYPE_SFIXED, .i = i, .f = f };
else if (g = gen_spec_find_struct(ctx->spec, s), g != NULL)
return (struct gen_type) { .kind = GEN_TYPE_STRUCT, .gen_struct = g };
else if (e = gen_spec_find_enum(ctx->spec, s), e != NULL)
return (struct gen_type) { .kind = GEN_TYPE_ENUM, .gen_enum = e };
return (struct intel_type) { .kind = GEN_TYPE_SFIXED, .i = i, .f = f };
else if (g = intel_spec_find_struct(ctx->spec, s), g != NULL)
return (struct intel_type) { .kind = GEN_TYPE_STRUCT, .intel_struct = g };
else if (e = intel_spec_find_enum(ctx->spec, s), e != NULL)
return (struct intel_type) { .kind = GEN_TYPE_ENUM, .intel_enum = e };
else if (strcmp(s, "mbo") == 0)
return (struct gen_type) { .kind = GEN_TYPE_MBO };
return (struct intel_type) { .kind = GEN_TYPE_MBO };
else
fail(&ctx->loc, "invalid type: %s", s);
}
static struct gen_field *
static struct intel_field *
create_field(struct parser_context *ctx, const char **atts)
{
struct gen_field *field;
struct intel_field *field;
field = rzalloc(ctx->group, struct gen_field);
field = rzalloc(ctx->group, struct intel_field);
field->parent = ctx->group;
for (int i = 0; atts[i]; i += 2) {
@ -339,12 +339,12 @@ create_field(struct parser_context *ctx, const char **atts)
return field;
}
static struct gen_field *
create_array_field(struct parser_context *ctx, struct gen_group *array)
static struct intel_field *
create_array_field(struct parser_context *ctx, struct intel_group *array)
{
struct gen_field *field;
struct intel_field *field;
field = rzalloc(ctx->group, struct gen_field);
field = rzalloc(ctx->group, struct intel_field);
field->parent = ctx->group;
field->array = array;
@ -353,10 +353,10 @@ create_array_field(struct parser_context *ctx, struct gen_group *array)
return field;
}
static struct gen_value *
static struct intel_value *
create_value(struct parser_context *ctx, const char **atts)
{
struct gen_value *value = rzalloc(ctx->values, struct gen_value);
struct intel_value *value = rzalloc(ctx->values, struct intel_value);
for (int i = 0; atts[i]; i += 2) {
if (strcmp(atts[i], "name") == 0)
@ -368,14 +368,14 @@ create_value(struct parser_context *ctx, const char **atts)
return value;
}
static struct gen_field *
static struct intel_field *
create_and_append_field(struct parser_context *ctx,
const char **atts,
struct gen_group *array)
struct intel_group *array)
{
struct gen_field *field = array ?
struct intel_field *field = array ?
create_array_field(ctx, array) : create_field(ctx, atts);
struct gen_field *prev = NULL, *list = ctx->group->fields;
struct intel_field *prev = NULL, *list = ctx->group->fields;
while (list && field->start > list->start) {
prev = list;
@ -420,7 +420,7 @@ start_element(void *data, const char *element_name, const char **atts)
if (n == 1)
minor = 0;
ctx->spec->gen = gen_make_gen(major, minor);
ctx->spec->gen = intel_make_gen(major, minor);
} else if (strcmp(element_name, "instruction") == 0) {
ctx->group = create_group(ctx, name, atts, NULL, false);
} else if (strcmp(element_name, "struct") == 0) {
@ -429,7 +429,7 @@ start_element(void *data, const char *element_name, const char **atts)
ctx->group = create_group(ctx, name, atts, NULL, true);
get_register_offset(atts, &ctx->group->register_offset);
} else if (strcmp(element_name, "group") == 0) {
struct gen_group *group = create_group(ctx, "", atts, ctx->group, false);
struct intel_group *group = create_group(ctx, "", atts, ctx->group, false);
ctx->last_field = create_and_append_field(ctx, NULL, group);
ctx->group = group;
} else if (strcmp(element_name, "field") == 0) {
@ -440,7 +440,7 @@ start_element(void *data, const char *element_name, const char **atts)
if (ctx->n_values >= ctx->n_allocated_values) {
ctx->n_allocated_values = MAX2(2, ctx->n_allocated_values * 2);
ctx->values = reralloc_array_size(ctx->spec, ctx->values,
sizeof(struct gen_value *),
sizeof(struct intel_value *),
ctx->n_allocated_values);
}
assert(ctx->n_values < ctx->n_allocated_values);
@ -453,13 +453,13 @@ static void
end_element(void *data, const char *name)
{
struct parser_context *ctx = data;
struct gen_spec *spec = ctx->spec;
struct intel_spec *spec = ctx->spec;
if (strcmp(name, "instruction") == 0 ||
strcmp(name, "struct") == 0 ||
strcmp(name, "register") == 0) {
struct gen_group *group = ctx->group;
struct gen_field *list = group->fields;
struct intel_group *group = ctx->group;
struct intel_field *list = group->fields;
ctx->group = ctx->group->parent;
@ -485,17 +485,17 @@ end_element(void *data, const char *name)
} else if (strcmp(name, "group") == 0) {
ctx->group = ctx->group->parent;
} else if (strcmp(name, "field") == 0) {
struct gen_field *field = ctx->last_field;
struct intel_field *field = ctx->last_field;
ctx->last_field = NULL;
field->inline_enum.values = ctx->values;
field->inline_enum.nvalues = ctx->n_values;
ctx->values = ralloc_array(ctx->spec, struct gen_value*, ctx->n_allocated_values = 2);
ctx->values = ralloc_array(ctx->spec, struct intel_value*, ctx->n_allocated_values = 2);
ctx->n_values = 0;
} else if (strcmp(name, "enum") == 0) {
struct gen_enum *e = ctx->enoom;
struct intel_enum *e = ctx->enoom;
e->values = ctx->values;
e->nvalues = ctx->n_values;
ctx->values = ralloc_array(ctx->spec, struct gen_value*, ctx->n_allocated_values = 2);
ctx->values = ralloc_array(ctx->spec, struct intel_value*, ctx->n_allocated_values = 2);
ctx->n_values = 0;
ctx->enoom = NULL;
_mesa_hash_table_insert(spec->enums, e->name, e);
@ -570,11 +570,11 @@ static uint32_t _hash_uint32(const void *key)
return (uint32_t) (uintptr_t) key;
}
static struct gen_spec *
gen_spec_init(void)
static struct intel_spec *
intel_spec_init(void)
{
struct gen_spec *spec;
spec = rzalloc(NULL, struct gen_spec);
struct intel_spec *spec;
spec = rzalloc(NULL, struct intel_spec);
if (spec == NULL)
return NULL;
@ -594,8 +594,8 @@ gen_spec_init(void)
return spec;
}
struct gen_spec *
gen_spec_load(const struct gen_device_info *devinfo)
struct intel_spec *
intel_spec_load(const struct gen_device_info *devinfo)
{
struct parser_context ctx;
void *buf;
@ -628,9 +628,9 @@ gen_spec_load(const struct gen_device_info *devinfo)
XML_SetElementHandler(ctx.parser, start_element, end_element);
XML_SetCharacterDataHandler(ctx.parser, character_data);
ctx.spec = gen_spec_init();
ctx.spec = intel_spec_init();
if (ctx.spec == NULL) {
fprintf(stderr, "Failed to create gen_spec\n");
fprintf(stderr, "Failed to create intel_spec\n");
return NULL;
}
@ -660,8 +660,8 @@ gen_spec_load(const struct gen_device_info *devinfo)
return ctx.spec;
}
struct gen_spec *
gen_spec_load_filename(const char *filename)
struct intel_spec *
intel_spec_load_filename(const char *filename)
{
struct parser_context ctx;
FILE *input;
@ -687,9 +687,9 @@ gen_spec_load_filename(const char *filename)
XML_SetCharacterDataHandler(ctx.parser, character_data);
ctx.loc.filename = filename;
ctx.spec = gen_spec_init();
ctx.spec = intel_spec_init();
if (ctx.spec == NULL) {
fprintf(stderr, "Failed to create gen_spec\n");
fprintf(stderr, "Failed to create intel_spec\n");
goto end;
}
@ -698,7 +698,7 @@ gen_spec_load_filename(const char *filename)
len = fread(buf, 1, XML_BUFFER_SIZE, input);
if (ferror(input)) {
fprintf(stderr, "fread: %m\n");
gen_spec_destroy(ctx.spec);
intel_spec_destroy(ctx.spec);
ctx.spec = NULL;
goto end;
} else if (len == 0 && feof(input))
@ -710,7 +710,7 @@ gen_spec_load_filename(const char *filename)
XML_GetCurrentLineNumber(ctx.parser),
XML_GetCurrentColumnNumber(ctx.parser),
XML_ErrorString(XML_GetErrorCode(ctx.parser)));
gen_spec_destroy(ctx.spec);
intel_spec_destroy(ctx.spec);
ctx.spec = NULL;
goto end;
}
@ -727,15 +727,15 @@ gen_spec_load_filename(const char *filename)
_mesa_hash_table_num_entries(ctx.spec->structs) == 0) {
fprintf(stderr,
"Error parsing XML: empty spec.\n");
gen_spec_destroy(ctx.spec);
intel_spec_destroy(ctx.spec);
return NULL;
}
return ctx.spec;
}
struct gen_spec *
gen_spec_load_from_path(const struct gen_device_info *devinfo,
struct intel_spec *
intel_spec_load_from_path(const struct gen_device_info *devinfo,
const char *path)
{
size_t filename_len = strlen(path) + 20;
@ -745,24 +745,24 @@ gen_spec_load_from_path(const struct gen_device_info *devinfo,
path, devinfo_to_gen(devinfo, false));
assert(len < filename_len);
struct gen_spec *spec = gen_spec_load_filename(filename);
struct intel_spec *spec = intel_spec_load_filename(filename);
free(filename);
return spec;
}
void gen_spec_destroy(struct gen_spec *spec)
void intel_spec_destroy(struct intel_spec *spec)
{
ralloc_free(spec);
}
struct gen_group *
gen_spec_find_instruction(struct gen_spec *spec,
struct intel_group *
intel_spec_find_instruction(struct intel_spec *spec,
enum drm_i915_gem_engine_class engine,
const uint32_t *p)
{
hash_table_foreach(spec->commands, entry) {
struct gen_group *command = entry->data;
struct intel_group *command = entry->data;
uint32_t opcode = *p & command->opcode_mask;
if ((command->engine_mask & I915_ENGINE_CLASS_TO_MASK(engine)) &&
opcode == command->opcode)
@ -772,19 +772,19 @@ gen_spec_find_instruction(struct gen_spec *spec,
return NULL;
}
struct gen_field *
gen_group_find_field(struct gen_group *group, const char *name)
struct intel_field *
intel_group_find_field(struct intel_group *group, const char *name)
{
char path[256];
snprintf(path, sizeof(path), "%s/%s", group->name, name);
struct gen_spec *spec = group->spec;
struct intel_spec *spec = group->spec;
struct hash_entry *entry = _mesa_hash_table_search(spec->access_cache,
path);
if (entry)
return entry->data;
struct gen_field *field = group->fields;
struct intel_field *field = group->fields;
while (field) {
if (strcmp(field->name, name) == 0) {
_mesa_hash_table_insert(spec->access_cache,
@ -799,13 +799,13 @@ gen_group_find_field(struct gen_group *group, const char *name)
}
int
gen_group_get_length(struct gen_group *group, const uint32_t *p)
intel_group_get_length(struct intel_group *group, const uint32_t *p)
{
if (group) {
if (group->fixed_length)
return group->dw_length;
else {
struct gen_field *field = group->dword_length_field;
struct intel_field *field = group->dword_length_field;
if (field) {
return field_value(p[0], field->start, field->end) + group->bias;
}
@ -869,7 +869,7 @@ gen_group_get_length(struct gen_group *group, const uint32_t *p)
}
static const char *
gen_get_enum_name(struct gen_enum *e, uint64_t value)
intel_get_enum_name(struct intel_enum *e, uint64_t value)
{
for (int i = 0; i < e->nvalues; i++) {
if (e->values[i]->value == value) {
@ -880,19 +880,19 @@ gen_get_enum_name(struct gen_enum *e, uint64_t value)
}
static bool
iter_more_fields(const struct gen_field_iterator *iter)
iter_more_fields(const struct intel_field_iterator *iter)
{
return iter->field != NULL && iter->field->next != NULL;
}
static uint32_t
iter_array_offset_bits(const struct gen_field_iterator *iter)
iter_array_offset_bits(const struct intel_field_iterator *iter)
{
if (iter->level == 0)
return 0;
uint32_t offset = 0;
const struct gen_group *group = iter->groups[1];
const struct intel_group *group = iter->groups[1];
for (int level = 1; level <= iter->level; level++, group = iter->groups[level]) {
uint32_t array_idx = iter->array_iter[level];
offset += group->array_offset + array_idx * group->array_item_size;
@ -906,7 +906,7 @@ iter_array_offset_bits(const struct gen_field_iterator *iter)
*/
/* descend into a non-array field */
static void
iter_push_array(struct gen_field_iterator *iter)
iter_push_array(struct intel_field_iterator *iter)
{
assert(iter->level >= 0);
@ -922,7 +922,7 @@ iter_push_array(struct gen_field_iterator *iter)
}
static void
iter_pop_array(struct gen_field_iterator *iter)
iter_pop_array(struct intel_field_iterator *iter)
{
assert(iter->level > 0);
@ -932,7 +932,7 @@ iter_pop_array(struct gen_field_iterator *iter)
}
static void
iter_start_field(struct gen_field_iterator *iter, struct gen_field *field)
iter_start_field(struct intel_field_iterator *iter, struct intel_field *field)
{
iter->field = field;
iter->fields[iter->level] = field;
@ -948,7 +948,7 @@ iter_start_field(struct gen_field_iterator *iter, struct gen_field *field)
}
static void
iter_advance_array(struct gen_field_iterator *iter)
iter_advance_array(struct intel_field_iterator *iter)
{
assert(iter->level > 0);
int lvl = iter->level;
@ -965,13 +965,13 @@ iter_advance_array(struct gen_field_iterator *iter)
}
static bool
iter_more_array_elems(const struct gen_field_iterator *iter)
iter_more_array_elems(const struct intel_field_iterator *iter)
{
int lvl = iter->level;
assert(lvl >= 0);
if (iter->group->variable) {
int length = gen_group_get_length(iter->group, iter->p);
int length = intel_group_get_length(iter->group, iter->p);
assert(length >= 0 && "error the length is unknown!");
return iter_array_offset_bits(iter) + iter->group->array_item_size <
(length * 32);
@ -981,7 +981,7 @@ iter_more_array_elems(const struct gen_field_iterator *iter)
}
static bool
iter_advance_field(struct gen_field_iterator *iter)
iter_advance_field(struct intel_field_iterator *iter)
{
/* Keep looping while we either have more fields to look at, or we are
* inside a <group> and can go up a level.
@ -1010,7 +1010,7 @@ iter_advance_field(struct gen_field_iterator *iter)
}
static bool
iter_decode_field_raw(struct gen_field_iterator *iter, uint64_t *qw)
iter_decode_field_raw(struct intel_field_iterator *iter, uint64_t *qw)
{
*qw = 0;
@ -1041,7 +1041,7 @@ iter_decode_field_raw(struct gen_field_iterator *iter, uint64_t *qw)
}
static bool
iter_decode_field(struct gen_field_iterator *iter)
iter_decode_field(struct intel_field_iterator *iter)
{
union {
uint64_t qw;
@ -1065,12 +1065,12 @@ iter_decode_field(struct gen_field_iterator *iter)
case GEN_TYPE_UNKNOWN:
case GEN_TYPE_INT: {
snprintf(iter->value, sizeof(iter->value), "%"PRId64, v.qw);
enum_name = gen_get_enum_name(&iter->field->inline_enum, v.qw);
enum_name = intel_get_enum_name(&iter->field->inline_enum, v.qw);
break;
}
case GEN_TYPE_UINT: {
snprintf(iter->value, sizeof(iter->value), "%"PRIu64, v.qw);
enum_name = gen_get_enum_name(&iter->field->inline_enum, v.qw);
enum_name = intel_get_enum_name(&iter->field->inline_enum, v.qw);
break;
}
case GEN_TYPE_BOOL: {
@ -1089,10 +1089,10 @@ iter_decode_field(struct gen_field_iterator *iter)
break;
case GEN_TYPE_STRUCT:
snprintf(iter->value, sizeof(iter->value), "<struct %s>",
iter->field->type.gen_struct->name);
iter->field->type.intel_struct->name);
iter->struct_desc =
gen_spec_find_struct(iter->group->spec,
iter->field->type.gen_struct->name);
intel_spec_find_struct(iter->group->spec,
iter->field->type.intel_struct->name);
break;
case GEN_TYPE_UFIXED:
snprintf(iter->value, sizeof(iter->value), "%f",
@ -1110,7 +1110,7 @@ iter_decode_field(struct gen_field_iterator *iter)
break;
case GEN_TYPE_ENUM: {
snprintf(iter->value, sizeof(iter->value), "%"PRId64, v.qw);
enum_name = gen_get_enum_name(iter->field->type.gen_enum, v.qw);
enum_name = intel_get_enum_name(iter->field->type.intel_enum, v.qw);
break;
}
}
@ -1148,8 +1148,8 @@ iter_decode_field(struct gen_field_iterator *iter)
}
void
gen_field_iterator_init(struct gen_field_iterator *iter,
struct gen_group *group,
intel_field_iterator_init(struct intel_field_iterator *iter,
struct intel_group *group,
const uint32_t *p, int p_bit,
bool print_colors)
{
@ -1160,14 +1160,14 @@ gen_field_iterator_init(struct gen_field_iterator *iter,
iter->p = p;
iter->p_bit = p_bit;
int length = gen_group_get_length(iter->group, iter->p);
int length = intel_group_get_length(iter->group, iter->p);
assert(length >= 0 && "error the length is unknown!");
iter->p_end = length >= 0 ? &p[length] : NULL;
iter->print_colors = print_colors;
}
bool
gen_field_iterator_next(struct gen_field_iterator *iter)
intel_field_iterator_next(struct intel_field_iterator *iter)
{
/* Initial condition */
if (!iter->field) {
@ -1196,7 +1196,7 @@ gen_field_iterator_next(struct gen_field_iterator *iter)
static void
print_dword_header(FILE *outfile,
struct gen_field_iterator *iter,
struct intel_field_iterator *iter,
uint64_t offset, uint32_t dword)
{
fprintf(outfile, "0x%08"PRIx64": 0x%08x : Dword %d\n",
@ -1204,7 +1204,7 @@ print_dword_header(FILE *outfile,
}
bool
gen_field_is_header(struct gen_field *field)
intel_field_is_header(struct intel_field *field)
{
uint32_t bits;
@ -1220,26 +1220,26 @@ gen_field_is_header(struct gen_field *field)
}
void
gen_print_group(FILE *outfile, struct gen_group *group, uint64_t offset,
intel_print_group(FILE *outfile, struct intel_group *group, uint64_t offset,
const uint32_t *p, int p_bit, bool color)
{
struct gen_field_iterator iter;
struct intel_field_iterator iter;
int last_dword = -1;
gen_field_iterator_init(&iter, group, p, p_bit, color);
while (gen_field_iterator_next(&iter)) {
intel_field_iterator_init(&iter, group, p, p_bit, color);
while (intel_field_iterator_next(&iter)) {
int iter_dword = iter.end_bit / 32;
if (last_dword != iter_dword) {
for (int i = last_dword + 1; i <= iter_dword; i++)
print_dword_header(outfile, &iter, offset, i);
last_dword = iter_dword;
}
if (!gen_field_is_header(iter.field)) {
if (!intel_field_is_header(iter.field)) {
fprintf(outfile, " %s: %s\n", iter.name, iter.value);
if (iter.struct_desc) {
int struct_dword = iter.start_bit / 32;
uint64_t struct_offset = offset + 4 * struct_dword;
gen_print_group(outfile, iter.struct_desc, struct_offset,
intel_print_group(outfile, iter.struct_desc, struct_offset,
&p[struct_dword], iter.start_bit % 32, color);
}
}

View File

@ -38,66 +38,66 @@
extern "C" {
#endif
struct gen_spec;
struct gen_group;
struct gen_field;
union gen_field_value;
struct intel_spec;
struct intel_group;
struct intel_field;
union intel_field_value;
#define I915_ENGINE_CLASS_TO_MASK(x) BITSET_BIT(x)
static inline uint32_t gen_make_gen(uint32_t major, uint32_t minor)
static inline uint32_t intel_make_gen(uint32_t major, uint32_t minor)
{
return (major << 8) | minor;
}
struct gen_group *gen_spec_find_struct(struct gen_spec *spec, const char *name);
struct gen_spec *gen_spec_load(const struct gen_device_info *devinfo);
struct gen_spec *gen_spec_load_from_path(const struct gen_device_info *devinfo,
struct intel_group *intel_spec_find_struct(struct intel_spec *spec, const char *name);
struct intel_spec *intel_spec_load(const struct gen_device_info *devinfo);
struct intel_spec *intel_spec_load_from_path(const struct gen_device_info *devinfo,
const char *path);
struct gen_spec *gen_spec_load_filename(const char *filename);
void gen_spec_destroy(struct gen_spec *spec);
uint32_t gen_spec_get_gen(struct gen_spec *spec);
struct gen_group *gen_spec_find_instruction(struct gen_spec *spec,
struct intel_spec *intel_spec_load_filename(const char *filename);
void intel_spec_destroy(struct intel_spec *spec);
uint32_t intel_spec_get_gen(struct intel_spec *spec);
struct intel_group *intel_spec_find_instruction(struct intel_spec *spec,
enum drm_i915_gem_engine_class engine,
const uint32_t *p);
struct gen_group *gen_spec_find_register(struct gen_spec *spec, uint32_t offset);
struct gen_group *gen_spec_find_register_by_name(struct gen_spec *spec, const char *name);
struct gen_enum *gen_spec_find_enum(struct gen_spec *spec, const char *name);
struct intel_group *intel_spec_find_register(struct intel_spec *spec, uint32_t offset);
struct intel_group *intel_spec_find_register_by_name(struct intel_spec *spec, const char *name);
struct intel_enum *intel_spec_find_enum(struct intel_spec *spec, const char *name);
int gen_group_get_length(struct gen_group *group, const uint32_t *p);
const char *gen_group_get_name(struct gen_group *group);
uint32_t gen_group_get_opcode(struct gen_group *group);
struct gen_field *gen_group_find_field(struct gen_group *group, const char *name);
struct gen_enum *gen_spec_find_enum(struct gen_spec *spec, const char *name);
int intel_group_get_length(struct intel_group *group, const uint32_t *p);
const char *intel_group_get_name(struct intel_group *group);
uint32_t intel_group_get_opcode(struct intel_group *group);
struct intel_field *intel_group_find_field(struct intel_group *group, const char *name);
struct intel_enum *intel_spec_find_enum(struct intel_spec *spec, const char *name);
bool gen_field_is_header(struct gen_field *field);
bool intel_field_is_header(struct intel_field *field);
/* Only allow 5 levels of subgroup'ing
*/
#define DECODE_MAX_ARRAY_DEPTH 5
struct gen_field_iterator {
struct gen_group *group;
struct intel_field_iterator {
struct intel_group *group;
char name[128];
char value[128];
uint64_t raw_value;
struct gen_group *struct_desc;
struct intel_group *struct_desc;
const uint32_t *p;
int p_bit; /**< bit offset into p */
const uint32_t *p_end;
int start_bit; /**< current field starts at this bit offset into p */
int end_bit; /**< current field ends at this bit offset into p */
struct gen_field *fields[DECODE_MAX_ARRAY_DEPTH];
struct gen_group *groups[DECODE_MAX_ARRAY_DEPTH];
struct intel_field *fields[DECODE_MAX_ARRAY_DEPTH];
struct intel_group *groups[DECODE_MAX_ARRAY_DEPTH];
int array_iter[DECODE_MAX_ARRAY_DEPTH];
int level;
struct gen_field *field;
struct intel_field *field;
bool print_colors;
};
struct gen_spec {
struct intel_spec {
uint32_t gen;
struct hash_table *commands;
@ -109,12 +109,12 @@ struct gen_spec {
struct hash_table *access_cache;
};
struct gen_group {
struct gen_spec *spec;
struct intel_group {
struct intel_spec *spec;
char *name;
struct gen_field *fields; /* linked list of fields */
struct gen_field *dword_length_field; /* <instruction> specific */
struct intel_field *fields; /* linked list of fields */
struct intel_field *dword_length_field; /* <instruction> specific */
uint32_t dw_length;
uint32_t engine_mask; /* <instruction> specific */
@ -125,8 +125,8 @@ struct gen_group {
bool variable; /* <group> specific */
bool fixed_length; /* True for <struct> & <register> */
struct gen_group *parent;
struct gen_group *next;
struct intel_group *parent;
struct intel_group *next;
uint32_t opcode_mask;
uint32_t opcode;
@ -134,18 +134,18 @@ struct gen_group {
uint32_t register_offset; /* <register> specific */
};
struct gen_value {
struct intel_value {
char *name;
uint64_t value;
};
struct gen_enum {
struct intel_enum {
char *name;
int nvalues;
struct gen_value **values;
struct intel_value **values;
};
struct gen_type {
struct intel_type {
enum {
GEN_TYPE_UNKNOWN,
GEN_TYPE_INT,
@ -163,8 +163,8 @@ struct gen_type {
/* Struct definition for GEN_TYPE_STRUCT */
union {
struct gen_group *gen_struct;
struct gen_enum *gen_enum;
struct intel_group *intel_struct;
struct intel_enum *intel_enum;
struct {
/* Integer and fractional sizes for GEN_TYPE_UFIXED and GEN_TYPE_SFIXED */
int i, f;
@ -172,40 +172,40 @@ struct gen_type {
};
};
union gen_field_value {
union intel_field_value {
bool b32;
float f32;
uint64_t u64;
int64_t i64;
};
struct gen_field {
struct gen_group *parent;
struct gen_field *next;
struct gen_group *array;
struct intel_field {
struct intel_group *parent;
struct intel_field *next;
struct intel_group *array;
char *name;
int start, end;
struct gen_type type;
struct intel_type type;
bool has_default;
uint32_t default_value;
struct gen_enum inline_enum;
struct intel_enum inline_enum;
};
void gen_field_iterator_init(struct gen_field_iterator *iter,
struct gen_group *group,
void intel_field_iterator_init(struct intel_field_iterator *iter,
struct intel_group *group,
const uint32_t *p, int p_bit,
bool print_colors);
bool gen_field_iterator_next(struct gen_field_iterator *iter);
bool intel_field_iterator_next(struct intel_field_iterator *iter);
void gen_print_group(FILE *out,
struct gen_group *group,
void intel_print_group(FILE *out,
struct intel_group *group,
uint64_t offset, const uint32_t *p, int p_bit,
bool color);
enum gen_batch_decode_flags {
enum intel_batch_decode_flags {
/** Print in color! */
GEN_BATCH_DECODE_IN_COLOR = (1 << 0),
/** Print everything, not just headers */
@ -216,20 +216,20 @@ enum gen_batch_decode_flags {
GEN_BATCH_DECODE_FLOATS = (1 << 3),
};
struct gen_batch_decode_bo {
struct intel_batch_decode_bo {
uint64_t addr;
uint32_t size;
const void *map;
};
struct gen_batch_decode_ctx {
struct intel_batch_decode_ctx {
/**
* Return information about the buffer containing the given address.
*
* If the given address is inside a buffer, the map pointer should be
* offset accordingly so it points at the data corresponding to address.
*/
struct gen_batch_decode_bo (*get_bo)(void *user_data, bool ppgtt, uint64_t address);
struct intel_batch_decode_bo (*get_bo)(void *user_data, bool ppgtt, uint64_t address);
unsigned (*get_state_size)(void *user_data,
uint64_t address,
uint64_t base_address);
@ -237,8 +237,8 @@ struct gen_batch_decode_ctx {
FILE *fp;
struct gen_device_info devinfo;
struct gen_spec *spec;
enum gen_batch_decode_flags flags;
struct intel_spec *spec;
enum intel_batch_decode_flags flags;
uint64_t surface_base;
uint64_t dynamic_base;
@ -251,21 +251,21 @@ struct gen_batch_decode_ctx {
int n_batch_buffer_start;
};
void gen_batch_decode_ctx_init(struct gen_batch_decode_ctx *ctx,
void intel_batch_decode_ctx_init(struct intel_batch_decode_ctx *ctx,
const struct gen_device_info *devinfo,
FILE *fp, enum gen_batch_decode_flags flags,
FILE *fp, enum intel_batch_decode_flags flags,
const char *xml_path,
struct gen_batch_decode_bo (*get_bo)(void *,
struct intel_batch_decode_bo (*get_bo)(void *,
bool,
uint64_t),
unsigned (*get_state_size)(void *, uint64_t,
uint64_t),
void *user_data);
void gen_batch_decode_ctx_finish(struct gen_batch_decode_ctx *ctx);
void intel_batch_decode_ctx_finish(struct intel_batch_decode_ctx *ctx);
void gen_print_batch(struct gen_batch_decode_ctx *ctx,
void intel_print_batch(struct intel_batch_decode_ctx *ctx,
const uint32_t *batch, uint32_t batch_size,
uint64_t batch_addr, bool from_ring);

View File

@ -38,7 +38,7 @@ is_send(uint32_t opcode)
}
static int
gen_disasm_find_end(const struct gen_device_info *devinfo,
intel_disasm_find_end(const struct gen_device_info *devinfo,
const void *assembly, int start)
{
int offset = start;
@ -64,10 +64,10 @@ gen_disasm_find_end(const struct gen_device_info *devinfo,
}
void
gen_disassemble(const struct gen_device_info *devinfo,
intel_disassemble(const struct gen_device_info *devinfo,
const void *assembly, int start, FILE *out)
{
int end = gen_disasm_find_end(devinfo, assembly, start);
int end = intel_disasm_find_end(devinfo, assembly, start);
/* Make a dummy disasm structure that brw_validate_instructions
* can work from.

View File

@ -30,7 +30,7 @@
extern "C" {
#endif
void gen_disassemble(const struct gen_device_info *devinfo,
void intel_disassemble(const struct gen_device_info *devinfo,
const void *assembly, int start, FILE *out);
#ifdef __cplusplus

View File

@ -24,14 +24,14 @@
#include "drm-uapi/i915_drm.h"
bool
gen_gem_supports_syncobj_wait(int fd)
intel_gem_supports_syncobj_wait(int fd)
{
int ret;
struct drm_syncobj_create create = {
.flags = 0,
};
ret = gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
ret = intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
if (ret)
return false;
@ -43,12 +43,12 @@ gen_gem_supports_syncobj_wait(int fd)
.timeout_nsec = 0,
.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
};
ret = gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
ret = intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
struct drm_syncobj_destroy destroy = {
.handle = syncobj,
};
gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
/* If it timed out, then we have the ioctl and it supports the
* DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT flag.

View File

@ -31,7 +31,7 @@
#include <sys/ioctl.h>
static inline uint64_t
gen_canonical_address(uint64_t v)
intel_canonical_address(uint64_t v)
{
/* From the Broadwell PRM Vol. 2a, MI_LOAD_REGISTER_MEM::MemoryAddress:
*
@ -49,10 +49,10 @@ gen_canonical_address(uint64_t v)
/**
* This returns a 48-bit address with the high 16 bits zeroed.
*
* It's the opposite of gen_canonicalize_address.
* It's the opposite of intel_canonicalize_address.
*/
static inline uint64_t
gen_48b_address(uint64_t v)
intel_48b_address(uint64_t v)
{
const int shift = 63 - 47;
return (uint64_t)(v << shift) >> shift;
@ -62,7 +62,7 @@ gen_48b_address(uint64_t v)
* Call ioctl, restarting if it is interupted
*/
static inline int
gen_ioctl(int fd, unsigned long request, void *arg)
intel_ioctl(int fd, unsigned long request, void *arg)
{
int ret;
@ -72,6 +72,6 @@ gen_ioctl(int fd, unsigned long request, void *arg)
return ret;
}
bool gen_gem_supports_syncobj_wait(int fd);
bool intel_gem_supports_syncobj_wait(int fd);
#endif /* INTEL_GEM_H */

View File

@ -24,7 +24,7 @@
#define INTEL_GUARDBAND_H
static inline void
gen_calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
intel_calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
float m00, float m11, float m30, float m31,
float *xmin, float *xmax,
float *ymin, float *ymax)

View File

@ -29,13 +29,13 @@
#include "intel_l3_config.h"
struct gen_l3_list {
const struct gen_l3_config *configs;
struct intel_l3_list {
const struct intel_l3_config *configs;
int length;
};
#define DECLARE_L3_LIST(hw) \
struct gen_l3_list hw##_l3_list = \
struct intel_l3_list hw##_l3_list = \
{ .configs = hw##_l3_configs, .length = ARRAY_SIZE(hw##_l3_configs) }
/**
@ -43,7 +43,7 @@ struct gen_l3_list {
* default by gen7_restore_default_l3_config(), otherwise the ordering is
* unimportant.
*/
static const struct gen_l3_config ivb_l3_configs[] = {
static const struct intel_l3_config ivb_l3_configs[] = {
/* SLM URB ALL DC RO IS C T */
{{ 0, 32, 0, 0, 32, 0, 0, 0 }},
{{ 0, 32, 0, 16, 16, 0, 0, 0 }},
@ -65,7 +65,7 @@ DECLARE_L3_LIST(ivb);
/**
* VLV validated L3 configurations. \sa ivb_l3_configs.
*/
static const struct gen_l3_config vlv_l3_configs[] = {
static const struct intel_l3_config vlv_l3_configs[] = {
/* SLM URB ALL DC RO IS C T */
{{ 0, 64, 0, 0, 32, 0, 0, 0 }},
{{ 0, 80, 0, 0, 16, 0, 0, 0 }},
@ -81,7 +81,7 @@ DECLARE_L3_LIST(vlv);
/**
* BDW validated L3 configurations. \sa ivb_l3_configs.
*/
static const struct gen_l3_config bdw_l3_configs[] = {
static const struct intel_l3_config bdw_l3_configs[] = {
/* SLM URB ALL DC RO IS C T */
{{ 0, 48, 48, 0, 0, 0, 0, 0 }},
{{ 0, 48, 0, 16, 32, 0, 0, 0 }},
@ -97,7 +97,7 @@ DECLARE_L3_LIST(bdw);
/**
* CHV/SKL validated L3 configurations. \sa ivb_l3_configs.
*/
static const struct gen_l3_config chv_l3_configs[] = {
static const struct intel_l3_config chv_l3_configs[] = {
/* SLM URB ALL DC RO IS C T */
{{ 0, 48, 48, 0, 0, 0, 0, 0 }},
{{ 0, 48, 0, 16, 32, 0, 0, 0 }},
@ -113,7 +113,7 @@ DECLARE_L3_LIST(chv);
/**
* BXT 2x6 validated L3 configurations. \sa ivb_l3_configs.
*/
static const struct gen_l3_config bxt_2x6_l3_configs[] = {
static const struct intel_l3_config bxt_2x6_l3_configs[] = {
/* SLM URB ALL DC RO IS C T */
{{ 0, 32, 48, 0, 0, 0, 0, 0 }},
{{ 0, 32, 0, 8, 40, 0, 0, 0 }},
@ -131,7 +131,7 @@ DECLARE_L3_LIST(bxt_2x6);
* suggested by h/w specification aren't added here because they
* do under allocation of L3 cache with below partitioning.
*/
static const struct gen_l3_config icl_l3_configs[] = {
static const struct intel_l3_config icl_l3_configs[] = {
/* SLM URB ALL DC RO IS C T */
/*{{ 0, 16, 80, 0, 0, 0, 0, 0 }},*/
{{ 0, 32, 64, 0, 0, 0, 0, 0 }},
@ -141,7 +141,7 @@ DECLARE_L3_LIST(icl);
/**
* TGL validated L3 configurations. \sa tgl_l3_configs.
*/
static const struct gen_l3_config tgl_l3_configs[] = {
static const struct intel_l3_config tgl_l3_configs[] = {
/* SLM URB ALL DC RO IS C T */
{{ 0, 32, 88, 0, 0, 0, 0, 0 }},
{{ 0, 16, 104, 0, 0, 0, 0, 0 }},
@ -151,7 +151,7 @@ DECLARE_L3_LIST(tgl);
/**
* DG1 validated L3 configurations. \sa dg1_l3_configs.
*/
static const struct gen_l3_config dg1_l3_configs[] = {
static const struct intel_l3_config dg1_l3_configs[] = {
/* No configurations. L3FullWayAllocationEnable is always set. */
};
DECLARE_L3_LIST(dg1);
@ -160,7 +160,7 @@ DECLARE_L3_LIST(dg1);
* Return a zero-terminated array of validated L3 configurations for the
* specified device.
*/
static const struct gen_l3_list *
static const struct intel_l3_list *
get_l3_list(const struct gen_device_info *devinfo)
{
switch (devinfo->gen) {
@ -192,8 +192,8 @@ get_l3_list(const struct gen_device_info *devinfo)
/**
* L1-normalize a vector of L3 partition weights.
*/
static struct gen_l3_weights
norm_l3_weights(struct gen_l3_weights w)
static struct intel_l3_weights
norm_l3_weights(struct intel_l3_weights w)
{
float sz = 0;
@ -209,18 +209,18 @@ norm_l3_weights(struct gen_l3_weights w)
/**
* Get the relative partition weights of the specified L3 configuration.
*/
struct gen_l3_weights
gen_get_l3_config_weights(const struct gen_l3_config *cfg)
struct intel_l3_weights
intel_get_l3_config_weights(const struct intel_l3_config *cfg)
{
if (cfg) {
struct gen_l3_weights w;
struct intel_l3_weights w;
for (unsigned i = 0; i < GEN_NUM_L3P; i++)
w.w[i] = cfg->n[i];
return norm_l3_weights(w);
} else {
const struct gen_l3_weights w = { { 0 } };
const struct intel_l3_weights w = { { 0 } };
return w;
}
}
@ -234,7 +234,7 @@ gen_get_l3_config_weights(const struct gen_l3_config *cfg)
* or URB but \p w1 doesn't provide it.
*/
float
gen_diff_l3_weights(struct gen_l3_weights w0, struct gen_l3_weights w1)
intel_diff_l3_weights(struct intel_l3_weights w0, struct intel_l3_weights w1)
{
if ((w0.w[GEN_L3P_SLM] && !w1.w[GEN_L3P_SLM]) ||
(w0.w[GEN_L3P_DC] && !w1.w[GEN_L3P_DC] && !w1.w[GEN_L3P_ALL]) ||
@ -256,11 +256,11 @@ gen_diff_l3_weights(struct gen_l3_weights w0, struct gen_l3_weights w1)
* on whether SLM and DC are required. In the non-SLM non-DC case the result
* is intended to approximately resemble the hardware defaults.
*/
struct gen_l3_weights
gen_get_default_l3_weights(const struct gen_device_info *devinfo,
struct intel_l3_weights
intel_get_default_l3_weights(const struct gen_device_info *devinfo,
bool needs_dc, bool needs_slm)
{
struct gen_l3_weights w = {{ 0 }};
struct intel_l3_weights w = {{ 0 }};
w.w[GEN_L3P_SLM] = devinfo->gen < 11 && needs_slm;
w.w[GEN_L3P_URB] = 1.0;
@ -278,18 +278,18 @@ gen_get_default_l3_weights(const struct gen_device_info *devinfo,
/**
* Get the default L3 configuration
*/
const struct gen_l3_config *
gen_get_default_l3_config(const struct gen_device_info *devinfo)
const struct intel_l3_config *
intel_get_default_l3_config(const struct gen_device_info *devinfo)
{
/* For efficiency assume that the first entry of the array matches the
* default configuration.
*/
const struct gen_l3_list *const list = get_l3_list(devinfo);
const struct intel_l3_list *const list = get_l3_list(devinfo);
assert(list->length > 0 || devinfo->gen >= 12);
if (list->length > 0) {
const struct gen_l3_config *const cfg = &list->configs[0];
assert(cfg == gen_get_l3_config(devinfo,
gen_get_default_l3_weights(devinfo, false, false)));
const struct intel_l3_config *const cfg = &list->configs[0];
assert(cfg == intel_get_l3_config(devinfo,
intel_get_default_l3_weights(devinfo, false, false)));
return cfg;
} else {
return NULL;
@ -300,18 +300,18 @@ gen_get_default_l3_config(const struct gen_device_info *devinfo)
* Return the closest validated L3 configuration for the specified device and
* weight vector.
*/
const struct gen_l3_config *
gen_get_l3_config(const struct gen_device_info *devinfo,
struct gen_l3_weights w0)
const struct intel_l3_config *
intel_get_l3_config(const struct gen_device_info *devinfo,
struct intel_l3_weights w0)
{
const struct gen_l3_list *const list = get_l3_list(devinfo);
const struct gen_l3_config *const cfgs = list->configs;
const struct gen_l3_config *cfg_best = NULL;
const struct intel_l3_list *const list = get_l3_list(devinfo);
const struct intel_l3_config *const cfgs = list->configs;
const struct intel_l3_config *cfg_best = NULL;
float dw_best = HUGE_VALF;
for (int i = 0; i < list->length; i++) {
const struct gen_l3_config *cfg = &cfgs[i];
const float dw = gen_diff_l3_weights(w0, gen_get_l3_config_weights(cfg));
const struct intel_l3_config *cfg = &cfgs[i];
const float dw = intel_diff_l3_weights(w0, intel_get_l3_config_weights(cfg));
if (dw < dw_best) {
cfg_best = cfg;
@ -348,8 +348,8 @@ get_urb_size_scale(const struct gen_device_info *devinfo)
}
unsigned
gen_get_l3_config_urb_size(const struct gen_device_info *devinfo,
const struct gen_l3_config *cfg)
intel_get_l3_config_urb_size(const struct gen_device_info *devinfo,
const struct intel_l3_config *cfg)
{
/* We don't have to program the URB size in DG1, it's a fixed value. */
if (devinfo->is_dg1)
@ -372,7 +372,7 @@ gen_get_l3_config_urb_size(const struct gen_device_info *devinfo,
* Print out the specified L3 configuration.
*/
void
gen_dump_l3_config(const struct gen_l3_config *cfg, FILE *fp)
intel_dump_l3_config(const struct intel_l3_config *cfg, FILE *fp)
{
fprintf(stderr, "SLM=%d URB=%d ALL=%d DC=%d RO=%d IS=%d C=%d T=%d\n",
cfg->n[GEN_L3P_SLM], cfg->n[GEN_L3P_URB], cfg->n[GEN_L3P_ALL],

View File

@ -31,7 +31,7 @@
/**
* Chunk of L3 cache reserved for some specific purpose.
*/
enum gen_l3_partition {
enum intel_l3_partition {
/** Shared local memory. */
GEN_L3P_SLM = 0,
/** Unified return buffer. */
@ -56,7 +56,7 @@ enum gen_l3_partition {
* L3 configuration represented as the number of ways allocated for each
* partition. \sa get_l3_way_size().
*/
struct gen_l3_config {
struct intel_l3_config {
unsigned n[GEN_NUM_L3P];
};
@ -66,44 +66,44 @@ struct gen_l3_config {
* between weights will have an influence on the selection of the closest L3
* configuration.
*/
struct gen_l3_weights {
struct intel_l3_weights {
float w[GEN_NUM_L3P];
};
float gen_diff_l3_weights(struct gen_l3_weights w0, struct gen_l3_weights w1);
float intel_diff_l3_weights(struct intel_l3_weights w0, struct intel_l3_weights w1);
struct gen_l3_weights
gen_get_default_l3_weights(const struct gen_device_info *devinfo,
struct intel_l3_weights
intel_get_default_l3_weights(const struct gen_device_info *devinfo,
bool needs_dc, bool needs_slm);
struct gen_l3_weights
gen_get_l3_config_weights(const struct gen_l3_config *cfg);
struct intel_l3_weights
intel_get_l3_config_weights(const struct intel_l3_config *cfg);
const struct gen_l3_config *
gen_get_default_l3_config(const struct gen_device_info *devinfo);
const struct intel_l3_config *
intel_get_default_l3_config(const struct gen_device_info *devinfo);
const struct gen_l3_config *
gen_get_l3_config(const struct gen_device_info *devinfo,
struct gen_l3_weights w0);
const struct intel_l3_config *
intel_get_l3_config(const struct gen_device_info *devinfo,
struct intel_l3_weights w0);
unsigned
gen_get_l3_config_urb_size(const struct gen_device_info *devinfo,
const struct gen_l3_config *cfg);
intel_get_l3_config_urb_size(const struct gen_device_info *devinfo,
const struct intel_l3_config *cfg);
void gen_dump_l3_config(const struct gen_l3_config *cfg, FILE *fp);
void intel_dump_l3_config(const struct intel_l3_config *cfg, FILE *fp);
enum gen_urb_deref_block_size {
enum intel_urb_deref_block_size {
GEN_URB_DEREF_BLOCK_SIZE_32 = 0,
GEN_URB_DEREF_BLOCK_SIZE_PER_POLY = 1,
GEN_URB_DEREF_BLOCK_SIZE_8 = 2,
};
void gen_get_urb_config(const struct gen_device_info *devinfo,
const struct gen_l3_config *l3_cfg,
void intel_get_urb_config(const struct gen_device_info *devinfo,
const struct intel_l3_config *l3_cfg,
bool tess_present, bool gs_present,
const unsigned entry_size[4],
unsigned entries[4], unsigned start[4],
enum gen_urb_deref_block_size *deref_block_size,
enum intel_urb_deref_block_size *deref_block_size,
bool *constrained);
#endif /* INTEL_L3_CONFIG_H */

View File

@ -26,7 +26,7 @@
/**
* 1x MSAA has a single sample at the center: (0.5, 0.5) -> (0x8, 0x8).
*/
const struct gen_sample_position gen_sample_positions_1x[] = {
const struct intel_sample_position intel_sample_positions_1x[] = {
{ 0.5, 0.5, },
};
@ -36,7 +36,7 @@ const struct gen_sample_position gen_sample_positions_1x[] = {
* 4 0
* c 1
*/
const struct gen_sample_position gen_sample_positions_2x[] = {
const struct intel_sample_position intel_sample_positions_2x[] = {
{ 0.75, 0.75 },
{ 0.25, 0.25 },
};
@ -49,7 +49,7 @@ const struct gen_sample_position gen_sample_positions_2x[] = {
* a 2
* e 3
*/
const struct gen_sample_position gen_sample_positions_4x[] = {
const struct intel_sample_position intel_sample_positions_4x[] = {
{ 0.375, 0.125 },
{ 0.875, 0.375 },
{ 0.125, 0.625 },
@ -78,7 +78,7 @@ const struct gen_sample_position gen_sample_positions_4x[] = {
* d 4
* f 6
*/
const struct gen_sample_position gen_sample_positions_8x[] = {
const struct intel_sample_position intel_sample_positions_8x[] = {
{ 0.5625, 0.3125 },
{ 0.4375, 0.6875 },
{ 0.8125, 0.5625 },
@ -110,7 +110,7 @@ const struct gen_sample_position gen_sample_positions_8x[] = {
* e 8
* f 14
*/
const struct gen_sample_position gen_sample_positions_16x[] = {
const struct intel_sample_position intel_sample_positions_16x[] = {
{ 0.5625, 0.5625 },
{ 0.4375, 0.3125 },
{ 0.3125, 0.6250 },

View File

@ -30,26 +30,26 @@
* Vulkan. These correspond to the Vulkan "standard sample locations".
*/
struct gen_sample_position {
struct intel_sample_position {
float x;
float y;
};
extern const struct gen_sample_position gen_sample_positions_1x[];
extern const struct gen_sample_position gen_sample_positions_2x[];
extern const struct gen_sample_position gen_sample_positions_4x[];
extern const struct gen_sample_position gen_sample_positions_8x[];
extern const struct gen_sample_position gen_sample_positions_16x[];
extern const struct intel_sample_position intel_sample_positions_1x[];
extern const struct intel_sample_position intel_sample_positions_2x[];
extern const struct intel_sample_position intel_sample_positions_4x[];
extern const struct intel_sample_position intel_sample_positions_8x[];
extern const struct intel_sample_position intel_sample_positions_16x[];
static inline const struct gen_sample_position *
gen_get_sample_positions(int samples)
static inline const struct intel_sample_position *
intel_get_sample_positions(int samples)
{
switch (samples) {
case 1: return gen_sample_positions_1x;
case 2: return gen_sample_positions_2x;
case 4: return gen_sample_positions_4x;
case 8: return gen_sample_positions_8x;
case 16: return gen_sample_positions_16x;
case 1: return intel_sample_positions_1x;
case 2: return intel_sample_positions_2x;
case 4: return intel_sample_positions_4x;
case 8: return intel_sample_positions_8x;
case 16: return intel_sample_positions_16x;
default: unreachable("Invalid sample count");
}
}
@ -112,18 +112,18 @@ prefix##sample_idx##YOffset = arr[sample_idx].y;
GEN_SAMPLE_POS_ELEM(prefix, arr, 15);
#define GEN_SAMPLE_POS_1X(prefix) \
GEN_SAMPLE_POS_1X_ARRAY(prefix, gen_sample_positions_1x)
GEN_SAMPLE_POS_1X_ARRAY(prefix, intel_sample_positions_1x)
#define GEN_SAMPLE_POS_2X(prefix) \
GEN_SAMPLE_POS_2X_ARRAY(prefix, gen_sample_positions_2x)
GEN_SAMPLE_POS_2X_ARRAY(prefix, intel_sample_positions_2x)
#define GEN_SAMPLE_POS_4X(prefix) \
GEN_SAMPLE_POS_4X_ARRAY(prefix, gen_sample_positions_4x)
GEN_SAMPLE_POS_4X_ARRAY(prefix, intel_sample_positions_4x)
#define GEN_SAMPLE_POS_8X(prefix) \
GEN_SAMPLE_POS_8X_ARRAY(prefix, gen_sample_positions_8x)
GEN_SAMPLE_POS_8X_ARRAY(prefix, intel_sample_positions_8x)
#define GEN_SAMPLE_POS_16X(prefix) \
GEN_SAMPLE_POS_16X_ARRAY(prefix, gen_sample_positions_16x)
GEN_SAMPLE_POS_16X_ARRAY(prefix, intel_sample_positions_16x)
#endif /* INTEL_SAMPLE_POSITIONS_H */

View File

@ -60,15 +60,15 @@
* \param[out] constrained - true if we wanted more space than we had
*/
void
gen_get_urb_config(const struct gen_device_info *devinfo,
const struct gen_l3_config *l3_cfg,
intel_get_urb_config(const struct gen_device_info *devinfo,
const struct intel_l3_config *l3_cfg,
bool tess_present, bool gs_present,
const unsigned entry_size[4],
unsigned entries[4], unsigned start[4],
enum gen_urb_deref_block_size *deref_block_size,
enum intel_urb_deref_block_size *deref_block_size,
bool *constrained)
{
unsigned urb_size_kB = gen_get_l3_config_urb_size(devinfo, l3_cfg);
unsigned urb_size_kB = intel_get_l3_config_urb_size(devinfo, l3_cfg);
/* RCU_MODE register for Gen12+ in BSpec says:
*

View File

@ -26,7 +26,7 @@
#include "util/mesa-sha1.h"
void
gen_uuid_compute_device_id(uint8_t *uuid,
intel_uuid_compute_device_id(uint8_t *uuid,
const struct isl_device *isldev,
size_t size)
{
@ -52,7 +52,7 @@ gen_uuid_compute_device_id(uint8_t *uuid,
}
void
gen_uuid_compute_driver_id(uint8_t *uuid,
intel_uuid_compute_driver_id(uint8_t *uuid,
const struct gen_device_info *devinfo,
size_t size)
{

View File

@ -32,11 +32,11 @@
extern "C" {
#endif
void gen_uuid_compute_device_id(uint8_t *uuid,
void intel_uuid_compute_device_id(uint8_t *uuid,
const struct isl_device *isldev,
size_t size);
void gen_uuid_compute_driver_id(uint8_t *uuid,
void intel_uuid_compute_driver_id(uint8_t *uuid,
const struct gen_device_info *devinfo,
size_t size);

View File

@ -48,7 +48,7 @@ _test_combine_address(void *data, void *location,
#include "gentest_pack.h"
static void
test_struct(struct gen_spec *spec) {
test_struct(struct intel_spec *spec) {
/* Fill struct fields and <group> tag */
struct GEN9_TEST_STRUCT test1 = {
.number1 = 5,
@ -64,20 +64,20 @@ test_struct(struct gen_spec *spec) {
GEN9_TEST_STRUCT_pack(NULL, dw, &test1);
/* Now decode the packed struct, and make sure it matches the original */
struct gen_group *group;
group = gen_spec_find_struct(spec, "TEST_STRUCT");
struct intel_group *group;
group = intel_spec_find_struct(spec, "TEST_STRUCT");
assert(group != NULL);
if (!quiet) {
printf("\nTEST_STRUCT:\n");
gen_print_group(stdout, group, 0, dw, 0, false);
intel_print_group(stdout, group, 0, dw, 0, false);
}
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, group, dw, 0, false);
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, group, dw, 0, false);
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
int idx;
if (strcmp(iter.name, "number1") == 0) {
uint16_t number = iter.raw_value;
@ -93,7 +93,7 @@ test_struct(struct gen_spec *spec) {
}
static void
test_two_levels(struct gen_spec *spec) {
test_two_levels(struct intel_spec *spec) {
struct GEN9_STRUCT_TWO_LEVELS test;
for (int i = 0; i < 4; i++) {
@ -105,20 +105,20 @@ test_two_levels(struct gen_spec *spec) {
uint32_t dw[GEN9_STRUCT_TWO_LEVELS_length];
GEN9_STRUCT_TWO_LEVELS_pack(NULL, dw, &test);
struct gen_group *group;
group = gen_spec_find_struct(spec, "STRUCT_TWO_LEVELS");
struct intel_group *group;
group = intel_spec_find_struct(spec, "STRUCT_TWO_LEVELS");
assert(group != NULL);
if (!quiet) {
printf("\nSTRUCT_TWO_LEVELS\n");
gen_print_group(stdout, group, 0, dw, 0, false);
intel_print_group(stdout, group, 0, dw, 0, false);
}
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, group, dw, 0, false);
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, group, dw, 0, false);
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
int i, j;
assert(sscanf(iter.name, "byte[%d][%d]", &i, &j) == 2);
@ -129,7 +129,7 @@ test_two_levels(struct gen_spec *spec) {
int main(int argc, char **argv)
{
struct gen_spec *spec = gen_spec_load_filename(GENXML_PATH);
struct intel_spec *spec = intel_spec_load_filename(GENXML_PATH);
if (argc > 1 && strcmp(argv[1], "-quiet") == 0)
quiet = true;
@ -137,7 +137,7 @@ int main(int argc, char **argv)
test_struct(spec);
test_two_levels(spec);
gen_spec_destroy(spec);
intel_spec_destroy(spec);
return 0;
}

View File

@ -1203,7 +1203,7 @@ getparam(int fd, uint32_t param, int *value)
.value = &tmp,
};
int ret = gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
int ret = intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret != 0)
return false;
@ -1323,7 +1323,7 @@ query_topology(struct gen_device_info *devinfo, int fd)
.items_ptr = (uintptr_t) &item,
};
if (gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &query))
if (intel_ioctl(fd, DRM_IOCTL_I915_QUERY, &query))
return false;
if (item.length < 0)
@ -1333,7 +1333,7 @@ query_topology(struct gen_device_info *devinfo, int fd)
(struct drm_i915_query_topology_info *) calloc(1, item.length);
item.data_ptr = (uintptr_t) topo_info;
if (gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &query) ||
if (intel_ioctl(fd, DRM_IOCTL_I915_QUERY, &query) ||
item.length <= 0)
return false;
@ -1350,7 +1350,7 @@ gen_get_aperture_size(int fd, uint64_t *size)
{
struct drm_i915_gem_get_aperture aperture = { 0 };
int ret = gen_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
int ret = intel_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
if (ret == 0 && size)
*size = aperture.aper_size;
@ -1366,7 +1366,7 @@ gen_has_get_tiling(int fd)
.size = 4096,
};
if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
unreachable("Failed to create GEM BO");
return false;
}
@ -1374,12 +1374,12 @@ gen_has_get_tiling(int fd)
struct drm_i915_gem_get_tiling get_tiling = {
.handle = gem_create.handle,
};
ret = gen_ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &get_tiling);
ret = intel_ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &get_tiling);
struct drm_gem_close close = {
.handle = gem_create.handle,
};
gen_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
intel_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
return ret == 0;
}

View File

@ -56,8 +56,8 @@ pack_header = """%(license)s
#define __gen_validate_value(x)
#endif
#ifndef __gen_field_functions
#define __gen_field_functions
#ifndef __intel_field_functions
#define __intel_field_functions
#ifdef NDEBUG
#define NDEBUG_UNUSED __attribute__((unused))
@ -65,7 +65,7 @@ pack_header = """%(license)s
#define NDEBUG_UNUSED
#endif
union __gen_value {
union __intel_value {
float f;
uint32_t dw;
};
@ -145,7 +145,7 @@ static inline __attribute__((always_inline)) uint32_t
__gen_float(float v)
{
__gen_validate_value(v);
return ((union __gen_value) { .f = (v) }).dw;
return ((union __intel_value) { .f = (v) }).dw;
}
static inline __attribute__((always_inline)) uint64_t

View File

@ -250,7 +250,7 @@ kernel_has_dynamic_config_support(struct gen_perf_config *perf, int fd)
{
uint64_t invalid_config_id = UINT64_MAX;
return gen_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
return intel_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
&invalid_config_id) < 0 && errno == ENOENT;
}
@ -262,7 +262,7 @@ i915_query_items(struct gen_perf_config *perf, int fd,
.num_items = n_items,
.items_ptr = to_user_pointer(items),
};
return gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
return intel_ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
}
static bool
@ -336,7 +336,7 @@ i915_add_config(struct gen_perf_config *perf, int fd,
i915_config.n_flex_regs = config->n_flex_regs;
i915_config.flex_regs_ptr = to_const_user_pointer(config->flex_regs);
int ret = gen_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
int ret = intel_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &i915_config);
return ret > 0 ? ret : 0;
}
@ -598,7 +598,7 @@ i915_perf_version(int drm_fd)
.value = &tmp,
};
int ret = gen_ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
int ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
/* Return 0 if this getparam is not supported, the first version supported
* is 1.
@ -615,7 +615,7 @@ i915_get_sseu(int drm_fd, struct drm_i915_gem_context_param_sseu *sseu)
.value = to_user_pointer(sseu)
};
gen_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
}
static inline int

View File

@ -319,7 +319,7 @@ static bool
inc_n_users(struct gen_perf_context *perf_ctx)
{
if (perf_ctx->n_oa_users == 0 &&
gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
intel_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
{
return false;
}
@ -338,7 +338,7 @@ dec_n_users(struct gen_perf_context *perf_ctx)
*/
--perf_ctx->n_oa_users;
if (perf_ctx->n_oa_users == 0 &&
gen_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
intel_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
{
DBG("WARNING: Error disabling gen perf stream: %m\n");
}
@ -403,7 +403,7 @@ gen_perf_open(struct gen_perf_context *perf_ctx,
.num_properties = p / 2,
.properties_ptr = (uintptr_t) properties,
};
int fd = gen_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
int fd = intel_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
if (fd == -1) {
DBG("Error opening gen perf OA stream: %m\n");
return false;

View File

@ -32,7 +32,7 @@
struct bo_map {
struct list_head link;
struct gen_batch_decode_bo bo;
struct intel_batch_decode_bo bo;
bool unmap_after_use;
bool ppgtt;
};
@ -52,7 +52,7 @@ struct phys_mem {
};
static void
add_gtt_bo_map(struct aub_mem *mem, struct gen_batch_decode_bo bo, bool ppgtt, bool unmap_after_use)
add_gtt_bo_map(struct aub_mem *mem, struct intel_batch_decode_bo bo, bool ppgtt, bool unmap_after_use)
{
struct bo_map *m = calloc(1, sizeof(*m));
@ -196,7 +196,7 @@ aub_mem_local_write(void *_mem, uint64_t address,
const void *data, uint32_t size)
{
struct aub_mem *mem = _mem;
struct gen_batch_decode_bo bo = {
struct intel_batch_decode_bo bo = {
.map = data,
.addr = address,
.size = size,
@ -257,11 +257,11 @@ aub_mem_ggtt_write(void *_mem, uint64_t virt_address,
}
}
struct gen_batch_decode_bo
struct intel_batch_decode_bo
aub_mem_get_ggtt_bo(void *_mem, uint64_t address)
{
struct aub_mem *mem = _mem;
struct gen_batch_decode_bo bo = {0};
struct intel_batch_decode_bo bo = {0};
list_for_each_entry(struct bo_map, i, &mem->maps, link)
if (!i->ppgtt && i->bo.addr <= address && i->bo.addr + i->bo.size > address)
@ -333,11 +333,11 @@ ppgtt_mapped(struct aub_mem *mem, uint64_t pml4, uint64_t address)
return ppgtt_walk(mem, pml4, address) != NULL;
}
struct gen_batch_decode_bo
struct intel_batch_decode_bo
aub_mem_get_ppgtt_bo(void *_mem, uint64_t address)
{
struct aub_mem *mem = _mem;
struct gen_batch_decode_bo bo = {0};
struct intel_batch_decode_bo bo = {0};
list_for_each_entry(struct bo_map, i, &mem->maps, link)
if (i->ppgtt && i->bo.addr <= address && i->bo.addr + i->bo.size > address)
@ -407,29 +407,29 @@ aub_mem_fini(struct aub_mem *mem)
mem->mem_fd = -1;
}
struct gen_batch_decode_bo
struct intel_batch_decode_bo
aub_mem_get_phys_addr_data(struct aub_mem *mem, uint64_t phys_addr)
{
struct phys_mem *page = search_phys_mem(mem, phys_addr);
return page ?
(struct gen_batch_decode_bo) { .map = page->data, .addr = page->phys_addr, .size = 4096 } :
(struct gen_batch_decode_bo) {};
(struct intel_batch_decode_bo) { .map = page->data, .addr = page->phys_addr, .size = 4096 } :
(struct intel_batch_decode_bo) {};
}
struct gen_batch_decode_bo
struct intel_batch_decode_bo
aub_mem_get_ppgtt_addr_data(struct aub_mem *mem, uint64_t virt_addr)
{
struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
return page ?
(struct gen_batch_decode_bo) { .map = page->data, .addr = virt_addr & ~((1ULL << 12) - 1), .size = 4096 } :
(struct gen_batch_decode_bo) {};
(struct intel_batch_decode_bo) { .map = page->data, .addr = virt_addr & ~((1ULL << 12) - 1), .size = 4096 } :
(struct intel_batch_decode_bo) {};
}
struct gen_batch_decode_bo
struct intel_batch_decode_bo
aub_mem_get_ppgtt_addr_aub_data(struct aub_mem *mem, uint64_t virt_addr)
{
struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
return page ?
(struct gen_batch_decode_bo) { .map = page->aub_data, .addr = virt_addr & ~((1ULL << 12) - 1), .size = 4096 } :
(struct gen_batch_decode_bo) {};
(struct intel_batch_decode_bo) { .map = page->aub_data, .addr = virt_addr & ~((1ULL << 12) - 1), .size = 4096 } :
(struct intel_batch_decode_bo) {};
}

View File

@ -62,13 +62,13 @@ void aub_mem_ggtt_entry_write(void *mem, uint64_t virt_address,
void aub_mem_local_write(void *mem, uint64_t virt_address,
const void *data, uint32_t size);
struct gen_batch_decode_bo aub_mem_get_ggtt_bo(void *mem, uint64_t address);
struct gen_batch_decode_bo aub_mem_get_ppgtt_bo(void *mem, uint64_t address);
struct intel_batch_decode_bo aub_mem_get_ggtt_bo(void *mem, uint64_t address);
struct intel_batch_decode_bo aub_mem_get_ppgtt_bo(void *mem, uint64_t address);
struct gen_batch_decode_bo aub_mem_get_phys_addr_data(struct aub_mem *mem, uint64_t phys_addr);
struct gen_batch_decode_bo aub_mem_get_ppgtt_addr_data(struct aub_mem *mem, uint64_t virt_addr);
struct intel_batch_decode_bo aub_mem_get_phys_addr_data(struct aub_mem *mem, uint64_t phys_addr);
struct intel_batch_decode_bo aub_mem_get_ppgtt_addr_data(struct aub_mem *mem, uint64_t virt_addr);
struct gen_batch_decode_bo aub_mem_get_ppgtt_addr_aub_data(struct aub_mem *mem, uint64_t virt_addr);
struct intel_batch_decode_bo aub_mem_get_ppgtt_addr_aub_data(struct aub_mem *mem, uint64_t virt_addr);
#ifdef __cplusplus

View File

@ -139,7 +139,7 @@ handle_trace_block(struct aub_read *read, const uint32_t *p)
int header_length = p[0] & 0xffff;
enum drm_i915_gem_engine_class engine = I915_ENGINE_CLASS_RENDER;
const void *data = p + header_length + 2;
uint64_t address = gen_48b_address((read->devinfo.gen >= 8 ? ((uint64_t) p[5] << 32) : 0) |
uint64_t address = intel_48b_address((read->devinfo.gen >= 8 ? ((uint64_t) p[5] << 32) : 0) |
((uint64_t) p[3]));
uint32_t size = p[4];
@ -262,7 +262,7 @@ static void
handle_memtrace_mem_write(struct aub_read *read, const uint32_t *p)
{
const void *data = p + 5;
uint64_t addr = gen_48b_address(*(uint64_t*)&p[1]);
uint64_t addr = intel_48b_address(*(uint64_t*)&p[1]);
uint32_t size = p[4];
uint32_t address_space = p[3] >> 28;

View File

@ -118,7 +118,7 @@ static inline void
aub_write_reloc(const struct gen_device_info *devinfo, void *p, uint64_t v)
{
if (devinfo->gen >= 8) {
*(uint64_t *)p = gen_canonical_address(v);
*(uint64_t *)p = intel_canonical_address(v);
} else {
*(uint32_t *)p = v;
}

View File

@ -60,7 +60,7 @@ static enum { COLOR_AUTO, COLOR_ALWAYS, COLOR_NEVER } option_color;
uint16_t pci_id = 0;
char *input_file = NULL, *xml_path = NULL;
struct gen_device_info devinfo;
struct gen_batch_decode_ctx batch_ctx;
struct intel_batch_decode_ctx batch_ctx;
struct aub_mem mem;
FILE *outfile;
@ -83,7 +83,7 @@ aubinator_init(void *user_data, int aub_pci_id, const char *app_name)
exit(EXIT_FAILURE);
}
enum gen_batch_decode_flags batch_flags = 0;
enum intel_batch_decode_flags batch_flags = 0;
if (option_color == COLOR_ALWAYS)
batch_flags |= GEN_BATCH_DECODE_IN_COLOR;
if (option_full_decode)
@ -92,17 +92,17 @@ aubinator_init(void *user_data, int aub_pci_id, const char *app_name)
batch_flags |= GEN_BATCH_DECODE_OFFSETS;
batch_flags |= GEN_BATCH_DECODE_FLOATS;
gen_batch_decode_ctx_init(&batch_ctx, &devinfo, outfile, batch_flags,
intel_batch_decode_ctx_init(&batch_ctx, &devinfo, outfile, batch_flags,
xml_path, NULL, NULL, NULL);
/* Check for valid spec instance, if wrong xml_path is passed then spec
* instance is not initialized properly
*/
if (!batch_ctx.spec) {
fprintf(stderr, "Failed to initialize gen_batch_decode_ctx "
fprintf(stderr, "Failed to initialize intel_batch_decode_ctx "
"spec instance\n");
free(xml_path);
gen_batch_decode_ctx_finish(&batch_ctx);
intel_batch_decode_ctx_finish(&batch_ctx);
exit(EXIT_FAILURE);
}
@ -129,7 +129,7 @@ aubinator_init(void *user_data, int aub_pci_id, const char *app_name)
fprintf(outfile, "\n");
}
static struct gen_batch_decode_bo
static struct intel_batch_decode_bo
get_bo(void *user_data, bool ppgtt, uint64_t addr)
{
if (ppgtt)
@ -143,7 +143,7 @@ handle_execlist_write(void *user_data, enum drm_i915_gem_engine_class engine, ui
{
const uint32_t pphwsp_size = 4096;
uint32_t pphwsp_addr = context_descriptor & 0xfffff000;
struct gen_batch_decode_bo pphwsp_bo = aub_mem_get_ggtt_bo(&mem, pphwsp_addr);
struct intel_batch_decode_bo pphwsp_bo = aub_mem_get_ggtt_bo(&mem, pphwsp_addr);
uint32_t *context = (uint32_t *)((uint8_t *)pphwsp_bo.map +
(pphwsp_addr - pphwsp_bo.addr) +
pphwsp_size);
@ -156,7 +156,7 @@ handle_execlist_write(void *user_data, enum drm_i915_gem_engine_class engine, ui
mem.pml4 = (uint64_t)context[49] << 32 | context[51];
batch_ctx.user_data = &mem;
struct gen_batch_decode_bo ring_bo = aub_mem_get_ggtt_bo(&mem,
struct intel_batch_decode_bo ring_bo = aub_mem_get_ggtt_bo(&mem,
ring_buffer_start);
assert(ring_bo.size > 0);
void *commands = (uint8_t *)ring_bo.map + (ring_buffer_start - ring_bo.addr) + ring_buffer_head;
@ -164,13 +164,13 @@ handle_execlist_write(void *user_data, enum drm_i915_gem_engine_class engine, ui
batch_ctx.get_bo = get_bo;
batch_ctx.engine = engine;
gen_print_batch(&batch_ctx, commands,
intel_print_batch(&batch_ctx, commands,
MIN2(ring_buffer_tail - ring_buffer_head, ring_buffer_length),
ring_bo.addr + ring_buffer_head, true);
aub_mem_clear_bo_maps(&mem);
}
static struct gen_batch_decode_bo
static struct intel_batch_decode_bo
get_legacy_bo(void *user_data, bool ppgtt, uint64_t addr)
{
return aub_mem_get_ggtt_bo(user_data, addr);
@ -184,7 +184,7 @@ handle_ring_write(void *user_data, enum drm_i915_gem_engine_class engine,
batch_ctx.get_bo = get_legacy_bo;
batch_ctx.engine = engine;
gen_print_batch(&batch_ctx, data, data_len, 0, false);
intel_print_batch(&batch_ctx, data, data_len, 0, false);
aub_mem_clear_bo_maps(&mem);
}
@ -399,7 +399,7 @@ int main(int argc, char *argv[])
free(xml_path);
wait(NULL);
gen_batch_decode_ctx_finish(&batch_ctx);
intel_batch_decode_ctx_finish(&batch_ctx);
return EXIT_SUCCESS;
}

View File

@ -65,13 +65,13 @@ print_head(unsigned int reg)
}
static void
print_register(struct gen_spec *spec, const char *name, uint32_t reg)
print_register(struct intel_spec *spec, const char *name, uint32_t reg)
{
struct gen_group *reg_spec =
name ? gen_spec_find_register_by_name(spec, name) : NULL;
struct intel_group *reg_spec =
name ? intel_spec_find_register_by_name(spec, name) : NULL;
if (reg_spec) {
gen_print_group(stdout, reg_spec, 0, &reg, 0,
intel_print_group(stdout, reg_spec, 0, &reg, 0,
option_color == COLOR_ALWAYS);
}
}
@ -393,13 +393,13 @@ static int qsort_hw_context_first(const void *a, const void *b)
return 0;
}
static struct gen_batch_decode_bo
static struct intel_batch_decode_bo
get_gen_batch_bo(void *user_data, bool ppgtt, uint64_t address)
{
for (int s = 0; s < num_sections; s++) {
if (sections[s].gtt_offset <= address &&
address < sections[s].gtt_offset + sections[s].dword_count * 4) {
return (struct gen_batch_decode_bo) {
return (struct intel_batch_decode_bo) {
.addr = sections[s].gtt_offset,
.map = sections[s].data,
.size = sections[s].dword_count * 4,
@ -407,13 +407,13 @@ get_gen_batch_bo(void *user_data, bool ppgtt, uint64_t address)
}
}
return (struct gen_batch_decode_bo) { .map = NULL };
return (struct intel_batch_decode_bo) { .map = NULL };
}
static void
read_data_file(FILE *file)
{
struct gen_spec *spec = NULL;
struct intel_spec *spec = NULL;
long long unsigned fence;
int matched;
char *line = NULL;
@ -515,9 +515,9 @@ read_data_file(FILE *file)
printf("Detected GEN%i chipset\n", devinfo.gen);
if (xml_path == NULL)
spec = gen_spec_load(&devinfo);
spec = intel_spec_load(&devinfo);
else
spec = gen_spec_load_from_path(&devinfo, xml_path);
spec = intel_spec_load_from_path(&devinfo, xml_path);
}
matched = sscanf(line, " CTL: 0x%08x\n", &reg);
@ -652,7 +652,7 @@ read_data_file(FILE *file)
}
}
enum gen_batch_decode_flags batch_flags = 0;
enum intel_batch_decode_flags batch_flags = 0;
if (option_color == COLOR_ALWAYS)
batch_flags |= GEN_BATCH_DECODE_IN_COLOR;
if (option_full_decode)
@ -661,8 +661,8 @@ read_data_file(FILE *file)
batch_flags |= GEN_BATCH_DECODE_OFFSETS;
batch_flags |= GEN_BATCH_DECODE_FLOATS;
struct gen_batch_decode_ctx batch_ctx;
gen_batch_decode_ctx_init(&batch_ctx, &devinfo, stdout, batch_flags,
struct intel_batch_decode_ctx batch_ctx;
intel_batch_decode_ctx_init(&batch_ctx, &devinfo, stdout, batch_flags,
xml_path, get_gen_batch_bo, NULL, NULL);
@ -684,14 +684,14 @@ read_data_file(FILE *file)
batch_ctx.engine = class;
uint8_t *data = (uint8_t *)sections[s].data + sections[s].data_offset;
uint64_t batch_addr = sections[s].gtt_offset + sections[s].data_offset;
gen_print_batch(&batch_ctx, (uint32_t *)data,
intel_print_batch(&batch_ctx, (uint32_t *)data,
sections[s].dword_count * 4, batch_addr,
is_ring_buffer);
batch_ctx.flags = batch_flags;
}
}
gen_batch_decode_ctx_finish(&batch_ctx);
intel_batch_decode_ctx_finish(&batch_ctx);
for (int s = 0; s < num_sections; s++) {
free(sections[s].ring_name);

View File

@ -62,7 +62,7 @@ struct aub_file {
/* Device state */
struct gen_device_info devinfo;
struct gen_spec *spec;
struct intel_spec *spec;
};
static void
@ -129,7 +129,7 @@ handle_info(void *user_data, int pci_id, const char *app_name)
fprintf(stderr, "can't find device information: pci_id=0x%x\n", file->pci_id);
exit(EXIT_FAILURE);
}
file->spec = gen_spec_load(&file->devinfo);
file->spec = intel_spec_load(&file->devinfo);
}
static void
@ -250,10 +250,10 @@ struct edit_window {
uint64_t address;
uint32_t len;
struct gen_batch_decode_bo aub_bo;
struct intel_batch_decode_bo aub_bo;
uint64_t aub_offset;
struct gen_batch_decode_bo gtt_bo;
struct intel_batch_decode_bo gtt_bo;
uint64_t gtt_offset;
struct MemoryEditor editor;
@ -387,12 +387,12 @@ new_shader_window(struct aub_mem *mem, uint64_t address, const char *desc)
window->base.display = display_shader_window;
window->base.destroy = destroy_shader_window;
struct gen_batch_decode_bo shader_bo =
struct intel_batch_decode_bo shader_bo =
aub_mem_get_ppgtt_bo(mem, address);
if (shader_bo.map) {
FILE *f = open_memstream(&window->shader, &window->shader_size);
if (f) {
gen_disassemble(&context.file->devinfo,
intel_disassemble(&context.file->devinfo,
(const uint8_t *) shader_bo.map +
(address - shader_bo.addr), 0, f);
fclose(f);
@ -558,7 +558,7 @@ display_pml4_level(struct aub_mem *mem, uint64_t table_addr, uint64_t table_virt
if (level == 0)
return;
struct gen_batch_decode_bo table_bo =
struct intel_batch_decode_bo table_bo =
aub_mem_get_phys_addr_data(mem, table_addr);
const uint64_t *table = (const uint64_t *) ((const uint8_t *) table_bo.map +
table_addr - table_bo.addr);
@ -670,7 +670,7 @@ batch_edit_address(void *user_data, uint64_t address, uint32_t len)
list_add(&edit_window->base.parent_link, &window->base.children_windows);
}
static struct gen_batch_decode_bo
static struct intel_batch_decode_bo
batch_get_bo(void *user_data, bool ppgtt, uint64_t address)
{
struct batch_window *window = (struct batch_window *) user_data;
@ -712,7 +712,7 @@ display_batch_execlist_write(void *user_data,
const uint32_t pphwsp_size = 4096;
uint32_t pphwsp_addr = context_descriptor & 0xfffff000;
struct gen_batch_decode_bo pphwsp_bo =
struct intel_batch_decode_bo pphwsp_bo =
aub_mem_get_ggtt_bo(&window->mem, pphwsp_addr);
uint32_t *context_img = (uint32_t *)((uint8_t *)pphwsp_bo.map +
(pphwsp_addr - pphwsp_bo.addr) +
@ -725,7 +725,7 @@ display_batch_execlist_write(void *user_data,
window->mem.pml4 = (uint64_t)context_img[49] << 32 | context_img[51];
struct gen_batch_decode_bo ring_bo =
struct intel_batch_decode_bo ring_bo =
aub_mem_get_ggtt_bo(&window->mem, ring_buffer_start);
assert(ring_bo.size > 0);
void *commands = (uint8_t *)ring_bo.map + (ring_buffer_start - ring_bo.addr) + ring_buffer_head;
@ -841,10 +841,10 @@ display_registers_window(struct window *win)
ImGui::BeginChild(ImGui::GetID("##block"));
hash_table_foreach(context.file->spec->registers_by_name, entry) {
struct gen_group *reg = (struct gen_group *) entry->data;
struct intel_group *reg = (struct intel_group *) entry->data;
if (filter.PassFilter(reg->name) &&
ImGui::CollapsingHeader(reg->name)) {
const struct gen_field *field = reg->fields;
const struct intel_field *field = reg->fields;
while (field) {
ImGui::Text("%s : %i -> %i\n", field->name, field->start, field->end);
field = field->next;
@ -896,11 +896,11 @@ display_commands_window(struct window *win)
ImGui::BeginChild(ImGui::GetID("##block"));
hash_table_foreach(context.file->spec->commands, entry) {
struct gen_group *cmd = (struct gen_group *) entry->data;
struct intel_group *cmd = (struct intel_group *) entry->data;
if ((cmd_filter.PassFilter(cmd->name) &&
(opcode_len == 0 || (opcode & cmd->opcode_mask) == cmd->opcode)) &&
ImGui::CollapsingHeader(cmd->name)) {
const struct gen_field *field = cmd->fields;
const struct intel_field *field = cmd->fields;
int32_t last_dword = -1;
while (field) {
if (show_dwords && field->start / 32 != last_dword) {
@ -918,10 +918,10 @@ display_commands_window(struct window *win)
}
}
hash_table_foreach(context.file->spec->structs, entry) {
struct gen_group *cmd = (struct gen_group *) entry->data;
struct intel_group *cmd = (struct intel_group *) entry->data;
if (cmd_filter.PassFilter(cmd->name) && opcode_len == 0 &&
ImGui::CollapsingHeader(cmd->name)) {
const struct gen_field *field = cmd->fields;
const struct intel_field *field = cmd->fields;
int32_t last_dword = -1;
while (field) {
if (show_dwords && field->start / 32 != last_dword) {

View File

@ -58,7 +58,7 @@ struct aub_decode_urb_stage_state {
};
struct aub_viewer_decode_ctx {
struct gen_batch_decode_bo (*get_bo)(void *user_data, bool ppgtt, uint64_t address);
struct intel_batch_decode_bo (*get_bo)(void *user_data, bool ppgtt, uint64_t address);
unsigned (*get_state_size)(void *user_data,
uint32_t offset_from_dynamic_state_base_addr);
@ -69,7 +69,7 @@ struct aub_viewer_decode_ctx {
void *user_data;
const struct gen_device_info *devinfo;
struct gen_spec *spec;
struct intel_spec *spec;
enum drm_i915_gem_engine_class engine;
struct aub_viewer_cfg *cfg;
@ -90,8 +90,8 @@ void aub_viewer_decode_ctx_init(struct aub_viewer_decode_ctx *ctx,
struct aub_viewer_cfg *cfg,
struct aub_viewer_decode_cfg *decode_cfg,
const struct gen_device_info *devinfo,
struct gen_spec *spec,
struct gen_batch_decode_bo (*get_bo)(void *, bool, uint64_t),
struct intel_spec *spec,
struct intel_batch_decode_bo (*get_bo)(void *, bool, uint64_t),
unsigned (*get_state_size)(void *, uint32_t),
void *user_data);

View File

@ -32,8 +32,8 @@ aub_viewer_decode_ctx_init(struct aub_viewer_decode_ctx *ctx,
struct aub_viewer_cfg *cfg,
struct aub_viewer_decode_cfg *decode_cfg,
const struct gen_device_info *devinfo,
struct gen_spec *spec,
struct gen_batch_decode_bo (*get_bo)(void *, bool, uint64_t),
struct intel_spec *spec,
struct intel_batch_decode_bo (*get_bo)(void *, bool, uint64_t),
unsigned (*get_state_size)(void *, uint32_t),
void *user_data)
{
@ -52,15 +52,15 @@ aub_viewer_decode_ctx_init(struct aub_viewer_decode_ctx *ctx,
static void
aub_viewer_print_group(struct aub_viewer_decode_ctx *ctx,
struct gen_group *group,
struct intel_group *group,
uint64_t address, const void *map)
{
struct gen_field_iterator iter;
struct intel_field_iterator iter;
int last_dword = -1;
const uint32_t *p = (const uint32_t *) map;
gen_field_iterator_init(&iter, group, p, 0, false);
while (gen_field_iterator_next(&iter)) {
intel_field_iterator_init(&iter, group, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (ctx->decode_cfg->show_dwords) {
int iter_dword = iter.end_bit / 32;
if (last_dword != iter_dword) {
@ -72,9 +72,9 @@ aub_viewer_print_group(struct aub_viewer_decode_ctx *ctx,
last_dword = iter_dword;
}
}
if (!gen_field_is_header(iter.field)) {
if (!intel_field_is_header(iter.field)) {
if (ctx->decode_cfg->field_filter.PassFilter(iter.name)) {
if (iter.field->type.kind == gen_type::GEN_TYPE_BOOL && iter.raw_value) {
if (iter.field->type.kind == intel_type::GEN_TYPE_BOOL && iter.raw_value) {
ImGui::Text("%s: ", iter.name); ImGui::SameLine();
ImGui::TextColored(ctx->cfg->boolean_color, "true");
} else {
@ -91,10 +91,10 @@ aub_viewer_print_group(struct aub_viewer_decode_ctx *ctx,
}
}
static struct gen_batch_decode_bo
static struct intel_batch_decode_bo
ctx_get_bo(struct aub_viewer_decode_ctx *ctx, bool ppgtt, uint64_t addr)
{
if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0)) {
if (intel_spec_get_gen(ctx->spec) >= intel_make_gen(8,0)) {
/* On Broadwell and above, we have 48-bit addresses which consume two
* dwords. Some packets require that these get stored in a "canonical
* form" which means that bit 47 is sign-extended through the upper
@ -104,9 +104,9 @@ ctx_get_bo(struct aub_viewer_decode_ctx *ctx, bool ppgtt, uint64_t addr)
addr &= (~0ull >> 16);
}
struct gen_batch_decode_bo bo = ctx->get_bo(ctx->user_data, ppgtt, addr);
struct intel_batch_decode_bo bo = ctx->get_bo(ctx->user_data, ppgtt, addr);
if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0))
if (intel_spec_get_gen(ctx->spec) >= intel_make_gen(8,0))
bo.addr &= (~0ull >> 16);
/* We may actually have an offset into the bo */
@ -144,7 +144,7 @@ ctx_disassemble_program(struct aub_viewer_decode_ctx *ctx,
uint32_t ksp, const char *type)
{
uint64_t addr = ctx->instruction_base + ksp;
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, addr);
if (!bo.map) {
ImGui::TextColored(ctx->cfg->missing_color,
"Shader unavailable addr=0x%012" PRIx64, addr);
@ -159,16 +159,16 @@ ctx_disassemble_program(struct aub_viewer_decode_ctx *ctx,
static void
handle_state_base_address(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
uint64_t surface_base = 0, dynamic_base = 0, instruction_base = 0;
bool surface_modify = 0, dynamic_modify = 0, instruction_modify = 0;
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Surface State Base Address") == 0) {
surface_base = iter.raw_value;
} else if (strcmp(iter.name, "Dynamic State Base Address") == 0) {
@ -197,8 +197,8 @@ handle_state_base_address(struct aub_viewer_decode_ctx *ctx,
static void
dump_binding_table(struct aub_viewer_decode_ctx *ctx, uint32_t offset, int count)
{
struct gen_group *strct =
gen_spec_find_struct(ctx->spec, "RENDER_SURFACE_STATE");
struct intel_group *strct =
intel_spec_find_struct(ctx->spec, "RENDER_SURFACE_STATE");
if (strct == NULL) {
ImGui::TextColored(ctx->cfg->missing_color, "did not find RENDER_SURFACE_STATE info");
return;
@ -212,7 +212,7 @@ dump_binding_table(struct aub_viewer_decode_ctx *ctx, uint32_t offset, int count
return;
}
struct gen_batch_decode_bo bind_bo =
struct intel_batch_decode_bo bind_bo =
ctx_get_bo(ctx, true, ctx->surface_base + offset);
if (bind_bo.map == NULL) {
@ -228,7 +228,7 @@ dump_binding_table(struct aub_viewer_decode_ctx *ctx, uint32_t offset, int count
continue;
uint64_t addr = ctx->surface_base + pointers[i];
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, addr);
uint32_t size = strct->dw_length * 4;
if (pointers[i] % 32 != 0 ||
@ -250,13 +250,13 @@ dump_binding_table(struct aub_viewer_decode_ctx *ctx, uint32_t offset, int count
static void
dump_samplers(struct aub_viewer_decode_ctx *ctx, uint32_t offset, int count)
{
struct gen_group *strct = gen_spec_find_struct(ctx->spec, "SAMPLER_STATE");
struct intel_group *strct = intel_spec_find_struct(ctx->spec, "SAMPLER_STATE");
if (count < 0)
count = update_count(ctx, offset, strct->dw_length, 4);
uint64_t state_addr = ctx->dynamic_base + offset;
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, state_addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, state_addr);
const uint8_t *state_map = (const uint8_t *) bo.map;
if (state_map == NULL) {
@ -283,17 +283,17 @@ dump_samplers(struct aub_viewer_decode_ctx *ctx, uint32_t offset, int count)
static void
handle_media_interface_descriptor_load(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_group *desc =
gen_spec_find_struct(ctx->spec, "INTERFACE_DESCRIPTOR_DATA");
struct intel_group *desc =
intel_spec_find_struct(ctx->spec, "INTERFACE_DESCRIPTOR_DATA");
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
uint32_t descriptor_offset = 0;
int descriptor_count = 0;
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Interface Descriptor Data Start Address") == 0) {
descriptor_offset = strtol(iter.value, NULL, 16);
} else if (strcmp(iter.name, "Interface Descriptor Total Length") == 0) {
@ -303,7 +303,7 @@ handle_media_interface_descriptor_load(struct aub_viewer_decode_ctx *ctx,
}
uint64_t desc_addr = ctx->dynamic_base + descriptor_offset;
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, desc_addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, desc_addr);
const uint32_t *desc_map = (const uint32_t *) bo.map;
if (desc_map == NULL) {
@ -317,11 +317,11 @@ handle_media_interface_descriptor_load(struct aub_viewer_decode_ctx *ctx,
aub_viewer_print_group(ctx, desc, desc_addr, desc_map);
gen_field_iterator_init(&iter, desc, desc_map, 0, false);
intel_field_iterator_init(&iter, desc, desc_map, 0, false);
uint64_t ksp = 0;
uint32_t sampler_offset = 0, sampler_count = 0;
uint32_t binding_table_offset = 0, binding_entry_count = 0;
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
ksp = strtoll(iter.value, NULL, 16);
} else if (strcmp(iter.name, "Sampler State Pointer") == 0) {
@ -347,28 +347,28 @@ handle_media_interface_descriptor_load(struct aub_viewer_decode_ctx *ctx,
static void
handle_3dstate_vertex_buffers(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_group *vbs = gen_spec_find_struct(ctx->spec, "VERTEX_BUFFER_STATE");
struct intel_group *vbs = intel_spec_find_struct(ctx->spec, "VERTEX_BUFFER_STATE");
struct gen_batch_decode_bo vb = {};
struct intel_batch_decode_bo vb = {};
uint32_t vb_size = 0;
int index = -1;
int pitch = -1;
bool ready = false;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (iter.struct_desc != vbs)
continue;
uint64_t buffer_addr = 0;
struct gen_field_iterator vbs_iter;
gen_field_iterator_init(&vbs_iter, vbs, &iter.p[iter.start_bit / 32], 0, false);
while (gen_field_iterator_next(&vbs_iter)) {
struct intel_field_iterator vbs_iter;
intel_field_iterator_init(&vbs_iter, vbs, &iter.p[iter.start_bit / 32], 0, false);
while (intel_field_iterator_next(&vbs_iter)) {
if (strcmp(vbs_iter.name, "Vertex Buffer Index") == 0) {
index = vbs_iter.raw_value;
} else if (strcmp(vbs_iter.name, "Buffer Pitch") == 0) {
@ -412,17 +412,17 @@ handle_3dstate_vertex_buffers(struct aub_viewer_decode_ctx *ctx,
static void
handle_3dstate_index_buffer(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_batch_decode_bo ib = {};
struct intel_batch_decode_bo ib = {};
uint64_t buffer_addr = 0;
uint32_t ib_size = 0;
uint32_t format = 0;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Index Format") == 0) {
format = iter.raw_value;
} else if (strcmp(iter.name, "Buffer Starting Address") == 0) {
@ -459,16 +459,16 @@ handle_3dstate_index_buffer(struct aub_viewer_decode_ctx *ctx,
static void
decode_single_ksp(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
uint64_t ksp = 0;
bool is_simd8 = false; /* vertex shaders on Gen8+ only */
bool is_enabled = true;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
ksp = iter.raw_value;
} else if (strcmp(iter.name, "SIMD8 Dispatch Enable") == 0) {
@ -499,15 +499,15 @@ decode_single_ksp(struct aub_viewer_decode_ctx *ctx,
static void
decode_ps_kernels(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
uint64_t ksp[3] = {0, 0, 0};
bool enabled[3] = {false, false, false};
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strncmp(iter.name, "Kernel Start Pointer ",
strlen("Kernel Start Pointer ")) == 0) {
int idx = iter.name[strlen("Kernel Start Pointer ")] - '0';
@ -546,26 +546,26 @@ decode_ps_kernels(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dstate_constant(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_group *body =
gen_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
struct intel_group *body =
intel_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
uint32_t read_length[4] = {0};
uint64_t read_addr[4];
struct gen_field_iterator outer;
gen_field_iterator_init(&outer, inst, p, 0, false);
while (gen_field_iterator_next(&outer)) {
struct intel_field_iterator outer;
intel_field_iterator_init(&outer, inst, p, 0, false);
while (intel_field_iterator_next(&outer)) {
if (outer.struct_desc != body)
continue;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
0, false);
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
int idx;
if (sscanf(iter.name, "Read Length[%d]", &idx) == 1) {
read_length[idx] = iter.raw_value;
@ -578,7 +578,7 @@ decode_3dstate_constant(struct aub_viewer_decode_ctx *ctx,
if (read_length[i] == 0)
continue;
struct gen_batch_decode_bo buffer = ctx_get_bo(ctx, true, read_addr[i]);
struct intel_batch_decode_bo buffer = ctx_get_bo(ctx, true, read_addr[i]);
if (!buffer.map) {
ImGui::TextColored(ctx->cfg->missing_color,
"constant buffer %d unavailable addr=0x%012" PRIx64,
@ -599,7 +599,7 @@ decode_3dstate_constant(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dstate_binding_table_pointers(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
dump_binding_table(ctx, p[1], -1);
@ -607,7 +607,7 @@ decode_3dstate_binding_table_pointers(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dstate_sampler_state_pointers(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
dump_samplers(ctx, p[1], -1);
@ -615,7 +615,7 @@ decode_3dstate_sampler_state_pointers(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dstate_sampler_state_pointers_gen6(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
dump_samplers(ctx, p[1], -1);
@ -635,14 +635,14 @@ str_ends_with(const char *str, const char *end)
static void
decode_dynamic_state_pointers(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst, const uint32_t *p,
struct intel_group *inst, const uint32_t *p,
const char *struct_type, int count)
{
uint32_t state_offset = 0;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (str_ends_with(iter.name, "Pointer")) {
state_offset = iter.raw_value;
break;
@ -650,7 +650,7 @@ decode_dynamic_state_pointers(struct aub_viewer_decode_ctx *ctx,
}
uint64_t state_addr = ctx->dynamic_base + state_offset;
struct gen_batch_decode_bo bo = ctx_get_bo(ctx, true, state_addr);
struct intel_batch_decode_bo bo = ctx_get_bo(ctx, true, state_addr);
const uint8_t *state_map = (const uint8_t *) bo.map;
if (state_map == NULL) {
@ -660,7 +660,7 @@ decode_dynamic_state_pointers(struct aub_viewer_decode_ctx *ctx,
return;
}
struct gen_group *state = gen_spec_find_struct(ctx->spec, struct_type);
struct intel_group *state = intel_spec_find_struct(ctx->spec, struct_type);
if (strcmp(struct_type, "BLEND_STATE") == 0) {
/* Blend states are different from the others because they have a header
* struct called BLEND_STATE which is followed by a variable number of
@ -673,7 +673,7 @@ decode_dynamic_state_pointers(struct aub_viewer_decode_ctx *ctx,
state_map += state->dw_length * 4;
struct_type = "BLEND_STATE_ENTRY";
state = gen_spec_find_struct(ctx->spec, struct_type);
state = intel_spec_find_struct(ctx->spec, struct_type);
}
for (int i = 0; i < count; i++) {
@ -687,7 +687,7 @@ decode_dynamic_state_pointers(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dstate_viewport_state_pointers_cc(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, inst, p, "CC_VIEWPORT", 4);
@ -695,7 +695,7 @@ decode_3dstate_viewport_state_pointers_cc(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dstate_viewport_state_pointers_sf_clip(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, inst, p, "SF_CLIP_VIEWPORT", 4);
@ -703,7 +703,7 @@ decode_3dstate_viewport_state_pointers_sf_clip(struct aub_viewer_decode_ctx *ctx
static void
decode_3dstate_blend_state_pointers(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, inst, p, "BLEND_STATE", 1);
@ -711,7 +711,7 @@ decode_3dstate_blend_state_pointers(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dstate_cc_state_pointers(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, inst, p, "COLOR_CALC_STATE", 1);
@ -719,7 +719,7 @@ decode_3dstate_cc_state_pointers(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dstate_scissor_state_pointers(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
decode_dynamic_state_pointers(ctx, inst, p, "SCISSOR_RECT", 1);
@ -727,10 +727,10 @@ decode_3dstate_scissor_state_pointers(struct aub_viewer_decode_ctx *ctx,
static void
decode_load_register_imm(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_group *reg = gen_spec_find_register(ctx->spec, p[1]);
struct intel_group *reg = intel_spec_find_register(ctx->spec, p[1]);
if (reg != NULL &&
ImGui::TreeNodeEx(&p[1], ImGuiTreeNodeFlags_Framed,
@ -743,7 +743,7 @@ decode_load_register_imm(struct aub_viewer_decode_ctx *ctx,
static void
decode_3dprimitive(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
if (ctx->display_urb) {
@ -754,12 +754,12 @@ decode_3dprimitive(struct aub_viewer_decode_ctx *ctx,
static void
handle_urb(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strstr(iter.name, "URB Starting Address")) {
ctx->urb_stages[ctx->stage].start = iter.raw_value * 8192;
} else if (strstr(iter.name, "URB Entry Allocation Size")) {
@ -777,12 +777,12 @@ handle_urb(struct aub_viewer_decode_ctx *ctx,
static void
handle_urb_read(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
/* Workaround the "Force * URB Entry Read Length" fields */
if (iter.end_bit - iter.start_bit < 2)
continue;
@ -801,24 +801,24 @@ handle_urb_read(struct aub_viewer_decode_ctx *ctx,
static void
handle_urb_constant(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p)
{
struct gen_group *body =
gen_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
struct intel_group *body =
intel_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
struct gen_field_iterator outer;
gen_field_iterator_init(&outer, inst, p, 0, false);
while (gen_field_iterator_next(&outer)) {
struct intel_field_iterator outer;
intel_field_iterator_init(&outer, inst, p, 0, false);
while (intel_field_iterator_next(&outer)) {
if (outer.struct_desc != body)
continue;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
0, false);
ctx->urb_stages[ctx->stage].const_rd_length = 0;
while (gen_field_iterator_next(&iter)) {
while (intel_field_iterator_next(&iter)) {
int idx;
if (sscanf(iter.name, "Read Length[%d]", &idx) == 1) {
ctx->urb_stages[ctx->stage].const_rd_length += iter.raw_value * 32;
@ -830,7 +830,7 @@ handle_urb_constant(struct aub_viewer_decode_ctx *ctx,
struct custom_decoder {
const char *cmd_name;
void (*decode)(struct aub_viewer_decode_ctx *ctx,
struct gen_group *inst,
struct intel_group *inst,
const uint32_t *p);
enum aub_decode_stage stage;
} display_decoders[] = {
@ -894,7 +894,7 @@ aub_viewer_render_batch(struct aub_viewer_decode_ctx *ctx,
const void *_batch, uint32_t batch_size,
uint64_t batch_addr, bool from_ring)
{
struct gen_group *inst;
struct intel_group *inst;
const uint32_t *p, *batch = (const uint32_t *) _batch, *end = batch + batch_size / sizeof(uint32_t);
int length;
@ -907,8 +907,8 @@ aub_viewer_render_batch(struct aub_viewer_decode_ctx *ctx,
ctx->n_batch_buffer_start++;
for (p = batch; p < end; p += length) {
inst = gen_spec_find_instruction(ctx->spec, ctx->engine, p);
length = gen_group_get_length(inst, p);
inst = intel_spec_find_instruction(ctx->spec, ctx->engine, p);
length = intel_group_get_length(inst, p);
assert(inst == NULL || length > 0);
length = MAX2(1, length);
@ -921,7 +921,7 @@ aub_viewer_render_batch(struct aub_viewer_decode_ctx *ctx,
continue;
}
const char *inst_name = gen_group_get_name(inst);
const char *inst_name = intel_group_get_name(inst);
for (unsigned i = 0; i < ARRAY_SIZE(info_decoders); i++) {
if (strcmp(inst_name, info_decoders[i].cmd_name) == 0) {
@ -958,9 +958,9 @@ aub_viewer_render_batch(struct aub_viewer_decode_ctx *ctx,
uint64_t next_batch_addr = 0xd0d0d0d0;
bool ppgtt = false;
bool second_level = false;
struct gen_field_iterator iter;
gen_field_iterator_init(&iter, inst, p, 0, false);
while (gen_field_iterator_next(&iter)) {
struct intel_field_iterator iter;
intel_field_iterator_init(&iter, inst, p, 0, false);
while (intel_field_iterator_next(&iter)) {
if (strcmp(iter.name, "Batch Buffer Start Address") == 0) {
next_batch_addr = iter.raw_value;
} else if (strcmp(iter.name, "Second Level Batch Buffer") == 0) {
@ -970,7 +970,7 @@ aub_viewer_render_batch(struct aub_viewer_decode_ctx *ctx,
}
}
struct gen_batch_decode_bo next_batch = ctx_get_bo(ctx, ppgtt, next_batch_addr);
struct intel_batch_decode_bo next_batch = ctx_get_bo(ctx, ppgtt, next_batch_addr);
if (next_batch.map == NULL) {
ImGui::TextColored(ctx->cfg->missing_color,

View File

@ -376,7 +376,7 @@ anv_block_pool_init(struct anv_block_pool *pool,
pool->nbos = 0;
pool->size = 0;
pool->center_bo_offset = 0;
pool->start_address = gen_canonical_address(start_address);
pool->start_address = intel_canonical_address(start_address);
pool->map = NULL;
if (pool->use_softpin) {
@ -1713,9 +1713,9 @@ anv_device_alloc_bo(struct anv_device *device,
if (new_bo._ccs_size > 0) {
assert(device->info.has_aux_map);
gen_aux_map_add_mapping(device->aux_map_ctx,
gen_canonical_address(new_bo.offset),
gen_canonical_address(new_bo.offset + new_bo.size),
intel_aux_map_add_mapping(device->aux_map_ctx,
intel_canonical_address(new_bo.offset),
intel_canonical_address(new_bo.offset + new_bo.size),
new_bo.size, 0 /* format_bits */);
}
@ -1779,7 +1779,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
"device address");
}
if (client_address && client_address != gen_48b_address(bo->offset)) {
if (client_address && client_address != intel_48b_address(bo->offset)) {
pthread_mutex_unlock(&cache->mutex);
return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported at two different "
@ -1802,7 +1802,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
(alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
};
assert(client_address == gen_48b_address(client_address));
assert(client_address == intel_48b_address(client_address));
if (new_bo.flags & EXEC_OBJECT_PINNED) {
assert(new_bo._ccs_size == 0);
new_bo.offset = anv_vma_alloc(device, new_bo.size,
@ -1905,7 +1905,7 @@ anv_device_import_bo(struct anv_device *device,
"device address");
}
if (client_address && client_address != gen_48b_address(bo->offset)) {
if (client_address && client_address != intel_48b_address(bo->offset)) {
pthread_mutex_unlock(&cache->mutex);
return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported at two different "
@ -1935,7 +1935,7 @@ anv_device_import_bo(struct anv_device *device,
(alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
};
assert(client_address == gen_48b_address(client_address));
assert(client_address == intel_48b_address(client_address));
if (new_bo.flags & EXEC_OBJECT_PINNED) {
assert(new_bo._ccs_size == 0);
new_bo.offset = anv_vma_alloc(device, new_bo.size,
@ -2035,8 +2035,8 @@ anv_device_release_bo(struct anv_device *device,
assert(device->physical->has_implicit_ccs);
assert(device->info.has_aux_map);
assert(bo->has_implicit_ccs);
gen_aux_map_unmap_range(device->aux_map_ctx,
gen_canonical_address(bo->offset),
intel_aux_map_unmap_range(device->aux_map_ctx,
intel_canonical_address(bo->offset),
bo->size);
}

View File

@ -1931,7 +1931,7 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
khr_perf_query_preamble_offset(query_pool,
submit->perf_query_pass);
gen_print_batch(&device->decoder_ctx,
intel_print_batch(&device->decoder_ctx,
pass_batch_bo->map + pass_batch_offset, 64,
pass_batch_bo->offset + pass_batch_offset, false);
}
@ -1940,15 +1940,15 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
struct anv_batch_bo **bo =
u_vector_tail(&submit->cmd_buffers[i]->seen_bbos);
device->cmd_buffer_being_decoded = submit->cmd_buffers[i];
gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
intel_print_batch(&device->decoder_ctx, (*bo)->bo->map,
(*bo)->bo->size, (*bo)->bo->offset, false);
device->cmd_buffer_being_decoded = NULL;
}
} else if (submit->simple_bo) {
gen_print_batch(&device->decoder_ctx, submit->simple_bo->map,
intel_print_batch(&device->decoder_ctx, submit->simple_bo->map,
submit->simple_bo->size, submit->simple_bo->offset, false);
} else {
gen_print_batch(&device->decoder_ctx,
intel_print_batch(&device->decoder_ctx,
device->trivial_batch_bo->map,
device->trivial_batch_bo->size,
device->trivial_batch_bo->offset, false);
@ -1994,7 +1994,7 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
if ((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0 &&
(query_info->kind == GEN_PERF_QUERY_TYPE_OA ||
query_info->kind == GEN_PERF_QUERY_TYPE_RAW)) {
int ret = gen_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
(void *)(uintptr_t) query_info->oa_metrics_set_id);
if (ret < 0) {
result = anv_device_set_lost(device,

View File

@ -488,8 +488,8 @@ anv_physical_device_init_uuids(struct anv_physical_device *device)
_mesa_sha1_final(&sha1_ctx, sha1);
memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);
gen_uuid_compute_driver_id(device->driver_uuid, &device->info, VK_UUID_SIZE);
gen_uuid_compute_device_id(device->device_uuid, &device->isl_dev, VK_UUID_SIZE);
intel_uuid_compute_driver_id(device->driver_uuid, &device->info, VK_UUID_SIZE);
intel_uuid_compute_device_id(device->device_uuid, &device->isl_dev, VK_UUID_SIZE);
return VK_SUCCESS;
}
@ -2785,14 +2785,14 @@ anv_device_init_hiz_clear_value_bo(struct anv_device *device)
}
static bool
get_bo_from_pool(struct gen_batch_decode_bo *ret,
get_bo_from_pool(struct intel_batch_decode_bo *ret,
struct anv_block_pool *pool,
uint64_t address)
{
anv_block_pool_foreach_bo(bo, pool) {
uint64_t bo_address = gen_48b_address(bo->offset);
uint64_t bo_address = intel_48b_address(bo->offset);
if (address >= bo_address && address < (bo_address + bo->size)) {
*ret = (struct gen_batch_decode_bo) {
*ret = (struct intel_batch_decode_bo) {
.addr = bo_address,
.size = bo->size,
.map = bo->map,
@ -2804,11 +2804,11 @@ get_bo_from_pool(struct gen_batch_decode_bo *ret,
}
/* Finding a buffer for batch decoding */
static struct gen_batch_decode_bo
static struct intel_batch_decode_bo
decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
struct anv_device *device = v_batch;
struct gen_batch_decode_bo ret_bo = {};
struct intel_batch_decode_bo ret_bo = {};
assert(ppgtt);
@ -2822,7 +2822,7 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
return ret_bo;
if (!device->cmd_buffer_being_decoded)
return (struct gen_batch_decode_bo) { };
return (struct intel_batch_decode_bo) { };
struct anv_batch_bo **bo;
@ -2831,7 +2831,7 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
uint64_t bo_address = (*bo)->bo->offset & (~0ull >> 16);
if (address >= bo_address && address < bo_address + (*bo)->bo->size) {
return (struct gen_batch_decode_bo) {
return (struct intel_batch_decode_bo) {
.addr = bo_address,
.size = (*bo)->bo->size,
.map = (*bo)->bo->map,
@ -2839,18 +2839,18 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
}
}
return (struct gen_batch_decode_bo) { };
return (struct intel_batch_decode_bo) { };
}
struct gen_aux_map_buffer {
struct gen_buffer base;
struct intel_aux_map_buffer {
struct intel_buffer base;
struct anv_state state;
};
static struct gen_buffer *
gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
static struct intel_buffer *
intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
{
struct gen_aux_map_buffer *buf = malloc(sizeof(struct gen_aux_map_buffer));
struct intel_aux_map_buffer *buf = malloc(sizeof(struct intel_aux_map_buffer));
if (!buf)
return NULL;
@ -2869,9 +2869,9 @@ gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
}
static void
gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
intel_aux_map_buffer_free(void *driver_ctx, struct intel_buffer *buffer)
{
struct gen_aux_map_buffer *buf = (struct gen_aux_map_buffer*)buffer;
struct intel_aux_map_buffer *buf = (struct intel_aux_map_buffer*)buffer;
struct anv_device *device = (struct anv_device*)driver_ctx;
struct anv_state_pool *pool = &device->dynamic_state_pool;
anv_state_pool_free(pool, buf->state);
@ -2879,8 +2879,8 @@ gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
}
static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
.alloc = gen_aux_map_buffer_alloc,
.free = gen_aux_map_buffer_free,
.alloc = intel_aux_map_buffer_alloc,
.free = intel_aux_map_buffer_free,
};
static VkResult
@ -2988,7 +2988,7 @@ VkResult anv_CreateDevice(
GEN_BATCH_DECODE_OFFSETS |
GEN_BATCH_DECODE_FLOATS;
gen_batch_decode_ctx_init(&device->decoder_ctx,
intel_batch_decode_ctx_init(&device->decoder_ctx,
&physical_device->info,
stderr, decode_flags, NULL,
decode_get_bo, NULL, device);
@ -3204,7 +3204,7 @@ VkResult anv_CreateDevice(
}
if (device->info.has_aux_map) {
device->aux_map_ctx = gen_aux_map_init(device, &aux_map_allocator,
device->aux_map_ctx = intel_aux_map_init(device, &aux_map_allocator,
&physical_device->info);
if (!device->aux_map_ctx)
goto fail_binding_table_pool;
@ -3281,7 +3281,7 @@ VkResult anv_CreateDevice(
anv_device_release_bo(device, device->workaround_bo);
fail_surface_aux_map_pool:
if (device->info.has_aux_map) {
gen_aux_map_finish(device->aux_map_ctx);
intel_aux_map_finish(device->aux_map_ctx);
device->aux_map_ctx = NULL;
}
fail_binding_table_pool:
@ -3357,7 +3357,7 @@ void anv_DestroyDevice(
anv_device_release_bo(device, device->hiz_clear_bo);
if (device->info.has_aux_map) {
gen_aux_map_finish(device->aux_map_ctx);
intel_aux_map_finish(device->aux_map_ctx);
device->aux_map_ctx = NULL;
}
@ -3388,7 +3388,7 @@ void anv_DestroyDevice(
anv_gem_destroy_context(device, device->context_id);
if (INTEL_DEBUG & DEBUG_BATCH)
gen_batch_decode_ctx_finish(&device->decoder_ctx);
intel_batch_decode_ctx_finish(&device->decoder_ctx);
close(device->fd);
@ -3634,15 +3634,15 @@ anv_vma_alloc(struct anv_device *device,
done:
pthread_mutex_unlock(&device->vma_mutex);
assert(addr == gen_48b_address(addr));
return gen_canonical_address(addr);
assert(addr == intel_48b_address(addr));
return intel_canonical_address(addr);
}
void
anv_vma_free(struct anv_device *device,
uint64_t address, uint64_t size)
{
const uint64_t addr_48b = gen_48b_address(address);
const uint64_t addr_48b = intel_48b_address(address);
pthread_mutex_lock(&device->vma_mutex);
@ -4432,7 +4432,7 @@ uint64_t anv_GetDeviceMemoryOpaqueCaptureAddress(
assert(memory->bo->flags & EXEC_OBJECT_PINNED);
assert(memory->bo->has_client_visible_address);
return gen_48b_address(memory->bo->offset);
return intel_48b_address(memory->bo->offset);
}
void

View File

@ -46,7 +46,7 @@ anv_gem_create(struct anv_device *device, uint64_t size)
.size = size,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
if (ret != 0) {
/* FIXME: What do we do if this fails? */
return 0;
@ -62,7 +62,7 @@ anv_gem_close(struct anv_device *device, uint32_t gem_handle)
.handle = gem_handle,
};
gen_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
intel_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}
/**
@ -80,7 +80,7 @@ anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
assert(offset == 0);
/* Get the fake offset back */
int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
if (ret != 0)
return MAP_FAILED;
@ -101,7 +101,7 @@ anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
.flags = flags,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
if (ret != 0)
return MAP_FAILED;
@ -146,7 +146,7 @@ anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
.flags = 0,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
if (ret == -1)
return 0;
@ -162,7 +162,7 @@ anv_gem_set_caching(struct anv_device *device,
.caching = caching,
};
return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}
int
@ -175,7 +175,7 @@ anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
.write_domain = write_domain,
};
return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}
/**
@ -188,7 +188,7 @@ anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
.handle = gem_handle,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
if (ret < 0)
return ret;
@ -207,7 +207,7 @@ anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns
.flags = 0,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
*timeout_ns = wait.timeout_ns;
return ret;
@ -218,9 +218,9 @@ anv_gem_execbuffer(struct anv_device *device,
struct drm_i915_gem_execbuffer2 *execbuf)
{
if (execbuf->flags & I915_EXEC_FENCE_OUT)
return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
else
return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}
/** Return -1 on error. */
@ -236,7 +236,7 @@ anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
* is only used in Android code, so we may need some other way to
* communicate the tiling mode.
*/
if (gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
assert(!"Failed to get BO tiling");
return -1;
}
@ -257,7 +257,7 @@ anv_gem_set_tiling(struct anv_device *device,
return 0;
/* set_tiling overwrites the input on the error path, so we have to open
* code gen_ioctl.
* code intel_ioctl.
*/
do {
struct drm_i915_gem_set_tiling set_tiling = {
@ -282,7 +282,7 @@ anv_gem_get_param(int fd, uint32_t param)
.value = &tmp,
};
int ret = gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
int ret = intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret == 0)
return tmp;
@ -296,7 +296,7 @@ anv_gem_get_drm_cap(int fd, uint32_t capability)
.capability = capability,
};
gen_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
intel_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
return cap.value;
}
@ -310,7 +310,7 @@ anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
.size = 4096,
};
if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
assert(!"Failed to create GEM BO");
return false;
}
@ -318,7 +318,7 @@ anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
bool swizzled = false;
/* set_tiling overwrites the input on the error path, so we have to open
* code gen_ioctl.
* code intel_ioctl.
*/
do {
struct drm_i915_gem_set_tiling set_tiling = {
@ -339,7 +339,7 @@ anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
.handle = gem_create.handle,
};
if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
assert(!"Failed to get BO tiling");
goto close_and_return;
}
@ -350,7 +350,7 @@ close_and_return:
memset(&close, 0, sizeof(close));
close.handle = gem_create.handle;
gen_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
intel_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
return swizzled;
}
@ -367,7 +367,7 @@ anv_gem_create_context(struct anv_device *device)
{
struct drm_i915_gem_context_create create = { 0 };
int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret == -1)
return -1;
@ -450,7 +450,7 @@ anv_gem_create_context_engines(struct anv_device *device,
.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
.extensions = (uintptr_t)&set_engines,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
free(engines_param);
if (ret == -1)
return -1;
@ -465,7 +465,7 @@ anv_gem_destroy_context(struct anv_device *device, int context)
.ctx_id = context,
};
return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}
int
@ -478,7 +478,7 @@ anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
};
int err = 0;
if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
err = -errno;
return err;
}
@ -491,7 +491,7 @@ anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
.param = param,
};
int ret = gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
int ret = intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
if (ret == -1)
return -1;
@ -507,7 +507,7 @@ anv_gem_context_get_reset_stats(int fd, int context,
.ctx_id = context,
};
int ret = gen_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
int ret = intel_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
if (ret == 0) {
*active = stats.batch_active;
*pending = stats.batch_pending;
@ -524,7 +524,7 @@ anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
.flags = DRM_CLOEXEC | DRM_RDWR,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
if (ret == -1)
return -1;
@ -538,7 +538,7 @@ anv_gem_fd_to_handle(struct anv_device *device, int fd)
.fd = fd,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
if (ret == -1)
return 0;
@ -552,7 +552,7 @@ anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
.offset = offset
};
int ret = gen_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);
int ret = intel_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);
*result = args.val;
return ret;
@ -567,7 +567,7 @@ anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
.fence = -1,
};
int ret = gen_ioctl(fd1, SYNC_IOC_MERGE, &args);
int ret = intel_ioctl(fd1, SYNC_IOC_MERGE, &args);
if (ret == -1)
return -1;
@ -581,7 +581,7 @@ anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
.flags = flags,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
if (ret)
return 0;
@ -595,7 +595,7 @@ anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
.handle = handle,
};
gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}
int
@ -605,7 +605,7 @@ anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
.handle = handle,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
if (ret)
return -1;
@ -619,7 +619,7 @@ anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
.fd = fd,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
if (ret)
return 0;
@ -634,7 +634,7 @@ anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
};
int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
if (ret)
return -1;
@ -651,7 +651,7 @@ anv_gem_syncobj_import_sync_file(struct anv_device *device,
.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
};
return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
}
void
@ -662,13 +662,13 @@ anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
.count_handles = 1,
};
gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
}
bool
anv_gem_supports_syncobj_wait(int fd)
{
return gen_gem_supports_syncobj_wait(fd);
return intel_gem_supports_syncobj_wait(fd);
}
int
@ -686,7 +686,7 @@ anv_gem_syncobj_wait(struct anv_device *device,
if (wait_all)
args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
int
@ -710,7 +710,7 @@ anv_gem_syncobj_timeline_wait(struct anv_device *device,
if (wait_materialize)
args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;
return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
}
int
@ -726,7 +726,7 @@ anv_gem_syncobj_timeline_signal(struct anv_device *device,
.count_handles = num_items,
};
return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}
int
@ -742,7 +742,7 @@ anv_gem_syncobj_timeline_query(struct anv_device *device,
.count_handles = num_items,
};
return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}
int
@ -761,7 +761,7 @@ anv_i915_query(int fd, uint64_t query_id, void *buffer,
.items_ptr = (uintptr_t)&item,
};
int ret = gen_ioctl(fd, DRM_IOCTL_I915_QUERY, &args);
int ret = intel_ioctl(fd, DRM_IOCTL_I915_QUERY, &args);
*buffer_len = item.length;
return ret;
}

View File

@ -72,7 +72,7 @@ void genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer);
void genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer);
void genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
const struct gen_l3_config *cfg);
const struct intel_l3_config *cfg);
void genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer);
void genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer);
@ -94,10 +94,10 @@ void genX(cmd_emit_conditional_render_predicate)(struct anv_cmd_buffer *cmd_buff
void
genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
const struct gen_l3_config *l3_config,
const struct intel_l3_config *l3_config,
VkShaderStageFlags active_stages,
const unsigned entry_size[4],
enum gen_urb_deref_block_size *deref_block_size);
enum intel_urb_deref_block_size *deref_block_size);
void genX(emit_multisample)(struct anv_batch *batch, uint32_t samples,
const VkSampleLocationEXT *locations);

View File

@ -151,7 +151,7 @@ anv_device_perf_open(struct anv_device *device, uint64_t metric_id)
param.properties_ptr = (uintptr_t)properties;
param.num_properties = p / 2;
stream_fd = gen_ioctl(device->fd, DRM_IOCTL_I915_PERF_OPEN, &param);
stream_fd = intel_ioctl(device->fd, DRM_IOCTL_I915_PERF_OPEN, &param);
return stream_fd;
}
@ -260,7 +260,7 @@ VkResult anv_ReleasePerformanceConfigurationINTEL(
ANV_FROM_HANDLE(anv_performance_configuration_intel, config, _configuration);
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG))
gen_ioctl(device->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config->config_id);
intel_ioctl(device->fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config->config_id);
ralloc_free(config->register_config);
vk_object_base_finish(&config->base);
@ -283,7 +283,7 @@ VkResult anv_QueueSetPerformanceConfigurationINTEL(
if (device->perf_fd < 0)
return VK_ERROR_INITIALIZATION_FAILED;
} else {
int ret = gen_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
(void *)(uintptr_t) config->config_id);
if (ret < 0)
return anv_device_set_lost(device, "i915-perf config failed: %m");

View File

@ -1196,7 +1196,7 @@ anv_pipeline_add_executable(struct anv_pipeline *pipeline,
/* Creating this is far cheaper than it looks. It's perfectly fine to
* do it for every binary.
*/
gen_disassemble(&pipeline->device->info,
intel_disassemble(&pipeline->device->info,
stage->code, code_offset, stream);
fclose(stream);
@ -2083,8 +2083,8 @@ copy_non_dynamic_state(struct anv_graphics_pipeline *pipeline,
} else {
dynamic->sample_locations.samples =
ms_info ? ms_info->rasterizationSamples : 1;
const struct gen_sample_position *positions =
gen_get_sample_positions(dynamic->sample_locations.samples);
const struct intel_sample_position *positions =
intel_get_sample_positions(dynamic->sample_locations.samples);
for (uint32_t i = 0; i < dynamic->sample_locations.samples; i++) {
dynamic->sample_locations.locations[i].x = positions[i].x;
dynamic->sample_locations.locations[i].y = positions[i].y;
@ -2163,10 +2163,10 @@ anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
{
const struct gen_device_info *devinfo = &pipeline->device->info;
const struct gen_l3_weights w =
gen_get_default_l3_weights(devinfo, true, needs_slm);
const struct intel_l3_weights w =
intel_get_default_l3_weights(devinfo, true, needs_slm);
pipeline->l3_config = gen_get_l3_config(devinfo, w);
pipeline->l3_config = intel_get_l3_config(devinfo, w);
}
VkResult

View File

@ -81,7 +81,7 @@ struct anv_buffer_view;
struct anv_image_view;
struct anv_instance;
struct gen_aux_map_context;
struct intel_aux_map_context;
struct gen_perf_config;
struct gen_perf_counter_pass;
struct gen_perf_query_result;
@ -1242,7 +1242,7 @@ struct anv_device {
int _lost;
int lost_reported;
struct gen_batch_decode_ctx decoder_ctx;
struct intel_batch_decode_ctx decoder_ctx;
/*
* When decoding a anv_cmd_buffer, we might need to search for BOs through
* the cmd_buffer's list.
@ -1252,7 +1252,7 @@ struct anv_device {
int perf_fd; /* -1 if no opened */
uint64_t perf_metric; /* 0 if unset */
struct gen_aux_map_context *aux_map_ctx;
struct intel_aux_map_context *aux_map_ctx;
struct gen_debug_block_frame *debug_frame_desc;
};
@ -1590,9 +1590,9 @@ static inline uint64_t
anv_address_physical(struct anv_address addr)
{
if (addr.bo && (addr.bo->flags & EXEC_OBJECT_PINNED))
return gen_canonical_address(addr.bo->offset + addr.offset);
return intel_canonical_address(addr.bo->offset + addr.offset);
else
return gen_canonical_address(addr.offset);
return intel_canonical_address(addr.offset);
}
static inline struct anv_address
@ -1608,14 +1608,14 @@ write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
unsigned reloc_size = 0;
if (device->info.gen >= 8) {
reloc_size = sizeof(uint64_t);
*(uint64_t *)p = gen_canonical_address(v);
*(uint64_t *)p = intel_canonical_address(v);
} else {
reloc_size = sizeof(uint32_t);
*(uint32_t *)p = v;
}
if (flush && !device->info.has_llc)
gen_flush_range(p, reloc_size);
intel_flush_range(p, reloc_size);
}
static inline uint64_t
@ -2784,7 +2784,7 @@ struct anv_cmd_compute_state {
struct anv_cmd_state {
/* PIPELINE_SELECT.PipelineSelection */
uint32_t current_pipeline;
const struct gen_l3_config * current_l3_config;
const struct intel_l3_config * current_l3_config;
uint32_t last_aux_map_state;
struct anv_cmd_graphics_state gfx;
@ -3331,7 +3331,7 @@ struct anv_pipeline {
struct util_dynarray executables;
const struct gen_l3_config * l3_config;
const struct intel_l3_config * l3_config;
};
struct anv_graphics_pipeline {

View File

@ -831,7 +831,7 @@ anv_queue_submit_simple_batch(struct anv_queue *queue,
memcpy(batch_bo->map, batch->start, size);
if (!device->info.has_llc)
gen_flush_range(batch_bo->map, size);
intel_flush_range(batch_bo->map, size);
submit->simple_bo = batch_bo;
submit->simple_bo_size = size;

View File

@ -71,7 +71,7 @@ gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
* framebuffer at the time we emit the packet. Otherwise, we have
* fall back to a worst-case guardband of [-1, 1].
*/
gen_calculate_guardband_size(fb->width, fb->height,
intel_calculate_guardband_size(fb->width, fb->height,
sfv.ViewportMatrixElementm00,
sfv.ViewportMatrixElementm11,
sfv.ViewportMatrixElementm30,

View File

@ -206,7 +206,7 @@ blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
*/
}
static const struct gen_l3_config *
static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *batch)
{
struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
@ -220,8 +220,8 @@ genX(blorp_exec)(struct blorp_batch *batch,
struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
if (!cmd_buffer->state.current_l3_config) {
const struct gen_l3_config *cfg =
gen_get_default_l3_config(&cmd_buffer->device->info);
const struct intel_l3_config *cfg =
intel_get_default_l3_config(&cmd_buffer->device->info);
genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
}

View File

@ -469,7 +469,7 @@ anv_image_init_aux_tt(struct anv_cmd_buffer *cmd_buffer,
anv_address_physical(anv_image_address(image, &surface->memory_range));
const struct isl_surf *isl_surf = &image->planes[plane].primary_surface.isl;
uint64_t format_bits = gen_aux_map_format_bits_for_isl_surf(isl_surf);
uint64_t format_bits = intel_aux_map_format_bits_for_isl_surf(isl_surf);
/* We're about to live-update the AUX-TT. We really don't want anyone else
* trying to read it while we're doing this. We could probably get away
@ -527,7 +527,7 @@ anv_image_init_aux_tt(struct anv_cmd_buffer *cmd_buffer,
uint64_t address = base_address + offset;
uint64_t aux_entry_addr64, *aux_entry_map;
aux_entry_map = gen_aux_map_get_entry(cmd_buffer->device->aux_map_ctx,
aux_entry_map = intel_aux_map_get_entry(cmd_buffer->device->aux_map_ctx,
address, &aux_entry_addr64);
assert(cmd_buffer->device->physical->use_softpin);
@ -1894,7 +1894,7 @@ genX(CmdExecuteCommands)(
*/
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
const struct gen_l3_config *cfg)
const struct intel_l3_config *cfg)
{
assert(cfg || GEN_GEN >= 12);
if (cfg == cmd_buffer->state.current_l3_config)
@ -1902,7 +1902,7 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
if (INTEL_DEBUG & DEBUG_L3) {
mesa_logd("L3 config transition: ");
gen_dump_l3_config(cfg, stderr);
intel_dump_l3_config(cfg, stderr);
}
UNUSED const bool has_slm = cfg->n[GEN_L3P_SLM];
@ -5028,7 +5028,7 @@ genX(cmd_buffer_set_binding_for_gen8_vb_flush)(struct anv_cmd_buffer *cmd_buffer
}
assert(vb_address.bo && (vb_address.bo->flags & EXEC_OBJECT_PINNED));
bound->start = gen_48b_address(anv_address_physical(vb_address));
bound->start = intel_48b_address(anv_address_physical(vb_address));
bound->end = bound->start + vb_size;
assert(bound->end > bound->start); /* No overflow */

View File

@ -73,8 +73,8 @@ genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
}
if (!cmd_buffer->state.current_l3_config) {
const struct gen_l3_config *cfg =
gen_get_default_l3_config(&cmd_buffer->device->info);
const struct intel_l3_config *cfg =
intel_get_default_l3_config(&cmd_buffer->device->info);
genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
}

View File

@ -263,17 +263,17 @@ emit_vertex_input(struct anv_graphics_pipeline *pipeline,
void
genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
const struct gen_l3_config *l3_config,
const struct intel_l3_config *l3_config,
VkShaderStageFlags active_stages,
const unsigned entry_size[4],
enum gen_urb_deref_block_size *deref_block_size)
enum intel_urb_deref_block_size *deref_block_size)
{
const struct gen_device_info *devinfo = &device->info;
unsigned entries[4];
unsigned start[4];
bool constrained;
gen_get_urb_config(devinfo, l3_config,
intel_get_urb_config(devinfo, l3_config,
active_stages &
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
active_stages & VK_SHADER_STAGE_GEOMETRY_BIT,
@ -308,7 +308,7 @@ genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
static void
emit_urb_setup(struct anv_graphics_pipeline *pipeline,
enum gen_urb_deref_block_size *deref_block_size)
enum intel_urb_deref_block_size *deref_block_size)
{
unsigned entry_size[4];
for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
@ -585,7 +585,7 @@ emit_rs_state(struct anv_graphics_pipeline *pipeline,
const uint32_t dynamic_states,
const struct anv_render_pass *pass,
const struct anv_subpass *subpass,
enum gen_urb_deref_block_size urb_deref_block_size)
enum intel_urb_deref_block_size urb_deref_block_size)
{
struct GENX(3DSTATE_SF) sf = {
GENX(3DSTATE_SF_header),
@ -2280,7 +2280,7 @@ genX(graphics_pipeline_create)(
anv_cmd_dirty_bit_for_vk_dynamic_state(dyn_info->pDynamicStates[i]);
}
enum gen_urb_deref_block_size urb_deref_block_size;
enum intel_urb_deref_block_size urb_deref_block_size;
emit_urb_setup(pipeline, &urb_deref_block_size);
assert(pCreateInfo->pVertexInputState);

View File

@ -958,7 +958,7 @@ void genX(CmdBeginQueryIndexedEXT)(
struct mi_value reg_addr =
mi_iadd(
&b,
mi_imm(gen_canonical_address(pool->bo->offset +
mi_imm(intel_canonical_address(pool->bo->offset +
khr_perf_query_data_offset(pool, query, 0, end) +
field->location)),
mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
@ -969,7 +969,7 @@ void genX(CmdBeginQueryIndexedEXT)(
reg_addr =
mi_iadd(
&b,
mi_imm(gen_canonical_address(pool->bo->offset +
mi_imm(intel_canonical_address(pool->bo->offset +
khr_perf_query_data_offset(pool, query, 0, end) +
field->location + 4)),
mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
@ -982,7 +982,7 @@ void genX(CmdBeginQueryIndexedEXT)(
mi_iadd(
&b,
mi_imm(
gen_canonical_address(
intel_canonical_address(
pool->bo->offset +
khr_perf_query_availability_offset(pool, query, 0 /* pass */))),
mi_reg64(ANV_PERF_QUERY_OFFSET_REG));

View File

@ -292,7 +292,7 @@ init_render_queue_state(struct anv_queue *queue)
#if GEN_GEN == 12
if (device->info.has_aux_map) {
uint64_t aux_base_addr = gen_aux_map_get_base(device->aux_map_ctx);
uint64_t aux_base_addr = intel_aux_map_get_base(device->aux_map_ctx);
assert(aux_base_addr % (32 * 1024) == 0);
anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num);
@ -332,7 +332,7 @@ init_render_queue_state(struct anv_queue *queue)
}
#if GEN_GEN >= 12
const struct gen_l3_config *cfg = gen_get_default_l3_config(&device->info);
const struct intel_l3_config *cfg = intel_get_default_l3_config(&device->info);
if (!cfg) {
/* Platforms with no configs just setup full-way allocation. */
uint32_t l3cr;

View File

@ -317,7 +317,7 @@ bucket_vma_alloc(struct brw_bufmgr *bufmgr,
return 0ull;
uint64_t addr = vma_alloc(bufmgr, memzone, node_size, node_size);
node->start_address = gen_48b_address(addr);
node->start_address = intel_48b_address(addr);
node->bitmap = ~1ull;
return node->start_address;
}
@ -434,7 +434,7 @@ vma_alloc(struct brw_bufmgr *bufmgr,
assert((addr >> 48ull) == 0);
assert((addr % alignment) == 0);
return gen_canonical_address(addr);
return intel_canonical_address(addr);
}
/**
@ -448,7 +448,7 @@ vma_free(struct brw_bufmgr *bufmgr,
assert(brw_using_softpin(bufmgr));
/* Un-canonicalize the address. */
address = gen_48b_address(address);
address = intel_48b_address(address);
if (address == 0ull)
return;
@ -922,7 +922,7 @@ bo_unreference_final(struct brw_bo *bo, time_t time)
list_for_each_entry_safe(struct bo_export, export, &bo->exports, link) {
struct drm_gem_close close = { .handle = export->gem_handle };
gen_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);
intel_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);
list_del(&export->link);
free(export);

View File

@ -528,7 +528,7 @@ struct brw_batch {
/** Map from batch offset to brw_state_batch data (with DEBUG_BATCH) */
struct hash_table_u64 *state_batch_sizes;
struct gen_batch_decode_ctx decoder;
struct intel_batch_decode_ctx decoder;
};
#define BRW_MAX_XFB_STREAMS 4
@ -682,7 +682,7 @@ enum brw_predicate_state {
struct shader_times;
struct gen_l3_config;
struct intel_l3_config;
struct gen_perf;
struct brw_uploader {
@ -1221,7 +1221,7 @@ struct brw_context
int baseinstance;
struct {
const struct gen_l3_config *config;
const struct intel_l3_config *config;
} l3;
struct {

View File

@ -157,7 +157,7 @@ brw_get_perf_query_info(struct gl_context *ctx,
}
static GLuint
gen_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
intel_counter_type_enum_to_gl_type(enum gen_perf_counter_type type)
{
switch (type) {
case GEN_PERF_COUNTER_TYPE_EVENT: return GL_PERFQUERY_COUNTER_EVENT_INTEL;
@ -211,7 +211,7 @@ brw_get_perf_counter_info(struct gl_context *ctx,
*desc = counter->desc;
*offset = counter->offset;
*data_size = gen_perf_query_counter_get_size(counter);
*type_enum = gen_counter_type_enum_to_gl_type(counter->type);
*type_enum = intel_counter_type_enum_to_gl_type(counter->type);
*data_type_enum = gen_counter_data_type_to_gl_type(counter->data_type);
*raw_max = counter->raw_max;
}

View File

@ -35,7 +35,7 @@
* more statistics from the pipeline state (e.g. guess of expected URB usage
* and bound surfaces), or by using feed-back from performance counters.
*/
static struct gen_l3_weights
static struct intel_l3_weights
get_pipeline_state_l3_weights(const struct brw_context *brw)
{
const struct brw_stage_state *stage_states[] = {
@ -60,7 +60,7 @@ get_pipeline_state_l3_weights(const struct brw_context *brw)
needs_slm |= prog_data && prog_data->total_shared;
}
return gen_get_default_l3_weights(&brw->screen->devinfo,
return intel_get_default_l3_weights(&brw->screen->devinfo,
needs_dc, needs_slm);
}
@ -68,7 +68,7 @@ get_pipeline_state_l3_weights(const struct brw_context *brw)
* Program the hardware to use the specified L3 configuration.
*/
static void
setup_l3_config(struct brw_context *brw, const struct gen_l3_config *cfg)
setup_l3_config(struct brw_context *brw, const struct intel_l3_config *cfg)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
@ -192,10 +192,10 @@ setup_l3_config(struct brw_context *brw, const struct gen_l3_config *cfg)
* configuration.
*/
static void
update_urb_size(struct brw_context *brw, const struct gen_l3_config *cfg)
update_urb_size(struct brw_context *brw, const struct intel_l3_config *cfg)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
const unsigned sz = gen_get_l3_config_urb_size(devinfo, cfg);
const unsigned sz = intel_get_l3_config_urb_size(devinfo, cfg);
if (brw->urb.size != sz) {
brw->urb.size = sz;
@ -215,8 +215,8 @@ update_urb_size(struct brw_context *brw, const struct gen_l3_config *cfg)
void
brw_emit_l3_state(struct brw_context *brw)
{
const struct gen_l3_weights w = get_pipeline_state_l3_weights(brw);
const float dw = gen_diff_l3_weights(w, gen_get_l3_config_weights(brw->l3.config));
const struct intel_l3_weights w = get_pipeline_state_l3_weights(brw);
const float dw = intel_diff_l3_weights(w, intel_get_l3_config_weights(brw->l3.config));
/* The distance between any two compatible weight vectors cannot exceed two
* due to the triangle inequality.
*/
@ -235,8 +235,8 @@ brw_emit_l3_state(struct brw_context *brw)
small_dw_threshold : large_dw_threshold);
if (dw > dw_threshold && can_do_pipelined_register_writes(brw->screen)) {
const struct gen_l3_config *const cfg =
gen_get_l3_config(&brw->screen->devinfo, w);
const struct intel_l3_config *const cfg =
intel_get_l3_config(&brw->screen->devinfo, w);
setup_l3_config(brw, cfg);
update_urb_size(brw, cfg);
@ -244,7 +244,7 @@ brw_emit_l3_state(struct brw_context *brw)
if (INTEL_DEBUG & DEBUG_L3) {
fprintf(stderr, "L3 config transition (%f > %f): ", dw, dw_threshold);
gen_dump_l3_config(cfg, stderr);
intel_dump_l3_config(cfg, stderr);
}
}
}
@ -301,7 +301,7 @@ void
gen7_restore_default_l3_config(struct brw_context *brw)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
const struct gen_l3_config *const cfg = gen_get_default_l3_config(devinfo);
const struct intel_l3_config *const cfg = intel_get_default_l3_config(devinfo);
if (cfg != brw->l3.config &&
can_do_pipelined_register_writes(brw->screen)) {

View File

@ -248,7 +248,7 @@ gen7_upload_urb(struct brw_context *brw, unsigned vs_size,
unsigned entries[4];
unsigned start[4];
bool constrained;
gen_get_urb_config(devinfo, brw->l3.config,
intel_get_urb_config(devinfo, brw->l3.config,
tess_present, gs_present, entry_size,
entries, start, NULL, &constrained);

View File

@ -256,7 +256,7 @@ blorp_flush_range(UNUSED struct blorp_batch *batch, UNUSED void *start,
}
#if GEN_GEN >= 7
static const struct gen_l3_config *
static const struct intel_l3_config *
blorp_get_l3_config(struct blorp_batch *batch)
{
assert(batch->blorp->driver_ctx == batch->driver_batch);

View File

@ -2463,7 +2463,7 @@ genX(upload_sf_clip_viewport)(struct brw_context *brw)
sfv.ViewportMatrixElementm30 = translate[0],
sfv.ViewportMatrixElementm31 = translate[1] * y_scale + y_bias,
sfv.ViewportMatrixElementm32 = translate[2],
gen_calculate_guardband_size(fb_width, fb_height,
intel_calculate_guardband_size(fb_width, fb_height,
sfv.ViewportMatrixElementm00,
sfv.ViewportMatrixElementm11,
sfv.ViewportMatrixElementm30,

View File

@ -80,7 +80,7 @@ dump_validation_list(struct brw_batch *batch)
}
}
static struct gen_batch_decode_bo
static struct intel_batch_decode_bo
decode_get_bo(void *v_brw, bool ppgtt, uint64_t address)
{
struct brw_context *brw = v_brw;
@ -92,7 +92,7 @@ decode_get_bo(void *v_brw, bool ppgtt, uint64_t address)
uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);
if (address >= bo_address && address < bo_address + bo->size) {
return (struct gen_batch_decode_bo) {
return (struct intel_batch_decode_bo) {
.addr = address,
.size = bo->size,
.map = brw_bo_map(brw, bo, MAP_READ) + (address - bo_address),
@ -100,7 +100,7 @@ decode_get_bo(void *v_brw, bool ppgtt, uint64_t address)
}
}
return (struct gen_batch_decode_bo) { };
return (struct intel_batch_decode_bo) { };
}
static unsigned
@ -158,7 +158,7 @@ brw_batch_init(struct brw_context *brw)
GEN_BATCH_DECODE_OFFSETS |
GEN_BATCH_DECODE_FLOATS;
gen_batch_decode_ctx_init(&batch->decoder, devinfo, stderr,
intel_batch_decode_ctx_init(&batch->decoder, devinfo, stderr,
decode_flags, NULL, decode_get_bo,
decode_get_state_size, brw);
batch->decoder.max_vbo_decoded_lines = 100;
@ -346,7 +346,7 @@ brw_batch_free(struct brw_batch *batch)
brw_bo_unreference(batch->state.bo);
if (batch->state_batch_sizes) {
_mesa_hash_table_u64_destroy(batch->state_batch_sizes, NULL);
gen_batch_decode_ctx_finish(&batch->decoder);
intel_batch_decode_ctx_finish(&batch->decoder);
}
}
@ -831,7 +831,7 @@ submit_batch(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
}
if (INTEL_DEBUG & DEBUG_BATCH) {
gen_print_batch(&batch->decoder, batch->batch.map,
intel_print_batch(&batch->decoder, batch->batch.map,
4 * USED_BATCH(*batch),
batch->batch.bo->gtt_offset, false);
}
@ -945,7 +945,7 @@ emit_reloc(struct brw_batch *batch,
if (target->kflags & EXEC_OBJECT_PINNED) {
brw_use_pinned_bo(batch, target, reloc_flags & RELOC_WRITE);
return gen_canonical_address(target->gtt_offset + target_offset);
return intel_canonical_address(target->gtt_offset + target_offset);
}
unsigned int index = add_exec_bo(batch, target);