vulkan,anv: Auto-detect syncobj features

Instead of having a bunch of const vk_sync_type for each permutation of
vk_drm_syncobj capabilities, have a vk_drm_syncobj_get_type helper which
auto-detects features.  If a driver can't support a feature for some
reason (i915 got timeline support very late, for instance), they can
always mask off feature bits they don't want.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Acked-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13427>
This commit is contained in:
Jason Ekstrand 2021-11-12 09:51:59 -06:00
parent 36b4d12f02
commit 2a910071bc
6 changed files with 83 additions and 126 deletions

View File

@ -172,6 +172,9 @@ static void
get_device_extensions(const struct anv_physical_device *device,
struct vk_device_extension_table *ext)
{
const bool has_syncobj_wait =
(device->sync_syncobj_type.features & VK_SYNC_FEATURE_CPU_WAIT) != 0;
*ext = (struct vk_device_extension_table) {
.KHR_8bit_storage = device->info.ver >= 8,
.KHR_16bit_storage = device->info.ver >= 8,
@ -186,8 +189,8 @@ get_device_extensions(const struct anv_physical_device *device,
.KHR_device_group = true,
.KHR_draw_indirect_count = true,
.KHR_driver_properties = true,
.KHR_external_fence = device->has_syncobj_wait,
.KHR_external_fence_fd = device->has_syncobj_wait,
.KHR_external_fence = has_syncobj_wait,
.KHR_external_fence_fd = has_syncobj_wait,
.KHR_external_memory = true,
.KHR_external_memory_fd = true,
.KHR_external_semaphore = true,
@ -871,9 +874,6 @@ anv_physical_device_try_create(struct anv_instance *instance,
device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
device->has_syncobj_wait = intel_gem_supports_syncobj_wait(fd);
device->has_syncobj_wait_available =
anv_gem_get_drm_cap(fd, DRM_CAP_SYNCOBJ_TIMELINE) != 0;
/* Start with medium; sorted low to high */
const int priorities[] = {
@ -908,19 +908,20 @@ anv_physical_device_try_create(struct anv_instance *instance,
device->has_exec_timeline = false;
unsigned st_idx = 0;
if (device->has_syncobj_wait) {
device->sync_types[st_idx++] = &vk_drm_binary_syncobj_type;
} else {
device->sync_types[st_idx++] = &vk_drm_binary_syncobj_no_wait_type;
device->sync_types[st_idx++] = &anv_bo_sync_type;
}
if (device->has_syncobj_wait_available && device->has_exec_timeline) {
device->sync_types[st_idx++] = &vk_drm_timeline_syncobj_type;
} else {
device->sync_syncobj_type = vk_drm_syncobj_get_type(fd);
if (!device->has_exec_timeline)
device->sync_syncobj_type.features &= ~VK_SYNC_FEATURE_TIMELINE;
device->sync_types[st_idx++] = &device->sync_syncobj_type;
if (!(device->sync_syncobj_type.features & VK_SYNC_FEATURE_CPU_WAIT))
device->sync_types[st_idx++] = &anv_bo_sync_type;
if (!(device->sync_syncobj_type.features & VK_SYNC_FEATURE_TIMELINE)) {
device->sync_timeline_type = vk_sync_timeline_get_type(&anv_bo_sync_type);
device->sync_types[st_idx++] = &device->sync_timeline_type.sync;
}
device->sync_types[st_idx++] = NULL;
assert(st_idx <= ARRAY_SIZE(device->sync_types));
device->vk.supported_sync_types = device->sync_types;

View File

@ -318,17 +318,6 @@ anv_gem_get_param(int fd, uint32_t param)
return 0;
}
/* Query a DRM capability value via DRM_IOCTL_GET_CAP.
 *
 * The designated initializer zero-fills the rest of the struct, so
 * cap.value starts at 0; the ioctl's return value is not checked, which
 * means a failed ioctl reports 0, i.e. "capability not supported".
 */
uint64_t
anv_gem_get_drm_cap(int fd, uint32_t capability)
{
struct drm_get_cap cap = {
.capability = capability,
};
intel_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
return cap.value;
}
bool
anv_gem_has_context_priority(int fd, int priority)
{

View File

@ -137,12 +137,6 @@ anv_gem_get_param(int fd, uint32_t param)
unreachable("Unused");
}
/* Stub implementation: always reports 0 ("capability not supported").
 * NOTE(review): presumably this is the no-hardware/test gem backend
 * (the sibling anv_gem_get_param above is unreachable()) -- confirm
 * against the enclosing file.
 */
uint64_t
anv_gem_get_drm_cap(int fd, uint32_t capability)
{
return 0;
}
int
anv_gem_create_context(struct anv_device *device)
{

View File

@ -918,8 +918,6 @@ struct anv_physical_device {
int cmd_parser_version;
bool has_exec_async;
bool has_exec_capture;
bool has_syncobj_wait;
bool has_syncobj_wait_available;
int max_context_priority;
bool has_context_isolation;
bool has_mmap_offset;
@ -976,6 +974,7 @@ struct anv_physical_device {
uint8_t driver_uuid[VK_UUID_SIZE];
uint8_t device_uuid[VK_UUID_SIZE];
struct vk_sync_type sync_syncobj_type;
struct vk_sync_timeline_type sync_timeline_type;
const struct vk_sync_type * sync_types[4];
@ -1377,7 +1376,6 @@ int anv_gem_set_context_param(int fd, int context, uint32_t param,
int anv_gem_get_context_param(int fd, int context, uint32_t param,
uint64_t *value);
int anv_gem_get_param(int fd, uint32_t param);
uint64_t anv_gem_get_drm_cap(int fd, uint32_t capability);
int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
int anv_gem_context_get_reset_stats(int fd, int context,
uint32_t *active, uint32_t *pending);

View File

@ -39,14 +39,14 @@ to_drm_syncobj(struct vk_sync *sync)
}
static VkResult
vk_drm_binary_syncobj_init(struct vk_device *device,
struct vk_sync *sync,
uint64_t initial_value)
vk_drm_syncobj_init(struct vk_device *device,
struct vk_sync *sync,
uint64_t initial_value)
{
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
uint32_t flags = 0;
if (initial_value)
if (!(sync->flags & VK_SYNC_IS_TIMELINE) && initial_value)
flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
assert(device->drm_fd >= 0);
@ -56,35 +56,7 @@ vk_drm_binary_syncobj_init(struct vk_device *device,
"DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
}
return VK_SUCCESS;
}
/* Destroy the kernel syncobj backing this vk_sync.
 * Failure is only checked via assert(); ASSERTED keeps 'err' referenced
 * in release builds where the assert compiles away.
 */
static void
vk_drm_syncobj_finish(struct vk_device *device,
struct vk_sync *sync)
{
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0);
ASSERTED int err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
assert(err == 0);
}
static VkResult
vk_drm_timeline_syncobj_init(struct vk_device *device,
struct vk_sync *sync,
uint64_t initial_value)
{
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0);
int err = drmSyncobjCreate(device->drm_fd, 0, &sobj->syncobj);
if (err < 0) {
return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
"DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
}
if (initial_value) {
if ((sync->flags & VK_SYNC_IS_TIMELINE) && initial_value) {
err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj,
&initial_value, 1);
if (err < 0) {
@ -97,6 +69,17 @@ vk_drm_timeline_syncobj_init(struct vk_device *device,
return VK_SUCCESS;
}
void
vk_drm_syncobj_finish(struct vk_device *device,
struct vk_sync *sync)
{
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0);
ASSERTED int err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
assert(err == 0);
}
static VkResult
vk_drm_syncobj_signal(struct vk_device *device,
struct vk_sync *sync,
@ -337,57 +320,50 @@ vk_drm_syncobj_move(struct vk_device *device,
}
}
/* Removed by this commit: binary syncobj type for kernels that cannot
 * wait on syncobjs from the CPU.  Note the feature mask has no
 * CPU_WAIT/WAIT_ANY/WAIT_PENDING bits and no .wait_many hook -- only
 * GPU wait, CPU reset/signal, and opaque-fd / sync-file import/export.
 */
const struct vk_sync_type vk_drm_binary_syncobj_no_wait_type = {
.size = sizeof(struct vk_drm_syncobj),
.features = VK_SYNC_FEATURE_BINARY |
VK_SYNC_FEATURE_GPU_WAIT |
VK_SYNC_FEATURE_CPU_RESET |
VK_SYNC_FEATURE_CPU_SIGNAL,
.init = vk_drm_binary_syncobj_init,
.finish = vk_drm_syncobj_finish,
.signal = vk_drm_syncobj_signal,
.reset = vk_drm_syncobj_reset,
.move = vk_drm_syncobj_move,
.import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
.export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
.import_sync_file = vk_drm_syncobj_import_sync_file,
.export_sync_file = vk_drm_syncobj_export_sync_file,
};
/* Probe drm_fd and build a vk_sync_type describing what its DRM syncobjs
 * can do: create a throwaway signaled syncobj, detect CPU-wait support
 * with a zero-timeout drmSyncobjWait, detect timeline support via
 * DRM_CAP_SYNCOBJ_TIMELINE, then destroy the probe object.  Per the
 * commit message, drivers that cannot support a detected feature mask
 * the corresponding bits off the returned .features.
 *
 * NOTE(review): this span is a rendered diff hunk with no +/- markers;
 * the two `const struct vk_sync_type` tables embedded below are the
 * *removed* pre-commit definitions that the renderer interleaved with
 * the new function body.  As written this span is not compilable C.
 */
struct vk_sync_type
vk_drm_syncobj_get_type(int drm_fd)
{
uint32_t syncobj = 0;
/* Created pre-signaled so the zero-timeout wait probe below can succeed. */
int err = drmSyncobjCreate(drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj);
if (err < 0)
/* No syncobj support at all: report an empty feature mask. */
return (struct vk_sync_type) { .features = 0 };
/* Removed by this commit: fixed-feature binary syncobj type. */
const struct vk_sync_type vk_drm_binary_syncobj_type = {
.size = sizeof(struct vk_drm_syncobj),
.features = VK_SYNC_FEATURE_BINARY |
VK_SYNC_FEATURE_GPU_WAIT |
VK_SYNC_FEATURE_CPU_WAIT |
VK_SYNC_FEATURE_CPU_RESET |
VK_SYNC_FEATURE_CPU_SIGNAL |
VK_SYNC_FEATURE_WAIT_ANY |
VK_SYNC_FEATURE_WAIT_PENDING,
.init = vk_drm_binary_syncobj_init,
.finish = vk_drm_syncobj_finish,
.signal = vk_drm_syncobj_signal,
.reset = vk_drm_syncobj_reset,
.move = vk_drm_syncobj_move,
.wait_many = vk_drm_syncobj_wait_many,
.import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
.export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
.import_sync_file = vk_drm_syncobj_import_sync_file,
.export_sync_file = vk_drm_syncobj_export_sync_file,
};
/* Baseline feature set every drm syncobj supports; optional bits are
 * OR'ed in below as each capability is detected. */
struct vk_sync_type type = {
.size = sizeof(struct vk_drm_syncobj),
.features = VK_SYNC_FEATURE_BINARY |
VK_SYNC_FEATURE_GPU_WAIT |
VK_SYNC_FEATURE_CPU_RESET |
VK_SYNC_FEATURE_CPU_SIGNAL,
.init = vk_drm_syncobj_init,
.finish = vk_drm_syncobj_finish,
.signal = vk_drm_syncobj_signal,
.reset = vk_drm_syncobj_reset,
.move = vk_drm_syncobj_move,
.import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
.export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
.import_sync_file = vk_drm_syncobj_import_sync_file,
.export_sync_file = vk_drm_syncobj_export_sync_file,
};
/* Removed by this commit: fixed-feature timeline syncobj type. */
const struct vk_sync_type vk_drm_timeline_syncobj_type = {
.size = sizeof(struct vk_drm_syncobj),
.features = VK_SYNC_FEATURE_TIMELINE |
VK_SYNC_FEATURE_GPU_WAIT |
VK_SYNC_FEATURE_CPU_WAIT |
VK_SYNC_FEATURE_CPU_SIGNAL |
VK_SYNC_FEATURE_WAIT_ANY |
VK_SYNC_FEATURE_WAIT_PENDING,
.init = vk_drm_timeline_syncobj_init,
.finish = vk_drm_syncobj_finish,
.signal = vk_drm_syncobj_signal,
.get_value = vk_drm_syncobj_get_value,
.wait_many = vk_drm_syncobj_wait_many,
.import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
.export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
};
/* Zero-timeout wait on the already-signaled probe syncobj: success
 * means the kernel supports waiting on syncobjs from the CPU. */
err = drmSyncobjWait(drm_fd, &syncobj, 1, 0,
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
NULL /* first_signaled */);
if (err == 0) {
type.wait_many = vk_drm_syncobj_wait_many;
type.features |= VK_SYNC_FEATURE_CPU_WAIT |
VK_SYNC_FEATURE_WAIT_ANY;
}
/* Timeline syncobjs are advertised through a DRM capability rather
 * than probed with an ioctl. */
uint64_t cap;
err = drmGetCap(drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap);
if (err == 0 && cap != 0) {
type.get_value = vk_drm_syncobj_get_value;
type.features |= VK_SYNC_FEATURE_TIMELINE |
VK_SYNC_FEATURE_WAIT_PENDING;
}
/* Done probing; destroy the temporary syncobj. */
err = drmSyncobjDestroy(drm_fd, syncobj);
assert(err == 0);
return type;
}

View File

@ -31,21 +31,18 @@
extern "C" {
#endif
extern const struct vk_sync_type vk_drm_binary_syncobj_no_wait_type;
extern const struct vk_sync_type vk_drm_binary_syncobj_type;
extern const struct vk_sync_type vk_drm_timeline_syncobj_type;
/* A vk_sync implementation backed by a kernel DRM syncobj. */
struct vk_drm_syncobj {
struct vk_sync base; /* embedded vk_sync; container_of() recovers the wrapper */
uint32_t syncobj; /* kernel syncobj handle on the device's DRM fd */
};
void vk_drm_syncobj_finish(struct vk_device *device,
struct vk_sync *sync);
static inline bool
vk_sync_type_is_drm_syncobj(const struct vk_sync_type *type)
{
return type == &vk_drm_binary_syncobj_no_wait_type ||
type == &vk_drm_binary_syncobj_type ||
type == &vk_drm_timeline_syncobj_type;
return type->finish == vk_drm_syncobj_finish;
}
static inline struct vk_drm_syncobj *
@ -57,6 +54,8 @@ vk_sync_as_drm_syncobj(struct vk_sync *sync)
return container_of(sync, struct vk_drm_syncobj, base);
}
struct vk_sync_type vk_drm_syncobj_get_type(int drm_fd);
#ifdef __cplusplus
}
#endif