turnip: Replace fd_bo with tu_bo

(olv, after rebase) remove inc_drm_uapi
This commit is contained in:
Chad Versace 2018-11-06 21:26:45 -07:00 committed by Chia-I Wu
parent eb16ec715f
commit 359e9016c5
4 changed files with 211 additions and 10 deletions

View File

@ -54,6 +54,7 @@ libtu_files = files(
'tu_descriptor_set.c',
'tu_descriptor_set.h',
'tu_formats.c',
'tu_gem.c',
'tu_image.c',
'tu_meta_blit.c',
'tu_meta_buffer.c',

View File

@ -34,9 +34,11 @@
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>

#include <msm_drm.h>
#include <xf86drm.h>
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
@ -66,6 +68,75 @@ tu_get_device_uuid(void *uuid)
stub();
}
/**
 * Allocate a new GEM BO of at least `size` bytes and initialize `*bo`.
 *
 * On failure, `*bo` is left untouched and
 * VK_ERROR_OUT_OF_DEVICE_MEMORY is returned.
 */
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   /* Use the local handle: `*bo` is not initialized yet, so reading
    * bo->gem_handle here would be an uninitialized read. The info helpers
    * signal failure with UINT64_MAX, not 0.
    */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (offset == UINT64_MAX)
      goto fail_info;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (iova == UINT64_MAX)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
/**
 * Map the BO into CPU address space, caching the mapping in bo->map.
 * Idempotent: returns immediately if the BO is already mapped.
 */
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   /* Store the mapping; the original dropped `map` on the floor, leaking the
    * mapping and leaving bo->map NULL for callers (and remapping every call).
    */
   bo->map = map;

   return VK_SUCCESS;
}
/**
 * Release a BO: tear down its CPU mapping (if one exists) and close the
 * underlying GEM handle.
 */
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   /* Finishing a BO that was never allocated is a caller bug. */
   assert(bo->gem_handle);

   if (bo->map != NULL)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
struct tu_instance *instance,
@ -1220,6 +1291,7 @@ tu_alloc_memory(struct tu_device *device,
VkDeviceMemory *pMem)
{
struct tu_device_memory *mem;
VkResult result;
assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
@ -1237,13 +1309,12 @@ tu_alloc_memory(struct tu_device *device,
if (mem == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
mem->bo = fd_bo_new(device->physical_device->drm_device, pAllocateInfo->allocationSize,
DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
DRM_FREEDRENO_GEM_TYPE_KMEM);
if (!mem->bo) {
result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
if (!result) {
vk_free2(&device->alloc, pAllocator, mem);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
return result;
}
mem->size = pAllocateInfo->allocationSize;
mem->type_index = pAllocateInfo->memoryTypeIndex;
@ -1276,9 +1347,7 @@ tu_FreeMemory(VkDevice _device,
if (mem == NULL)
return;
if (mem->bo)
fd_bo_del(mem->bo);
tu_bo_finish(device, &mem->bo);
vk_free2(&device->alloc, pAllocator, mem);
}
@ -1292,6 +1361,7 @@ tu_MapMemory(VkDevice _device,
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_device_memory, mem, _memory);
VkResult result;
if (mem == NULL) {
*ppData = NULL;
@ -1301,7 +1371,10 @@ tu_MapMemory(VkDevice _device,
if (mem->user_ptr) {
*ppData = mem->user_ptr;
} else if (!mem->map){
*ppData = mem->map = fd_bo_map(mem->bo);
result = tu_bo_map(device, &mem->bo);
if (result != VK_SUCCESS)
return result;
mem->map = mem->bo.map;
} else
*ppData = mem->map;

View File

@ -0,0 +1,102 @@
/*
* Copyright © 2018 Google, Inc.
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stdint.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <msm_drm.h>
#include "tu_private.h"
/* ioctl wrapper that transparently restarts calls interrupted by a signal
 * (EINTR) or asked to retry (EAGAIN). Returns the final ioctl result.
 */
static int
tu_ioctl(int fd, unsigned long request, void *arg)
{
   int ret = ioctl(fd, request, arg);
   while (ret == -1 && (errno == EINTR || errno == EAGAIN))
      ret = ioctl(fd, request, arg);

   return ret;
}
/**
* Return gem handle on success. Return 0 on failure.
*/
uint32_t
tu_gem_new(struct tu_device *dev, uint64_t size, uint32_t flags)
{
struct drm_msm_gem_new req = {
.size = size,
.flags = flags,
};
int ret = tu_ioctl(dev->physical_device->local_fd, DRM_MSM_GEM_NEW, &req);
if (ret)
return 0;
return req.handle;
}
/* Close a GEM handle. Failure is ignored: there is nothing useful to do
 * about a failed close here.
 */
void
tu_gem_close(struct tu_device *dev, uint32_t gem_handle)
{
   struct drm_gem_close req = { .handle = gem_handle };

   (void) tu_ioctl(dev->physical_device->local_fd, DRM_IOCTL_GEM_CLOSE, &req);
}
/** Return UINT64_MAX on error. */
static uint64_t
tu_gem_info(struct tu_device *dev, uint32_t gem_handle, uint32_t flags)
{
struct drm_msm_gem_info req = {
.handle = gem_handle,
.flags = flags,
};
int ret = tu_ioctl(dev->physical_device->local_fd, DRM_MSM_GEM_INFO, &req);
if (ret == -1)
return UINT64_MAX;
return req.offset;
}
/**
 * Return the mmap offset of the GEM object (no query flags set), or
 * UINT64_MAX on error.
 */
uint64_t
tu_gem_info_offset(struct tu_device *dev, uint32_t gem_handle)
{
   const uint32_t no_flags = 0;

   return tu_gem_info(dev, gem_handle, no_flags);
}
/**
 * Return the GPU address (iova) of the GEM object, or UINT64_MAX on error.
 */
uint64_t
tu_gem_info_iova(struct tu_device *dev, uint32_t gem_handle)
{
   uint64_t iova = tu_gem_info(dev, gem_handle, MSM_INFO_IOVA);

   return iova;
}

View File

@ -440,9 +440,25 @@ struct tu_device
struct tu_bo_list bo_list;
};
/* A GPU buffer object allocated through the kernel GEM interface. */
struct tu_bo
{
uint32_t gem_handle; /* kernel GEM handle; 0 means no allocation */
uint64_t size; /* size in bytes requested at allocation */
uint64_t offset; /* offset passed to mmap() on the device fd */
uint64_t iova; /* GPU virtual address of the BO */
void *map; /* cached CPU mapping, or NULL if not mapped */
};
/* Allocate a new GEM BO of at least `size` bytes and fill in `*bo`. */
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
/* Unmap (if mapped) and close the BO's GEM handle. */
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
/* Map the BO into CPU address space; no-op if already mapped. */
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
struct tu_device_memory
{
struct fd_bo *bo;
struct tu_bo bo;
VkDeviceSize size;
/* for dedicated allocations */
@ -1169,6 +1185,15 @@ struct tu_nir_compiler_options;
struct radeon_winsys_sem;
/* Allocate a GEM BO; returns the gem handle, or 0 on failure. */
uint32_t
tu_gem_new(struct tu_device *dev, uint64_t size, uint32_t flags);
/* Close a gem handle; failures are ignored. */
void
tu_gem_close(struct tu_device *dev, uint32_t gem_handle);
/* Query the BO's mmap offset; returns UINT64_MAX on error. */
uint64_t
tu_gem_info_offset(struct tu_device *dev, uint32_t gem_handle);
/* Query the BO's GPU address; returns UINT64_MAX on error. */
uint64_t
tu_gem_info_iova(struct tu_device *dev, uint32_t gem_handle);
#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType) \
\
static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \