vkd3d: Remove d3d12_heap and old resource creation functions.

Signed-off-by: Philip Rebohle <philip.rebohle@tu-dortmund.de>
This commit is contained in:
Philip Rebohle 2021-02-04 17:34:22 +01:00 committed by Hans-Kristian Arntzen
parent 9792b02b26
commit 8826f3c5bc
2 changed files with 0 additions and 893 deletions

View File

@ -207,33 +207,6 @@ static HRESULT vkd3d_import_host_memory(struct d3d12_device *device,
type_mask, &import_info, vk_memory, vk_memory_type);
}
/* Allocates a plain VkDeviceMemory block for a heap with no backing buffer
 * or image, enabling buffer device addresses when buffers are permitted.
 * Returns S_OK on success, or the failing HRESULT from memory selection or
 * allocation. */
static HRESULT vkd3d_allocate_device_memory(struct d3d12_device *device,
        const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags,
        VkDeviceSize size, VkDeviceMemory *vk_memory, uint32_t *vk_memory_type)
{
    VkMemoryAllocateFlagsInfo flags_info;
    VkMemoryPropertyFlags type_flags;
    HRESULT hr;

    memset(&flags_info, 0, sizeof(flags_info));
    flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;

    /* Memory that may back buffers needs a device address when the feature
     * is available. */
    if (device->device_info.buffer_device_address_features.bufferDeviceAddress
            && !(heap_flags & D3D12_HEAP_FLAG_DENY_BUFFERS))
        flags_info.flags |= VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;

    if (FAILED(hr = vkd3d_select_memory_flags(device, heap_properties, &type_flags)))
        return hr;

    if (FAILED(hr = vkd3d_allocate_memory(device, size, type_flags,
            vkd3d_select_memory_types(device, heap_properties, heap_flags),
            &flags_info, vk_memory, vk_memory_type)))
        return hr;

    return S_OK;
}
HRESULT vkd3d_allocate_buffer_memory(struct d3d12_device *device, VkBuffer vk_buffer, void *host_memory,
const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags,
VkDeviceMemory *vk_memory, uint32_t *vk_memory_type, VkDeviceSize *vk_memory_size)
@ -397,525 +370,6 @@ static HRESULT vkd3d_allocate_image_memory(struct d3d12_device *device, VkImage
return S_OK;
}
/* ID3D12Heap */
/* Recovers the d3d12_heap implementation pointer from an interface pointer. */
static inline struct d3d12_heap *impl_from_ID3D12Heap(d3d12_heap_iface *iface)
{
return CONTAINING_RECORD(iface, struct d3d12_heap, ID3D12Heap_iface);
}
/* Standard COM QueryInterface; the heap exposes every interface in the
 * ID3D12Heap1 inheritance chain plus IUnknown. */
static HRESULT STDMETHODCALLTYPE d3d12_heap_QueryInterface(d3d12_heap_iface *iface,
        REFIID iid, void **object)
{
    bool supported;

    TRACE("iface %p, iid %s, object %p.\n", iface, debugstr_guid(iid), object);

    supported = IsEqualGUID(iid, &IID_IUnknown)
            || IsEqualGUID(iid, &IID_ID3D12Object)
            || IsEqualGUID(iid, &IID_ID3D12DeviceChild)
            || IsEqualGUID(iid, &IID_ID3D12Pageable)
            || IsEqualGUID(iid, &IID_ID3D12Heap)
            || IsEqualGUID(iid, &IID_ID3D12Heap1);

    if (supported)
    {
        ID3D12Heap_AddRef(iface);
        *object = iface;
        return S_OK;
    }

    WARN("%s not implemented, returning E_NOINTERFACE.\n", debugstr_guid(iid));
    *object = NULL;
    return E_NOINTERFACE;
}
/* COM AddRef.  Private heaps are owned by a committed resource and must
 * never be referenced externally, hence the assert. */
static ULONG STDMETHODCALLTYPE d3d12_heap_AddRef(d3d12_heap_iface *iface)
{
    struct d3d12_heap *heap = impl_from_ID3D12Heap(iface);
    ULONG new_count = InterlockedIncrement(&heap->refcount);

    TRACE("%p increasing refcount to %u.\n", heap, new_count);
    assert(!heap->is_private);

    return new_count;
}
static ULONG d3d12_resource_decref(struct d3d12_resource *resource);
/* Releases everything the heap owns without freeing the heap structure:
 * drops the internal reference on the omnipotent buffer resource (if any),
 * frees the backing VkDeviceMemory, and releases the device reference held
 * by non-private heaps.  Also used on partially initialized heaps from the
 * d3d12_heap_init* failure paths. */
static void d3d12_heap_cleanup(struct d3d12_heap *heap)
{
struct d3d12_device *device = heap->device;
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
if (heap->buffer_resource)
d3d12_resource_decref(heap->buffer_resource);
VK_CALL(vkFreeMemory(device->vk_device, heap->vk_memory, NULL));
/* Private heaps never took a device reference in d3d12_heap_init,
 * so there is nothing to release. */
if (heap->is_private)
device = NULL;
if (device)
d3d12_device_release(device);
}
/* Fully destroys a heap: releases owned Vulkan/device resources, tears down
 * the private data store and frees the object itself. */
static void d3d12_heap_destroy(struct d3d12_heap *heap)
{
TRACE("Destroying heap %p.\n", heap);
d3d12_heap_cleanup(heap);
vkd3d_private_store_destroy(&heap->private_store);
vkd3d_free(heap);
}
/* COM Release; destroys the heap when the last reference is dropped. */
static ULONG STDMETHODCALLTYPE d3d12_heap_Release(d3d12_heap_iface *iface)
{
    struct d3d12_heap *heap = impl_from_ID3D12Heap(iface);
    ULONG new_count = InterlockedDecrement(&heap->refcount);

    TRACE("%p decreasing refcount to %u.\n", heap, new_count);

    if (new_count == 0)
        d3d12_heap_destroy(heap);

    return new_count;
}
/* ID3D12Object::GetPrivateData — reads from the heap's private data store. */
static HRESULT STDMETHODCALLTYPE d3d12_heap_GetPrivateData(d3d12_heap_iface *iface,
        REFGUID guid, UINT *data_size, void *data)
{
    struct d3d12_heap *object = impl_from_ID3D12Heap(iface);

    TRACE("iface %p, guid %s, data_size %p, data %p.\n", iface, debugstr_guid(guid), data_size, data);

    return vkd3d_get_private_data(&object->private_store, guid, data_size, data);
}
/* ID3D12Object::SetPrivateData — stores a blob in the private data store. */
static HRESULT STDMETHODCALLTYPE d3d12_heap_SetPrivateData(d3d12_heap_iface *iface,
        REFGUID guid, UINT data_size, const void *data)
{
    struct d3d12_heap *object = impl_from_ID3D12Heap(iface);

    TRACE("iface %p, guid %s, data_size %u, data %p.\n", iface, debugstr_guid(guid), data_size, data);

    return vkd3d_set_private_data(&object->private_store, guid, data_size, data);
}
/* ID3D12Object::SetPrivateDataInterface — stores a COM interface pointer. */
static HRESULT STDMETHODCALLTYPE d3d12_heap_SetPrivateDataInterface(d3d12_heap_iface *iface,
        REFGUID guid, const IUnknown *data)
{
    struct d3d12_heap *object = impl_from_ID3D12Heap(iface);

    TRACE("iface %p, guid %s, data %p.\n", iface, debugstr_guid(guid), data);

    return vkd3d_set_private_data_interface(&object->private_store, guid, data);
}
/* ID3D12Object::SetName — forwards the debug name to the backing
 * VkDeviceMemory object. */
static HRESULT STDMETHODCALLTYPE d3d12_heap_SetName(d3d12_heap_iface *iface, const WCHAR *name)
{
    struct d3d12_heap *object = impl_from_ID3D12Heap(iface);

    TRACE("iface %p, name %s.\n", iface, debugstr_w(name));

    return vkd3d_set_vk_object_name(object->device, (uint64_t)object->vk_memory,
            VK_OBJECT_TYPE_DEVICE_MEMORY, name);
}
/* ID3D12DeviceChild::GetDevice — queries the owning device for `iid`. */
static HRESULT STDMETHODCALLTYPE d3d12_heap_GetDevice(d3d12_heap_iface *iface, REFIID iid, void **device)
{
    struct d3d12_heap *object = impl_from_ID3D12Heap(iface);

    TRACE("iface %p, iid %s, device %p.\n", iface, debugstr_guid(iid), device);

    return d3d12_device_query_interface(object->device, iid, device);
}
/* ID3D12Heap::GetDesc — copies the (normalized) heap description into
 * caller-provided storage and returns it. */
static D3D12_HEAP_DESC * STDMETHODCALLTYPE d3d12_heap_GetDesc(d3d12_heap_iface *iface,
        D3D12_HEAP_DESC *desc)
{
    struct d3d12_heap *object = impl_from_ID3D12Heap(iface);

    TRACE("iface %p, desc %p.\n", iface, desc);

    *desc = object->desc;
    return desc;
}
/* ID3D12Heap1::GetProtectedResourceSession — not implemented; protected
 * resource sessions are unsupported. */
static HRESULT STDMETHODCALLTYPE d3d12_heap_GetProtectedResourceSession(d3d12_heap_iface *iface,
        REFIID iid, void **protected_session)
{
    /* Fix: the FIXME message was missing its trailing newline, unlike every
     * other debug message in this file. */
    FIXME("iface %p, iid %s, protected_session %p stub!\n", iface, debugstr_guid(iid), protected_session);

    return E_NOTIMPL;
}
/* Vtable for d3d12_heap; ID3D12Heap1 extends ID3D12Heap with
 * GetProtectedResourceSession, so a single vtable serves both. */
static CONST_VTBL struct ID3D12Heap1Vtbl d3d12_heap_vtbl =
{
/* IUnknown methods */
d3d12_heap_QueryInterface,
d3d12_heap_AddRef,
d3d12_heap_Release,
/* ID3D12Object methods */
d3d12_heap_GetPrivateData,
d3d12_heap_SetPrivateData,
d3d12_heap_SetPrivateDataInterface,
d3d12_heap_SetName,
/* ID3D12DeviceChild methods */
d3d12_heap_GetDevice,
/* ID3D12Heap methods */
d3d12_heap_GetDesc,
/* ID3D12Heap1 methods */
d3d12_heap_GetProtectedResourceSession,
};
/* Converts an externally supplied ID3D12Heap1 pointer to the implementation
 * type; asserts that the object really is ours.  NULL maps to NULL. */
static struct d3d12_heap *unsafe_impl_from_ID3D12Heap1(ID3D12Heap1 *iface)
{
    if (iface)
    {
        assert(iface->lpVtbl == &d3d12_heap_vtbl);
        return impl_from_ID3D12Heap(iface);
    }
    return NULL;
}
/* ID3D12Heap variant; the cast is safe because d3d12_heap always installs
 * the ID3D12Heap1 vtable (see d3d12_heap_init). */
struct d3d12_heap *unsafe_impl_from_ID3D12Heap(ID3D12Heap *iface)
{
return unsafe_impl_from_ID3D12Heap1((ID3D12Heap1 *)iface);
}
/* Validates a D3D12_HEAP_DESC.  When `resource` is non-NULL the heap is a
 * private heap for a committed resource: its size is derived later, and
 * ALLOW_DISPLAY is permitted.  Returns E_INVALIDARG on invalid descs. */
static HRESULT validate_heap_desc(const D3D12_HEAP_DESC *desc, const struct d3d12_resource *resource)
{
    const bool is_committed = !!resource;

    if (!is_committed && !desc->SizeInBytes)
    {
        WARN("Invalid size %"PRIu64".\n", desc->SizeInBytes);
        return E_INVALIDARG;
    }

    if (desc->Alignment != D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT
            && desc->Alignment != D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT)
    {
        WARN("Invalid alignment %"PRIu64".\n", desc->Alignment);
        return E_INVALIDARG;
    }

    if (!is_committed && (desc->Flags & D3D12_HEAP_FLAG_ALLOW_DISPLAY))
    {
        WARN("D3D12_HEAP_FLAG_ALLOW_DISPLAY is only for committed resources.\n");
        return E_INVALIDARG;
    }

    return S_OK;
}
/* Checks that a heap permits placing a resource of the given category on it
 * (deny flags), and that cross-adapter requirements are consistent. */
static HRESULT validate_placed_resource_heap(struct d3d12_heap *heap, const D3D12_RESOURCE_DESC *resource_desc)
{
    D3D12_HEAP_FLAGS denied;

    /* Map the resource to the deny flag covering its category. */
    if (resource_desc->Dimension == D3D12_RESOURCE_DIMENSION_BUFFER)
        denied = D3D12_HEAP_FLAG_DENY_BUFFERS;
    else if (resource_desc->Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL))
        denied = D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES;
    else
        denied = D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES;

    if (heap->desc.Flags & denied)
    {
        WARN("Cannot create placed resource on heap that denies resource category %#x.\n", denied);
        return E_INVALIDARG;
    }

    if ((heap->desc.Flags & D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER)
            && !(resource_desc->Flags & D3D12_RESOURCE_FLAG_ALLOW_CROSS_ADAPTER))
    {
        ERR("Must declare ALLOW_CROSS_ADAPTER resource flag when heap is cross adapter.\n");
        return E_INVALIDARG;
    }

    return S_OK;
}
static HRESULT d3d12_resource_create(struct d3d12_device *device,
const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags,
const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, bool placed,
struct d3d12_resource **resource);
/* Creates the heap's internal "omnipotent" buffer resource spanning the
 * whole heap.  Placed buffer resources later alias into it at an offset
 * instead of owning their own VkBuffer (see vkd3d_bind_heap_memory). */
static HRESULT d3d12_heap_init_omnipotent_buffer(struct d3d12_heap *heap, struct d3d12_device *device,
const D3D12_HEAP_DESC *desc)
{
D3D12_RESOURCE_STATES initial_resource_state;
D3D12_RESOURCE_DESC resource_desc;
HRESULT hr;
/* Create a single omnipotent buffer which fills the entire heap.
* Whenever we place buffer resources on this heap, we'll just offset this VkBuffer.
* This allows us to keep VA space somewhat sane, and keeps number of (limited) VA allocations down.
* One possible downside is that the buffer might be slightly slower to access,
* but D3D12 has very lenient usage flags for buffers. */
memset(&resource_desc, 0, sizeof(resource_desc));
resource_desc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
resource_desc.Width = desc->SizeInBytes;
resource_desc.Height = 1;
resource_desc.DepthOrArraySize = 1;
resource_desc.MipLevels = 1;
resource_desc.SampleDesc.Count = 1;
resource_desc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
if (heap->desc.Flags & D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER)
resource_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_CROSS_ADAPTER;
/* Pick an initial state (and UAV capability) matching the heap type. */
switch (desc->Properties.Type)
{
case D3D12_HEAP_TYPE_UPLOAD:
initial_resource_state = D3D12_RESOURCE_STATE_GENERIC_READ;
break;
case D3D12_HEAP_TYPE_READBACK:
initial_resource_state = D3D12_RESOURCE_STATE_COPY_DEST;
break;
default:
/* Upload and readback heaps do not allow UAV access, only enable this flag for other heaps. */
resource_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
initial_resource_state = D3D12_RESOURCE_STATE_COMMON;
break;
}
if (FAILED(hr = d3d12_resource_create(device, &desc->Properties, desc->Flags,
&resource_desc, initial_resource_state,
NULL, false, &heap->buffer_resource)))
return hr;
/* This internal resource should not own a reference on the device.
* d3d12_resource_create takes a reference on the device. */
d3d12_device_release(device);
return S_OK;
}
static HRESULT d3d12_heap_allocate_storage(struct d3d12_heap *heap,
struct d3d12_device *device, const struct d3d12_resource *resource, void *host_memory)
{
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
const VkMemoryType *memory_type;
VkDeviceSize vk_memory_size;
VkResult vr;
HRESULT hr;
if (resource)
{
if (d3d12_resource_is_buffer(resource))
{
hr = vkd3d_allocate_buffer_memory(device, resource->vk_buffer, NULL,
&heap->desc.Properties, heap->desc.Flags | D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS,
&heap->vk_memory, &heap->vk_memory_type, &vk_memory_size);
}
else
{
hr = vkd3d_allocate_image_memory(device, resource->vk_image,
&heap->desc.Properties, heap->desc.Flags,
&heap->vk_memory, &heap->vk_memory_type, &vk_memory_size);
}
heap->desc.SizeInBytes = vk_memory_size;
}
else if (heap->buffer_resource)
{
hr = vkd3d_allocate_buffer_memory(device, heap->buffer_resource->vk_buffer, host_memory,
&heap->desc.Properties, heap->desc.Flags,
&heap->vk_memory, &heap->vk_memory_type, &vk_memory_size);
}
else
{
hr = vkd3d_allocate_device_memory(device, &heap->desc.Properties,
heap->desc.Flags, heap->desc.SizeInBytes, &heap->vk_memory,
&heap->vk_memory_type);
}
if (FAILED(hr))
return hr;
memory_type = &device->memory_properties.memoryTypes[heap->vk_memory_type];
if (memory_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
{
if ((vr = VK_CALL(vkMapMemory(device->vk_device,
heap->vk_memory, 0, VK_WHOLE_SIZE, 0, &heap->map_ptr))) < 0)
{
ERR("Failed to map memory, vr %d.\n", vr);
return hresult_from_vk_result(hr);
}
if ((heap->desc.Flags & D3D12_HEAP_FLAG_SHARED) == 0)
{
/* Zero private host-visible memory */
memset(heap->map_ptr, 0, heap->desc.SizeInBytes);
}
if (!(memory_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
{
VkMappedMemoryRange mapped_range;
mapped_range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mapped_range.pNext = NULL;
mapped_range.memory = heap->vk_memory;
mapped_range.offset = 0;
mapped_range.size = VK_WHOLE_SIZE;
VK_CALL(vkFlushMappedMemoryRanges(device->vk_device, 1, &mapped_range));
}
}
return S_OK;
}
/* Initializes a heap object in place.  A non-NULL `resource` makes this a
 * private heap dedicated to a committed resource (no device reference, no
 * external refcounting).  On failure, partially acquired resources are
 * released via d3d12_heap_cleanup and the error is returned. */
static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
struct d3d12_device *device, const D3D12_HEAP_DESC *desc, const struct d3d12_resource *resource)
{
bool buffers_allowed;
HRESULT hr;
memset(heap, 0, sizeof(*heap));
heap->ID3D12Heap_iface.lpVtbl = &d3d12_heap_vtbl;
heap->refcount = 1;
heap->device = device;
heap->is_private = !!resource;
heap->desc = *desc;
heap->map_ptr = NULL;
heap->buffer_resource = NULL;
/* Private heaps live and die with their committed resource and therefore
 * do not hold a device reference (see d3d12_heap_cleanup). */
if (!heap->is_private)
d3d12_device_add_ref(heap->device);
/* Normalize node masks; multi-node operation is not supported. */
if (!heap->desc.Properties.CreationNodeMask)
heap->desc.Properties.CreationNodeMask = 1;
if (!heap->desc.Properties.VisibleNodeMask)
heap->desc.Properties.VisibleNodeMask = 1;
debug_ignored_node_mask(heap->desc.Properties.CreationNodeMask);
debug_ignored_node_mask(heap->desc.Properties.VisibleNodeMask);
if (!heap->desc.Alignment)
heap->desc.Alignment = D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
if (FAILED(hr = validate_heap_desc(&heap->desc, resource)))
{
d3d12_heap_cleanup(heap);
return hr;
}
/* Heaps that may hold buffers get a single internal buffer covering the
 * whole allocation; placed buffers alias into it. */
buffers_allowed = !(heap->desc.Flags & D3D12_HEAP_FLAG_DENY_BUFFERS);
if (buffers_allowed && !resource)
{
if (FAILED(hr = d3d12_heap_init_omnipotent_buffer(heap, device, desc)))
{
d3d12_heap_cleanup(heap);
return hr;
}
}
if (FAILED(hr = vkd3d_private_store_init(&heap->private_store)))
{
d3d12_heap_cleanup(heap);
return hr;
}
if (FAILED(hr = d3d12_heap_allocate_storage(heap, device, resource, NULL)))
{
d3d12_heap_cleanup(heap);
return hr;
}
return S_OK;
}
/* Initializes a buffer-only heap that wraps application-provided host
 * memory (ID3D12Device3::OpenExistingHeapFromAddress-style usage).  Without
 * VK_EXT_external_memory_host the address is ignored and a private
 * allocation is used instead; the heap then no longer aliases the caller's
 * memory, which the warning below calls out. */
static HRESULT d3d12_heap_init_from_host_pointer(struct d3d12_heap *heap,
struct d3d12_device *device, void *address, size_t size)
{
HRESULT hr;
if (!device->vk_info.EXT_external_memory_host)
{
WARN("VK_EXT_external_memory_host is not supported. Falling back to a private allocation. This will likely break debug code.\n");
address = NULL;
}
memset(heap, 0, sizeof(*heap));
heap->ID3D12Heap_iface.lpVtbl = &d3d12_heap_vtbl;
heap->refcount = 1;
heap->device = device;
/* Synthesize a heap desc for a host-visible, buffer-only custom heap;
 * SHARED flags are only set when we really import the host pointer. */
heap->desc.Alignment = D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT;
heap->desc.Flags = D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS |
(address ? (D3D12_HEAP_FLAG_SHARED | D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER) : 0);
heap->desc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_WRITE_BACK;
heap->desc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_L0;
heap->desc.Properties.Type = D3D12_HEAP_TYPE_CUSTOM;
heap->desc.Properties.CreationNodeMask = 1;
heap->desc.Properties.VisibleNodeMask = 1;
heap->desc.SizeInBytes = size;
d3d12_device_add_ref(heap->device);
if (FAILED(hr = d3d12_heap_init_omnipotent_buffer(heap, device, &heap->desc)))
{
d3d12_heap_cleanup(heap);
return hr;
}
if (FAILED(hr = vkd3d_private_store_init(&heap->private_store)))
{
d3d12_heap_cleanup(heap);
return hr;
}
if (FAILED(hr = d3d12_heap_allocate_storage(heap, device, NULL, address)))
{
d3d12_heap_cleanup(heap);
return hr;
}
return S_OK;
}
/* Allocates and initializes a heap.  A non-NULL `resource` creates a private
 * heap dedicated to that committed resource.  On failure the partially
 * constructed object is freed and the error returned. */
HRESULT d3d12_heap_create(struct d3d12_device *device, const D3D12_HEAP_DESC *desc,
        const struct d3d12_resource *resource, struct d3d12_heap **heap)
{
    struct d3d12_heap *object;
    HRESULT hr;

    if (!(object = vkd3d_malloc(sizeof(*object))))
        return E_OUTOFMEMORY;

    hr = d3d12_heap_init(object, device, desc, resource);
    if (FAILED(hr))
    {
        vkd3d_free(object);
        return hr;
    }

    TRACE("Created %s %p.\n", object->is_private ? "private heap" : "heap", object);

    *heap = object;
    return S_OK;
}
/* Allocates and initializes a heap wrapping application host memory. */
HRESULT d3d12_heap_create_from_host_pointer(struct d3d12_device *device, void *address, size_t size,
        struct d3d12_heap **heap)
{
    struct d3d12_heap *object;
    HRESULT hr;

    if (!(object = vkd3d_malloc(sizeof(*object))))
        return E_OUTOFMEMORY;

    hr = d3d12_heap_init_from_host_pointer(object, device, address, size);
    if (FAILED(hr))
    {
        vkd3d_free(object);
        return hr;
    }

    *heap = object;
    return S_OK;
}
static VkImageType vk_image_type_from_d3d12_resource_dimension(D3D12_RESOURCE_DIMENSION dimension)
{
switch (dimension)
@ -1980,42 +1434,6 @@ static void d3d12_resource_get_tiling(struct d3d12_device *device, struct d3d12_
static void d3d12_resource_destroy_2(struct d3d12_resource *resource, struct d3d12_device *device);
/* Releases the Vulkan objects a resource owns.  What is owned depends on
 * the resource flags: external resources own nothing, sparse resources own
 * tile metadata, placed buffers borrow their VkBuffer from the heap's
 * omnipotent buffer, and committed resources own their dedicated heap. */
static void d3d12_resource_destroy(struct d3d12_resource *resource, struct d3d12_device *device)
{
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
vkd3d_view_map_destroy(&resource->view_map, resource->device);
#ifdef VKD3D_ENABLE_DESCRIPTOR_QA
vkd3d_descriptor_debug_unregister_cookie(resource->cookie);
#endif
/* External resources wrap objects owned by the application. */
if (resource->flags & VKD3D_RESOURCE_EXTERNAL)
return;
if (resource->flags & VKD3D_RESOURCE_SPARSE)
{
VK_CALL(vkFreeMemory(device->vk_device, resource->sparse.vk_metadata_memory, NULL));
vkd3d_free(resource->sparse.tiles);
vkd3d_free(resource->sparse.tilings);
}
/* Placed buffers alias the heap's omnipotent buffer and own neither a
 * VkBuffer nor a GPU VA allocation. */
if (!(resource->flags & VKD3D_RESOURCE_PLACED_BUFFER))
{
if (resource->gpu_address)
vkd3d_gpu_va_allocator_free(&device->gpu_va_allocator, resource->gpu_address);
if (d3d12_resource_is_buffer(resource))
VK_CALL(vkDestroyBuffer(device->vk_device, resource->vk_buffer, NULL));
else
VK_CALL(vkDestroyImage(device->vk_device, resource->vk_image, NULL));
}
/* Committed resources own their implicitly created private heap. */
if (resource->flags & VKD3D_RESOURCE_DEDICATED_HEAP)
d3d12_heap_destroy(resource->heap);
}
static ULONG d3d12_resource_incref(struct d3d12_resource *resource)
{
ULONG refcount = InterlockedIncrement(&resource->internal_refcount);
@ -3068,107 +2486,6 @@ static HRESULT d3d12_resource_init_sparse_info(struct d3d12_resource *resource,
return S_OK;
}
/* Initializes a resource object in place: validates the creation info,
 * sets up bookkeeping (refcounts, cookie, view map), and creates the
 * VkBuffer/VkImage unless the resource is a placed buffer (which aliases
 * the heap's buffer later, in vkd3d_bind_heap_memory).  NULL heap
 * properties mark the resource as sparse/reserved.  Failure paths after
 * object creation tear down via d3d12_resource_destroy. */
static HRESULT d3d12_resource_init(struct d3d12_resource *resource, struct d3d12_device *device,
const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags,
const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, bool placed)
{
HRESULT hr;
if (FAILED(hr = d3d12_resource_validate_create_info(desc,
heap_properties, initial_state, optimized_clear_value, device)))
return hr;
resource->ID3D12Resource_iface.lpVtbl = &d3d12_resource_vtbl;
resource->refcount = 1;
resource->internal_refcount = 1;
resource->desc = *desc;
resource->cookie = vkd3d_allocate_cookie();
#ifdef VKD3D_ENABLE_DESCRIPTOR_QA
vkd3d_descriptor_debug_register_resource_cookie(resource->cookie, desc);
#endif
if (FAILED(hr = vkd3d_view_map_init(&resource->view_map)))
return hr;
#ifdef VKD3D_ENABLE_DESCRIPTOR_QA
resource->view_map.resource_cookie = resource->cookie;
#endif
resource->gpu_address = 0;
resource->flags = 0;
resource->initial_layout_transition = 0;
resource->common_layout = VK_IMAGE_LAYOUT_UNDEFINED;
if (placed && d3d12_resource_is_buffer(resource))
resource->flags |= VKD3D_RESOURCE_PLACED_BUFFER;
/* Reserved (tiled) resources are created without heap properties. */
if (!heap_properties)
resource->flags |= VKD3D_RESOURCE_SPARSE;
resource->format = vkd3d_format_from_d3d12_resource_desc(device, desc, 0);
switch (desc->Dimension)
{
case D3D12_RESOURCE_DIMENSION_BUFFER:
/* We'll inherit a VkBuffer reference from the heap with an implied offset. */
if (placed)
{
resource->vk_buffer = VK_NULL_HANDLE;
break;
}
if (FAILED(hr = vkd3d_create_buffer(device, heap_properties, heap_flags,
&resource->desc, &resource->vk_buffer)))
return hr;
/* Non-placed buffers own a GPU VA range for GetGPUVirtualAddress. */
if (!(resource->gpu_address = vkd3d_gpu_va_allocator_allocate(&device->gpu_va_allocator,
desc->Alignment ? desc->Alignment : D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT,
desc->Width, resource)))
{
ERR("Failed to allocate GPU VA.\n");
d3d12_resource_destroy(resource, device);
return E_OUTOFMEMORY;
}
break;
case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
case D3D12_RESOURCE_DIMENSION_TEXTURE3D:
/* MipLevels == 0 requests a full mip chain. */
if (!resource->desc.MipLevels)
resource->desc.MipLevels = max_miplevel_count(desc);
resource->initial_layout_transition = 1;
if (FAILED(hr = vkd3d_create_image(device, heap_properties, heap_flags,
&resource->desc, resource, &resource->vk_image)))
return hr;
break;
default:
WARN("Invalid resource dimension %#x.\n", resource->desc.Dimension);
return E_INVALIDARG;
}
if (FAILED(hr = d3d12_resource_init_sparse_info(resource, device, &resource->sparse)))
{
d3d12_resource_destroy(resource, device);
return hr;
}
resource->heap = NULL;
resource->heap_offset = 0;
if (FAILED(hr = vkd3d_private_store_init(&resource->private_store)))
{
d3d12_resource_destroy(resource, device);
return hr;
}
d3d12_device_add_ref(resource->device = device);
return S_OK;
}
static void d3d12_resource_destroy_2(struct d3d12_resource *resource, struct d3d12_device *device)
{
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
@ -3510,180 +2827,6 @@ fail:
return hr;
}
/* Allocates and initializes a resource object; frees it again if
 * initialization fails. */
static HRESULT d3d12_resource_create(struct d3d12_device *device,
        const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags,
        const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
        const D3D12_CLEAR_VALUE *optimized_clear_value, bool placed, struct d3d12_resource **resource)
{
    struct d3d12_resource *object;
    HRESULT hr;

    if (!(object = vkd3d_malloc(sizeof(*object))))
        return E_OUTOFMEMORY;

    hr = d3d12_resource_init(object, device, heap_properties, heap_flags,
            desc, initial_state, optimized_clear_value, placed);
    if (FAILED(hr))
    {
        vkd3d_free(object);
        return hr;
    }

    *resource = object;
    return hr;
}
/* Creates an implicit, private heap dedicated to a committed resource and
 * marks the resource as owning it.  SizeInBytes/Alignment are left zero;
 * d3d12_heap_init derives them from the resource itself. */
static HRESULT vkd3d_allocate_resource_memory(
        struct d3d12_device *device, struct d3d12_resource *resource,
        const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags)
{
    D3D12_HEAP_DESC heap_desc;
    HRESULT hr;

    heap_desc.SizeInBytes = 0;
    heap_desc.Properties = *heap_properties;
    heap_desc.Alignment = 0;
    heap_desc.Flags = heap_flags;

    hr = d3d12_heap_create(device, &heap_desc, resource, &resource->heap);
    if (SUCCEEDED(hr))
        resource->flags |= VKD3D_RESOURCE_DEDICATED_HEAP;
    return hr;
}
/* Creates a committed resource: the resource plus an implicit dedicated
 * heap.  Heap properties are mandatory for committed resources. */
HRESULT d3d12_committed_resource_create(struct d3d12_device *device,
        const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags,
        const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
        const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource)
{
    struct d3d12_resource *object;
    HRESULT hr;

    if (!heap_properties)
    {
        WARN("Heap properties are NULL.\n");
        return E_INVALIDARG;
    }

    hr = d3d12_resource_create(device, heap_properties, heap_flags,
            desc, initial_state, optimized_clear_value, false, &object);
    if (FAILED(hr))
        return hr;

    hr = vkd3d_allocate_resource_memory(device, object, heap_properties, heap_flags);
    if (FAILED(hr))
    {
        d3d12_resource_Release(&object->ID3D12Resource_iface);
        return hr;
    }

    TRACE("Created committed resource %p.\n", object);

    *resource = object;
    return S_OK;
}
/* Binds a placed resource to heap memory at the given offset.  Placed
 * buffers simply alias the heap's omnipotent buffer; images (and buffers
 * without one) are bound to the heap's VkDeviceMemory directly.  When the
 * offset alignment or memory type is incompatible, falls back to a separate
 * dedicated allocation (which breaks heap aliasing; hence the FIXMEs). */
static HRESULT vkd3d_bind_heap_memory(struct d3d12_device *device,
struct d3d12_resource *resource, struct d3d12_heap *heap, uint64_t heap_offset)
{
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
VkDevice vk_device = device->vk_device;
VkMemoryRequirements requirements;
VkResult vr;
if (resource->flags & VKD3D_RESOURCE_PLACED_BUFFER)
{
/* Just inherit the buffer from the heap. */
resource->vk_buffer = heap->buffer_resource->vk_buffer;
resource->heap = heap;
resource->heap_offset = heap_offset;
resource->gpu_address = heap->buffer_resource->gpu_address + heap_offset;
return S_OK;
}
if (d3d12_resource_is_buffer(resource))
VK_CALL(vkGetBufferMemoryRequirements(vk_device, resource->vk_buffer, &requirements));
else
VK_CALL(vkGetImageMemoryRequirements(vk_device, resource->vk_image, &requirements));
/* The heap offset must satisfy the Vulkan alignment requirement. */
if (heap_offset % requirements.alignment)
{
FIXME("Invalid heap offset %#"PRIx64" (alignment %#"PRIx64").\n",
heap_offset, requirements.alignment);
goto allocate_memory;
}
/* The heap's memory type must be usable for this resource. */
if (!(requirements.memoryTypeBits & (1u << heap->vk_memory_type)))
{
FIXME("Memory type %u cannot be bound to resource %p (allowed types %#x).\n",
heap->vk_memory_type, resource, requirements.memoryTypeBits);
goto allocate_memory;
}
if (d3d12_resource_is_buffer(resource))
vr = VK_CALL(vkBindBufferMemory(vk_device, resource->vk_buffer, heap->vk_memory, heap_offset));
else
vr = VK_CALL(vkBindImageMemory(vk_device, resource->vk_image, heap->vk_memory, heap_offset));
if (vr == VK_SUCCESS)
{
resource->heap = heap;
resource->heap_offset = heap_offset;
}
else
{
WARN("Failed to bind memory, vr %d.\n", vr);
}
return hresult_from_vk_result(vr);
allocate_memory:
FIXME("Allocating device memory.\n");
return vkd3d_allocate_resource_memory(device, resource, &heap->desc.Properties, heap->desc.Flags);
}
/* Creates a placed resource on an existing heap at the given offset,
 * after checking that the heap permits this resource category. */
HRESULT d3d12_placed_resource_create(struct d3d12_device *device, struct d3d12_heap *heap, uint64_t heap_offset,
        const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
        const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource)
{
    struct d3d12_resource *object;
    HRESULT hr;

    if (FAILED(hr = validate_placed_resource_heap(heap, desc)))
        return hr;

    hr = d3d12_resource_create(device, &heap->desc.Properties, heap->desc.Flags,
            desc, initial_state, optimized_clear_value, true, &object);
    if (FAILED(hr))
        return hr;

    if (FAILED(hr = vkd3d_bind_heap_memory(device, object, heap, heap_offset)))
    {
        d3d12_resource_Release(&object->ID3D12Resource_iface);
        return hr;
    }

    TRACE("Created placed resource %p.\n", object);

    *resource = object;
    return S_OK;
}
/* Creates a reserved (tiled/sparse) resource; the NULL heap properties
 * mark the resource as sparse in d3d12_resource_init. */
HRESULT d3d12_reserved_resource_create(struct d3d12_device *device,
        const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
        const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource)
{
    struct d3d12_resource *object;
    HRESULT hr;

    hr = d3d12_resource_create(device, NULL, 0,
            desc, initial_state, optimized_clear_value, false, &object);
    if (FAILED(hr))
        return hr;

    TRACE("Created reserved resource %p.\n", object);

    *resource = object;
    return S_OK;
}
VKD3D_EXPORT HRESULT vkd3d_create_image_resource(ID3D12Device *device,
const struct vkd3d_image_resource_create_info *create_info, ID3D12Resource **resource)
{

View File

@ -618,31 +618,6 @@ HRESULT vkd3d_memory_allocator_flush_clears(struct vkd3d_memory_allocator *alloc
/* ID3D12Heap */
typedef ID3D12Heap1 d3d12_heap_iface;
/* Heap backed by a single VkDeviceMemory allocation.  Heaps that allow
 * buffers also own an internal buffer resource spanning the allocation,
 * which placed buffers alias into at an offset. */
struct d3d12_heap
{
d3d12_heap_iface ID3D12Heap_iface;
LONG refcount;
bool is_private; /* Implicit heap owned by a committed resource. */
D3D12_HEAP_DESC desc;
VkDeviceMemory vk_memory;
void *map_ptr; /* Persistent mapping for host-visible memory, or NULL. */
uint32_t vk_memory_type;
struct d3d12_resource *buffer_resource; /* Omnipotent buffer, or NULL. */
struct d3d12_device *device;
struct vkd3d_private_store private_store;
};
/* Creates a heap; a non-NULL `resource` creates a private heap dedicated
 * to that committed resource. */
HRESULT d3d12_heap_create(struct d3d12_device *device, const D3D12_HEAP_DESC *desc,
const struct d3d12_resource *resource, struct d3d12_heap **heap);
/* Creates a buffer-only heap wrapping application-provided host memory. */
HRESULT d3d12_heap_create_from_host_pointer(struct d3d12_device *device, void *addr, size_t size,
struct d3d12_heap **heap);
bool d3d12_heap_needs_host_barrier_for_write(struct d3d12_heap *heap);
struct d3d12_heap *unsafe_impl_from_ID3D12Heap(ID3D12Heap *iface);
struct d3d12_heap_2
{
d3d12_heap_iface ID3D12Heap_iface;
@ -740,7 +715,6 @@ struct d3d12_resource
VkImage vk_image;
};
struct d3d12_heap *heap;
struct d3d12_heap_2 *heap_2;
uint64_t heap_offset;
@ -781,16 +755,6 @@ bool d3d12_resource_is_cpu_accessible(const struct d3d12_resource *resource);
HRESULT d3d12_resource_validate_desc(const D3D12_RESOURCE_DESC *desc, struct d3d12_device *device);
VkImageSubresource d3d12_resource_get_vk_subresource(const struct d3d12_resource *resource, uint32_t subresource_idx, bool all_aspects);
/* Creates a resource with an implicit dedicated heap. */
HRESULT d3d12_committed_resource_create(struct d3d12_device *device,
const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags,
const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource);
/* Creates a resource placed on an existing heap at the given offset. */
HRESULT d3d12_placed_resource_create(struct d3d12_device *device, struct d3d12_heap *heap, uint64_t heap_offset,
const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource);
/* Creates a reserved (tiled/sparse) resource with no backing heap. */
HRESULT d3d12_reserved_resource_create(struct d3d12_device *device,
const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource);
HRESULT d3d12_resource_create_committed_2(struct d3d12_device *device, const D3D12_RESOURCE_DESC *desc,
const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags, D3D12_RESOURCE_STATES initial_state,
const D3D12_CLEAR_VALUE *optimized_clear_value, struct d3d12_resource **resource);