Orange/include/Orange/Render/RenderContext.h

#pragma once
#include <Orange/Core/Types.h>
#include <Orange/Core/Traits.h>
#include <Orange/Core/Span.h>
#include <Orange/Core/Result.h>
#include <vulkan/vulkan_core.h>
namespace orange
{
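    // A sub-range of a VkBuffer. `ptr` is the CPU-visible address of the slice when the
    // backing memory is host-mapped, or nullptr otherwise.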
    struct BufferSlice
    {
        VkBuffer buffer;
        VkDeviceSize offset;
        VkDeviceSize size;
        void* ptr;
    };
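
    // A VkBuffer together with its backing VkDeviceMemory allocation. `ptr` is the
    // host mapping of that memory when it is host-visible, or nullptr otherwise.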
    struct GPUMemoryBuffer
    {
        VkDeviceMemory memory;
        VkBuffer buffer;
        VkDeviceSize size;
        void* ptr;
    };
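
    // Bump allocator over a single GPUMemoryBuffer: slices are handed out front to back
    // and there is no per-slice free.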
    class MemoryPool
    {
    public:
        MemoryPool(GPUMemoryBuffer buffer)
            : m_buffer{ buffer } {}

        // Linear (bump) allocation: aligns the current offset, checks that the request
        // still fits in the pooled buffer, then advances the offset past the new slice.
        Result<BufferSlice> AllocSlice(VkDeviceSize size, VkDeviceSize alignment = 1u)
        {
            VkDeviceSize offset = Align(m_offset, alignment);
            if (offset + size > m_buffer.size)
                return Result<BufferSlice>::Error();

            BufferSlice slice{ m_buffer.buffer, offset, size,
                m_buffer.ptr ? reinterpret_cast<uint8_t*>(m_buffer.ptr) + offset : nullptr };

            m_offset = offset + size;
            return Result<BufferSlice>::Success(slice);
        }

        VkDeviceMemory Memory() const
        {
            return m_buffer.memory;
        }

    private:
        GPUMemoryBuffer m_buffer = {};
        VkDeviceSize m_offset = 0u;
    };
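
    // Result specialised on VkResult: VK_SUCCESS marks success and VK_ERROR_UNKNOWN is
    // used for the remaining error-code parameters (see Orange/Core/Result.h for their
    // exact meaning).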
    template <typename T>
    using VulkanResult = Result<T, VkResult, VK_SUCCESS, VK_ERROR_UNKNOWN, VK_ERROR_UNKNOWN>;
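
    // Cached memory type indices discovered at device creation; -1 (all bits set) means
    // no matching memory type was found.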
    struct VkMemoryTypes
    {
        uint32_t cpuTypeIdx = -1;
        uint32_t gpuOnlyTypeIdx = -1;
        uint32_t gpuHostVisibleTypeIdx = -1;
    };
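
    // Owns the core Vulkan objects (instance, physical device, device, queue, command
    // pool) and provides helpers for creating fences, semaphores, buffers, images,
    // image views and shader modules.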
    class RenderContext
    {
    public:
        ~RenderContext();

        static VulkanResult<RenderContext> Create(const char* appName);

        VkInstance Instance() const { return m_instance; }
        VkPhysicalDevice PhysicalDevice() const { return m_physicalDevice; }
        VkDevice Device() const { return m_device; }
        VkQueue Queue() const { return m_queue; }
        VkCommandPool CommandPool() const { return m_commandPool; }

        VulkanResult<VkFence> CreateFence(bool signalled);
        VulkanResult<VkSemaphore> CreateSemaphore();

        VulkanResult<VoidResult> BeginCommandBuffer(VkCommandBuffer buffer);
        VulkanResult<VoidResult> EndCommandBuffer(VkCommandBuffer buffer);

        VulkanResult<GPUMemoryBuffer> CreateBuffer(VkDeviceSize size);
        VulkanResult<VkImage> CreateImage(MemoryPool& pool, uint32_t width, uint32_t height, VkFormat format, VkImageUsageFlags usage);
        VulkanResult<VkImageView> CreateImageView(VkImage image, VkFormat format, VkImageAspectFlagBits aspect);
        VulkanResult<VkShaderModule> CreateShader(Span<const uint32_t> code);

        const VkPhysicalDeviceProperties& Props() const { return m_props; }
    protected:
        friend VulkanResult<RenderContext>;

        RenderContext(VkInstance instance, VkPhysicalDevice physicalDevice, VkDevice device,
                      VkQueue queue, VkCommandPool commandPool)
            : m_instance{ instance }, m_physicalDevice{ physicalDevice }, m_device{ device }
            , m_queue{ queue }, m_commandPool{ commandPool }
        {
            VkPhysicalDeviceMemoryProperties memoryProps;
            vkGetPhysicalDeviceMemoryProperties(m_physicalDevice, &memoryProps);

            // Cache one memory type index per usage pattern. A type qualifies if it has
            // all of the requested property flags; an exact match is preferred and ends
            // the search for that entry early.
            struct
            {
                uint32_t* ptr;
                VkMemoryPropertyFlags flags;
            } memoryTypeMapping[] =
            {
                { &m_memoryTypes.gpuHostVisibleTypeIdx,
                  VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                  VK_MEMORY_PROPERTY_HOST_COHERENT_BIT },
                { &m_memoryTypes.gpuOnlyTypeIdx,
                  VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT },
                { &m_memoryTypes.cpuTypeIdx,
                  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                  VK_MEMORY_PROPERTY_HOST_COHERENT_BIT },
            };

            for (auto& mapping : memoryTypeMapping)
            {
                for (uint32_t i = 0; i < memoryProps.memoryTypeCount; i++)
                {
                    // Require all requested flags to be present, not just an overlap.
                    if ((memoryProps.memoryTypes[i].propertyFlags & mapping.flags) == mapping.flags)
                    {
                        *mapping.ptr = i;
                        if (memoryProps.memoryTypes[i].propertyFlags == mapping.flags)
                            break;
                    }
                }
            }

            vkGetPhysicalDeviceProperties(m_physicalDevice, &m_props);
        }
    private:
        VkInstance m_instance = VK_NULL_HANDLE;
        VkPhysicalDevice m_physicalDevice = VK_NULL_HANDLE;
        VkDevice m_device = VK_NULL_HANDLE;
        VkQueue m_queue = VK_NULL_HANDLE;
        VkCommandPool m_commandPool = VK_NULL_HANDLE;

        VkPhysicalDeviceProperties m_props{};
        VkMemoryTypes m_memoryTypes{};
    };
}
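
// Example usage (sketch only; assumes Result<T>/VulkanResult<T> expose pointer-like
// access to the contained value - the exact accessors are defined in Orange/Core/Result.h):
//
//   auto ctx     = orange::RenderContext::Create("MyApp");
//   auto staging = ctx->CreateBuffer(64u << 10);                  // 64 KiB buffer
//   orange::MemoryPool pool{ *staging };
//   auto slice   = pool.AllocSlice(sizeof(float) * 256u, 16u);    // 16-byte aligned slice
//   // If slice->ptr is non-null the slice is host-visible and can be written directly.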