anv: Soft-pin state pools
The state_pools reserve virtual address space of the full BLOCK_POOL_MEMFD_SIZE, but maintain the current behavior of growing from the middle.

v2:
 - rename block_pool::offset to block_pool::start_address (Jason)
 - assign state pool start_address statically (Jason)

v3:
 - remove unnecessary bo_flags tampering for the dynamic pool (Jason)

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Jordan Justen <jordan.l.justen@intel.com>
This commit is contained in:
parent
f00fcfb7a2
commit
e662bdb820
|
@ -243,6 +243,7 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
|
|||
VkResult
|
||||
anv_block_pool_init(struct anv_block_pool *pool,
|
||||
struct anv_device *device,
|
||||
uint64_t start_address,
|
||||
uint32_t initial_size,
|
||||
uint64_t bo_flags)
|
||||
{
|
||||
|
@ -250,6 +251,8 @@ anv_block_pool_init(struct anv_block_pool *pool,
|
|||
|
||||
pool->device = device;
|
||||
pool->bo_flags = bo_flags;
|
||||
pool->start_address = gen_canonical_address(start_address);
|
||||
|
||||
anv_bo_init(&pool->bo, 0, 0);
|
||||
|
||||
pool->fd = memfd_create("block pool", MFD_CLOEXEC);
|
||||
|
@ -402,6 +405,10 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
|
|||
* hard work for us.
|
||||
*/
|
||||
anv_bo_init(&pool->bo, gem_handle, size);
|
||||
if (pool->bo_flags & EXEC_OBJECT_PINNED) {
|
||||
pool->bo.offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
|
||||
center_bo_offset;
|
||||
}
|
||||
pool->bo.flags = pool->bo_flags;
|
||||
pool->bo.map = map;
|
||||
|
||||
|
@ -610,10 +617,12 @@ anv_block_pool_alloc_back(struct anv_block_pool *pool,
|
|||
VkResult
|
||||
anv_state_pool_init(struct anv_state_pool *pool,
|
||||
struct anv_device *device,
|
||||
uint64_t start_address,
|
||||
uint32_t block_size,
|
||||
uint64_t bo_flags)
|
||||
{
|
||||
VkResult result = anv_block_pool_init(&pool->block_pool, device,
|
||||
start_address,
|
||||
block_size * 16,
|
||||
bo_flags);
|
||||
if (result != VK_SUCCESS)
|
||||
|
|
|
@ -1615,21 +1615,28 @@ VkResult anv_CreateDevice(
|
|||
if (result != VK_SUCCESS)
|
||||
goto fail_batch_bo_pool;
|
||||
|
||||
/* For the state pools we explicitly disable 48bit. */
|
||||
bo_flags = (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
|
||||
(physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0);
|
||||
if (physical_device->use_softpin)
|
||||
bo_flags |= EXEC_OBJECT_PINNED;
|
||||
else
|
||||
bo_flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
|
||||
|
||||
result = anv_state_pool_init(&device->dynamic_state_pool, device, 16384,
|
||||
result = anv_state_pool_init(&device->dynamic_state_pool, device,
|
||||
DYNAMIC_STATE_POOL_MIN_ADDRESS,
|
||||
16384,
|
||||
bo_flags);
|
||||
if (result != VK_SUCCESS)
|
||||
goto fail_bo_cache;
|
||||
|
||||
result = anv_state_pool_init(&device->instruction_state_pool, device, 16384,
|
||||
result = anv_state_pool_init(&device->instruction_state_pool, device,
|
||||
INSTRUCTION_STATE_POOL_MIN_ADDRESS,
|
||||
16384,
|
||||
bo_flags);
|
||||
if (result != VK_SUCCESS)
|
||||
goto fail_dynamic_state_pool;
|
||||
|
||||
result = anv_state_pool_init(&device->surface_state_pool, device, 4096,
|
||||
result = anv_state_pool_init(&device->surface_state_pool, device,
|
||||
SURFACE_STATE_POOL_MIN_ADDRESS,
|
||||
4096,
|
||||
bo_flags);
|
||||
if (result != VK_SUCCESS)
|
||||
goto fail_instruction_state_pool;
|
||||
|
|
|
@ -617,6 +617,12 @@ struct anv_block_pool {
|
|||
|
||||
struct anv_bo bo;
|
||||
|
||||
/* The address where the start of the pool is pinned. The various bos that
|
||||
* are created as the pool grows will have addresses in the range
|
||||
* [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
|
||||
*/
|
||||
uint64_t start_address;
|
||||
|
||||
/* The offset from the start of the bo to the "center" of the block
|
||||
* pool. Pointers to allocated blocks are given by
|
||||
* bo.map + center_bo_offset + offsets.
|
||||
|
@ -713,6 +719,7 @@ struct anv_state_stream {
|
|||
*/
|
||||
VkResult anv_block_pool_init(struct anv_block_pool *pool,
|
||||
struct anv_device *device,
|
||||
uint64_t start_address,
|
||||
uint32_t initial_size,
|
||||
uint64_t bo_flags);
|
||||
void anv_block_pool_finish(struct anv_block_pool *pool);
|
||||
|
@ -723,6 +730,7 @@ int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
|
|||
|
||||
VkResult anv_state_pool_init(struct anv_state_pool *pool,
|
||||
struct anv_device *device,
|
||||
uint64_t start_address,
|
||||
uint32_t block_size,
|
||||
uint64_t bo_flags);
|
||||
void anv_state_pool_finish(struct anv_state_pool *pool);
|
||||
|
|
|
@ -116,7 +116,7 @@ static void run_test()
|
|||
struct anv_block_pool pool;
|
||||
|
||||
pthread_mutex_init(&device.mutex, NULL);
|
||||
anv_block_pool_init(&pool, &device, 4096, 0);
|
||||
anv_block_pool_init(&pool, &device, 4096, 4096, 0);
|
||||
|
||||
for (unsigned i = 0; i < NUM_THREADS; i++) {
|
||||
jobs[i].pool = &pool;
|
||||
|
|
|
@ -43,7 +43,7 @@ int main(int argc, char **argv)
|
|||
pthread_mutex_init(&device.mutex, NULL);
|
||||
|
||||
for (unsigned i = 0; i < NUM_RUNS; i++) {
|
||||
anv_state_pool_init(&state_pool, &device, 256, 0);
|
||||
anv_state_pool_init(&state_pool, &device, 4096, 256, 0);
|
||||
|
||||
/* Grab one so a zero offset is impossible */
|
||||
anv_state_pool_alloc(&state_pool, 16, 16);
|
||||
|
|
|
@ -40,7 +40,7 @@ int main(int argc, char **argv)
|
|||
struct anv_state_pool state_pool;
|
||||
|
||||
pthread_mutex_init(&device.mutex, NULL);
|
||||
anv_state_pool_init(&state_pool, &device, 4096, 0);
|
||||
anv_state_pool_init(&state_pool, &device, 4096, 4096, 0);
|
||||
|
||||
/* Grab one so a zero offset is impossible */
|
||||
anv_state_pool_alloc(&state_pool, 16, 16);
|
||||
|
|
|
@ -61,7 +61,7 @@ static void run_test()
|
|||
struct anv_state_pool state_pool;
|
||||
|
||||
pthread_mutex_init(&device.mutex, NULL);
|
||||
anv_state_pool_init(&state_pool, &device, 64, 0);
|
||||
anv_state_pool_init(&state_pool, &device, 4096, 64, 0);
|
||||
|
||||
pthread_barrier_init(&barrier, NULL, NUM_THREADS);
|
||||
|
||||
|
|
Loading…
Reference in New Issue