gallium/radeon: use gart_page_size instead of hardcoded 4096

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Author: Marek Olšák
Date:   2016-05-08 12:30:25 +02:00
Commit: 544967faf5 (parent bfa8a00920)

6 changed files with 18 additions and 10 deletions
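
Every hunk below replaces a hardcoded 4096 (or the CPU page size) with the GART page size that the winsys reports through info.gart_page_size, so GTT allocation sizes and alignments stay correct even when the GART page is not 4 KiB. The short standalone C sketch below only illustrates that page-alignment idea; GART_PAGE_SIZE and align_to_page() are hypothetical stand-ins for this note, not Mesa API:

    /* Standalone sketch: round allocations up to the reported GART page size
     * instead of assuming 4096 bytes.  GART_PAGE_SIZE is a made-up stand-in
     * for the winsys-reported info.gart_page_size. */
    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GART_PAGE_SIZE 65536u  /* stand-in value; not necessarily 4096 */

    /* Round size up to a multiple of a power-of-two page size. */
    static uint64_t align_to_page(uint64_t size, uint64_t page_size)
    {
       assert(page_size != 0 && (page_size & (page_size - 1)) == 0);
       return (size + page_size - 1) & ~(page_size - 1);
    }

    int main(void)
    {
       uint64_t size = 10000;
       printf("%" PRIu64 " -> %" PRIu64 "\n",
              size, align_to_page(size, GART_PAGE_SIZE));
       return 0;
    }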

src/gallium/drivers/r300/r300_query.c

@@ -58,7 +58,9 @@ static struct pipe_query *r300_create_query(struct pipe_context *pipe,
     else
         q->num_pipes = r300screen->info.r300_num_gb_pipes;
 
-    q->buf = r300->rws->buffer_create(r300->rws, 4096, 4096,
+    q->buf = r300->rws->buffer_create(r300->rws,
+                                      r300screen->info.gart_page_size,
+                                      r300screen->info.gart_page_size,
                                       RADEON_DOMAIN_GTT, 0);
     if (!q->buf) {
         FREE(q);

src/gallium/drivers/radeon/r600_pipe_common.c

@@ -291,8 +291,9 @@ bool r600_common_context_init(struct r600_common_context *rctx,
         r600_query_init(rctx);
         cayman_init_msaa(&rctx->b);
 
-        rctx->allocator_so_filled_size = u_suballocator_create(&rctx->b, 4096, 4,
-                                                                0, PIPE_USAGE_DEFAULT, TRUE);
+        rctx->allocator_so_filled_size =
+                u_suballocator_create(&rctx->b, rscreen->info.gart_page_size,
+                                      4, 0, PIPE_USAGE_DEFAULT, TRUE);
         if (!rctx->allocator_so_filled_size)
                 return false;
@@ -845,8 +846,11 @@ static void r600_query_memory_info(struct pipe_screen *screen,
 struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
 {
+        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+
         if (templ->target == PIPE_BUFFER) {
-                return r600_buffer_create(screen, templ, 4096);
+                return r600_buffer_create(screen, templ,
+                                          rscreen->info.gart_page_size);
         } else {
                 return r600_texture_create(screen, templ);
         }

src/gallium/drivers/radeon/r600_query.c

@@ -262,7 +262,8 @@ void r600_query_hw_destroy(struct r600_common_context *rctx,
 static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
                                                     struct r600_query_hw *query)
 {
-        unsigned buf_size = MAX2(query->result_size, 4096);
+        unsigned buf_size = MAX2(query->result_size,
+                                 ctx->screen->info.gart_page_size);
 
         /* Queries are normally read by the CPU after
          * being written by the gpu, hence staging is probably a good

src/gallium/drivers/radeonsi/si_debug.c

@@ -598,7 +598,7 @@ static void si_dump_last_bo_list(struct si_context *sctx, FILE *f)
         for (i = 0; i < sctx->last_bo_count; i++) {
                 /* Note: Buffer sizes are expected to be aligned to 4k by the winsys. */
-                const unsigned page_size = 4096;
+                const unsigned page_size = sctx->b.screen->info.gart_page_size;
                 uint64_t va = sctx->last_bo_list[i].vm_address;
                 uint64_t size = sctx->last_bo_list[i].buf->size;
                 bool hit = false;

src/gallium/winsys/amdgpu/drm/amdgpu_cs.c

@@ -138,8 +138,8 @@ static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
       return NULL;
    }
 
-   alloc_buffer.alloc_size = 4 * 1024;
-   alloc_buffer.phys_alignment = 4 *1024;
+   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
+   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
    alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 
    r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
@@ -201,6 +201,7 @@ amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
 static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_ib *ib,
                               struct amdgpu_cs_ib_info *info, unsigned ib_type)
 {
+   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
    /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
@@ -236,7 +237,7 @@ static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_ib *ib,
       ib->used_ib_space = 0;
 
       ib->big_ib_buffer = ws->buffer_create(ws, buffer_size,
-                                            4096,
+                                            aws->info.gart_page_size,
                                             RADEON_DOMAIN_GTT,
                                             RADEON_FLAG_CPU_ACCESS);
       if (!ib->big_ib_buffer)

src/gallium/winsys/radeon/drm/radeon_drm_bo.c

@@ -779,7 +779,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
     memset(&args, 0, sizeof(args));
     args.addr = (uintptr_t)pointer;
-    args.size = align(size, sysconf(_SC_PAGE_SIZE));
+    args.size = align(size, ws->info.gart_page_size);
     args.flags = RADEON_GEM_USERPTR_ANONONLY |
                  RADEON_GEM_USERPTR_VALIDATE |
                  RADEON_GEM_USERPTR_REGISTER;