r600g: remove the fences which were used for the cache buffer manager
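
These fences existed only to serve the cache buffer manager and nothing reads them anymore, so drop the whole mechanism: the per-device fence counter, the mapped 4 KB fence BO and its CPU pointer (radeon->fence, radeon->fence_bo, radeon->cfence), the EVENT_WRITE_EOP fence packet emitted on every flush, the per-BO fence numbers and fenced list kept by the context, and the early-out checks in radeon_bo_wait()/radeon_bo_busy() that compared a BO's fence against the last value written into the fence BO.

For reference, this is roughly the scheme being deleted: the GPU writes an increasing fence value into a mapped buffer via EVENT_WRITE_EOP, and the CPU compares per-BO fence numbers against that value to decide whether a BO is still busy. A simplified sketch follows; the names are illustrative, not the driver's, and it omits the fenced-list renumbering done on wraparound.

    struct user_fence {
        unsigned next;                /* CPU-side counter, starts at 1    */
        volatile unsigned *signalled; /* CPU mapping of the 4 KB fence BO */
    };

    /* Called once per flush, after the CS whose final EOP packet writes
     * 'next' into the fence BO; the returned value is what every BO
     * relocated by that CS was tagged with. */
    static unsigned user_fence_emit(struct user_fence *uf)
    {
        unsigned emitted = uf->next++;
        if (uf->next == 0)            /* unsigned wraparound: 0 means "never fenced" */
            uf->next = 1;
        return emitted;
    }

    /* A BO that was never fenced is idle; otherwise it is idle once a
     * fence value >= the BO's fence has been written. */
    static int user_fence_idle(const struct user_fence *uf, unsigned bo_fence)
    {
        return bo_fence == 0 || bo_fence <= *uf->signalled;
    }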

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Marek Olšák 2011-08-03 01:59:02 +02:00
parent cdbb8a195a
commit 591d8c3350
4 changed files with 10 additions and 145 deletions


@@ -221,21 +221,8 @@ static int radeon_get_backend_map(struct radeon *radeon)
return 0;
}
static int radeon_init_fence(struct radeon *radeon)
{
radeon->fence = 1;
radeon->fence_bo = r600_bo(radeon, 4096, 0, 0, 0);
if (radeon->fence_bo == NULL) {
return -ENOMEM;
}
radeon->cfence = r600_bo_map(radeon, radeon->fence_bo, PIPE_TRANSFER_UNSYNCHRONIZED, NULL);
*radeon->cfence = 0;
return 0;
}
struct radeon *radeon_create(struct radeon_winsys *ws)
{
int r;
struct radeon *radeon = CALLOC_STRUCT(radeon);
if (radeon == NULL) {
return NULL;
@@ -305,12 +292,6 @@ struct radeon *radeon_create(struct radeon_winsys *ws)
radeon_get_backend_map(radeon);
}
r = radeon_init_fence(radeon);
if (r) {
radeon_destroy(radeon);
return NULL;
}
return radeon;
}
@@ -319,10 +300,6 @@ struct radeon *radeon_destroy(struct radeon *radeon)
if (radeon == NULL)
return NULL;
if (radeon->fence_bo) {
r600_bo_reference(radeon, &radeon->fence_bo, NULL);
}
FREE(radeon);
return NULL;
}


@@ -143,32 +143,6 @@ void r600_init_cs(struct r600_context *ctx)
ctx->init_dwords = ctx->pm4_cdwords;
}
static void INLINE r600_context_update_fenced_list(struct r600_context *ctx)
{
for (int i = 0; i < ctx->creloc; i++) {
if (!LIST_IS_EMPTY(&ctx->bo[i]->fencedlist))
LIST_DELINIT(&ctx->bo[i]->fencedlist);
LIST_ADDTAIL(&ctx->bo[i]->fencedlist, &ctx->fenced_bo);
ctx->bo[i]->fence = ctx->radeon->fence;
ctx->bo[i]->ctx = ctx;
}
}
static void INLINE r600_context_fence_wraparound(struct r600_context *ctx, unsigned fence)
{
struct radeon_bo *bo = NULL;
struct radeon_bo *tmp;
LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &ctx->fenced_bo, fencedlist) {
if (bo->fence <= *ctx->radeon->cfence) {
LIST_DELINIT(&bo->fencedlist);
bo->fence = 0;
} else {
bo->fence = fence;
}
}
}
static void r600_init_block(struct r600_context *ctx,
struct r600_block *block,
const struct r600_reg *reg, int index, int nreg,
@@ -757,17 +731,6 @@ static int r600_loop_const_init(struct r600_context *ctx, u32 offset)
return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, R600_LOOP_CONST_OFFSET);
}
static void r600_context_clear_fenced_bo(struct r600_context *ctx)
{
struct radeon_bo *bo, *tmp;
LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &ctx->fenced_bo, fencedlist) {
LIST_DELINIT(&bo->fencedlist);
bo->fence = 0;
bo->ctx = NULL;
}
}
static void r600_free_resource_range(struct r600_context *ctx, struct r600_range *range, int nblocks)
{
struct r600_block *block;
@@ -817,7 +780,6 @@ void r600_context_fini(struct r600_context *ctx)
free(ctx->bo);
free(ctx->pm4);
r600_context_clear_fenced_bo(ctx);
memset(ctx, 0, sizeof(struct r600_context));
}
@@ -1058,7 +1020,6 @@ void r600_context_get_reloc(struct r600_context *ctx, struct r600_bo *rbo)
ctx->reloc[ctx->creloc].write_domain = rbo->domains & (RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM);
ctx->reloc[ctx->creloc].flags = 0;
radeon_bo_reference(ctx->radeon, &ctx->bo[ctx->creloc], bo);
rbo->fence = ctx->radeon->fence;
ctx->creloc++;
}
@@ -1138,7 +1099,6 @@ void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_stat
/* find relocation */
reloc_id = block->pm4_bo_index[id];
r600_bo_reference(ctx->radeon, &block->reloc[reloc_id].bo, reg->bo);
reg->bo->fence = ctx->radeon->fence;
/* always force dirty for relocs for now */
dirty |= R600_BLOCK_STATUS_DIRTY;
}
@@ -1205,31 +1165,21 @@ void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_
dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
}
}
if (!dirty) {
if (is_vertex)
state->bo[0]->fence = ctx->radeon->fence;
else {
state->bo[0]->fence = ctx->radeon->fence;
state->bo[1]->fence = ctx->radeon->fence;
}
} else {
if (dirty) {
if (is_vertex) {
/* VERTEX RESOURCE, we pretend there are 2 BOs to relocate so
* we have a single case between VERTEX & TEXTURE resources
*/
r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->bo[0]);
r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
state->bo[0]->fence = ctx->radeon->fence;
} else {
/* TEXTURE RESOURCE */
r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->bo[0]);
r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->bo[1]);
state->bo[0]->fence = ctx->radeon->fence;
state->bo[1]->fence = ctx->radeon->fence;
state->bo[0]->bo->binding |= BO_BOUND_TEXTURE;
}
}
if (dirty) {
if (is_vertex)
block->status |= R600_BLOCK_STATUS_RESOURCE_VERTEX;
else
@@ -1574,7 +1524,6 @@ void r600_context_flush(struct r600_context *ctx)
struct drm_radeon_cs drmib = {};
struct drm_radeon_cs_chunk chunks[2];
uint64_t chunk_array[2];
unsigned fence;
int r;
struct r600_block *enable_block = NULL;
@@ -1592,16 +1541,6 @@ void r600_context_flush(struct r600_context *ctx)
/* partial flush is needed to avoid lockups on some chips with user fences */
ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
/* emit fence */
ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
ctx->pm4[ctx->pm4_cdwords++] = 0;
ctx->pm4[ctx->pm4_cdwords++] = (1 << 29) | (0 << 24);
ctx->pm4[ctx->pm4_cdwords++] = ctx->radeon->fence;
ctx->pm4[ctx->pm4_cdwords++] = 0;
ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
ctx->pm4[ctx->pm4_cdwords++] = 0;
r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], ctx->radeon->fence_bo);
#if 1
/* emit cs */
@@ -1625,16 +1564,6 @@ void r600_context_flush(struct r600_context *ctx)
*ctx->radeon->cfence = ctx->radeon->fence;
#endif
r600_context_update_fenced_list(ctx);
fence = ctx->radeon->fence + 1;
if (fence < ctx->radeon->fence) {
/* wrap around */
fence = 1;
r600_context_fence_wraparound(ctx, fence);
}
ctx->radeon->fence = fence;
/* restart */
for (int i = 0; i < ctx->creloc; i++) {
ctx->bo[i]->reloc = NULL;


@@ -34,17 +34,12 @@
#define PKT_COUNT_C 0xC000FFFF
#define PKT_COUNT_S(x) (((x) & 0x3FFF) << 16)
struct r600_bo;
struct radeon {
struct radeon_winsys *ws;
struct radeon_info info;
unsigned family;
enum chip_class chip_class;
struct r600_tiling_info tiling_info;
unsigned fence;
unsigned *cfence;
struct r600_bo *fence_bo;
unsigned num_tile_pipes;
unsigned backend_map;
boolean backend_map_valid;
@@ -70,15 +65,11 @@ struct r600_reg {
struct radeon_bo {
struct pipe_reference reference;
struct pb_buffer *buf;
unsigned handle;
unsigned size;
int map_count;
void *data;
struct list_head fencedlist;
unsigned fence;
struct r600_context *ctx;
boolean shared;
struct r600_reloc *reloc;
unsigned reloc_id;
unsigned last_flush;
@@ -90,7 +81,6 @@ struct r600_bo {
/* DO NOT MOVE THIS ^ */
unsigned domains;
struct radeon_bo *bo;
unsigned fence;
};
/*


@@ -83,35 +83,24 @@ struct radeon_bo *radeon_bo(struct radeon *radeon, unsigned handle,
if (bo == NULL) {
return NULL;
}
bo->size = size;
bo->handle = handle;
pipe_reference_init(&bo->reference, 1);
LIST_INITHEAD(&bo->fencedlist);
if (handle) {
unsigned size;
bo->buf = radeon->ws->buffer_from_handle(radeon->ws, &whandle, NULL, &size);
if (!bo->buf) {
FREE(bo);
return NULL;
}
bo->handle = radeon->ws->trans_get_buffer_handle(bo->buf);
bo->size = size;
bo->shared = TRUE;
} else {
bo->buf = radeon->ws->buffer_create(radeon->ws, size, alignment, bind, initial_domain);
if (!bo->buf) {
FREE(bo);
return NULL;
}
bo->handle = radeon->ws->trans_get_buffer_handle(bo->buf);
}
if (!bo->buf) {
FREE(bo);
return NULL;
}
bo->handle = radeon->ws->trans_get_buffer_handle(bo->buf);
bo->size = size;
return bo;
}
static void radeon_bo_destroy(struct radeon *radeon, struct radeon_bo *bo)
{
LIST_DEL(&bo->fencedlist);
radeon_bo_fixed_unmap(radeon, bo);
pb_reference(&bo->buf, NULL);
FREE(bo);
@@ -133,16 +122,6 @@ int radeon_bo_wait(struct radeon *radeon, struct radeon_bo *bo)
struct drm_radeon_gem_wait_idle args;
int ret;
if (!bo->shared) {
if (!bo->fence)
return 0;
if (bo->fence <= *radeon->cfence) {
LIST_DELINIT(&bo->fencedlist);
bo->fence = 0;
return 0;
}
}
/* Zero out args to make valgrind happy */
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
@@ -158,16 +137,6 @@ int radeon_bo_busy(struct radeon *radeon, struct radeon_bo *bo, uint32_t *domain
struct drm_radeon_gem_busy args;
int ret;
if (!bo->shared) {
if (!bo->fence)
return 0;
if (bo->fence <= *radeon->cfence) {
LIST_DELINIT(&bo->fencedlist);
bo->fence = 0;
return 0;
}
}
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
args.domain = 0;