nouveau: add valid range tracking to nouveau_buffer

This logic is borrowed from the radeon code. The transfer logic will
only get called for PIPE_BUFFER resources, so it shouldn't be necessary
to worry about them becoming render targets.

Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
Reviewed-by: Christoph Bumiller <e0425955@student.tuwien.ac.at>
Ilia Mirkin, 2014-02-27 01:07:51 -05:00
parent cf1c52575d
commit 5bf90cb521
6 changed files with 47 additions and 0 deletions
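
For context before the diff: util_range (from util/u_range.h) tracks, per buffer, a single initialized interval that only ever grows. Below is a minimal standalone sketch of those semantics; the struct and function names are hypothetical stand-ins for illustration, not the actual helpers.

#include <stdbool.h>

/* Hypothetical mirror of the util_range semantics this commit relies on:
 * one [start, end) interval per buffer that only ever grows. */
struct valid_range {
   unsigned start;
   unsigned end;
};

/* No byte of the buffer holds defined data yet. */
static void range_set_empty(struct valid_range *r)
{
   r->start = ~0u;
   r->end = 0;
}

/* Record that [start, end) now holds defined data, merging with the
 * existing interval (over-approximating any gap between them). */
static void range_add(struct valid_range *r, unsigned start, unsigned end)
{
   if (start < r->start)
      r->start = start;
   if (end > r->end)
      r->end = end;
}

/* Does [start, end) overlap any initialized bytes? */
static bool ranges_intersect(const struct valid_range *r,
                             unsigned start, unsigned end)
{
   return r->start < r->end && start < r->end && end > r->start;
}

Note the deliberate over-approximation in range_add: writing bytes [0, 4) and then [100, 104) marks everything in between valid too. That errs toward synchronizing, which is always safe.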


@@ -69,6 +69,8 @@ nouveau_buffer_allocate(struct nouveau_screen *screen,
    if (buf->bo)
       buf->address = buf->bo->offset + buf->offset;
 
+   util_range_set_empty(&buf->valid_buffer_range);
+
    return TRUE;
 }
@@ -124,6 +126,8 @@ nouveau_buffer_destroy(struct pipe_screen *pscreen,
    nouveau_fence_ref(NULL, &res->fence);
    nouveau_fence_ref(NULL, &res->fence_wr);
 
+   util_range_destroy(&res->valid_buffer_range);
+
    FREE(res);
 
    NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
@@ -387,6 +391,17 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
    if (usage & PIPE_TRANSFER_WRITE)
       NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);
 
+   /* If we are trying to write to an uninitialized range, the user shouldn't
+    * care what was there before. So we can treat the write as if the target
+    * range were being discarded. Furthermore, since we know that even if this
+    * buffer is busy due to GPU activity, because the contents were
+    * uninitialized, the GPU can't care what was there, and so we can treat
+    * the write as being unsynchronized.
+    */
+   if ((usage & PIPE_TRANSFER_WRITE) &&
+       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
+      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
+
    if (buf->domain == NOUVEAU_BO_VRAM) {
       if (usage & NOUVEAU_TRANSFER_DISCARD) {
          /* Set up a staging area for the user to write to. It will be copied
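
The net effect of the new check above: a write that lands entirely outside the valid range is promoted to a discarding, unsynchronized write. A hedged sketch of that decision in isolation, reusing the hypothetical helpers from the earlier example; the XFER_* values are stand-ins for the real PIPE_TRANSFER_* flags from gallium's p_defines.h.

/* Stand-in flag values; the real driver uses PIPE_TRANSFER_*. */
enum {
   XFER_WRITE          = 1 << 0,
   XFER_DISCARD_RANGE  = 1 << 1,
   XFER_UNSYNCHRONIZED = 1 << 2,
};

/* If a write touches only never-initialized bytes, neither the user nor
 * the GPU can observe the previous contents, so the mapping can skip
 * synchronization and treat the target range as discarded. */
static unsigned promote_write_usage(const struct valid_range *valid,
                                    unsigned usage,
                                    unsigned x, unsigned width)
{
   if ((usage & XFER_WRITE) &&
       !ranges_intersect(valid, x, x + width))
      usage |= XFER_DISCARD_RANGE | XFER_UNSYNCHRONIZED;
   return usage;
}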
@@ -492,8 +507,14 @@ nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                      struct pipe_transfer *transfer,
                                      const struct pipe_box *box)
 {
    struct nouveau_transfer *tx = nouveau_transfer(transfer);
+   struct nv04_resource *buf = nv04_resource(transfer->resource);
 
    if (tx->map)
       nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
+
+   util_range_add(&buf->valid_buffer_range,
+                  tx->base.box.x + box->x,
+                  tx->base.box.x + box->x + box->width);
 }
@@ -522,6 +543,9 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
          if (bind & (PIPE_BIND_CONSTANT_BUFFER))
             nv->cb_dirty = TRUE;
       }
+
+      util_range_add(&buf->valid_buffer_range,
+                     tx->base.box.x, tx->base.box.x + tx->base.box.width);
    }
 
    if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
@@ -562,6 +586,8 @@ nouveau_copy_buffer(struct nouveau_context *nv,
                                 &dst->base, 0, dstx, 0, 0,
                                 &src->base, 0, &src_box);
    }
+
+   util_range_add(&dst->valid_buffer_range, dstx, dstx + size);
 }
@@ -659,6 +685,8 @@ nouveau_buffer_create(struct pipe_screen *pscreen,
    NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);
 
+   util_range_init(&buffer->valid_buffer_range);
+
    return &buffer->base;
 
 fail:
@@ -690,6 +718,9 @@ nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
    buffer->data = ptr;
    buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;
 
+   util_range_init(&buffer->valid_buffer_range);
+   util_range_add(&buffer->valid_buffer_range, 0, bytes);
+
    return &buffer->base;
 }
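
Taken together, the nouveau_buffer.c hunks above give the range a simple lifecycle: empty at allocation, grown by mapped writes (flush_region and unmap) and by nouveau_copy_buffer, and fully valid from the start for user buffers, whose memory the application has already initialized. A short worked example using the hypothetical helpers from the first sketch:

#include <assert.h>

static void valid_range_lifecycle_example(void)
{
   struct valid_range r;

   /* nouveau_buffer_create: nothing valid yet, so the very first
    * write may be treated as discarding and unsynchronized. */
   range_set_empty(&r);
   assert(!ranges_intersect(&r, 0, 64));

   /* flush_region / unmap / copy: record the bytes that were written. */
   range_add(&r, 0, 64);

   /* A later write overlapping [0, 64) must synchronize normally. */
   assert(ranges_intersect(&r, 32, 96));

   /* nouveau_user_buffer_create: user memory is fully initialized,
    * so the whole buffer is marked valid at creation. */
   range_add(&r, 0, 256);
   assert(ranges_intersect(&r, 200, 201));
}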


@@ -1,6 +1,7 @@
 #ifndef __NOUVEAU_BUFFER_H__
 #define __NOUVEAU_BUFFER_H__
 
+#include "util/u_range.h"
 #include "util/u_transfer.h"
 #include "util/u_double_list.h"
@@ -44,6 +45,9 @@ struct nv04_resource {
    struct nouveau_fence *fence_wr;
 
    struct nouveau_mm_allocation *mm;
+
+   /* buffer range that has been initialized */
+   struct util_range valid_buffer_range;
 };
 
 void


@@ -68,6 +68,8 @@ nv50_surface_create(struct pipe_context *pipe,
                     struct pipe_resource *pres,
                     const struct pipe_surface *templ)
 {
+   /* surfaces are assumed to be miptrees all over the place. */
+   assert(pres->target != PIPE_BUFFER);
    if (unlikely(pres->target == PIPE_BUFFER))
       return nv50_surface_from_buffer(pipe, pres, templ);
    return nv50_miptree_surface_new(pipe, pres, templ);


@@ -1010,6 +1010,7 @@ nv50_so_target_create(struct pipe_context *pipe,
                       struct pipe_resource *res,
                       unsigned offset, unsigned size)
 {
+   struct nv04_resource *buf = (struct nv04_resource *)res;
    struct nv50_so_target *targ = MALLOC_STRUCT(nv50_so_target);
    if (!targ)
       return NULL;
@@ -1033,6 +1034,9 @@ nv50_so_target_create(struct pipe_context *pipe,
    pipe_resource_reference(&targ->pipe.buffer, res);
    pipe_reference_init(&targ->pipe.reference, 1);
 
+   assert(buf->base.target == PIPE_BUFFER);
+   util_range_add(&buf->valid_buffer_range, offset, offset + size);
+
    return &targ->pipe;
 }
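
The stream-output change covers the opposite direction: here the GPU, not the CPU, initializes the bytes, so the range must be marked valid as soon as the target is created. A sketch of why, again with the hypothetical helpers; without the range_add at creation time, a CPU write into the target would look like a write to uninitialized memory and race the GPU's transform feedback.

static void so_target_example(void)
{
   struct valid_range r;
   unsigned offset = 0, size = 512;

   range_set_empty(&r);

   /* so_target_create: the GPU will write [offset, offset + size),
    * so treat those bytes as initialized up front. */
   range_add(&r, offset, offset + size);

   /* A mapped CPU write inside that range now intersects the valid
    * range, is NOT promoted to an unsynchronized write, and therefore
    * waits for the GPU before touching the memory. */
   assert(ranges_intersect(&r, 128, 256));
}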


@@ -36,6 +36,8 @@ nvc0_surface_create(struct pipe_context *pipe,
                     struct pipe_resource *pres,
                     const struct pipe_surface *templ)
 {
+   /* surfaces are assumed to be miptrees all over the place. */
+   assert(pres->target != PIPE_BUFFER);
    if (unlikely(pres->target == PIPE_BUFFER))
       return nv50_surface_from_buffer(pipe, pres, templ);
    return nvc0_miptree_surface_new(pipe, pres, templ);


@@ -992,6 +992,7 @@ nvc0_so_target_create(struct pipe_context *pipe,
                       struct pipe_resource *res,
                       unsigned offset, unsigned size)
 {
+   struct nv04_resource *buf = (struct nv04_resource *)res;
    struct nvc0_so_target *targ = MALLOC_STRUCT(nvc0_so_target);
    if (!targ)
       return NULL;
@@ -1010,6 +1011,9 @@ nvc0_so_target_create(struct pipe_context *pipe,
    pipe_resource_reference(&targ->pipe.buffer, res);
    pipe_reference_init(&targ->pipe.reference, 1);
 
+   assert(buf->base.target == PIPE_BUFFER);
+   util_range_add(&buf->valid_buffer_range, offset, offset + size);
+
    return &targ->pipe;
 }