Merge branch 'vbo_clean'

Conflicts:
	src/mesa/drivers/dri/r300/r300_draw.c
This commit is contained in:
Maciej Cencora 2009-08-15 00:52:44 +02:00
commit a89963cec1
13 changed files with 642 additions and 355 deletions

View File

@ -36,7 +36,8 @@ RADEON_COMMON_SOURCES = \
radeon_cs_legacy.c \
radeon_mipmap_tree.c \
radeon_span.c \
radeon_fbo.c
radeon_fbo.c \
radeon_buffer_objects.c
DRIVER_SOURCES = \
radeon_screen.c \

View File

@ -64,9 +64,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "r300_ioctl.h"
#include "r300_tex.h"
#include "r300_emit.h"
#include "r300_render.h"
#include "r300_swtcl.h"
#include "radeon_bocs_wrapper.h"
#include "radeon_buffer_objects.h"
#include "vblank.h"
@ -154,7 +154,6 @@ const struct dri_extension gl_20_extension[] = {
};
static const struct tnl_pipeline_stage *r300_pipeline[] = {
/* Catch any t&l fallbacks
*/
&_tnl_vertex_transform_stage,
@ -165,21 +164,7 @@ static const struct tnl_pipeline_stage *r300_pipeline[] = {
&_tnl_texture_transform_stage,
&_tnl_point_attenuation_stage,
&_tnl_vertex_program_stage,
/* Try again to go to tcl?
* - no good for asymmetric-twoside (do with multipass)
* - no good for asymmetric-unfilled (do with multipass)
* - good for material
* - good for texgen
* - need to manipulate a bit of state
*
* - worth it/not worth it?
*/
/* Else do them here.
*/
&_r300_render_stage,
&_tnl_render_stage, /* FALLBACK */
&_tnl_render_stage,
0,
};
@ -398,6 +383,7 @@ GLboolean r300CreateContext(const __GLcontextModes * glVisual,
r300InitStateFuncs(&functions);
r300InitTextureFuncs(&functions);
r300InitShaderFuncs(&functions);
radeonInitBufferObjectFuncs(&functions);
if (!radeonInitContext(&r300->radeon, &functions,
glVisual, driContextPriv,

View File

@ -478,11 +478,12 @@ struct r300_vertex_buffer {
struct vertex_attribute {
/* generic */
GLubyte element;
GLvoid *data;
GLboolean free_needed;
GLuint stride;
GLuint dwords;
GLubyte size; /* number of components */
GLboolean is_named_bo;
struct radeon_bo *bo;
GLint bo_offset;
/* hw specific */
uint32_t data_type:4;
@ -497,9 +498,10 @@ struct r300_vertex_buffer {
};
struct r300_index_buffer {
GLvoid *ptr;
struct radeon_bo *bo;
int bo_offset;
GLboolean is_32bit;
GLboolean free_needed;
GLuint count;
};

View File

@ -39,78 +39,14 @@
#include "r300_state.h"
#include "r300_tex.h"
#include "radeon_buffer_objects.h"
#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
static void r300FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf, struct gl_buffer_object **bo, GLuint *nr_bo)
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
struct r300_index_buffer *ind_buf = &r300->ind_buf;
GLvoid *src_ptr;
if (!mesa_ind_buf) {
ind_buf->ptr = NULL;
return;
}
ind_buf->count = mesa_ind_buf->count;
if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
bo[*nr_bo] = mesa_ind_buf->obj;
(*nr_bo)++;
ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
assert(mesa_ind_buf->obj->Pointer != NULL);
}
src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);
if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
GLubyte *in = (GLubyte *)src_ptr;
GLuint *out = _mesa_malloc(sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1));
int i;
ind_buf->ptr = out;
for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
*out++ = in[i] | in[i + 1] << 16;
}
if (i < mesa_ind_buf->count) {
*out++ = in[i];
}
ind_buf->free_needed = GL_TRUE;
ind_buf->is_32bit = GL_FALSE;
} else if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) {
#if MESA_BIG_ENDIAN
GLushort *in = (GLushort *)src_ptr;
GLuint *out = _mesa_malloc(sizeof(GLushort) *
((mesa_ind_buf->count + 1) & ~1));
int i;
ind_buf->ptr = out;
for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
*out++ = in[i] | in[i + 1] << 16;
}
if (i < mesa_ind_buf->count) {
*out++ = in[i];
}
ind_buf->free_needed = GL_TRUE;
#else
ind_buf->ptr = src_ptr;
ind_buf->free_needed = GL_FALSE;
#endif
ind_buf->is_32bit = GL_FALSE;
} else {
ind_buf->ptr = src_ptr;
ind_buf->free_needed = GL_FALSE;
ind_buf->is_32bit = GL_TRUE;
}
}
static int getTypeSize(GLenum type)
{
@ -137,6 +73,112 @@ static int getTypeSize(GLenum type)
}
}
/**
 * Convert an index buffer the hardware cannot consume directly into a
 * freshly allocated DMA region.
 *
 * GL_UNSIGNED_BYTE indices are widened to 16 bit (no 8-bit index format in
 * hardware), packed two per dword.  On big-endian hosts GL_UNSIGNED_SHORT
 * indices also take this path so they end up with the byte order the GPU
 * expects.  The resulting buffer is always treated as 16-bit indices
 * (is_32bit = GL_FALSE).
 *
 * Fixes vs. previous revision: the MESA_BIG_ENDIAN branch used `size`
 * without declaring it, misspelled `bo_offset` as `bo_offet`, and was
 * missing the semicolon after assert() — none of which could compile.
 */
static void r300FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	GLvoid *src_ptr;
	GLuint *out;
	int i;
	GLboolean mapped_named_bo = GL_FALSE;

	/* A named buffer object may not be mapped yet; map it read-only for the copy. */
	if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
		ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
		mapped_named_bo = GL_TRUE;
		assert(mesa_ind_buf->obj->Pointer != NULL);
	}
	src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

	if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
		/* Widen 8-bit indices to 16 bit, two indices per output dword.
		 * Round the element count up so a trailing odd index still gets
		 * a full dword. */
		GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
		GLubyte *in = (GLubyte *)src_ptr;

		radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
		assert(r300->ind_buf.bo->ptr != NULL);
		out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);

		for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
			*out++ = in[i] | in[i + 1] << 16;
		}
		if (i < mesa_ind_buf->count) {
			*out++ = in[i];
		}
#if MESA_BIG_ENDIAN
	} else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
		/* Repack 16-bit indices pairwise so the GPU sees them in the
		 * byte order it expects on big-endian hosts. */
		GLushort *in = (GLushort *)src_ptr;
		GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

		radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
		assert(r300->ind_buf.bo->ptr != NULL);
		out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);

		for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
			*out++ = in[i] | in[i + 1] << 16;
		}
		if (i < mesa_ind_buf->count) {
			*out++ = in[i];
		}
#endif
	}

	r300->ind_buf.is_32bit = GL_FALSE;
	r300->ind_buf.count = mesa_ind_buf->count;

	if (mapped_named_bo) {
		ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
	}
}
/**
 * Stage the element array for the upcoming draw.
 *
 * Index formats the hardware can consume directly are memcpy'd into a DMA
 * region; everything else is routed through r300FixupIndexBuffer() for
 * conversion.  With no index buffer at all, ind_buf.bo is cleared so the
 * draw path falls back to sequential vertices.
 */
static void r300SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	GLboolean hw_compatible;

	if (mesa_ind_buf == NULL) {
		r300->ind_buf.bo = NULL;
		return;
	}

	/* On big-endian hosts only 32-bit indices can be copied verbatim
	 * (16-bit ones need byte swapping); on little-endian everything but
	 * 8-bit indices can. */
#if MESA_BIG_ENDIAN
	hw_compatible = (mesa_ind_buf->type == GL_UNSIGNED_INT);
#else
	hw_compatible = (mesa_ind_buf->type != GL_UNSIGNED_BYTE);
#endif

	if (!hw_compatible) {
		r300FixupIndexBuffer(ctx, mesa_ind_buf);
		return;
	}

	{
		const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);
		GLboolean unmap_needed = GL_FALSE;
		const GLvoid *src;
		GLvoid *dst;

		/* Named buffer objects may need a temporary read-only mapping. */
		if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
			ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
			assert(mesa_ind_buf->obj->Pointer != NULL);
			unmap_needed = GL_TRUE;
		}
		src = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

		radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
		assert(r300->ind_buf.bo->ptr != NULL);
		dst = ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
		_mesa_memcpy(dst, src, size);

		r300->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
		r300->ind_buf.count = mesa_ind_buf->count;

		if (unmap_needed) {
			ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
		}
	}
}
#define CONVERT( TYPE, MACRO ) do { \
GLuint i, j, sz; \
sz = input->Size; \
@ -161,27 +203,119 @@ static int getTypeSize(GLenum type)
} \
} while (0)
static void r300TranslateAttrib(GLcontext *ctx, GLuint attr, int count, const struct gl_client_array *input, struct gl_buffer_object **bo, GLuint *nr_bo)
/**
* Convert attribute data type to float
* If the attribute uses named buffer object replace the bo with newly allocated bo
*/
static void r300ConvertAttrib(GLcontext *ctx, int count, const struct gl_client_array *input, struct vertex_attribute *attr)
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
const GLvoid *src_ptr;
GLboolean mapped_named_bo = GL_FALSE;
GLfloat *dst_ptr;
GLuint stride;
stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;
/* Convert value for first element only */
if (input->StrideB == 0)
count = 1;
if (input->BufferObj->Name) {
if (!input->BufferObj->Pointer) {
ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
mapped_named_bo = GL_TRUE;
}
src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
} else {
src_ptr = input->Ptr;
}
radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
if (RADEON_DEBUG & DEBUG_FALLBACKS) {
fprintf(stderr, "%s: Converting vertex attributes, attribute data format %x,", __FUNCTION__, input->Type);
fprintf(stderr, "stride %d, components %d\n", stride, input->Size);
}
assert(src_ptr != NULL);
switch (input->Type) {
case GL_DOUBLE:
CONVERT(GLdouble, (GLfloat));
break;
case GL_UNSIGNED_INT:
CONVERT(GLuint, UINT_TO_FLOAT);
break;
case GL_INT:
CONVERT(GLint, INT_TO_FLOAT);
break;
case GL_UNSIGNED_SHORT:
CONVERT(GLushort, USHORT_TO_FLOAT);
break;
case GL_SHORT:
CONVERT(GLshort, SHORT_TO_FLOAT);
break;
case GL_UNSIGNED_BYTE:
assert(input->Format != GL_BGRA);
CONVERT(GLubyte, UBYTE_TO_FLOAT);
break;
case GL_BYTE:
CONVERT(GLbyte, BYTE_TO_FLOAT);
break;
default:
assert(0);
break;
}
if (mapped_named_bo) {
ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
}
}
/**
 * Repack attribute data from a named buffer object whose stride is not
 * dword-aligned into a DMA region with the stride rounded up to a multiple
 * of 4, as required by the hardware fetcher.
 *
 * Fix vs. previous revision: the DMA region was sized
 * getTypeSize(Type) * Size * count, but it is written with dst_stride
 * spacing and dst_stride >= StrideB >= element size, so the last elements
 * overran the allocation.  Size by dst_stride * count instead.  Pointer
 * arithmetic is done on GLubyte* rather than GLvoid* (arithmetic on void*
 * is not valid ISO C).
 */
static void r300AlignDataToDword(GLcontext *ctx, const struct gl_client_array *input, int count, struct vertex_attribute *attr)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	const int dst_stride = (input->StrideB + 3) & ~3;
	GLboolean mapped_named_bo = GL_FALSE;

	/* Allocate for the padded stride, not the raw element size. */
	radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, dst_stride * count, 32);

	/* Caller guarantees a named BO; map it if it isn't mapped already. */
	if (!input->BufferObj->Pointer) {
		ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
		mapped_named_bo = GL_TRUE;
	}

	{
		const GLubyte *src_ptr = (const GLubyte *)ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
		GLubyte *dst_ptr = (GLubyte *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
		int i;

		/* Copy each element into its dword-aligned slot; the padding
		 * bytes at the end of each slot are never read by the GPU
		 * beyond the rounded-up element size. */
		for (i = 0; i < count; ++i) {
			_mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
			src_ptr += input->StrideB;
			dst_ptr += dst_stride;
		}
	}

	if (mapped_named_bo) {
		ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
	}

	attr->stride = dst_stride;
}
static void r300TranslateAttrib(GLcontext *ctx, GLuint attr, int count, const struct gl_client_array *input)
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
struct r300_vertex_buffer *vbuf = &r300->vbuf;
struct vertex_attribute r300_attr;
const void *src_ptr;
GLenum type;
GLuint stride;
if (input->BufferObj->Name) {
if (!input->BufferObj->Pointer) {
bo[*nr_bo] = input->BufferObj;
(*nr_bo)++;
ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
assert(input->BufferObj->Pointer != NULL);
}
src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
} else
src_ptr = input->Ptr;
stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;
if (input->Type == GL_DOUBLE || input->Type == GL_UNSIGNED_INT || input->Type == GL_INT ||
@ -189,62 +323,57 @@ static void r300TranslateAttrib(GLcontext *ctx, GLuint attr, int count, const st
getTypeSize(input->Type) != 4 ||
#endif
stride < 4) {
if (RADEON_DEBUG & DEBUG_FALLBACKS) {
fprintf(stderr, "%s: Converting vertex attributes, attribute data format %x,", __FUNCTION__, input->Type);
fprintf(stderr, "stride %d, components %d\n", stride, input->Size);
}
GLfloat *dst_ptr, *tmp;
/* Convert value for first element only */
if (input->StrideB == 0)
count = 1;
tmp = dst_ptr = _mesa_malloc(sizeof(GLfloat) * input->Size * count);
switch (input->Type) {
case GL_DOUBLE:
CONVERT(GLdouble, (GLfloat));
break;
case GL_UNSIGNED_INT:
CONVERT(GLuint, UINT_TO_FLOAT);
break;
case GL_INT:
CONVERT(GLint, INT_TO_FLOAT);
break;
case GL_UNSIGNED_SHORT:
CONVERT(GLushort, USHORT_TO_FLOAT);
break;
case GL_SHORT:
CONVERT(GLshort, SHORT_TO_FLOAT);
break;
case GL_UNSIGNED_BYTE:
assert(input->Format != GL_BGRA);
CONVERT(GLubyte, UBYTE_TO_FLOAT);
break;
case GL_BYTE:
CONVERT(GLbyte, BYTE_TO_FLOAT);
break;
default:
assert(0);
break;
}
type = GL_FLOAT;
r300_attr.free_needed = GL_TRUE;
r300_attr.data = tmp;
r300ConvertAttrib(ctx, count, input, &r300_attr);
if (input->StrideB == 0) {
r300_attr.stride = 0;
} else {
r300_attr.stride = sizeof(GLfloat) * input->Size;
}
r300_attr.dwords = input->Size;
r300_attr.is_named_bo = GL_FALSE;
} else {
type = input->Type;
r300_attr.free_needed = GL_FALSE;
r300_attr.data = (GLvoid *)src_ptr;
r300_attr.stride = input->StrideB;
r300_attr.dwords = (getTypeSize(type) * input->Size + 3)/ 4;
r300_attr.dwords = (getTypeSize(type) * input->Size + 3)/ 4;
if (input->BufferObj->Name) {
if (stride % 4 != 0) {
assert(((int) input->Ptr) % input->StrideB == 0);
r300AlignDataToDword(ctx, input, count, &r300_attr);
r300_attr.is_named_bo = GL_FALSE;
} else {
r300_attr.stride = input->StrideB;
r300_attr.bo_offset = (GLuint) input->Ptr;
r300_attr.bo = get_radeon_buffer_object(input->BufferObj)->bo;
r300_attr.is_named_bo = GL_TRUE;
}
} else {
int size;
uint32_t *dst;
if (input->StrideB == 0) {
size = getTypeSize(input->Type) * input->Size;
count = 1;
r300_attr.stride = 0;
} else {
size = getTypeSize(input->Type) * input->Size * count;
r300_attr.stride = (getTypeSize(type) * input->Size + 3) & ~3;
}
radeonAllocDmaRegion(&r300->radeon, &r300_attr.bo, &r300_attr.bo_offset, size, 32);
assert(r300_attr.bo->ptr != NULL);
dst = (uint32_t *)ADD_POINTERS(r300_attr.bo->ptr, r300_attr.bo_offset);
switch (r300_attr.dwords) {
case 1: radeonEmitVec4(dst, input->Ptr, input->StrideB, count); break;
case 2: radeonEmitVec8(dst, input->Ptr, input->StrideB, count); break;
case 3: radeonEmitVec12(dst, input->Ptr, input->StrideB, count); break;
case 4: radeonEmitVec16(dst, input->Ptr, input->StrideB, count); break;
default: assert(0); break;
}
r300_attr.is_named_bo = GL_FALSE;
}
}
r300_attr.size = input->Size;
@ -333,7 +462,7 @@ static void r300TranslateAttrib(GLcontext *ctx, GLuint attr, int count, const st
++vbuf->num_attribs;
}
static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *arrays[], int count, struct gl_buffer_object **bo, GLuint *nr_bo)
static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *arrays[], int count)
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
struct r300_vertex_buffer *vbuf = &r300->vbuf;
@ -351,7 +480,7 @@ static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *ar
++i;
}
r300TranslateAttrib(ctx, i, count, arrays[i], bo, nr_bo);
r300TranslateAttrib(ctx, i, count, arrays[i]);
tmp >>= 1;
++i;
@ -366,38 +495,54 @@ static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *ar
int i;
for (i = 0; i < vbuf->num_attribs; i++) {
rcommon_emit_vector(ctx, &r300->radeon.tcl.aos[i],
vbuf->attribs[i].data, vbuf->attribs[i].dwords,
vbuf->attribs[i].stride, count);
}
struct radeon_aos *aos = &r300->radeon.tcl.aos[i];
aos->count = vbuf->attribs[i].stride == 0 ? 1 : count;
aos->stride = vbuf->attribs[i].stride / sizeof(float);
aos->offset = vbuf->attribs[i].bo_offset;
aos->components = vbuf->attribs[i].dwords;
aos->bo = vbuf->attribs[i].bo;
radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs,
r300->vbuf.attribs[i].bo,
RADEON_GEM_DOMAIN_GTT, 0);
if (vbuf->attribs[i].is_named_bo) {
radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs,
r300->vbuf.attribs[i].bo,
RADEON_GEM_DOMAIN_GTT, 0);
}
}
r300->radeon.tcl.aos_count = vbuf->num_attribs;
if (r300->ind_buf.bo) {
radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs,
r300->ind_buf.bo,
RADEON_GEM_DOMAIN_GTT, 0);
}
}
}
static void r300FreeData(GLcontext *ctx, struct gl_buffer_object **bo, GLuint nr_bo)
static void r300FreeData(GLcontext *ctx)
{
/* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
* to prevent double unref in radeonReleaseArrays
* called during context destroy
*/
r300ContextPtr r300 = R300_CONTEXT(ctx);
{
struct r300_vertex_buffer *vbuf = &R300_CONTEXT(ctx)->vbuf;
int i;
for (i = 0; i < vbuf->num_attribs; i++) {
if (vbuf->attribs[i].free_needed)
_mesa_free(vbuf->attribs[i].data);
for (i = 0; i < r300->vbuf.num_attribs; i++) {
if (!r300->vbuf.attribs[i].is_named_bo) {
radeon_bo_unref(r300->vbuf.attribs[i].bo);
}
r300->radeon.tcl.aos[i].bo = NULL;
}
}
{
struct r300_index_buffer *ind_buf = &R300_CONTEXT(ctx)->ind_buf;
if (ind_buf->free_needed)
_mesa_free(ind_buf->ptr);
}
{
int i;
for (i = 0; i < nr_bo; ++i) {
ctx->Driver.UnmapBuffer(ctx, 0, bo[i]);
if (r300->ind_buf.bo != NULL) {
radeon_bo_unref(r300->ind_buf.bo);
}
}
}
@ -411,8 +556,7 @@ static GLboolean r300TryDrawPrims(GLcontext *ctx,
GLuint max_index )
{
struct r300_context *r300 = R300_CONTEXT(ctx);
struct gl_buffer_object *bo[VERT_ATTRIB_MAX+1];
GLuint i, nr_bo = 0;
GLuint i;
if (ctx->NewState)
_mesa_update_state( ctx );
@ -424,7 +568,7 @@ static GLboolean r300TryDrawPrims(GLcontext *ctx,
r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, !r300ValidateBuffers(ctx));
r300FixupIndexBuffer(ctx, ib, bo, &nr_bo);
r300SetupIndexBuffer(ctx, ib);
/* ensure we have the cmd buf space in advance to cover
* the state + DMA AOS pointers */
@ -432,7 +576,7 @@ static GLboolean r300TryDrawPrims(GLcontext *ctx,
r300->radeon.hw.max_state_size + (50*sizeof(int)),
__FUNCTION__);
r300SetVertexFormat(ctx, arrays, max_index + 1, bo, &nr_bo);
r300SetVertexFormat(ctx, arrays, max_index + 1);
if (r300->fallback)
return GL_FALSE;
@ -450,9 +594,7 @@ static GLboolean r300TryDrawPrims(GLcontext *ctx,
r300EmitCacheFlush(r300);
radeonReleaseArrays(ctx, ~0);
r300FreeData(ctx, bo, nr_bo);
r300FreeData(ctx);
return GL_TRUE;
}
@ -466,31 +608,19 @@ static void r300DrawPrims(GLcontext *ctx,
GLuint min_index,
GLuint max_index)
{
struct split_limits limits;
GLboolean retval;
if (ib)
limits.max_verts = 0xffffffff;
else
limits.max_verts = 65535;
limits.max_indices = 65535;
limits.max_vb_size = 1024*1024;
/* This check should get folded into just the places that
* min/max index are really needed.
*/
if (!index_bounds_valid)
vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
if (!index_bounds_valid) {
vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
}
if (min_index) {
vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims );
return;
}
if ((ib && ib->count > 65535)) {
vbo_split_prims (ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims, &limits);
return;
}
/* Make an attempt at drawing */
retval = r300TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

View File

@ -124,41 +124,6 @@ GLuint r300VAPOutputCntl1(GLcontext * ctx, GLuint vp_writes)
return ret;
}
GLboolean r300EmitArrays(GLcontext * ctx)
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
struct r300_vertex_buffer *vbuf = &r300->vbuf;
GLuint InputsRead, OutputsWritten;
r300ChooseSwtclVertexFormat(ctx, &InputsRead, &OutputsWritten);
r300SwitchFallback(ctx, R300_FALLBACK_AOS_LIMIT, vbuf->num_attribs > R300_MAX_AOS_ARRAYS);
if (r300->fallback & R300_RASTER_FALLBACK_MASK)
return GL_FALSE;
{
struct vertex_buffer *mesa_vb = &TNL_CONTEXT(ctx)->vb;
GLuint attr, i;
for (i = 0; i < vbuf->num_attribs; i++) {
attr = vbuf->attribs[i].element;
rcommon_emit_vector(ctx, &r300->radeon.tcl.aos[i], mesa_vb->AttribPtr[attr]->data,
mesa_vb->AttribPtr[attr]->size, mesa_vb->AttribPtr[attr]->stride, mesa_vb->Count);
}
r300->radeon.tcl.aos_count = vbuf->num_attribs;
/* Fill index buffer info */
r300->ind_buf.ptr = mesa_vb->Elts;
r300->ind_buf.is_32bit = GL_TRUE;
r300->ind_buf.free_needed = GL_FALSE;
}
r300SetupVAP(ctx, InputsRead, OutputsWritten);
return GL_TRUE;
}
void r300EmitCacheFlush(r300ContextPtr rmesa)
{
BATCH_LOCALS(&rmesa->radeon);

View File

@ -104,7 +104,7 @@ static INLINE uint32_t cmdpacket3(struct radeon_screen *rscrn, int packet)
return cmd.u;
}
static INLINE uint32_t cmdcpdelay(struct radeon_screen *rscrn,
static INLINE uint32_t cmdcpdelay(struct radeon_screen *rscrn,
unsigned short count)
{
drm_r300_cmd_header_t cmd;
@ -216,8 +216,6 @@ void static INLINE cp_wait(radeonContextPtr radeon, unsigned char flags)
}
}
extern GLboolean r300EmitArrays(GLcontext * ctx);
extern int r300PrimitiveType(r300ContextPtr rmesa, int prim);
extern int r300NumVerts(r300ContextPtr rmesa, int num_verts, int prim);

View File

@ -172,64 +172,42 @@ int r300NumVerts(r300ContextPtr rmesa, int num_verts, int prim)
return num_verts - verts_off;
}
static void r300EmitElts(GLcontext * ctx, unsigned long n_elts)
{
r300ContextPtr rmesa = R300_CONTEXT(ctx);
void *out;
GLuint size;
size = ((rmesa->ind_buf.is_32bit ? 4 : 2) * n_elts + 3) & ~3;
radeonAllocDmaRegion(&rmesa->radeon, &rmesa->radeon.tcl.elt_dma_bo,
&rmesa->radeon.tcl.elt_dma_offset, size, 4);
radeon_bo_map(rmesa->radeon.tcl.elt_dma_bo, 1);
out = rmesa->radeon.tcl.elt_dma_bo->ptr + rmesa->radeon.tcl.elt_dma_offset;
memcpy(out, rmesa->ind_buf.ptr, size);
radeon_bo_unmap(rmesa->radeon.tcl.elt_dma_bo);
}
static void r300FireEB(r300ContextPtr rmesa, int vertex_count, int type)
{
BATCH_LOCALS(&rmesa->radeon);
int size;
r300_emit_scissor(rmesa->radeon.glCtx);
if (vertex_count > 0) {
int size;
r300_emit_scissor(rmesa->radeon.glCtx);
BEGIN_BATCH(10);
OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_INDX_2, 0);
if (rmesa->ind_buf.is_32bit) {
size = vertex_count;
OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_INDICES |
((vertex_count + 0) << 16) | type |
R300_VAP_VF_CNTL__INDEX_SIZE_32bit);
} else {
size = (vertex_count + 1) >> 1;
OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_INDICES |
((vertex_count + 0) << 16) | type);
}
if (!rmesa->radeon.radeonScreen->kernel_mm) {
OUT_BATCH_PACKET3(R300_PACKET3_INDX_BUFFER, 2);
OUT_BATCH(R300_INDX_BUFFER_ONE_REG_WR | (0 << R300_INDX_BUFFER_SKIP_SHIFT) |
(R300_VAP_PORT_IDX0 >> 2));
OUT_BATCH_RELOC(rmesa->radeon.tcl.elt_dma_offset,
rmesa->radeon.tcl.elt_dma_bo,
rmesa->radeon.tcl.elt_dma_offset,
RADEON_GEM_DOMAIN_GTT, 0, 0);
OUT_BATCH(size);
} else {
OUT_BATCH_PACKET3(R300_PACKET3_INDX_BUFFER, 2);
OUT_BATCH(R300_INDX_BUFFER_ONE_REG_WR | (0 << R300_INDX_BUFFER_SKIP_SHIFT) |
(R300_VAP_PORT_IDX0 >> 2));
OUT_BATCH(rmesa->radeon.tcl.elt_dma_offset);
OUT_BATCH(size);
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->radeon.tcl.elt_dma_bo,
RADEON_GEM_DOMAIN_GTT, 0, 0);
}
END_BATCH();
BEGIN_BATCH(10);
OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_INDX_2, 0);
if (rmesa->ind_buf.is_32bit) {
size = vertex_count;
OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_INDICES |
(vertex_count << 16) | type |
R300_VAP_VF_CNTL__INDEX_SIZE_32bit);
} else {
size = (vertex_count + 1) >> 1;
OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_INDICES |
(vertex_count << 16) | type);
}
if (!rmesa->radeon.radeonScreen->kernel_mm) {
OUT_BATCH_PACKET3(R300_PACKET3_INDX_BUFFER, 2);
OUT_BATCH(R300_INDX_BUFFER_ONE_REG_WR | (0 << R300_INDX_BUFFER_SKIP_SHIFT) |
(R300_VAP_PORT_IDX0 >> 2));
OUT_BATCH_RELOC(0, rmesa->ind_buf.bo, rmesa->ind_buf.bo_offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
OUT_BATCH(size);
} else {
OUT_BATCH_PACKET3(R300_PACKET3_INDX_BUFFER, 2);
OUT_BATCH(R300_INDX_BUFFER_ONE_REG_WR | (0 << R300_INDX_BUFFER_SKIP_SHIFT) |
(R300_VAP_PORT_IDX0 >> 2));
OUT_BATCH(rmesa->ind_buf.bo_offset);
OUT_BATCH(size);
radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->ind_buf.bo, RADEON_GEM_DOMAIN_GTT, 0, 0);
}
END_BATCH();
}
static void r300EmitAOS(r300ContextPtr rmesa, GLuint nr, GLuint offset)
@ -359,14 +337,18 @@ void r300RunRenderPrimitive(GLcontext * ctx, int start, int end, int prim)
if (type < 0 || num_verts <= 0)
return;
if (num_verts > 65535) {
WARN_ONCE("Can't handle more then 65535 vertices at once\n");
return;
}
/* Make space for at least 128 dwords.
* This is supposed to ensure that we can get all rendering
* commands into a single command buffer.
*/
rcommonEnsureCmdBufSpace(&rmesa->radeon, 128, __FUNCTION__);
if (rmesa->ind_buf.ptr) {
r300EmitElts(ctx, num_verts);
if (rmesa->ind_buf.bo) {
r300EmitAOS(rmesa, rmesa->radeon.tcl.aos_count, 0);
if (rmesa->radeon.radeonScreen->kernel_mm) {
BEGIN_BATCH_NO_AUTOSTATE(2);
@ -382,37 +364,6 @@ void r300RunRenderPrimitive(GLcontext * ctx, int start, int end, int prim)
COMMIT_BATCH();
}
static void r300RunRender(GLcontext * ctx, struct tnl_pipeline_stage *stage)
{
r300ContextPtr rmesa = R300_CONTEXT(ctx);
int i;
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *vb = &tnl->vb;
if (RADEON_DEBUG & DEBUG_PRIMS)
fprintf(stderr, "%s\n", __FUNCTION__);
r300UpdateShaders(rmesa);
r300EmitArrays(ctx);
r300UpdateShaderStates(rmesa);
r300EmitCacheFlush(rmesa);
radeonEmitState(&rmesa->radeon);
for (i = 0; i < vb->PrimitiveCount; i++) {
GLuint prim = _tnl_translate_prim(&vb->Primitive[i]);
GLuint start = vb->Primitive[i].start;
GLuint end = vb->Primitive[i].start + vb->Primitive[i].count;
r300RunRenderPrimitive(ctx, start, end, prim);
}
r300EmitCacheFlush(rmesa);
radeonReleaseArrays(ctx, ~0);
}
static const char *getFallbackString(uint32_t bit)
{
switch (bit) {
@ -449,7 +400,7 @@ void r300SwitchFallback(GLcontext *ctx, uint32_t bit, GLboolean mode)
r300ContextPtr rmesa = R300_CONTEXT(ctx);
uint32_t old_fallback = rmesa->fallback;
static uint32_t fallback_warn = 0;
if (mode) {
if ((fallback_warn & bit) == 0) {
if (RADEON_DEBUG & DEBUG_FALLBACKS)
@ -470,7 +421,7 @@ void r300SwitchFallback(GLcontext *ctx, uint32_t bit, GLboolean mode)
/* update only if we change from no raster fallbacks to some raster fallbacks */
if (((old_fallback & R300_RASTER_FALLBACK_MASK) == 0) &&
((bit & R300_RASTER_FALLBACK_MASK) > 0)) {
radeon_firevertices(&rmesa->radeon);
rmesa->radeon.swtcl.RenderIndex = ~0;
_swsetup_Wakeup( ctx );
@ -489,7 +440,7 @@ void r300SwitchFallback(GLcontext *ctx, uint32_t bit, GLboolean mode)
/* update only if we have disabled all raster fallbacks */
if ((old_fallback & R300_RASTER_FALLBACK_MASK) == bit) {
_swrast_flush( ctx );
tnl->Driver.Render.Start = r300RenderStart;
tnl->Driver.Render.Finish = r300RenderFinish;
tnl->Driver.Render.PrimitiveNotify = r300RenderPrimitive;
@ -497,38 +448,10 @@ void r300SwitchFallback(GLcontext *ctx, uint32_t bit, GLboolean mode)
tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
tnl->Driver.Render.CopyPV = _tnl_copy_pv;
tnl->Driver.Render.Interp = _tnl_interp;
_tnl_invalidate_vertex_state( ctx, ~0 );
_tnl_invalidate_vertices( ctx, ~0 );
}
}
}
static GLboolean r300RunNonTCLRender(GLcontext * ctx,
struct tnl_pipeline_stage *stage)
{
r300ContextPtr rmesa = R300_CONTEXT(ctx);
if (RADEON_DEBUG & DEBUG_PRIMS)
fprintf(stderr, "%s\n", __FUNCTION__);
if (rmesa->fallback & R300_RASTER_FALLBACK_MASK)
return GL_TRUE;
if (rmesa->options.hw_tcl_enabled == GL_FALSE)
return GL_TRUE;
r300RunRender(ctx, stage);
return GL_FALSE;
}
const struct tnl_pipeline_stage _r300_render_stage = {
"r300 Hardware Rasterization",
NULL,
NULL,
NULL,
NULL,
r300RunNonTCLRender
};

View File

@ -0,0 +1 @@
../radeon/radeon_buffer_objects.c

View File

@ -0,0 +1 @@
../radeon/radeon_buffer_objects.h

View File

@ -0,0 +1,226 @@
/*
* Copyright 2009 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "radeon_buffer_objects.h"
#include "main/imports.h"
#include "main/mtypes.h"
#include "main/bufferobj.h"
#include "radeon_common.h"
/**
 * Downcast core Mesa's gl_buffer_object to the radeon wrapper.
 * Valid because radeonNewBufferObject() always allocates a
 * struct radeon_buffer_object with Base as its first member.
 */
struct radeon_buffer_object *
get_radeon_buffer_object(struct gl_buffer_object *obj)
{
	return (struct radeon_buffer_object *) obj;
}
/**
 * Allocate a driver buffer object wrapper.  Called via ctx->Driver.NewBufferObject.
 *
 * The backing radeon_bo is allocated lazily in radeonBufferData(); until
 * then bo is NULL.
 *
 * Fix: check the CALLOC_STRUCT result — returning NULL lets core Mesa
 * report GL_OUT_OF_MEMORY instead of crashing on a NULL dereference.
 */
static struct gl_buffer_object *
radeonNewBufferObject(GLcontext * ctx,
                      GLuint name,
                      GLenum target)
{
	struct radeon_buffer_object *obj = CALLOC_STRUCT(radeon_buffer_object);

	if (obj == NULL)
		return NULL;

	_mesa_initialize_buffer_object(&obj->Base, name, target);

	obj->bo = NULL;

	return &obj->Base;
}
/**
* Called via glDeleteBuffersARB().
*/
/**
 * Called via glDeleteBuffersARB().
 *
 * Drops any outstanding user mapping, releases the backing BO reference,
 * then frees the wrapper itself.
 */
static void
radeonDeleteBufferObject(GLcontext * ctx,
                         struct gl_buffer_object *obj)
{
	struct radeon_buffer_object *robj = get_radeon_buffer_object(obj);

	/* A live glMapBuffer mapping implies a mapped BO — unmap it first. */
	if (obj->Pointer != NULL)
		radeon_bo_unmap(robj->bo);

	if (robj->bo != NULL)
		radeon_bo_unref(robj->bo);

	_mesa_free(robj);
}
/**
* Allocate space for and store data in a buffer object. Any data that was
* previously stored in the buffer object is lost. If data is NULL,
* memory will be allocated, but no copy will occur.
* Called via glBufferDataARB().
*/
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via glBufferDataARB().
 *
 * Fix: the radeon_bo_open() result was used unchecked; on allocation
 * failure the subsequent radeon_bo_map() would crash.  Now the buffer is
 * left with no storage (Size reset to 0, bo == NULL) so later operations
 * take their NULL-bo paths.
 */
static void
radeonBufferData(GLcontext * ctx,
                 GLenum target,
                 GLsizeiptrARB size,
                 const GLvoid * data,
                 GLenum usage,
                 struct gl_buffer_object *obj)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);

	radeon_obj->Base.Size = size;
	radeon_obj->Base.Usage = usage;

	/* glBufferData orphans the old storage unconditionally. */
	if (radeon_obj->bo != NULL) {
		radeon_bo_unref(radeon_obj->bo);
		radeon_obj->bo = NULL;
	}

	if (size != 0) {
#ifdef RADEON_DEBUG_BO
		radeon_obj->bo = radeon_bo_open(radeon->radeonScreen->bom,
		                                0,
		                                size,
		                                32,
		                                RADEON_GEM_DOMAIN_GTT,
		                                0,
		                                "Radeon Named object");
#else
		radeon_obj->bo = radeon_bo_open(radeon->radeonScreen->bom,
		                                0,
		                                size,
		                                32,
		                                RADEON_GEM_DOMAIN_GTT,
		                                0);
#endif

		/* Out of memory: leave the buffer without storage rather
		 * than crashing in radeon_bo_map() below. */
		if (radeon_obj->bo == NULL) {
			radeon_obj->Base.Size = 0;
			return;
		}

		if (data != NULL) {
			radeon_bo_map(radeon_obj->bo, GL_TRUE);
			_mesa_memcpy(radeon_obj->bo->ptr, data, size);
			radeon_bo_unmap(radeon_obj->bo);
		}
	}
}
/**
* Replace data in a subrange of buffer object. If the data range
* specified by size + offset extends beyond the end of the buffer or
* if data is NULL, no copy is performed.
* Called via glBufferSubDataARB().
*/
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 *
 * Core Mesa validates offset/size before dispatching here, so the copy
 * is done without further bounds checks.  NOTE(review): assumes bo is
 * non-NULL, i.e. glBufferData was called with size > 0 — confirm callers
 * cannot reach this with an unallocated buffer.
 */
static void
radeonBufferSubData(GLcontext * ctx,
                    GLenum target,
                    GLintptrARB offset,
                    GLsizeiptrARB size,
                    const GLvoid * data,
                    struct gl_buffer_object *obj)
{
	struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);

	/* Map writable, splice the user data in, unmap. */
	radeon_bo_map(radeon_obj->bo, GL_TRUE);

	_mesa_memcpy(radeon_obj->bo->ptr + offset, data, size);

	radeon_bo_unmap(radeon_obj->bo);
}
/**
* Called via glGetBufferSubDataARB()
*/
/**
 * Called via glGetBufferSubDataARB()
 *
 * Reads `size` bytes starting at `offset` out of the buffer's backing BO
 * into the user-supplied `data` pointer.  Core Mesa has already validated
 * the range against the buffer size.
 */
static void
radeonGetBufferSubData(GLcontext * ctx,
                       GLenum target,
                       GLintptrARB offset,
                       GLsizeiptrARB size,
                       GLvoid * data,
                       struct gl_buffer_object *obj)
{
	struct radeon_bo *bo = get_radeon_buffer_object(obj)->bo;

	/* Read-only mapping is sufficient for a download. */
	radeon_bo_map(bo, GL_FALSE);
	_mesa_memcpy(data, bo->ptr + offset, size);
	radeon_bo_unmap(bo);
}
/**
* Called via glMapBufferARB()
*/
/**
 * Called via glMapBufferARB()
 *
 * Maps the backing BO and publishes the CPU pointer in obj->Pointer.
 * Returns NULL (and clears obj->Pointer) when the buffer has no storage.
 */
static void *
radeonMapBuffer(GLcontext * ctx,
                GLenum target,
                GLenum access,
                struct gl_buffer_object *obj)
{
	struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
	const GLboolean for_write = (access == GL_WRITE_ONLY_ARB);

	/* Flush queued rendering before handing out a writable mapping so
	 * pending draws don't read the new contents. */
	if (for_write)
		radeonFlush(ctx);

	if (radeon_obj->bo == NULL) {
		obj->Pointer = NULL;
		return NULL;
	}

	radeon_bo_map(radeon_obj->bo, for_write);

	obj->Pointer = radeon_obj->bo->ptr;
	return obj->Pointer;
}
/**
* Called via glUnmapBufferARB()
*/
/**
 * Called via glUnmapBufferARB()
 *
 * Releases the CPU mapping created by radeonMapBuffer().  Always reports
 * GL_TRUE: the mapped contents are never considered corrupted.
 */
static GLboolean
radeonUnmapBuffer(GLcontext * ctx,
                  GLenum target,
                  struct gl_buffer_object *obj)
{
	struct radeon_bo *bo = get_radeon_buffer_object(obj)->bo;

	/* A zero-size buffer has no BO and therefore nothing to unmap. */
	if (bo != NULL) {
		radeon_bo_unmap(bo);
		obj->Pointer = NULL;
	}

	return GL_TRUE;
}
/**
 * Hook the radeon buffer-object implementation into Mesa's driver
 * function table.  Entries not set here keep core Mesa's software
 * fallbacks.
 */
void
radeonInitBufferObjectFuncs(struct dd_function_table *functions)
{
	functions->NewBufferObject = radeonNewBufferObject;
	functions->DeleteBuffer = radeonDeleteBufferObject;
	functions->BufferData = radeonBufferData;
	functions->BufferSubData = radeonBufferSubData;
	functions->GetBufferSubData = radeonGetBufferSubData;
	functions->MapBuffer = radeonMapBuffer;
	functions->UnmapBuffer = radeonUnmapBuffer;
}

View File

@ -0,0 +1,52 @@
/*
* Copyright 2009 Maciej Cencora <m.cencora@gmail.com>
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef RADEON_BUFFER_OBJECTS_H
#define RADEON_BUFFER_OBJECTS_H
#include "main/mtypes.h"
struct radeon_bo;
/**
* Radeon vertex/pixel buffer object, derived from Mesa's gl_buffer_object.
*/
struct radeon_buffer_object
{
struct gl_buffer_object Base;
struct radeon_bo *bo;
};
struct radeon_buffer_object *
get_radeon_buffer_object(struct gl_buffer_object *obj);
/**
* Hook the bufferobject implementation into mesa:
*/
void radeonInitBufferObjectFuncs(struct dd_function_table *functions);
#endif

View File

@ -52,7 +52,7 @@ do { \
} while (0)
#endif
static void radeonEmitVec4(uint32_t *out, GLvoid * data, int stride, int count)
void radeonEmitVec4(uint32_t *out, const GLvoid * data, int stride, int count)
{
int i;
@ -70,7 +70,7 @@ static void radeonEmitVec4(uint32_t *out, GLvoid * data, int stride, int count)
}
}
void radeonEmitVec8(uint32_t *out, GLvoid * data, int stride, int count)
void radeonEmitVec8(uint32_t *out, const GLvoid * data, int stride, int count)
{
int i;
@ -89,7 +89,7 @@ void radeonEmitVec8(uint32_t *out, GLvoid * data, int stride, int count)
}
}
void radeonEmitVec12(uint32_t *out, GLvoid * data, int stride, int count)
void radeonEmitVec12(uint32_t *out, const GLvoid * data, int stride, int count)
{
int i;
@ -110,7 +110,7 @@ void radeonEmitVec12(uint32_t *out, GLvoid * data, int stride, int count)
}
}
static void radeonEmitVec16(uint32_t *out, GLvoid * data, int stride, int count)
void radeonEmitVec16(uint32_t *out, const GLvoid * data, int stride, int count)
{
int i;
@ -132,7 +132,7 @@ static void radeonEmitVec16(uint32_t *out, GLvoid * data, int stride, int count)
}
void rcommon_emit_vector(GLcontext * ctx, struct radeon_aos *aos,
GLvoid * data, int size, int stride, int count)
const GLvoid * data, int size, int stride, int count)
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
uint32_t *out;

View File

@ -33,11 +33,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef RADEON_DMA_H
#define RADEON_DMA_H
void radeonEmitVec8(uint32_t *out, GLvoid * data, int stride, int count);
void radeonEmitVec12(uint32_t *out, GLvoid * data, int stride, int count);
void radeonEmitVec4(uint32_t *out, const GLvoid * data, int stride, int count);
void radeonEmitVec8(uint32_t *out, const GLvoid * data, int stride, int count);
void radeonEmitVec12(uint32_t *out, const GLvoid * data, int stride, int count);
void radeonEmitVec16(uint32_t *out, const GLvoid * data, int stride, int count);
void rcommon_emit_vector(GLcontext * ctx, struct radeon_aos *aos,
GLvoid * data, int size, int stride, int count);
const GLvoid * data, int size, int stride, int count);
void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size);
void radeonAllocDmaRegion(radeonContextPtr rmesa,