gallium: remove pipe_index_buffer and set_index_buffer

pipe_draw_info::indexed is replaced with pipe_draw_info::index_size.
index_size == 0 means a non-indexed draw.

Instead of pipe_index_buffer::offset, pipe_draw_info::start is used.
For indexed indirect draws, pipe_draw_info::start is added to the start
value read from the indirect buffer. This is the only case where "start"
affects indirect draws.

pipe_draw_info::index is a union: use index::resource or index::user
depending on the value of pipe_draw_info::has_user_indices.

v2: fixes for nine, svga
This commit is contained in:
Marek Olšák 2017-04-02 16:24:39 +02:00
parent 22f6624ed3
commit 330d0607ed
107 changed files with 667 additions and 1217 deletions

View File

@ -349,8 +349,6 @@ void cso_destroy_context( struct cso_context *ctx )
unsigned i; unsigned i;
if (ctx->pipe) { if (ctx->pipe) {
ctx->pipe->set_index_buffer(ctx->pipe, NULL);
ctx->pipe->bind_blend_state( ctx->pipe, NULL ); ctx->pipe->bind_blend_state( ctx->pipe, NULL );
ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL ); ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );
@ -1696,20 +1694,6 @@ cso_restore_state(struct cso_context *cso)
/* drawing */ /* drawing */
void
cso_set_index_buffer(struct cso_context *cso,
const struct pipe_index_buffer *ib)
{
struct u_vbuf *vbuf = cso->vbuf;
if (vbuf) {
u_vbuf_set_index_buffer(vbuf, ib);
} else {
struct pipe_context *pipe = cso->pipe;
pipe->set_index_buffer(pipe, ib);
}
}
void void
cso_draw_vbo(struct cso_context *cso, cso_draw_vbo(struct cso_context *cso,
const struct pipe_draw_info *info) const struct pipe_draw_info *info)

View File

@ -221,10 +221,6 @@ void cso_restore_constant_buffer_slot0(struct cso_context *cso,
/* drawing */ /* drawing */
void
cso_set_index_buffer(struct cso_context *cso,
const struct pipe_index_buffer *ib);
void void
cso_draw_vbo(struct cso_context *cso, cso_draw_vbo(struct cso_context *cso,
const struct pipe_draw_info *info); const struct pipe_draw_info *info);

View File

@ -777,9 +777,6 @@ void draw_set_render( struct draw_context *draw,
/** /**
* Tell the draw module where vertex indexes/elements are located, and * Tell the draw module where vertex indexes/elements are located, and
* their size (in bytes). * their size (in bytes).
*
* Note: the caller must apply the pipe_index_buffer::offset value to
* the address. The draw module doesn't do that.
*/ */
void void
draw_set_indexes(struct draw_context *draw, draw_set_indexes(struct draw_context *draw,

View File

@ -443,7 +443,7 @@ resolve_draw_info(const struct pipe_draw_info *raw_info,
info->count = target->internal_offset / vertex_buffer->stride; info->count = target->internal_offset / vertex_buffer->stride;
/* Stream output draw can not be indexed */ /* Stream output draw can not be indexed */
debug_assert(!info->indexed); debug_assert(!info->index_size);
info->max_index = info->count - 1; info->max_index = info->count - 1;
} }
} }
@ -473,7 +473,7 @@ draw_vbo(struct draw_context *draw,
info = &resolved_info; info = &resolved_info;
assert(info->instance_count > 0); assert(info->instance_count > 0);
if (info->indexed) if (info->index_size)
assert(draw->pt.user.elts); assert(draw->pt.user.elts);
count = info->count; count = info->count;
@ -481,7 +481,7 @@ draw_vbo(struct draw_context *draw,
draw->pt.user.eltBias = info->index_bias; draw->pt.user.eltBias = info->index_bias;
draw->pt.user.min_index = info->min_index; draw->pt.user.min_index = info->min_index;
draw->pt.user.max_index = info->max_index; draw->pt.user.max_index = info->max_index;
draw->pt.user.eltSize = info->indexed ? draw->pt.user.eltSizeIB : 0; draw->pt.user.eltSize = info->index_size ? draw->pt.user.eltSizeIB : 0;
if (0) if (0)
debug_printf("draw_vbo(mode=%u start=%u count=%u):\n", debug_printf("draw_vbo(mode=%u start=%u count=%u):\n",

View File

@ -33,7 +33,6 @@
* *
* // emulate unsupported primitives: * // emulate unsupported primitives:
* if (info->mode needs emulating) { * if (info->mode needs emulating) {
* util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
* util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer); * util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
* util_primconvert_draw_vbo(ctx->primconvert, info); * util_primconvert_draw_vbo(ctx->primconvert, info);
* return; * return;
@ -53,7 +52,6 @@
struct primconvert_context struct primconvert_context
{ {
struct pipe_context *pipe; struct pipe_context *pipe;
struct pipe_index_buffer saved_ib;
uint32_t primtypes_mask; uint32_t primtypes_mask;
unsigned api_pv; unsigned api_pv;
}; };
@ -73,25 +71,9 @@ util_primconvert_create(struct pipe_context *pipe, uint32_t primtypes_mask)
void void
util_primconvert_destroy(struct primconvert_context *pc) util_primconvert_destroy(struct primconvert_context *pc)
{ {
util_primconvert_save_index_buffer(pc, NULL);
FREE(pc); FREE(pc);
} }
void
util_primconvert_save_index_buffer(struct primconvert_context *pc,
const struct pipe_index_buffer *ib)
{
if (ib) {
pipe_resource_reference(&pc->saved_ib.buffer, ib->buffer);
pc->saved_ib.index_size = ib->index_size;
pc->saved_ib.offset = ib->offset;
pc->saved_ib.user_buffer = ib->user_buffer;
}
else {
pipe_resource_reference(&pc->saved_ib.buffer, NULL);
}
}
void void
util_primconvert_save_rasterizer_state(struct primconvert_context *pc, util_primconvert_save_rasterizer_state(struct primconvert_context *pc,
const struct pipe_rasterizer_state const struct pipe_rasterizer_state
@ -108,18 +90,15 @@ void
util_primconvert_draw_vbo(struct primconvert_context *pc, util_primconvert_draw_vbo(struct primconvert_context *pc,
const struct pipe_draw_info *info) const struct pipe_draw_info *info)
{ {
struct pipe_index_buffer *ib = &pc->saved_ib;
struct pipe_index_buffer new_ib;
struct pipe_draw_info new_info; struct pipe_draw_info new_info;
struct pipe_transfer *src_transfer = NULL; struct pipe_transfer *src_transfer = NULL;
u_translate_func trans_func; u_translate_func trans_func;
u_generate_func gen_func; u_generate_func gen_func;
const void *src = NULL; const void *src = NULL;
void *dst; void *dst;
unsigned ib_offset;
memset(&new_ib, 0, sizeof(new_ib));
util_draw_init_info(&new_info); util_draw_init_info(&new_info);
new_info.indexed = true;
new_info.min_index = info->min_index; new_info.min_index = info->min_index;
new_info.max_index = info->max_index; new_info.max_index = info->max_index;
new_info.index_bias = info->index_bias; new_info.index_bias = info->index_bias;
@ -127,38 +106,43 @@ util_primconvert_draw_vbo(struct primconvert_context *pc,
new_info.instance_count = info->instance_count; new_info.instance_count = info->instance_count;
new_info.primitive_restart = info->primitive_restart; new_info.primitive_restart = info->primitive_restart;
new_info.restart_index = info->restart_index; new_info.restart_index = info->restart_index;
if (info->indexed) { if (info->index_size) {
enum pipe_prim_type mode = 0; enum pipe_prim_type mode = 0;
unsigned index_size;
u_index_translator(pc->primtypes_mask, u_index_translator(pc->primtypes_mask,
info->mode, pc->saved_ib.index_size, info->count, info->mode, info->index_size, info->count,
pc->api_pv, pc->api_pv, pc->api_pv, pc->api_pv,
info->primitive_restart ? PR_ENABLE : PR_DISABLE, info->primitive_restart ? PR_ENABLE : PR_DISABLE,
&mode, &new_ib.index_size, &new_info.count, &mode, &index_size, &new_info.count,
&trans_func); &trans_func);
new_info.mode = mode; new_info.mode = mode;
src = ib->user_buffer; new_info.index_size = index_size;
src = info->has_user_indices ? info->index.user : NULL;
if (!src) { if (!src) {
src = pipe_buffer_map(pc->pipe, ib->buffer, src = pipe_buffer_map(pc->pipe, info->index.resource,
PIPE_TRANSFER_READ, &src_transfer); PIPE_TRANSFER_READ, &src_transfer);
} }
src = (const uint8_t *)src + ib->offset; src = (const uint8_t *)src;
} }
else { else {
enum pipe_prim_type mode = 0; enum pipe_prim_type mode = 0;
unsigned index_size;
u_index_generator(pc->primtypes_mask, u_index_generator(pc->primtypes_mask,
info->mode, info->start, info->count, info->mode, info->start, info->count,
pc->api_pv, pc->api_pv, pc->api_pv, pc->api_pv,
&mode, &new_ib.index_size, &new_info.count, &mode, &index_size, &new_info.count,
&gen_func); &gen_func);
new_info.mode = mode; new_info.mode = mode;
new_info.index_size = index_size;
} }
u_upload_alloc(pc->pipe->stream_uploader, 0, new_ib.index_size * new_info.count, 4, u_upload_alloc(pc->pipe->stream_uploader, 0, new_info.index_size * new_info.count, 4,
&new_ib.offset, &new_ib.buffer, &dst); &ib_offset, &new_info.index.resource, &dst);
new_info.start = ib_offset / new_info.index_size;
if (info->indexed) { if (info->index_size) {
trans_func(src, info->start, info->count, new_info.count, info->restart_index, dst); trans_func(src, info->start, info->count, new_info.count, info->restart_index, dst);
} }
else { else {
@ -170,14 +154,8 @@ util_primconvert_draw_vbo(struct primconvert_context *pc,
u_upload_unmap(pc->pipe->stream_uploader); u_upload_unmap(pc->pipe->stream_uploader);
/* bind new index buffer: */
pc->pipe->set_index_buffer(pc->pipe, &new_ib);
/* to the translated draw: */ /* to the translated draw: */
pc->pipe->draw_vbo(pc->pipe, &new_info); pc->pipe->draw_vbo(pc->pipe, &new_info);
/* and then restore saved ib: */ pipe_resource_reference(&new_info.index.resource, NULL);
pc->pipe->set_index_buffer(pc->pipe, ib);
pipe_resource_reference(&new_ib.buffer, NULL);
} }

View File

@ -34,8 +34,6 @@ struct primconvert_context;
struct primconvert_context *util_primconvert_create(struct pipe_context *pipe, struct primconvert_context *util_primconvert_create(struct pipe_context *pipe,
uint32_t primtypes_mask); uint32_t primtypes_mask);
void util_primconvert_destroy(struct primconvert_context *pc); void util_primconvert_destroy(struct primconvert_context *pc);
void util_primconvert_save_index_buffer(struct primconvert_context *pc,
const struct pipe_index_buffer *ib);
void util_primconvert_save_rasterizer_state(struct primconvert_context *pc, void util_primconvert_save_rasterizer_state(struct primconvert_context *pc,
const struct pipe_rasterizer_state const struct pipe_rasterizer_state
*rast); *rast);

View File

@ -136,7 +136,7 @@ util_draw_indirect(struct pipe_context *pipe,
struct pipe_draw_info info; struct pipe_draw_info info;
struct pipe_transfer *transfer; struct pipe_transfer *transfer;
uint32_t *params; uint32_t *params;
const unsigned num_params = info_in->indexed ? 5 : 4; const unsigned num_params = info_in->index_size ? 5 : 4;
assert(info_in->indirect); assert(info_in->indirect);
assert(!info_in->count_from_stream_output); assert(!info_in->count_from_stream_output);
@ -158,8 +158,8 @@ util_draw_indirect(struct pipe_context *pipe,
info.count = params[0]; info.count = params[0];
info.instance_count = params[1]; info.instance_count = params[1];
info.start = params[2]; info.start = params[2];
info.index_bias = info_in->indexed ? params[3] : 0; info.index_bias = info_in->index_size ? params[3] : 0;
info.start_instance = info_in->indexed ? params[4] : params[3]; info.start_instance = info_in->index_size ? params[4] : params[3];
info.indirect = NULL; info.indirect = NULL;
pipe_buffer_unmap(pipe, transfer); pipe_buffer_unmap(pipe, transfer);

View File

@ -67,15 +67,15 @@ util_draw_arrays(struct pipe_context *pipe,
} }
static inline void static inline void
util_draw_elements(struct pipe_context *pipe, int index_bias, util_draw_elements(struct pipe_context *pipe, unsigned index_size,
enum pipe_prim_type mode, int index_bias, enum pipe_prim_type mode,
uint start, uint start,
uint count) uint count)
{ {
struct pipe_draw_info info; struct pipe_draw_info info;
util_draw_init_info(&info); util_draw_init_info(&info);
info.indexed = TRUE; info.index_size = index_size;
info.mode = mode; info.mode = mode;
info.start = start; info.start = start;
info.count = count; info.count = count;
@ -108,6 +108,7 @@ util_draw_arrays_instanced(struct pipe_context *pipe,
static inline void static inline void
util_draw_elements_instanced(struct pipe_context *pipe, util_draw_elements_instanced(struct pipe_context *pipe,
unsigned index_size,
int index_bias, int index_bias,
enum pipe_prim_type mode, enum pipe_prim_type mode,
uint start, uint start,
@ -118,7 +119,7 @@ util_draw_elements_instanced(struct pipe_context *pipe,
struct pipe_draw_info info; struct pipe_draw_info info;
util_draw_init_info(&info); util_draw_init_info(&info);
info.indexed = TRUE; info.index_size = index_size;
info.mode = mode; info.mode = mode;
info.start = start; info.start = start;
info.count = count; info.count = count;

View File

@ -172,9 +172,6 @@ void
util_dump_constant_buffer(FILE *stream, util_dump_constant_buffer(FILE *stream,
const struct pipe_constant_buffer *state); const struct pipe_constant_buffer *state);
void
util_dump_index_buffer(FILE *stream, const struct pipe_index_buffer *state);
void void
util_dump_vertex_buffer(FILE *stream, util_dump_vertex_buffer(FILE *stream,
const struct pipe_vertex_buffer *state); const struct pipe_vertex_buffer *state);

View File

@ -833,25 +833,6 @@ util_dump_constant_buffer(FILE *stream,
} }
void
util_dump_index_buffer(FILE *stream, const struct pipe_index_buffer *state)
{
if (!state) {
util_dump_null(stream);
return;
}
util_dump_struct_begin(stream, "pipe_index_buffer");
util_dump_member(stream, uint, state, index_size);
util_dump_member(stream, uint, state, offset);
util_dump_member(stream, ptr, state, buffer);
util_dump_member(stream, ptr, state, user_buffer);
util_dump_struct_end(stream);
}
void void
util_dump_vertex_buffer(FILE *stream, const struct pipe_vertex_buffer *state) util_dump_vertex_buffer(FILE *stream, const struct pipe_vertex_buffer *state)
{ {
@ -919,7 +900,8 @@ util_dump_draw_info(FILE *stream, const struct pipe_draw_info *state)
util_dump_struct_begin(stream, "pipe_draw_info"); util_dump_struct_begin(stream, "pipe_draw_info");
util_dump_member(stream, bool, state, indexed); util_dump_member(stream, uint, state, index_size);
util_dump_member(stream, uint, state, has_user_indices);
util_dump_member(stream, enum_prim_mode, state, mode); util_dump_member(stream, enum_prim_mode, state, mode);
util_dump_member(stream, uint, state, start); util_dump_member(stream, uint, state, start);
@ -939,6 +921,7 @@ util_dump_draw_info(FILE *stream, const struct pipe_draw_info *state)
util_dump_member(stream, bool, state, primitive_restart); util_dump_member(stream, bool, state, primitive_restart);
util_dump_member(stream, uint, state, restart_index); util_dump_member(stream, uint, state, restart_index);
util_dump_member(stream, ptr, state, index.resource);
util_dump_member(stream, ptr, state, count_from_stream_output); util_dump_member(stream, ptr, state, count_from_stream_output);
if (!state->indirect) { if (!state->indirect) {

View File

@ -98,48 +98,24 @@ void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
*dst_count = util_last_bit(enabled_buffers); *dst_count = util_last_bit(enabled_buffers);
} }
void
util_set_index_buffer(struct pipe_index_buffer *dst,
const struct pipe_index_buffer *src)
{
if (src) {
pipe_resource_reference(&dst->buffer, src->buffer);
memcpy(dst, src, sizeof(*dst));
}
else {
pipe_resource_reference(&dst->buffer, NULL);
memset(dst, 0, sizeof(*dst));
}
}
/** /**
* Given a user index buffer, save the structure to "saved", and upload it. * Given a user index buffer, save the structure to "saved", and upload it.
*/ */
bool bool
util_save_and_upload_index_buffer(struct pipe_context *pipe, util_upload_index_buffer(struct pipe_context *pipe,
const struct pipe_draw_info *info, const struct pipe_draw_info *info,
const struct pipe_index_buffer *ib, struct pipe_resource **out_buffer,
struct pipe_index_buffer *out_saved) unsigned *out_offset)
{ {
struct pipe_index_buffer new_ib = {0}; unsigned start_offset = info->start * info->index_size;
unsigned start_offset = info->start * ib->index_size;
u_upload_data(pipe->stream_uploader, start_offset, u_upload_data(pipe->stream_uploader, start_offset,
info->count * ib->index_size, 4, info->count * info->index_size, 4,
(char*)ib->user_buffer + start_offset, (char*)info->index.user + start_offset,
&new_ib.offset, &new_ib.buffer); out_offset, out_buffer);
if (!new_ib.buffer)
return false;
u_upload_unmap(pipe->stream_uploader); u_upload_unmap(pipe->stream_uploader);
*out_offset -= start_offset;
new_ib.offset -= start_offset; return *out_buffer != NULL;
new_ib.index_size = ib->index_size;
util_set_index_buffer(out_saved, ib);
pipe->set_index_buffer(pipe, &new_ib);
pipe_resource_reference(&new_ib.buffer, NULL);
return true;
} }
struct pipe_query * struct pipe_query *

View File

@ -45,13 +45,10 @@ void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
const struct pipe_vertex_buffer *src, const struct pipe_vertex_buffer *src,
unsigned start_slot, unsigned count); unsigned start_slot, unsigned count);
void util_set_index_buffer(struct pipe_index_buffer *dst, bool util_upload_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *src); const struct pipe_draw_info *info,
struct pipe_resource **out_buffer,
bool util_save_and_upload_index_buffer(struct pipe_context *pipe, unsigned *out_offset);
const struct pipe_draw_info *info,
const struct pipe_index_buffer *ib,
struct pipe_index_buffer *out_saved);
struct pipe_query * struct pipe_query *
util_begin_pipestat_query(struct pipe_context *ctx); util_begin_pipestat_query(struct pipe_context *ctx);

View File

@ -27,7 +27,7 @@
/* Ubyte indices. */ /* Ubyte indices. */
void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context, void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
const struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
unsigned add_transfer_flags, unsigned add_transfer_flags,
int index_bias, int index_bias,
unsigned start, unsigned start,
@ -39,10 +39,10 @@ void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
unsigned short *out_map = out; unsigned short *out_map = out;
unsigned i; unsigned i;
if (ib->user_buffer) { if (info->has_user_indices) {
in_map = ib->user_buffer; in_map = info->index.user;
} else { } else {
in_map = pipe_buffer_map(context, ib->buffer, in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ | PIPE_TRANSFER_READ |
add_transfer_flags, add_transfer_flags,
&src_transfer); &src_transfer);
@ -62,7 +62,7 @@ void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
/* Ushort indices. */ /* Ushort indices. */
void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context, void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
const struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
unsigned add_transfer_flags, unsigned add_transfer_flags,
int index_bias, int index_bias,
unsigned start, unsigned count, unsigned start, unsigned count,
@ -73,10 +73,10 @@ void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
unsigned short *out_map = out; unsigned short *out_map = out;
unsigned i; unsigned i;
if (ib->user_buffer) { if (info->has_user_indices) {
in_map = ib->user_buffer; in_map = info->index.user;
} else { } else {
in_map = pipe_buffer_map(context, ib->buffer, in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ | PIPE_TRANSFER_READ |
add_transfer_flags, add_transfer_flags,
&in_transfer); &in_transfer);
@ -96,7 +96,7 @@ void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
/* Uint indices. */ /* Uint indices. */
void util_rebuild_uint_elts_to_userptr(struct pipe_context *context, void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
const struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
unsigned add_transfer_flags, unsigned add_transfer_flags,
int index_bias, int index_bias,
unsigned start, unsigned count, unsigned start, unsigned count,
@ -107,10 +107,10 @@ void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
unsigned int *out_map = out; unsigned int *out_map = out;
unsigned i; unsigned i;
if (ib->user_buffer) { if (info->has_user_indices) {
in_map = ib->user_buffer; in_map = info->index.user;
} else { } else {
in_map = pipe_buffer_map(context, ib->buffer, in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ | PIPE_TRANSFER_READ |
add_transfer_flags, add_transfer_flags,
&in_transfer); &in_transfer);

View File

@ -25,10 +25,9 @@
struct pipe_context; struct pipe_context;
struct pipe_resource; struct pipe_resource;
struct pipe_index_buffer;
void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context, void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
const struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
unsigned add_transfer_flags, unsigned add_transfer_flags,
int index_bias, int index_bias,
unsigned start, unsigned start,
@ -36,14 +35,14 @@ void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
void *out); void *out);
void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context, void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
const struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
unsigned add_transfer_flags, unsigned add_transfer_flags,
int index_bias, int index_bias,
unsigned start, unsigned count, unsigned start, unsigned count,
void *out); void *out);
void util_rebuild_uint_elts_to_userptr(struct pipe_context *context, void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
const struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
unsigned add_transfer_flags, unsigned add_transfer_flags,
int index_bias, int index_bias,
unsigned start, unsigned count, unsigned start, unsigned count,

View File

@ -39,28 +39,26 @@
*/ */
enum pipe_error enum pipe_error
util_translate_prim_restart_ib(struct pipe_context *context, util_translate_prim_restart_ib(struct pipe_context *context,
struct pipe_index_buffer *src_buffer, const struct pipe_draw_info *info,
struct pipe_resource **dst_buffer, struct pipe_resource **dst_buffer)
unsigned num_indexes,
unsigned restart_index)
{ {
struct pipe_screen *screen = context->screen; struct pipe_screen *screen = context->screen;
struct pipe_transfer *src_transfer = NULL, *dst_transfer = NULL; struct pipe_transfer *src_transfer = NULL, *dst_transfer = NULL;
void *src_map = NULL, *dst_map = NULL; void *src_map = NULL, *dst_map = NULL;
const unsigned src_index_size = src_buffer->index_size; const unsigned src_index_size = info->index_size;
unsigned dst_index_size; unsigned dst_index_size;
/* 1-byte indexes are converted to 2-byte indexes, 4-byte stays 4-byte */ /* 1-byte indexes are converted to 2-byte indexes, 4-byte stays 4-byte */
dst_index_size = MAX2(2, src_buffer->index_size); dst_index_size = MAX2(2, info->index_size);
assert(dst_index_size == 2 || dst_index_size == 4); assert(dst_index_size == 2 || dst_index_size == 4);
/* no user buffers for now */ /* no user buffers for now */
assert(src_buffer->user_buffer == NULL); assert(!info->has_user_indices);
/* Create new index buffer */ /* Create new index buffer */
*dst_buffer = pipe_buffer_create(screen, PIPE_BIND_INDEX_BUFFER, *dst_buffer = pipe_buffer_create(screen, PIPE_BIND_INDEX_BUFFER,
PIPE_USAGE_STREAM, PIPE_USAGE_STREAM,
num_indexes * dst_index_size); info->count * dst_index_size);
if (!*dst_buffer) if (!*dst_buffer)
goto error; goto error;
@ -71,9 +69,9 @@ util_translate_prim_restart_ib(struct pipe_context *context,
goto error; goto error;
/* Map original / src index buffer */ /* Map original / src index buffer */
src_map = pipe_buffer_map_range(context, src_buffer->buffer, src_map = pipe_buffer_map_range(context, info->index.resource,
src_buffer->offset, info->start * src_index_size,
num_indexes * src_index_size, info->count * src_index_size,
PIPE_TRANSFER_READ, PIPE_TRANSFER_READ,
&src_transfer); &src_transfer);
if (!src_map) if (!src_map)
@ -83,16 +81,16 @@ util_translate_prim_restart_ib(struct pipe_context *context,
uint8_t *src = (uint8_t *) src_map; uint8_t *src = (uint8_t *) src_map;
uint16_t *dst = (uint16_t *) dst_map; uint16_t *dst = (uint16_t *) dst_map;
unsigned i; unsigned i;
for (i = 0; i < num_indexes; i++) { for (i = 0; i < info->count; i++) {
dst[i] = (src[i] == restart_index) ? 0xffff : src[i]; dst[i] = (src[i] == info->restart_index) ? 0xffff : src[i];
} }
} }
else if (src_index_size == 2 && dst_index_size == 2) { else if (src_index_size == 2 && dst_index_size == 2) {
uint16_t *src = (uint16_t *) src_map; uint16_t *src = (uint16_t *) src_map;
uint16_t *dst = (uint16_t *) dst_map; uint16_t *dst = (uint16_t *) dst_map;
unsigned i; unsigned i;
for (i = 0; i < num_indexes; i++) { for (i = 0; i < info->count; i++) {
dst[i] = (src[i] == restart_index) ? 0xffff : src[i]; dst[i] = (src[i] == info->restart_index) ? 0xffff : src[i];
} }
} }
else { else {
@ -101,8 +99,8 @@ util_translate_prim_restart_ib(struct pipe_context *context,
unsigned i; unsigned i;
assert(src_index_size == 4); assert(src_index_size == 4);
assert(dst_index_size == 4); assert(dst_index_size == 4);
for (i = 0; i < num_indexes; i++) { for (i = 0; i < info->count; i++) {
dst[i] = (src[i] == restart_index) ? 0xffffffff : src[i]; dst[i] = (src[i] == info->restart_index) ? 0xffffffff : src[i];
} }
} }
@ -177,7 +175,6 @@ add_range(struct range_info *info, unsigned start, unsigned count)
*/ */
enum pipe_error enum pipe_error
util_draw_vbo_without_prim_restart(struct pipe_context *context, util_draw_vbo_without_prim_restart(struct pipe_context *context,
const struct pipe_index_buffer *ib,
const struct pipe_draw_info *info) const struct pipe_draw_info *info)
{ {
const void *src_map; const void *src_map;
@ -186,15 +183,15 @@ util_draw_vbo_without_prim_restart(struct pipe_context *context,
struct pipe_transfer *src_transfer = NULL; struct pipe_transfer *src_transfer = NULL;
unsigned i, start, count; unsigned i, start, count;
assert(info->indexed); assert(info->index_size);
assert(info->primitive_restart); assert(info->primitive_restart);
/* Get pointer to the index data */ /* Get pointer to the index data */
if (ib->buffer) { if (!info->has_user_indices) {
/* map the index buffer (only the range we need to scan) */ /* map the index buffer (only the range we need to scan) */
src_map = pipe_buffer_map_range(context, ib->buffer, src_map = pipe_buffer_map_range(context, info->index.resource,
ib->offset + info->start * ib->index_size, info->start * info->index_size,
info->count * ib->index_size, info->count * info->index_size,
PIPE_TRANSFER_READ, PIPE_TRANSFER_READ,
&src_transfer); &src_transfer);
if (!src_map) { if (!src_map) {
@ -202,13 +199,12 @@ util_draw_vbo_without_prim_restart(struct pipe_context *context,
} }
} }
else { else {
if (!ib->user_buffer) { if (!info->index.user) {
debug_printf("User-space index buffer is null!"); debug_printf("User-space index buffer is null!");
return PIPE_ERROR_BAD_INPUT; return PIPE_ERROR_BAD_INPUT;
} }
src_map = (const uint8_t *) ib->user_buffer src_map = (const uint8_t *) info->index.user
+ ib->offset + info->start * info->index_size;
+ info->start * ib->index_size;
} }
#define SCAN_INDEXES(TYPE) \ #define SCAN_INDEXES(TYPE) \
@ -231,9 +227,9 @@ util_draw_vbo_without_prim_restart(struct pipe_context *context,
} \ } \
} }
start = info->start; start = 0;
count = 0; count = 0;
switch (ib->index_size) { switch (info->index_size) {
case 1: case 1:
SCAN_INDEXES(uint8_t); SCAN_INDEXES(uint8_t);
break; break;

View File

@ -38,20 +38,17 @@ extern "C" {
struct pipe_context; struct pipe_context;
struct pipe_draw_info; struct pipe_draw_info;
struct pipe_index_buffer; union pipe_index_binding;
struct pipe_resource; struct pipe_resource;
enum pipe_error enum pipe_error
util_translate_prim_restart_ib(struct pipe_context *context, util_translate_prim_restart_ib(struct pipe_context *context,
struct pipe_index_buffer *src_buffer, const struct pipe_draw_info *info,
struct pipe_resource **dst_buffer, struct pipe_resource **dst_buffer);
unsigned num_indexes,
unsigned restart_index);
enum pipe_error enum pipe_error
util_draw_vbo_without_prim_restart(struct pipe_context *context, util_draw_vbo_without_prim_restart(struct pipe_context *context,
const struct pipe_index_buffer *ib,
const struct pipe_draw_info *info); const struct pipe_draw_info *info);

View File

@ -162,9 +162,6 @@ struct u_vbuf {
uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last
call of set_vertex_buffers */ call of set_vertex_buffers */
/* The index buffer. */
struct pipe_index_buffer index_buffer;
/* Vertex elements. */ /* Vertex elements. */
struct u_vbuf_elements *ve, *ve_saved; struct u_vbuf_elements *ve, *ve_saved;
@ -372,9 +369,6 @@ void u_vbuf_destroy(struct u_vbuf *mgr)
unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX, unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
PIPE_SHADER_CAP_MAX_INPUTS); PIPE_SHADER_CAP_MAX_INPUTS);
mgr->pipe->set_index_buffer(mgr->pipe, NULL);
pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL); mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++) for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
@ -391,10 +385,10 @@ void u_vbuf_destroy(struct u_vbuf *mgr)
static enum pipe_error static enum pipe_error
u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key, u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
const struct pipe_draw_info *info,
unsigned vb_mask, unsigned out_vb, unsigned vb_mask, unsigned out_vb,
int start_vertex, unsigned num_vertices, int start_vertex, unsigned num_vertices,
int start_index, unsigned num_indices, int min_index, int min_index, boolean unroll_indices)
boolean unroll_indices)
{ {
struct translate *tr; struct translate *tr;
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0}; struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
@ -440,38 +434,35 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
/* Translate. */ /* Translate. */
if (unroll_indices) { if (unroll_indices) {
struct pipe_index_buffer *ib = &mgr->index_buffer;
struct pipe_transfer *transfer = NULL; struct pipe_transfer *transfer = NULL;
unsigned offset = ib->offset + start_index * ib->index_size; unsigned offset = info->start * info->index_size;
uint8_t *map; uint8_t *map;
assert((ib->buffer || ib->user_buffer) && ib->index_size);
/* Create and map the output buffer. */ /* Create and map the output buffer. */
u_upload_alloc(mgr->pipe->stream_uploader, 0, u_upload_alloc(mgr->pipe->stream_uploader, 0,
key->output_stride * num_indices, 4, key->output_stride * info->count, 4,
&out_offset, &out_buffer, &out_offset, &out_buffer,
(void**)&out_map); (void**)&out_map);
if (!out_buffer) if (!out_buffer)
return PIPE_ERROR_OUT_OF_MEMORY; return PIPE_ERROR_OUT_OF_MEMORY;
if (ib->user_buffer) { if (info->has_user_indices) {
map = (uint8_t*)ib->user_buffer + offset; map = (uint8_t*)info->index.user + offset;
} else { } else {
map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset, map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset,
num_indices * ib->index_size, info->count * info->index_size,
PIPE_TRANSFER_READ, &transfer); PIPE_TRANSFER_READ, &transfer);
} }
switch (ib->index_size) { switch (info->index_size) {
case 4: case 4:
tr->run_elts(tr, (unsigned*)map, num_indices, 0, 0, out_map); tr->run_elts(tr, (unsigned*)map, info->count, 0, 0, out_map);
break; break;
case 2: case 2:
tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, 0, out_map); tr->run_elts16(tr, (uint16_t*)map, info->count, 0, 0, out_map);
break; break;
case 1: case 1:
tr->run_elts8(tr, map, num_indices, 0, 0, out_map); tr->run_elts8(tr, map, info->count, 0, 0, out_map);
break; break;
} }
@ -556,10 +547,9 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
static boolean static boolean
u_vbuf_translate_begin(struct u_vbuf *mgr, u_vbuf_translate_begin(struct u_vbuf *mgr,
const struct pipe_draw_info *info,
int start_vertex, unsigned num_vertices, int start_vertex, unsigned num_vertices,
int start_instance, unsigned num_instances, int min_index, boolean unroll_indices)
int start_index, unsigned num_indices, int min_index,
boolean unroll_indices)
{ {
unsigned mask[VB_NUM] = {0}; unsigned mask[VB_NUM] = {0};
struct translate_key key[VB_NUM]; struct translate_key key[VB_NUM];
@ -569,15 +559,15 @@ u_vbuf_translate_begin(struct u_vbuf *mgr,
mgr->ve->used_vb_mask; mgr->ve->used_vb_mask;
int start[VB_NUM] = { int start[VB_NUM] = {
start_vertex, /* VERTEX */ start_vertex, /* VERTEX */
start_instance, /* INSTANCE */ info->start_instance, /* INSTANCE */
0 /* CONST */ 0 /* CONST */
}; };
unsigned num[VB_NUM] = { unsigned num[VB_NUM] = {
num_vertices, /* VERTEX */ num_vertices, /* VERTEX */
num_instances, /* INSTANCE */ info->instance_count, /* INSTANCE */
1 /* CONST */ 1 /* CONST */
}; };
memset(key, 0, sizeof(key)); memset(key, 0, sizeof(key));
@ -664,10 +654,9 @@ u_vbuf_translate_begin(struct u_vbuf *mgr,
for (type = 0; type < VB_NUM; type++) { for (type = 0; type < VB_NUM; type++) {
if (key[type].nr_elements) { if (key[type].nr_elements) {
enum pipe_error err; enum pipe_error err;
err = u_vbuf_translate_buffers(mgr, &key[type], mask[type], err = u_vbuf_translate_buffers(mgr, &key[type], info, mask[type],
mgr->fallback_vbs[type], mgr->fallback_vbs[type],
start[type], num[type], start[type], num[type], min_index,
start_index, num_indices, min_index,
unroll_indices && type == VB_VERTEX); unroll_indices && type == VB_VERTEX);
if (err != PIPE_OK) if (err != PIPE_OK)
return FALSE; return FALSE;
@ -889,22 +878,6 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
mgr->dirty_real_vb_mask |= ~mask; mgr->dirty_real_vb_mask |= ~mask;
} }
void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
const struct pipe_index_buffer *ib)
{
struct pipe_context *pipe = mgr->pipe;
if (ib) {
assert(ib->offset % ib->index_size == 0);
pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
memcpy(&mgr->index_buffer, ib, sizeof(*ib));
} else {
pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
}
pipe->set_index_buffer(pipe, ib);
}
static enum pipe_error static enum pipe_error
u_vbuf_upload_buffers(struct u_vbuf *mgr, u_vbuf_upload_buffers(struct u_vbuf *mgr,
int start_vertex, unsigned num_vertices, int start_vertex, unsigned num_vertices,
@ -1023,42 +996,38 @@ static boolean u_vbuf_mapping_vertex_buffer_blocks(const struct u_vbuf *mgr)
} }
static void u_vbuf_get_minmax_index(struct pipe_context *pipe, static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
boolean primitive_restart, int *out_min_index, int *out_max_index)
unsigned restart_index,
unsigned start, unsigned count,
int *out_min_index,
int *out_max_index)
{ {
struct pipe_transfer *transfer = NULL; struct pipe_transfer *transfer = NULL;
const void *indices; const void *indices;
unsigned i; unsigned i;
if (ib->user_buffer) { if (info->has_user_indices) {
indices = (uint8_t*)ib->user_buffer + indices = (uint8_t*)info->index.user +
ib->offset + start * ib->index_size; info->start * info->index_size;
} else { } else {
indices = pipe_buffer_map_range(pipe, ib->buffer, indices = pipe_buffer_map_range(pipe, info->index.resource,
ib->offset + start * ib->index_size, info->start * info->index_size,
count * ib->index_size, info->count * info->index_size,
PIPE_TRANSFER_READ, &transfer); PIPE_TRANSFER_READ, &transfer);
} }
switch (ib->index_size) { switch (info->index_size) {
case 4: { case 4: {
const unsigned *ui_indices = (const unsigned*)indices; const unsigned *ui_indices = (const unsigned*)indices;
unsigned max_ui = 0; unsigned max_ui = 0;
unsigned min_ui = ~0U; unsigned min_ui = ~0U;
if (primitive_restart) { if (info->primitive_restart) {
for (i = 0; i < count; i++) { for (i = 0; i < info->count; i++) {
if (ui_indices[i] != restart_index) { if (ui_indices[i] != info->restart_index) {
if (ui_indices[i] > max_ui) max_ui = ui_indices[i]; if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
if (ui_indices[i] < min_ui) min_ui = ui_indices[i]; if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
} }
} }
} }
else { else {
for (i = 0; i < count; i++) { for (i = 0; i < info->count; i++) {
if (ui_indices[i] > max_ui) max_ui = ui_indices[i]; if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
if (ui_indices[i] < min_ui) min_ui = ui_indices[i]; if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
} }
@ -1071,16 +1040,16 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
const unsigned short *us_indices = (const unsigned short*)indices; const unsigned short *us_indices = (const unsigned short*)indices;
unsigned max_us = 0; unsigned max_us = 0;
unsigned min_us = ~0U; unsigned min_us = ~0U;
if (primitive_restart) { if (info->primitive_restart) {
for (i = 0; i < count; i++) { for (i = 0; i < info->count; i++) {
if (us_indices[i] != restart_index) { if (us_indices[i] != info->restart_index) {
if (us_indices[i] > max_us) max_us = us_indices[i]; if (us_indices[i] > max_us) max_us = us_indices[i];
if (us_indices[i] < min_us) min_us = us_indices[i]; if (us_indices[i] < min_us) min_us = us_indices[i];
} }
} }
} }
else { else {
for (i = 0; i < count; i++) { for (i = 0; i < info->count; i++) {
if (us_indices[i] > max_us) max_us = us_indices[i]; if (us_indices[i] > max_us) max_us = us_indices[i];
if (us_indices[i] < min_us) min_us = us_indices[i]; if (us_indices[i] < min_us) min_us = us_indices[i];
} }
@ -1093,16 +1062,16 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
const unsigned char *ub_indices = (const unsigned char*)indices; const unsigned char *ub_indices = (const unsigned char*)indices;
unsigned max_ub = 0; unsigned max_ub = 0;
unsigned min_ub = ~0U; unsigned min_ub = ~0U;
if (primitive_restart) { if (info->primitive_restart) {
for (i = 0; i < count; i++) { for (i = 0; i < info->count; i++) {
if (ub_indices[i] != restart_index) { if (ub_indices[i] != info->restart_index) {
if (ub_indices[i] > max_ub) max_ub = ub_indices[i]; if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
if (ub_indices[i] < min_ub) min_ub = ub_indices[i]; if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
} }
} }
} }
else { else {
for (i = 0; i < count; i++) { for (i = 0; i < info->count; i++) {
if (ub_indices[i] > max_ub) max_ub = ub_indices[i]; if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
if (ub_indices[i] < min_ub) min_ub = ub_indices[i]; if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
} }
@ -1167,7 +1136,7 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
struct pipe_transfer *transfer = NULL; struct pipe_transfer *transfer = NULL;
int *data; int *data;
if (new_info.indexed) { if (new_info.index_size) {
data = pipe_buffer_map_range(pipe, new_info.indirect->buffer, data = pipe_buffer_map_range(pipe, new_info.indirect->buffer,
new_info.indirect->offset, 20, new_info.indirect->offset, 20,
PIPE_TRANSFER_READ, &transfer); PIPE_TRANSFER_READ, &transfer);
@ -1188,7 +1157,7 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
new_info.indirect = NULL; new_info.indirect = NULL;
} }
if (new_info.indexed) { if (new_info.index_size) {
/* See if anything needs to be done for per-vertex attribs. */ /* See if anything needs to be done for per-vertex attribs. */
if (u_vbuf_need_minmax_index(mgr)) { if (u_vbuf_need_minmax_index(mgr)) {
int max_index; int max_index;
@ -1197,10 +1166,8 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
min_index = new_info.min_index; min_index = new_info.min_index;
max_index = new_info.max_index; max_index = new_info.max_index;
} else { } else {
u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, u_vbuf_get_minmax_index(mgr->pipe, &new_info,
new_info.primitive_restart, &min_index, &max_index);
new_info.restart_index, new_info.start,
new_info.count, &min_index, &max_index);
} }
assert(min_index <= max_index); assert(min_index <= max_index);
@ -1236,16 +1203,14 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
if (unroll_indices || if (unroll_indices ||
incompatible_vb_mask || incompatible_vb_mask ||
mgr->ve->incompatible_elem_mask) { mgr->ve->incompatible_elem_mask) {
if (!u_vbuf_translate_begin(mgr, start_vertex, num_vertices, if (!u_vbuf_translate_begin(mgr, &new_info, start_vertex, num_vertices,
new_info.start_instance, min_index, unroll_indices)) {
new_info.instance_count, new_info.start,
new_info.count, min_index, unroll_indices)) {
debug_warn_once("u_vbuf_translate_begin() failed"); debug_warn_once("u_vbuf_translate_begin() failed");
return; return;
} }
if (unroll_indices) { if (unroll_indices) {
new_info.indexed = FALSE; new_info.index_size = 0;
new_info.index_bias = 0; new_info.index_bias = 0;
new_info.min_index = 0; new_info.min_index = 0;
new_info.max_index = new_info.count - 1; new_info.max_index = new_info.count - 1;

View File

@ -72,8 +72,6 @@ void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
unsigned start_slot, unsigned count, unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *bufs); const struct pipe_vertex_buffer *bufs);
void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
const struct pipe_index_buffer *ib);
void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info); void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info);
/* Save/restore functionality. */ /* Save/restore functionality. */

View File

@ -53,8 +53,6 @@ buffers, surfaces) are bound to the driver.
* ``set_vertex_buffers`` * ``set_vertex_buffers``
* ``set_index_buffer``
Non-CSO State Non-CSO State
^^^^^^^^^^^^^ ^^^^^^^^^^^^^
@ -290,8 +288,8 @@ the mode of the primitive and the vertices to be fetched, in the range between
Every instance with instanceID in the range between ``start_instance`` and Every instance with instanceID in the range between ``start_instance`` and
``start_instance``+``instance_count``-1, inclusive, will be drawn. ``start_instance``+``instance_count``-1, inclusive, will be drawn.
If there is an index buffer bound, and ``indexed`` field is true, all vertex If ``index_size`` != 0, all vertex indices will be looked up from the index
indices will be looked up in the index buffer. buffer.
In indexed draw, ``min_index`` and ``max_index`` respectively provide a lower In indexed draw, ``min_index`` and ``max_index`` respectively provide a lower
and upper bound of the indices contained in the index buffer inside the range and upper bound of the indices contained in the index buffer inside the range

View File

@ -577,17 +577,6 @@ dd_context_set_vertex_buffers(struct pipe_context *_pipe,
pipe->set_vertex_buffers(pipe, start, num_buffers, buffers); pipe->set_vertex_buffers(pipe, start, num_buffers, buffers);
} }
static void
dd_context_set_index_buffer(struct pipe_context *_pipe,
const struct pipe_index_buffer *ib)
{
struct dd_context *dctx = dd_context(_pipe);
struct pipe_context *pipe = dctx->pipe;
safe_memcpy(&dctx->draw_state.index_buffer, ib, sizeof(*ib));
pipe->set_index_buffer(pipe, ib);
}
static void static void
dd_context_set_stream_output_targets(struct pipe_context *_pipe, dd_context_set_stream_output_targets(struct pipe_context *_pipe,
unsigned num_targets, unsigned num_targets,
@ -852,7 +841,6 @@ dd_context_create(struct dd_screen *dscreen, struct pipe_context *pipe)
CTX_INIT(set_shader_buffers); CTX_INIT(set_shader_buffers);
CTX_INIT(set_shader_images); CTX_INIT(set_shader_images);
CTX_INIT(set_vertex_buffers); CTX_INIT(set_vertex_buffers);
CTX_INIT(set_index_buffer);
CTX_INIT(create_stream_output_target); CTX_INIT(create_stream_output_target);
CTX_INIT(stream_output_target_destroy); CTX_INIT(stream_output_target_destroy);
CTX_INIT(set_stream_output_targets); CTX_INIT(set_stream_output_targets);

View File

@ -289,11 +289,6 @@ dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE
int sh, i; int sh, i;
DUMP(draw_info, info); DUMP(draw_info, info);
if (info->indexed) {
DUMP(index_buffer, &dstate->index_buffer);
if (dstate->index_buffer.buffer)
DUMP_M(resource, &dstate->index_buffer, buffer);
}
if (info->count_from_stream_output) if (info->count_from_stream_output)
DUMP_M(stream_output_target, info, DUMP_M(stream_output_target, info,
count_from_stream_output); count_from_stream_output);
@ -624,6 +619,11 @@ dd_unreference_copy_of_call(struct dd_call *dst)
pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL); pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL); pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL); pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
if (dst->info.draw_vbo.draw.index_size &&
!dst->info.draw_vbo.draw.has_user_indices)
pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
else
dst->info.draw_vbo.draw.index.user = NULL;
break; break;
case CALL_LAUNCH_GRID: case CALL_LAUNCH_GRID:
pipe_resource_reference(&dst->info.launch_grid.indirect, NULL); pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
@ -669,6 +669,19 @@ dd_copy_call(struct dd_call *dst, struct dd_call *src)
src->info.draw_vbo.indirect.buffer); src->info.draw_vbo.indirect.buffer);
pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count,
src->info.draw_vbo.indirect.indirect_draw_count); src->info.draw_vbo.indirect.indirect_draw_count);
if (dst->info.draw_vbo.draw.index_size &&
!dst->info.draw_vbo.draw.has_user_indices)
pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
else
dst->info.draw_vbo.draw.index.user = NULL;
if (src->info.draw_vbo.draw.index_size &&
!src->info.draw_vbo.draw.has_user_indices) {
pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource,
src->info.draw_vbo.draw.index.resource);
}
dst->info.draw_vbo = src->info.draw_vbo; dst->info.draw_vbo = src->info.draw_vbo;
if (!src->info.draw_vbo.draw.indirect) if (!src->info.draw_vbo.draw.indirect)
dst->info.draw_vbo.draw.indirect = NULL; dst->info.draw_vbo.draw.indirect = NULL;
@ -728,8 +741,6 @@ dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
/* Just clear pointers to gallium objects. Don't clear the whole structure, /* Just clear pointers to gallium objects. Don't clear the whole structure,
* because it would kill performance with its size of 130 KB. * because it would kill performance with its size of 130 KB.
*/ */
memset(&state->base.index_buffer, 0,
sizeof(state->base.index_buffer));
memset(state->base.vertex_buffers, 0, memset(state->base.vertex_buffers, 0,
sizeof(state->base.vertex_buffers)); sizeof(state->base.vertex_buffers));
memset(state->base.so_targets, 0, memset(state->base.so_targets, 0,
@ -767,8 +778,6 @@ dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
struct dd_draw_state *dst = &state->base; struct dd_draw_state *dst = &state->base;
unsigned i,j; unsigned i,j;
util_set_index_buffer(&dst->index_buffer, NULL);
for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++) for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]); pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++) for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
@ -804,8 +813,6 @@ dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
dst->render_cond.query = NULL; dst->render_cond.query = NULL;
} }
util_set_index_buffer(&dst->index_buffer, &src->index_buffer);
for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) { for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
pipe_vertex_buffer_reference(&dst->vertex_buffers[i], pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
&src->vertex_buffers[i]); &src->vertex_buffers[i]);

View File

@ -156,7 +156,6 @@ struct dd_draw_state
unsigned mode; unsigned mode;
} render_cond; } render_cond;
struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
unsigned num_so_targets; unsigned num_so_targets;

View File

@ -92,7 +92,7 @@ etna_update_state_for_draw(struct etna_context *ctx, const struct pipe_draw_info
* buffer state as dirty * buffer state as dirty
*/ */
if (info->indexed) { if (info->index_size) {
uint32_t new_control = ctx->index_buffer.FE_INDEX_STREAM_CONTROL; uint32_t new_control = ctx->index_buffer.FE_INDEX_STREAM_CONTROL;
if (info->primitive_restart) if (info->primitive_restart)
@ -159,7 +159,6 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
if (!(ctx->prim_hwsupport & (1 << info->mode))) { if (!(ctx->prim_hwsupport & (1 << info->mode))) {
struct primconvert_context *primconvert = ctx->primconvert; struct primconvert_context *primconvert = ctx->primconvert;
util_primconvert_save_index_buffer(primconvert, &ctx->index_buffer.ib);
util_primconvert_save_rasterizer_state(primconvert, ctx->rasterizer); util_primconvert_save_rasterizer_state(primconvert, ctx->rasterizer);
util_primconvert_draw_vbo(primconvert, info); util_primconvert_draw_vbo(primconvert, info);
return; return;
@ -178,15 +177,23 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
} }
/* Upload a user index buffer. */ /* Upload a user index buffer. */
struct pipe_index_buffer ibuffer_saved = {}; unsigned index_offset = 0;
if (info->indexed && ctx->index_buffer.ib.user_buffer && struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
!util_save_and_upload_index_buffer(pctx, info, &ctx->index_buffer.ib, if (info->index_size && info->has_user_indices &&
&ibuffer_saved)) { !util_upload_index_buffer(pctx, info, &indexbuf, &index_offset)) {
BUG("Index buffer upload failed."); BUG("Index buffer upload failed.");
return; return;
} }
if (info->indexed && !ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) { if (info->index_size && indexbuf) {
ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(indexbuf)->bo;
ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = index_offset;
ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
ctx->index_buffer.FE_INDEX_STREAM_CONTROL = translate_index_size(info->index_size);
ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
}
if (info->index_size && !ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) {
BUG("Unsupported or no index buffer"); BUG("Unsupported or no index buffer");
return; return;
} }
@ -239,7 +246,7 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
} }
/* Mark index buffer as being read */ /* Mark index buffer as being read */
resource_read(ctx, ctx->index_buffer.ib.buffer); resource_read(ctx, indexbuf);
/* Mark textures as being read */ /* Mark textures as being read */
for (i = 0; i < PIPE_MAX_SAMPLERS; i++) for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
@ -255,7 +262,7 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
/* First, sync state, then emit DRAW_PRIMITIVES or DRAW_INDEXED_PRIMITIVES */ /* First, sync state, then emit DRAW_PRIMITIVES or DRAW_INDEXED_PRIMITIVES */
etna_emit_state(ctx); etna_emit_state(ctx);
if (info->indexed) if (info->index_size)
etna_draw_indexed_primitives(ctx->stream, draw_mode, info->start, prims, info->index_bias); etna_draw_indexed_primitives(ctx->stream, draw_mode, info->start, prims, info->index_bias);
else else
etna_draw_primitives(ctx->stream, draw_mode, info->start, prims); etna_draw_primitives(ctx->stream, draw_mode, info->start, prims);
@ -274,8 +281,8 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
etna_resource(ctx->framebuffer.cbuf->texture)->seqno++; etna_resource(ctx->framebuffer.cbuf->texture)->seqno++;
if (ctx->framebuffer.zsbuf) if (ctx->framebuffer.zsbuf)
etna_resource(ctx->framebuffer.zsbuf->texture)->seqno++; etna_resource(ctx->framebuffer.zsbuf->texture)->seqno++;
if (info->indexed && ibuffer_saved.user_buffer) if (info->index_size && indexbuf != info->index.resource)
pctx->set_index_buffer(pctx, &ibuffer_saved); pipe_resource_reference(&indexbuf, NULL);
} }
static void static void

View File

@ -44,7 +44,6 @@ struct pipe_screen;
struct etna_shader_variant; struct etna_shader_variant;
struct etna_index_buffer { struct etna_index_buffer {
struct pipe_index_buffer ib;
struct etna_reloc FE_INDEX_STREAM_BASE_ADDR; struct etna_reloc FE_INDEX_STREAM_BASE_ADDR;
uint32_t FE_INDEX_STREAM_CONTROL; uint32_t FE_INDEX_STREAM_CONTROL;
uint32_t FE_PRIMITIVE_RESTART_INDEX; uint32_t FE_PRIMITIVE_RESTART_INDEX;

View File

@ -369,8 +369,7 @@ etna_emit_state(struct etna_context *ctx)
/*03818*/ EMIT_STATE(GL_MULTI_SAMPLE_CONFIG, val); /*03818*/ EMIT_STATE(GL_MULTI_SAMPLE_CONFIG, val);
} }
if (likely(dirty & (ETNA_DIRTY_INDEX_BUFFER)) && if (likely(dirty & (ETNA_DIRTY_INDEX_BUFFER))) {
ctx->index_buffer.ib.buffer) {
/*00644*/ EMIT_STATE_RELOC(FE_INDEX_STREAM_BASE_ADDR, &ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR); /*00644*/ EMIT_STATE_RELOC(FE_INDEX_STREAM_BASE_ADDR, &ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR);
/*00648*/ EMIT_STATE(FE_INDEX_STREAM_CONTROL, ctx->index_buffer.FE_INDEX_STREAM_CONTROL); /*00648*/ EMIT_STATE(FE_INDEX_STREAM_CONTROL, ctx->index_buffer.FE_INDEX_STREAM_CONTROL);
} }

View File

@ -446,34 +446,6 @@ etna_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
ctx->dirty |= ETNA_DIRTY_VERTEX_BUFFERS; ctx->dirty |= ETNA_DIRTY_VERTEX_BUFFERS;
} }
static void
etna_set_index_buffer(struct pipe_context *pctx, const struct pipe_index_buffer *ib)
{
struct etna_context *ctx = etna_context(pctx);
uint32_t ctrl;
if (ib) {
pipe_resource_reference(&ctx->index_buffer.ib.buffer, ib->buffer);
memcpy(&ctx->index_buffer.ib, ib, sizeof(ctx->index_buffer.ib));
ctrl = translate_index_size(ctx->index_buffer.ib.index_size);
} else {
pipe_resource_reference(&ctx->index_buffer.ib.buffer, NULL);
ctrl = 0;
}
if (ctx->index_buffer.ib.buffer && ctrl != ETNA_NO_MATCH) {
ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(ctx->index_buffer.ib.buffer)->bo;
ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = ctx->index_buffer.ib.offset;
ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
ctx->index_buffer.FE_INDEX_STREAM_CONTROL = ctrl;
} else {
ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = NULL;
ctx->index_buffer.FE_INDEX_STREAM_CONTROL = 0;
}
ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
}
static void static void
etna_blend_state_bind(struct pipe_context *pctx, void *bs) etna_blend_state_bind(struct pipe_context *pctx, void *bs)
{ {
@ -652,7 +624,6 @@ etna_state_init(struct pipe_context *pctx)
pctx->set_viewport_states = etna_set_viewport_states; pctx->set_viewport_states = etna_set_viewport_states;
pctx->set_vertex_buffers = etna_set_vertex_buffers; pctx->set_vertex_buffers = etna_set_vertex_buffers;
pctx->set_index_buffer = etna_set_index_buffer;
pctx->bind_blend_state = etna_blend_state_bind; pctx->bind_blend_state = etna_blend_state_bind;
pctx->delete_blend_state = etna_blend_state_delete; pctx->delete_blend_state = etna_blend_state_delete;

View File

@ -80,7 +80,8 @@ emit_vertexbufs(struct fd_context *ctx)
} }
static bool static bool
fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info) fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
unsigned index_offset)
{ {
struct fd_ringbuffer *ring = ctx->batch->draw; struct fd_ringbuffer *ring = ctx->batch->draw;

View File

@ -72,7 +72,7 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
OUT_RING(ring, add_sat(info->min_index, info->index_bias)); /* VFD_INDEX_MIN */ OUT_RING(ring, add_sat(info->min_index, info->index_bias)); /* VFD_INDEX_MIN */
OUT_RING(ring, add_sat(info->max_index, info->index_bias)); /* VFD_INDEX_MAX */ OUT_RING(ring, add_sat(info->max_index, info->index_bias)); /* VFD_INDEX_MAX */
OUT_RING(ring, info->start_instance); /* VFD_INSTANCEID_OFFSET */ OUT_RING(ring, info->start_instance); /* VFD_INSTANCEID_OFFSET */
OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_PKT0(ring, REG_A3XX_PC_RESTART_INDEX, 1); OUT_PKT0(ring, REG_A3XX_PC_RESTART_INDEX, 1);
OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */ OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */
@ -115,7 +115,8 @@ fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
} }
static bool static bool
fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info) fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
unsigned index_offset)
{ {
struct fd3_context *fd3_ctx = fd3_context(ctx); struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd3_emit emit = { struct fd3_emit emit = {

View File

@ -622,7 +622,7 @@ fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
val |= A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(stride_in_vpc); val |= A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(stride_in_vpc);
} }
if (info->indexed && info->primitive_restart) { if (info->index_size && info->primitive_restart) {
val |= A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART; val |= A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
} }

View File

@ -44,7 +44,7 @@
static void static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring, draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd4_emit *emit) struct fd4_emit *emit, unsigned index_offset)
{ {
const struct pipe_draw_info *info = emit->info; const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode]; enum pc_di_primtype primtype = ctx->primtypes[info->mode];
@ -55,7 +55,7 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
fd4_emit_vertex_bufs(ring, emit); fd4_emit_vertex_bufs(ring, emit);
OUT_PKT0(ring, REG_A4XX_VFD_INDEX_OFFSET, 2); OUT_PKT0(ring, REG_A4XX_VFD_INDEX_OFFSET, 2);
OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_RING(ring, info->start_instance); /* ??? UNKNOWN_2209 */ OUT_RING(ring, info->start_instance); /* ??? UNKNOWN_2209 */
OUT_PKT0(ring, REG_A4XX_PC_RESTART_INDEX, 1); OUT_PKT0(ring, REG_A4XX_PC_RESTART_INDEX, 1);
@ -70,7 +70,7 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
fd4_draw_emit(ctx->batch, ring, primtype, fd4_draw_emit(ctx->batch, ring, primtype,
emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY, emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
info); info, index_offset);
} }
/* fixup dirty shader state in case some "unrelated" (from the state- /* fixup dirty shader state in case some "unrelated" (from the state-
@ -99,7 +99,8 @@ fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
} }
static bool static bool
fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info) fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
unsigned index_offset)
{ {
struct fd4_context *fd4_ctx = fd4_context(ctx); struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd4_emit emit = { struct fd4_emit emit = {
@ -153,7 +154,7 @@ fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
OUT_RING(ring, A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE); OUT_RING(ring, A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE);
} }
draw_impl(ctx, ctx->batch->draw, &emit); draw_impl(ctx, ctx->batch->draw, &emit, index_offset);
if (ctx->rasterizer->rasterizer_discard) { if (ctx->rasterizer->rasterizer_discard) {
fd_wfi(ctx->batch, ring); fd_wfi(ctx->batch, ring);
@ -168,7 +169,7 @@ fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
emit.dirty = dirty & ~(FD_DIRTY_BLEND); emit.dirty = dirty & ~(FD_DIRTY_BLEND);
emit.vp = NULL; /* we changed key so need to refetch vp */ emit.vp = NULL; /* we changed key so need to refetch vp */
emit.fp = NULL; emit.fp = NULL;
draw_impl(ctx, ctx->batch->binning, &emit); draw_impl(ctx, ctx->batch->binning, &emit, index_offset);
fd_context_all_clean(ctx); fd_context_all_clean(ctx);

View File

@ -104,22 +104,21 @@ static inline void
fd4_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring, fd4_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype, enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode, enum pc_di_vis_cull_mode vismode,
const struct pipe_draw_info *info) const struct pipe_draw_info *info,
unsigned index_offset)
{ {
struct pipe_resource *idx_buffer = NULL; struct pipe_resource *idx_buffer = NULL;
enum a4xx_index_size idx_type; enum a4xx_index_size idx_type;
enum pc_di_src_sel src_sel; enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset; uint32_t idx_size, idx_offset;
if (info->indexed) { if (info->index_size) {
struct pipe_index_buffer *idx = &batch->ctx->indexbuf; assert(!info->has_user_indices);
assert(!idx->user_buffer); idx_buffer = info->index.resource;
idx_type = fd4_size2indextype(info->index_size);
idx_buffer = idx->buffer; idx_size = info->index_size * info->count;
idx_type = fd4_size2indextype(idx->index_size); idx_offset = index_offset + info->start * info->index_size;
idx_size = idx->index_size * info->count;
idx_offset = idx->offset + (info->start * idx->index_size);
src_sel = DI_SRC_SEL_DMA; src_sel = DI_SRC_SEL_DMA;
} else { } else {
idx_buffer = NULL; idx_buffer = NULL;

View File

@ -600,7 +600,7 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
fd4_rasterizer_stateobj(ctx->rasterizer); fd4_rasterizer_stateobj(ctx->rasterizer);
uint32_t val = rast->pc_prim_vtx_cntl; uint32_t val = rast->pc_prim_vtx_cntl;
if (info->indexed && info->primitive_restart) if (info->index_size && info->primitive_restart)
val |= A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART; val |= A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
val |= COND(vp->writes_psize, A4XX_PC_PRIM_VTX_CNTL_PSIZE); val |= COND(vp->writes_psize, A4XX_PC_PRIM_VTX_CNTL_PSIZE);

View File

@ -42,7 +42,7 @@
static void static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring, draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd5_emit *emit) struct fd5_emit *emit, unsigned index_offset)
{ {
const struct pipe_draw_info *info = emit->info; const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode]; enum pc_di_primtype primtype = ctx->primtypes[info->mode];
@ -53,7 +53,7 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
fd5_emit_vertex_bufs(ring, emit); fd5_emit_vertex_bufs(ring, emit);
OUT_PKT4(ring, REG_A5XX_VFD_INDEX_OFFSET, 2); OUT_PKT4(ring, REG_A5XX_VFD_INDEX_OFFSET, 2);
OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_RING(ring, info->start_instance); /* ??? UNKNOWN_2209 */ OUT_RING(ring, info->start_instance); /* ??? UNKNOWN_2209 */
OUT_PKT4(ring, REG_A5XX_PC_RESTART_INDEX, 1); OUT_PKT4(ring, REG_A5XX_PC_RESTART_INDEX, 1);
@ -63,7 +63,7 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
fd5_emit_render_cntl(ctx, false); fd5_emit_render_cntl(ctx, false);
fd5_draw_emit(ctx->batch, ring, primtype, fd5_draw_emit(ctx->batch, ring, primtype,
emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY, emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
info); info, index_offset);
} }
/* fixup dirty shader state in case some "unrelated" (from the state- /* fixup dirty shader state in case some "unrelated" (from the state-
@ -92,7 +92,8 @@ fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
} }
static bool static bool
fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info) fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
unsigned index_offset)
{ {
struct fd5_context *fd5_ctx = fd5_context(ctx); struct fd5_context *fd5_ctx = fd5_context(ctx);
struct fd5_emit emit = { struct fd5_emit emit = {
@ -136,7 +137,7 @@ fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
emit.key.binning_pass = false; emit.key.binning_pass = false;
emit.dirty = dirty; emit.dirty = dirty;
draw_impl(ctx, ctx->batch->draw, &emit); draw_impl(ctx, ctx->batch->draw, &emit, index_offset);
// /* and now binning pass: */ // /* and now binning pass: */
// emit.key.binning_pass = true; // emit.key.binning_pass = true;

View File

@ -80,22 +80,21 @@ static inline void
fd5_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring, fd5_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype, enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode, enum pc_di_vis_cull_mode vismode,
const struct pipe_draw_info *info) const struct pipe_draw_info *info,
unsigned index_offset)
{ {
struct pipe_resource *idx_buffer = NULL; struct pipe_resource *idx_buffer = NULL;
enum a4xx_index_size idx_type; enum a4xx_index_size idx_type;
enum pc_di_src_sel src_sel; enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset; uint32_t idx_size, idx_offset;
if (info->indexed) { if (info->index_size) {
struct pipe_index_buffer *idx = &batch->ctx->indexbuf; assert(!info->has_user_indices);
assert(!idx->user_buffer); idx_buffer = info->index.resource;
idx_type = fd4_size2indextype(info->index_size);
idx_buffer = idx->buffer; idx_size = info->index_size * info->count;
idx_type = fd4_size2indextype(idx->index_size); idx_offset = index_offset + info->start * info->index_size;
idx_size = idx->index_size * info->count;
idx_offset = idx->offset + (info->start * idx->index_size);
src_sel = DI_SRC_SEL_DMA; src_sel = DI_SRC_SEL_DMA;
} else { } else {
idx_buffer = NULL; idx_buffer = NULL;

View File

@ -126,7 +126,7 @@ enum fd_dirty_3d_state {
FD_DIRTY_VIEWPORT = BIT(8), FD_DIRTY_VIEWPORT = BIT(8),
FD_DIRTY_VTXSTATE = BIT(9), FD_DIRTY_VTXSTATE = BIT(9),
FD_DIRTY_VTXBUF = BIT(10), FD_DIRTY_VTXBUF = BIT(10),
FD_DIRTY_INDEXBUF = BIT(11),
FD_DIRTY_SCISSOR = BIT(12), FD_DIRTY_SCISSOR = BIT(12),
FD_DIRTY_STREAMOUT = BIT(13), FD_DIRTY_STREAMOUT = BIT(13),
FD_DIRTY_UCP = BIT(14), FD_DIRTY_UCP = BIT(14),
@ -273,7 +273,6 @@ struct fd_context {
struct pipe_viewport_state viewport; struct pipe_viewport_state viewport;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES]; struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES]; struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
struct pipe_index_buffer indexbuf;
struct fd_streamout_stateobj streamout; struct fd_streamout_stateobj streamout;
struct pipe_clip_state ucp; struct pipe_clip_state ucp;
@ -296,7 +295,8 @@ struct fd_context {
void (*emit_sysmem_fini)(struct fd_batch *batch); void (*emit_sysmem_fini)(struct fd_batch *batch);
/* draw: */ /* draw: */
bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info); bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
unsigned index_offset);
void (*clear)(struct fd_context *ctx, unsigned buffers, void (*clear)(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil); const union pipe_color_union *color, double depth, unsigned stencil);

View File

@ -85,17 +85,16 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
if (!fd_supported_prim(ctx, info->mode)) { if (!fd_supported_prim(ctx, info->mode)) {
if (ctx->streamout.num_targets > 0) if (ctx->streamout.num_targets > 0)
debug_error("stream-out with emulated prims"); debug_error("stream-out with emulated prims");
util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer); util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
util_primconvert_draw_vbo(ctx->primconvert, info); util_primconvert_draw_vbo(ctx->primconvert, info);
return; return;
} }
/* Upload a user index buffer. */ /* Upload a user index buffer. */
struct pipe_index_buffer ibuffer_saved = {}; struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
if (info->indexed && ctx->indexbuf.user_buffer && unsigned index_offset = 0;
!util_save_and_upload_index_buffer(pctx, info, &ctx->indexbuf, if (info->index_size && info->has_user_indices &&
&ibuffer_saved)) { !util_upload_index_buffer(pctx, info, &indexbuf, &index_offset)) {
return; return;
} }
@ -169,7 +168,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
} }
/* Mark index buffer as being read */ /* Mark index buffer as being read */
resource_read(batch, ctx->indexbuf.buffer); resource_read(batch, indexbuf);
/* Mark textures as being read */ /* Mark textures as being read */
foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures) foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
@ -215,7 +214,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
util_format_short_name(pipe_surface_format(pfb->cbufs[0])), util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf))); util_format_short_name(pipe_surface_format(pfb->zsbuf)));
if (ctx->draw_vbo(ctx, info)) if (ctx->draw_vbo(ctx, info, index_offset))
batch->needs_flush = true; batch->needs_flush = true;
for (i = 0; i < ctx->streamout.num_targets; i++) for (i = 0; i < ctx->streamout.num_targets; i++)
@ -225,9 +224,8 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
fd_context_all_dirty(ctx); fd_context_all_dirty(ctx);
fd_batch_check_size(batch); fd_batch_check_size(batch);
if (info->index_size && indexbuf != info->index.resource)
if (info->indexed && ibuffer_saved.user_buffer) pipe_resource_reference(&indexbuf, NULL);
pctx->set_index_buffer(pctx, &ibuffer_saved);
} }
/* Generic clear implementation (partially) using u_blitter: */ /* Generic clear implementation (partially) using u_blitter: */
@ -286,7 +284,7 @@ fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
.max_index = 1, .max_index = 1,
.instance_count = 1, .instance_count = 1,
}; };
ctx->draw_vbo(ctx, &info); ctx->draw_vbo(ctx, &info, 0);
util_blitter_restore_constant_buffer_state(blitter); util_blitter_restore_constant_buffer_state(blitter);
util_blitter_restore_vertex_states(blitter); util_blitter_restore_vertex_states(blitter);

View File

@ -122,15 +122,13 @@ fd_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_src_sel src_sel; enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset; uint32_t idx_size, idx_offset;
if (info->indexed) { if (info->index_size) {
struct pipe_index_buffer *idx = &batch->ctx->indexbuf; assert(!info->has_user_indices);
assert(!idx->user_buffer); idx_buffer = info->index.resource;
idx_type = size2indextype(info->index_size);
idx_buffer = idx->buffer; idx_size = info->index_size * info->count;
idx_type = size2indextype(idx->index_size); idx_offset = info->start * info->index_size;
idx_size = idx->index_size * info->count;
idx_offset = idx->offset + (info->start * idx->index_size);
src_sel = DI_SRC_SEL_DMA; src_sel = DI_SRC_SEL_DMA;
} else { } else {
idx_buffer = NULL; idx_buffer = NULL;

View File

@ -62,10 +62,6 @@ fd_invalidate_resource(struct fd_context *ctx, struct pipe_resource *prsc)
ctx->dirty |= FD_DIRTY_VTXBUF; ctx->dirty |= FD_DIRTY_VTXBUF;
} }
/* Index buffer */
if (ctx->indexbuf.buffer == prsc)
ctx->dirty |= FD_DIRTY_INDEXBUF;
/* per-shader-stage resources: */ /* per-shader-stage resources: */
for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) { for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
/* Constbufs.. note that constbuf[0] is normal uniforms emitted in /* Constbufs.. note that constbuf[0] is normal uniforms emitted in

View File

@ -276,24 +276,6 @@ fd_set_vertex_buffers(struct pipe_context *pctx,
ctx->dirty |= FD_DIRTY_VTXBUF; ctx->dirty |= FD_DIRTY_VTXBUF;
} }
static void
fd_set_index_buffer(struct pipe_context *pctx,
const struct pipe_index_buffer *ib)
{
struct fd_context *ctx = fd_context(pctx);
if (ib) {
pipe_resource_reference(&ctx->indexbuf.buffer, ib->buffer);
ctx->indexbuf.index_size = ib->index_size;
ctx->indexbuf.offset = ib->offset;
ctx->indexbuf.user_buffer = ib->user_buffer;
} else {
pipe_resource_reference(&ctx->indexbuf.buffer, NULL);
}
ctx->dirty |= FD_DIRTY_INDEXBUF;
}
static void static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso) fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{ {
@ -492,7 +474,6 @@ fd_state_init(struct pipe_context *pctx)
pctx->set_viewport_states = fd_set_viewport_states; pctx->set_viewport_states = fd_set_viewport_states;
pctx->set_vertex_buffers = fd_set_vertex_buffers; pctx->set_vertex_buffers = fd_set_vertex_buffers;
pctx->set_index_buffer = fd_set_index_buffer;
pctx->bind_blend_state = fd_blend_state_bind; pctx->bind_blend_state = fd_blend_state_bind;
pctx->delete_blend_state = fd_blend_state_delete; pctx->delete_blend_state = fd_blend_state_delete;

View File

@ -723,7 +723,7 @@ ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *rin
uint32_t offset = v->constbase.driver_param; uint32_t offset = v->constbase.driver_param;
if (v->constlen > offset) { if (v->constlen > offset) {
uint32_t vertex_params[IR3_DP_VS_COUNT] = { uint32_t vertex_params[IR3_DP_VS_COUNT] = {
[IR3_DP_VTXID_BASE] = info->indexed ? [IR3_DP_VTXID_BASE] = info->index_size ?
info->index_bias : info->start, info->index_bias : info->start,
[IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v), [IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
}; };

View File

@ -83,13 +83,13 @@ i915_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
/* /*
* Map index buffer, if present * Map index buffer, if present
*/ */
if (info->indexed) { if (info->index_size) {
mapped_indices = i915->index_buffer.user_buffer; mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices) if (!mapped_indices)
mapped_indices = i915_buffer(i915->index_buffer.buffer)->data; mapped_indices = i915_buffer(info->index.resource)->data;
draw_set_indexes(draw, draw_set_indexes(draw,
(ubyte *) mapped_indices + i915->index_buffer.offset, (ubyte *) mapped_indices,
i915->index_buffer.index_size, ~0); info->index_size, ~0);
} }
if (i915->constants[PIPE_SHADER_VERTEX]) if (i915->constants[PIPE_SHADER_VERTEX])

View File

@ -249,7 +249,6 @@ struct i915_context {
struct pipe_sampler_view *fragment_sampler_views[PIPE_MAX_SAMPLERS]; struct pipe_sampler_view *fragment_sampler_views[PIPE_MAX_SAMPLERS];
struct pipe_sampler_view *vertex_sampler_views[PIPE_MAX_SAMPLERS]; struct pipe_sampler_view *vertex_sampler_views[PIPE_MAX_SAMPLERS];
struct pipe_viewport_state viewport; struct pipe_viewport_state viewport;
struct pipe_index_buffer index_buffer;
unsigned dirty; unsigned dirty;

View File

@ -1060,17 +1060,6 @@ i915_delete_vertex_elements_state(struct pipe_context *pipe, void *velems)
FREE( velems ); FREE( velems );
} }
static void i915_set_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *ib)
{
struct i915_context *i915 = i915_context(pipe);
if (ib)
memcpy(&i915->index_buffer, ib, sizeof(i915->index_buffer));
else
memset(&i915->index_buffer, 0, sizeof(i915->index_buffer));
}
static void static void
i915_set_sample_mask(struct pipe_context *pipe, i915_set_sample_mask(struct pipe_context *pipe,
unsigned sample_mask) unsigned sample_mask)
@ -1119,5 +1108,4 @@ i915_init_state_functions( struct i915_context *i915 )
i915->base.sampler_view_destroy = i915_sampler_view_destroy; i915->base.sampler_view_destroy = i915_sampler_view_destroy;
i915->base.set_viewport_states = i915_set_viewport_states; i915->base.set_viewport_states = i915_set_viewport_states;
i915->base.set_vertex_buffers = i915_set_vertex_buffers; i915->base.set_vertex_buffers = i915_set_vertex_buffers;
i915->base.set_index_buffer = i915_set_index_buffer;
} }

View File

@ -81,7 +81,6 @@ struct llvmpipe_context {
struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS]; struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS];
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
struct pipe_index_buffer index_buffer;
unsigned num_samplers[PIPE_SHADER_TYPES]; unsigned num_samplers[PIPE_SHADER_TYPES];
unsigned num_sampler_views[PIPE_SHADER_TYPES]; unsigned num_sampler_views[PIPE_SHADER_TYPES];

View File

@ -87,20 +87,16 @@ llvmpipe_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
} }
/* Map index buffer, if present */ /* Map index buffer, if present */
if (info->indexed) { if (info->index_size) {
unsigned available_space = ~0; unsigned available_space = ~0;
mapped_indices = lp->index_buffer.user_buffer; mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices) { if (!mapped_indices) {
mapped_indices = llvmpipe_resource_data(lp->index_buffer.buffer); mapped_indices = llvmpipe_resource_data(info->index.resource);
if (lp->index_buffer.buffer->width0 > lp->index_buffer.offset) available_space = info->index.resource->width0;
available_space =
(lp->index_buffer.buffer->width0 - lp->index_buffer.offset);
else
available_space = 0;
} }
draw_set_indexes(draw, draw_set_indexes(draw,
(ubyte *) mapped_indices + lp->index_buffer.offset, (ubyte *) mapped_indices,
lp->index_buffer.index_size, available_space); info->index_size, available_space);
} }
for (i = 0; i < lp->num_so_targets; i++) { for (i = 0; i < lp->num_so_targets; i++) {

View File

@ -93,18 +93,6 @@ llvmpipe_set_vertex_buffers(struct pipe_context *pipe,
} }
static void
llvmpipe_set_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *ib)
{
struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
if (ib)
memcpy(&llvmpipe->index_buffer, ib, sizeof(llvmpipe->index_buffer));
else
memset(&llvmpipe->index_buffer, 0, sizeof(llvmpipe->index_buffer));
}
void void
llvmpipe_init_vertex_funcs(struct llvmpipe_context *llvmpipe) llvmpipe_init_vertex_funcs(struct llvmpipe_context *llvmpipe)
{ {
@ -113,5 +101,4 @@ llvmpipe_init_vertex_funcs(struct llvmpipe_context *llvmpipe)
llvmpipe->pipe.delete_vertex_elements_state = llvmpipe_delete_vertex_elements_state; llvmpipe->pipe.delete_vertex_elements_state = llvmpipe_delete_vertex_elements_state;
llvmpipe->pipe.set_vertex_buffers = llvmpipe_set_vertex_buffers; llvmpipe->pipe.set_vertex_buffers = llvmpipe_set_vertex_buffers;
llvmpipe->pipe.set_index_buffer = llvmpipe_set_index_buffer;
} }

View File

@ -188,11 +188,6 @@ static void noop_delete_state(struct pipe_context *ctx, void *state)
FREE(state); FREE(state);
} }
static void noop_set_index_buffer(struct pipe_context *ctx,
const struct pipe_index_buffer *ib)
{
}
static void noop_set_vertex_buffers(struct pipe_context *ctx, static void noop_set_vertex_buffers(struct pipe_context *ctx,
unsigned start_slot, unsigned count, unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *buffers) const struct pipe_vertex_buffer *buffers)
@ -298,7 +293,6 @@ void noop_init_state_functions(struct pipe_context *ctx)
ctx->set_scissor_states = noop_set_scissor_states; ctx->set_scissor_states = noop_set_scissor_states;
ctx->set_stencil_ref = noop_set_stencil_ref; ctx->set_stencil_ref = noop_set_stencil_ref;
ctx->set_vertex_buffers = noop_set_vertex_buffers; ctx->set_vertex_buffers = noop_set_vertex_buffers;
ctx->set_index_buffer = noop_set_index_buffer;
ctx->set_viewport_states = noop_set_viewport_states; ctx->set_viewport_states = noop_set_viewport_states;
ctx->sampler_view_destroy = noop_sampler_view_destroy; ctx->sampler_view_destroy = noop_sampler_view_destroy;
ctx->surface_destroy = noop_surface_destroy; ctx->surface_destroy = noop_surface_destroy;

View File

@ -123,13 +123,6 @@ nv30_invalidate_resource_storage(struct nouveau_context *nv,
} }
} }
} }
if (res->bind & PIPE_BIND_INDEX_BUFFER) {
if (nv30->idxbuf.buffer == res) {
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_IDXBUF);
if (!--ref)
return ref;
}
}
if (res->bind & PIPE_BIND_SAMPLER_VIEW) { if (res->bind & PIPE_BIND_SAMPLER_VIEW) {
for (i = 0; i < nv30->fragprog.num_textures; ++i) { for (i = 0; i < nv30->fragprog.num_textures; ++i) {

View File

@ -110,7 +110,6 @@ struct nv30_context {
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs; unsigned num_vtxbufs;
struct pipe_index_buffer idxbuf;
uint32_t vbo_fifo; uint32_t vbo_fifo;
uint32_t vbo_user; uint32_t vbo_user;
unsigned vbo_min_index; unsigned vbo_min_index;

View File

@ -430,15 +430,15 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
draw_set_mapped_vertex_buffer(draw, i, map, ~0); draw_set_mapped_vertex_buffer(draw, i, map, ~0);
} }
if (info->indexed) { if (info->index_size) {
const void *map = nv30->idxbuf.user_buffer; const void *map = info->has_user_indices ? info->index.user : NULL;
if (!map) if (!map)
map = pipe_buffer_map(pipe, nv30->idxbuf.buffer, map = pipe_buffer_map(pipe, info->index.resource,
PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transferi); PIPE_TRANSFER_READ, &transferi);
draw_set_indexes(draw, draw_set_indexes(draw,
(ubyte *) map + nv30->idxbuf.offset, (ubyte *) map,
nv30->idxbuf.index_size, ~0); info->index_size, ~0);
} else { } else {
draw_set_indexes(draw, NULL, 0, 0); draw_set_indexes(draw, NULL, 0, 0);
} }
@ -446,7 +446,7 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
draw_vbo(draw, info); draw_vbo(draw, info);
draw_flush(draw); draw_flush(draw);
if (info->indexed && transferi) if (info->index_size && transferi)
pipe_buffer_unmap(pipe, transferi); pipe_buffer_unmap(pipe, transferi);
for (i = 0; i < nv30->num_vtxbufs; i++) for (i = 0; i < nv30->num_vtxbufs; i++)
if (transfer[i]) if (transfer[i])

View File

@ -199,7 +199,7 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
{ {
struct push_context ctx; struct push_context ctx;
unsigned i, index_size; unsigned i, index_size;
bool apply_bias = info->indexed && info->index_bias; bool apply_bias = info->index_size && info->index_bias;
ctx.push = nv30->base.pushbuf; ctx.push = nv30->base.pushbuf;
ctx.translate = nv30->vertex->translate; ctx.translate = nv30->vertex->translate;
@ -224,18 +224,18 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0); ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
} }
if (info->indexed) { if (info->index_size) {
if (nv30->idxbuf.buffer) if (!info->has_user_indices)
ctx.idxbuf = nouveau_resource_map_offset(&nv30->base, ctx.idxbuf = nouveau_resource_map_offset(&nv30->base,
nv04_resource(nv30->idxbuf.buffer), nv30->idxbuf.offset, nv04_resource(info->index.resource), info->start * info->index_size,
NOUVEAU_BO_RD); NOUVEAU_BO_RD);
else else
ctx.idxbuf = nv30->idxbuf.user_buffer; ctx.idxbuf = info->index.user;
if (!ctx.idxbuf) { if (!ctx.idxbuf) {
nv30_state_release(nv30); nv30_state_release(nv30);
return; return;
} }
index_size = nv30->idxbuf.index_size; index_size = info->index_size;
ctx.primitive_restart = info->primitive_restart; ctx.primitive_restart = info->primitive_restart;
ctx.restart_index = info->restart_index; ctx.restart_index = info->restart_index;
} else { } else {
@ -277,8 +277,8 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
BEGIN_NV04(ctx.push, NV30_3D(VERTEX_BEGIN_END), 1); BEGIN_NV04(ctx.push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (ctx.push, NV30_3D_VERTEX_BEGIN_END_STOP); PUSH_DATA (ctx.push, NV30_3D_VERTEX_BEGIN_END_STOP);
if (info->indexed) if (info->index_size && !info->has_user_indices)
nouveau_resource_unmap(nv04_resource(nv30->idxbuf.buffer)); nouveau_resource_unmap(nv04_resource(info->index.resource));
for (i = 0; i < nv30->num_vtxbufs; ++i) { for (i = 0; i < nv30->num_vtxbufs; ++i) {
if (nv30->vtxbuf[i].buffer.resource) { if (nv30->vtxbuf[i].buffer.resource) {

View File

@ -44,10 +44,6 @@ nv30_memory_barrier(struct pipe_context *pipe, unsigned flags)
if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv30->base.vbo_dirty = true; nv30->base.vbo_dirty = true;
} }
if (nv30->idxbuf.buffer &&
nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv30->base.vbo_dirty = true;
} }
} }

View File

@ -438,23 +438,6 @@ nv30_set_vertex_buffers(struct pipe_context *pipe,
nv30->dirty |= NV30_NEW_ARRAYS; nv30->dirty |= NV30_NEW_ARRAYS;
} }
static void
nv30_set_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *ib)
{
struct nv30_context *nv30 = nv30_context(pipe);
if (ib) {
pipe_resource_reference(&nv30->idxbuf.buffer, ib->buffer);
nv30->idxbuf.index_size = ib->index_size;
nv30->idxbuf.offset = ib->offset;
nv30->idxbuf.user_buffer = ib->user_buffer;
} else {
pipe_resource_reference(&nv30->idxbuf.buffer, NULL);
nv30->idxbuf.user_buffer = NULL;
}
}
void void
nv30_state_init(struct pipe_context *pipe) nv30_state_init(struct pipe_context *pipe)
{ {
@ -481,5 +464,4 @@ nv30_state_init(struct pipe_context *pipe)
pipe->set_viewport_states = nv30_set_viewport_states; pipe->set_viewport_states = nv30_set_viewport_states;
pipe->set_vertex_buffers = nv30_set_vertex_buffers; pipe->set_vertex_buffers = nv30_set_vertex_buffers;
pipe->set_index_buffer = nv30_set_index_buffer;
} }

View File

@ -459,10 +459,11 @@ nv30_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
static void static void
nv30_draw_elements(struct nv30_context *nv30, bool shorten, nv30_draw_elements(struct nv30_context *nv30, bool shorten,
const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count, unsigned mode, unsigned start, unsigned count,
unsigned instance_count, int32_t index_bias) unsigned instance_count, int32_t index_bias,
unsigned index_size)
{ {
const unsigned index_size = nv30->idxbuf.index_size;
struct nouveau_pushbuf *push = nv30->base.pushbuf; struct nouveau_pushbuf *push = nv30->base.pushbuf;
struct nouveau_object *eng3d = nv30->screen->eng3d; struct nouveau_object *eng3d = nv30->screen->eng3d;
unsigned prim = nv30_prim_gl(mode); unsigned prim = nv30_prim_gl(mode);
@ -474,9 +475,9 @@ nv30_draw_elements(struct nv30_context *nv30, bool shorten,
} }
if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 && if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
nv30->idxbuf.buffer) { !info->has_user_indices) {
struct nv04_resource *res = nv04_resource(nv30->idxbuf.buffer); struct nv04_resource *res = nv04_resource(info->index.resource);
unsigned offset = nv30->idxbuf.offset; unsigned offset = 0;
assert(nouveau_resource_mapped_by_gpu(&res->base)); assert(nouveau_resource_mapped_by_gpu(&res->base));
@ -511,12 +512,12 @@ nv30_draw_elements(struct nv30_context *nv30, bool shorten,
PUSH_RESET(push, BUFCTX_IDXBUF); PUSH_RESET(push, BUFCTX_IDXBUF);
} else { } else {
const void *data; const void *data;
if (nv30->idxbuf.buffer) if (!info->has_user_indices)
data = nouveau_resource_map_offset(&nv30->base, data = nouveau_resource_map_offset(&nv30->base,
nv04_resource(nv30->idxbuf.buffer), nv04_resource(info->index.resource),
nv30->idxbuf.offset, NOUVEAU_BO_RD); start * index_size, NOUVEAU_BO_RD);
else else
data = nv30->idxbuf.user_buffer; data = info->index.user;
if (!data) if (!data)
return; return;
@ -559,7 +560,7 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
* if index count is larger and we expect repeated vertices, suggest upload. * if index count is larger and we expect repeated vertices, suggest upload.
*/ */
nv30->vbo_push_hint = /* the 64 is heuristic */ nv30->vbo_push_hint = /* the 64 is heuristic */
!(info->indexed && !(info->index_size &&
((info->max_index - info->min_index + 64) < info->count)); ((info->max_index - info->min_index + 64) < info->count));
nv30->vbo_min_index = info->min_index; nv30->vbo_min_index = info->min_index;
@ -589,8 +590,8 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
nv30->base.vbo_dirty = true; nv30->base.vbo_dirty = true;
} }
if (!nv30->base.vbo_dirty && nv30->idxbuf.buffer && if (!nv30->base.vbo_dirty && info->index_size && !info->has_user_indices &&
nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv30->base.vbo_dirty = true; nv30->base.vbo_dirty = true;
if (nv30->base.vbo_dirty) { if (nv30->base.vbo_dirty) {
@ -599,7 +600,7 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
nv30->base.vbo_dirty = false; nv30->base.vbo_dirty = false;
} }
if (!info->indexed) { if (!info->index_size) {
nv30_draw_arrays(nv30, nv30_draw_arrays(nv30,
info->mode, info->start, info->count, info->mode, info->start, info->count,
info->instance_count); info->instance_count);
@ -628,9 +629,9 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
shorten = false; shorten = false;
} }
nv30_draw_elements(nv30, shorten, nv30_draw_elements(nv30, shorten, info,
info->mode, info->start, info->count, info->mode, info->start, info->count,
info->instance_count, info->index_bias); info->instance_count, info->index_bias, info->index_size);
} }
nv30_state_release(nv30); nv30_state_release(nv30);

View File

@ -68,10 +68,6 @@ nv50_memory_barrier(struct pipe_context *pipe, unsigned flags)
nv50->base.vbo_dirty = true; nv50->base.vbo_dirty = true;
} }
if (nv50->idxbuf.buffer &&
nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv50->base.vbo_dirty = true;
for (s = 0; s < 3 && !nv50->cb_dirty; ++s) { for (s = 0; s < 3 && !nv50->cb_dirty; ++s) {
uint32_t valid = nv50->constbuf_valid[s]; uint32_t valid = nv50->constbuf_valid[s];
@ -146,8 +142,6 @@ nv50_context_unreference_resources(struct nv50_context *nv50)
for (i = 0; i < nv50->num_vtxbufs; ++i) for (i = 0; i < nv50->num_vtxbufs; ++i)
pipe_resource_reference(&nv50->vtxbuf[i].buffer.resource, NULL); pipe_resource_reference(&nv50->vtxbuf[i].buffer.resource, NULL);
pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
for (s = 0; s < 3; ++s) { for (s = 0; s < 3; ++s) {
assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS); assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
for (i = 0; i < nv50->num_textures[s]; ++i) for (i = 0; i < nv50->num_textures[s]; ++i)
@ -238,14 +232,6 @@ nv50_invalidate_resource_storage(struct nouveau_context *ctx,
} }
} }
if (nv50->idxbuf.buffer == res) {
/* Just rebind to the bufctx as there is no separate dirty bit */
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(res), RD);
if (!--ref)
return ref;
}
for (s = 0; s < 3; ++s) { for (s = 0; s < 3; ++s) {
assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS); assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
for (i = 0; i < nv50->num_textures[s]; ++i) { for (i = 0; i < nv50->num_textures[s]; ++i) {

View File

@ -143,7 +143,6 @@ struct nv50_context {
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs; unsigned num_vtxbufs;
uint32_t vtxbufs_coherent; uint32_t vtxbufs_coherent;
struct pipe_index_buffer idxbuf;
uint32_t vbo_fifo; /* bitmask of vertex elements to be pushed to FIFO */ uint32_t vbo_fifo; /* bitmask of vertex elements to be pushed to FIFO */
uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */ uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */
uint32_t vbo_constant; /* bitmask of user buffers with stride 0 */ uint32_t vbo_constant; /* bitmask of user buffers with stride 0 */

View File

@ -244,7 +244,7 @@ nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
unsigned i, index_size; unsigned i, index_size;
unsigned inst_count = info->instance_count; unsigned inst_count = info->instance_count;
unsigned vert_count = info->count; unsigned vert_count = info->count;
bool apply_bias = info->indexed && info->index_bias; bool apply_bias = info->index_size && info->index_bias;
ctx.push = nv50->base.pushbuf; ctx.push = nv50->base.pushbuf;
ctx.translate = nv50->vertex->translate; ctx.translate = nv50->vertex->translate;
@ -276,17 +276,17 @@ nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0); ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
} }
if (info->indexed) { if (info->index_size) {
if (nv50->idxbuf.buffer) { if (!info->has_user_indices) {
ctx.idxbuf = nouveau_resource_map_offset(&nv50->base, ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
nv04_resource(nv50->idxbuf.buffer), nv50->idxbuf.offset, nv04_resource(info->index.resource), info->start * info->index_size,
NOUVEAU_BO_RD); NOUVEAU_BO_RD);
} else { } else {
ctx.idxbuf = nv50->idxbuf.user_buffer; ctx.idxbuf = info->index.user;
} }
if (!ctx.idxbuf) if (!ctx.idxbuf)
return; return;
index_size = nv50->idxbuf.index_size; index_size = info->index_size;
ctx.primitive_restart = info->primitive_restart; ctx.primitive_restart = info->primitive_restart;
ctx.restart_index = info->restart_index; ctx.restart_index = info->restart_index;
} else { } else {

View File

@ -1080,29 +1080,6 @@ nv50_set_vertex_buffers(struct pipe_context *pipe,
} }
} }
static void
nv50_set_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *ib)
{
struct nv50_context *nv50 = nv50_context(pipe);
if (nv50->idxbuf.buffer)
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
if (ib) {
pipe_resource_reference(&nv50->idxbuf.buffer, ib->buffer);
nv50->idxbuf.index_size = ib->index_size;
if (ib->buffer) {
nv50->idxbuf.offset = ib->offset;
BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(ib->buffer), RD);
} else {
nv50->idxbuf.user_buffer = ib->user_buffer;
}
} else {
pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
}
}
static void static void
nv50_vertex_state_bind(struct pipe_context *pipe, void *hwcso) nv50_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{ {
@ -1341,7 +1318,6 @@ nv50_init_state_functions(struct nv50_context *nv50)
pipe->bind_vertex_elements_state = nv50_vertex_state_bind; pipe->bind_vertex_elements_state = nv50_vertex_state_bind;
pipe->set_vertex_buffers = nv50_set_vertex_buffers; pipe->set_vertex_buffers = nv50_set_vertex_buffers;
pipe->set_index_buffer = nv50_set_index_buffer;
pipe->create_stream_output_target = nv50_so_target_create; pipe->create_stream_output_target = nv50_so_target_create;
pipe->stream_output_target_destroy = nv50_so_target_destroy; pipe->stream_output_target_destroy = nv50_so_target_destroy;

View File

@ -595,12 +595,13 @@ nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
static void static void
nv50_draw_elements(struct nv50_context *nv50, bool shorten, nv50_draw_elements(struct nv50_context *nv50, bool shorten,
const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count, unsigned mode, unsigned start, unsigned count,
unsigned instance_count, int32_t index_bias) unsigned instance_count, int32_t index_bias,
unsigned index_size)
{ {
struct nouveau_pushbuf *push = nv50->base.pushbuf; struct nouveau_pushbuf *push = nv50->base.pushbuf;
unsigned prim; unsigned prim;
const unsigned index_size = nv50->idxbuf.index_size;
prim = nv50_prim_gl(mode); prim = nv50_prim_gl(mode);
@ -614,15 +615,15 @@ nv50_draw_elements(struct nv50_context *nv50, bool shorten,
nv50->state.index_bias = index_bias; nv50->state.index_bias = index_bias;
} }
if (nv50->idxbuf.buffer) { if (!info->has_user_indices) {
struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer); struct nv04_resource *buf = nv04_resource(info->index.resource);
unsigned pb_start; unsigned pb_start;
unsigned pb_bytes; unsigned pb_bytes;
const unsigned base = (buf->offset + nv50->idxbuf.offset) & ~3; const unsigned base = buf->offset & ~3;
start += ((buf->offset + nv50->idxbuf.offset) & 3) >> (index_size >> 1); start += (buf->offset & 3) >> (index_size >> 1);
assert(nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer)); assert(nouveau_resource_mapped_by_gpu(info->index.resource));
/* This shouldn't have to be here. The going theory is that the buffer /* This shouldn't have to be here. The going theory is that the buffer
* is being filled in by PGRAPH, and it's not done yet by the time it * is being filled in by PGRAPH, and it's not done yet by the time it
@ -675,7 +676,7 @@ nv50_draw_elements(struct nv50_context *nv50, bool shorten,
prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT; prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
} }
} else { } else {
const void *data = nv50->idxbuf.user_buffer; const void *data = info->index.user;
while (instance_count--) { while (instance_count--) {
BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1); BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
@ -769,6 +770,11 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
bool tex_dirty = false; bool tex_dirty = false;
int s; int s;
if (info->index_size && !info->has_user_indices) {
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(info->index.resource), RD);
}
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */ /* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nv50->vb_elt_first = info->min_index + info->index_bias; nv50->vb_elt_first = info->min_index + info->index_bias;
nv50->vb_elt_limit = info->max_index - info->min_index; nv50->vb_elt_limit = info->max_index - info->min_index;
@ -779,7 +785,7 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
* if index count is larger and we expect repeated vertices, suggest upload. * if index count is larger and we expect repeated vertices, suggest upload.
*/ */
nv50->vbo_push_hint = /* the 64 is heuristic */ nv50->vbo_push_hint = /* the 64 is heuristic */
!(info->indexed && ((nv50->vb_elt_limit + 64) < info->count)); !(info->index_size && ((nv50->vb_elt_limit + 64) < info->count));
if (nv50->vbo_user && !(nv50->dirty_3d & (NV50_NEW_3D_ARRAYS | NV50_NEW_3D_VERTEX))) { if (nv50->vbo_user && !(nv50->dirty_3d & (NV50_NEW_3D_ARRAYS | NV50_NEW_3D_VERTEX))) {
if (!!nv50->vbo_fifo != nv50->vbo_push_hint) if (!!nv50->vbo_fifo != nv50->vbo_push_hint)
@ -853,7 +859,7 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
nv50->base.vbo_dirty = false; nv50->base.vbo_dirty = false;
} }
if (info->indexed) { if (info->index_size) {
bool shorten = info->max_index <= 65535; bool shorten = info->max_index <= 65535;
if (info->primitive_restart != nv50->state.prim_restart) { if (info->primitive_restart != nv50->state.prim_restart) {
@ -878,9 +884,9 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
shorten = false; shorten = false;
} }
nv50_draw_elements(nv50, shorten, nv50_draw_elements(nv50, shorten, info,
info->mode, info->start, info->count, info->mode, info->start, info->count,
info->instance_count, info->index_bias); info->instance_count, info->index_bias, info->index_size);
} else } else
if (unlikely(info->count_from_stream_output)) { if (unlikely(info->count_from_stream_output)) {
nva0_draw_stream_output(nv50, info); nva0_draw_stream_output(nv50, info);

View File

@ -68,10 +68,6 @@ nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
nvc0->base.vbo_dirty = true; nvc0->base.vbo_dirty = true;
} }
if (nvc0->idxbuf.buffer &&
nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nvc0->base.vbo_dirty = true;
for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) { for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
uint32_t valid = nvc0->constbuf_valid[s]; uint32_t valid = nvc0->constbuf_valid[s];
@ -149,8 +145,6 @@ nvc0_context_unreference_resources(struct nvc0_context *nvc0)
for (i = 0; i < nvc0->num_vtxbufs; ++i) for (i = 0; i < nvc0->num_vtxbufs; ++i)
pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]); pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]);
pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
for (s = 0; s < 6; ++s) { for (s = 0; s < 6; ++s) {
for (i = 0; i < nvc0->num_textures[s]; ++i) for (i = 0; i < nvc0->num_textures[s]; ++i)
pipe_sampler_view_reference(&nvc0->textures[s][i], NULL); pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);
@ -268,13 +262,6 @@ nvc0_invalidate_resource_storage(struct nouveau_context *ctx,
} }
} }
if (nvc0->idxbuf.buffer == res) {
nvc0->dirty_3d |= NVC0_NEW_3D_IDXBUF;
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
if (!--ref)
return ref;
}
for (s = 0; s < 6; ++s) { for (s = 0; s < 6; ++s) {
for (i = 0; i < nvc0->num_textures[s]; ++i) { for (i = 0; i < nvc0->num_textures[s]; ++i) {
if (nvc0->textures[s][i] && if (nvc0->textures[s][i] &&

View File

@ -53,7 +53,7 @@
#define NVC0_NEW_3D_TEXTURES (1 << 19) #define NVC0_NEW_3D_TEXTURES (1 << 19)
#define NVC0_NEW_3D_SAMPLERS (1 << 20) #define NVC0_NEW_3D_SAMPLERS (1 << 20)
#define NVC0_NEW_3D_TFB_TARGETS (1 << 21) #define NVC0_NEW_3D_TFB_TARGETS (1 << 21)
#define NVC0_NEW_3D_IDXBUF (1 << 22)
#define NVC0_NEW_3D_SURFACES (1 << 23) #define NVC0_NEW_3D_SURFACES (1 << 23)
#define NVC0_NEW_3D_MIN_SAMPLES (1 << 24) #define NVC0_NEW_3D_MIN_SAMPLES (1 << 24)
#define NVC0_NEW_3D_TESSFACTOR (1 << 25) #define NVC0_NEW_3D_TESSFACTOR (1 << 25)
@ -193,7 +193,6 @@ struct nvc0_context {
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs; unsigned num_vtxbufs;
uint32_t vtxbufs_coherent; uint32_t vtxbufs_coherent;
struct pipe_index_buffer idxbuf;
uint32_t constant_vbos; uint32_t constant_vbos;
uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */ uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */
uint32_t vb_elt_first; /* from pipe_draw_info, for vertex upload */ uint32_t vb_elt_first; /* from pipe_draw_info, for vertex upload */

View File

@ -961,31 +961,6 @@ nvc0_set_vertex_buffers(struct pipe_context *pipe,
} }
} }
static void
nvc0_set_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *ib)
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
if (nvc0->idxbuf.buffer)
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
if (ib) {
pipe_resource_reference(&nvc0->idxbuf.buffer, ib->buffer);
nvc0->idxbuf.index_size = ib->index_size;
if (ib->buffer) {
nvc0->idxbuf.offset = ib->offset;
nvc0->dirty_3d |= NVC0_NEW_3D_IDXBUF;
} else {
nvc0->idxbuf.user_buffer = ib->user_buffer;
nvc0->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
}
} else {
nvc0->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
}
}
static void static void
nvc0_vertex_state_bind(struct pipe_context *pipe, void *hwcso) nvc0_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{ {
@ -1426,7 +1401,6 @@ nvc0_init_state_functions(struct nvc0_context *nvc0)
pipe->bind_vertex_elements_state = nvc0_vertex_state_bind; pipe->bind_vertex_elements_state = nvc0_vertex_state_bind;
pipe->set_vertex_buffers = nvc0_set_vertex_buffers; pipe->set_vertex_buffers = nvc0_set_vertex_buffers;
pipe->set_index_buffer = nvc0_set_index_buffer;
pipe->create_stream_output_target = nvc0_so_target_create; pipe->create_stream_output_target = nvc0_so_target_create;
pipe->stream_output_target_destroy = nvc0_so_target_destroy; pipe->stream_output_target_destroy = nvc0_so_target_destroy;

View File

@ -819,8 +819,6 @@ nvc0_switch_pipe_context(struct nvc0_context *ctx_to)
if (!ctx_to->vertex) if (!ctx_to->vertex)
ctx_to->dirty_3d &= ~(NVC0_NEW_3D_VERTEX | NVC0_NEW_3D_ARRAYS); ctx_to->dirty_3d &= ~(NVC0_NEW_3D_VERTEX | NVC0_NEW_3D_ARRAYS);
if (!ctx_to->idxbuf.buffer)
ctx_to->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
if (!ctx_to->vertprog) if (!ctx_to->vertprog)
ctx_to->dirty_3d &= ~NVC0_NEW_3D_VERTPROG; ctx_to->dirty_3d &= ~NVC0_NEW_3D_VERTPROG;
@ -876,7 +874,6 @@ validate_list_3d[] = {
{ nvc0_vertex_arrays_validate, NVC0_NEW_3D_VERTEX | NVC0_NEW_3D_ARRAYS }, { nvc0_vertex_arrays_validate, NVC0_NEW_3D_VERTEX | NVC0_NEW_3D_ARRAYS },
{ nvc0_validate_surfaces, NVC0_NEW_3D_SURFACES }, { nvc0_validate_surfaces, NVC0_NEW_3D_SURFACES },
{ nvc0_validate_buffers, NVC0_NEW_3D_BUFFERS }, { nvc0_validate_buffers, NVC0_NEW_3D_BUFFERS },
{ nvc0_idxbuf_validate, NVC0_NEW_3D_IDXBUF },
{ nvc0_tfb_validate, NVC0_NEW_3D_TFB_TARGETS | NVC0_NEW_3D_GMTYPROG }, { nvc0_tfb_validate, NVC0_NEW_3D_TFB_TARGETS | NVC0_NEW_3D_GMTYPROG },
{ nvc0_layer_validate, NVC0_NEW_3D_VERTPROG | { nvc0_layer_validate, NVC0_NEW_3D_VERTPROG |
NVC0_NEW_3D_TEVLPROG | NVC0_NEW_3D_TEVLPROG |

View File

@ -522,26 +522,6 @@ nvc0_vertex_arrays_validate(struct nvc0_context *nvc0)
nvc0_validate_vertex_buffers(nvc0); nvc0_validate_vertex_buffers(nvc0);
} }
void
nvc0_idxbuf_validate(struct nvc0_context *nvc0)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
assert(buf);
assert(nouveau_resource_mapped_by_gpu(&buf->base));
PUSH_SPACE(push, 6);
BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
PUSH_DATAh(push, buf->address + nvc0->idxbuf.offset);
PUSH_DATA (push, buf->address + nvc0->idxbuf.offset);
PUSH_DATAh(push, buf->address + buf->base.width0 - 1);
PUSH_DATA (push, buf->address + buf->base.width0 - 1);
PUSH_DATA (push, nvc0->idxbuf.index_size >> 1);
BCTX_REFN(nvc0->bufctx_3d, 3D_IDX, buf, RD);
}
#define NVC0_PRIM_GL_CASE(n) \ #define NVC0_PRIM_GL_CASE(n) \
case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n
@ -588,7 +568,7 @@ nvc0_draw_arrays(struct nvc0_context *nvc0,
unsigned prim; unsigned prim;
if (nvc0->state.index_bias) { if (nvc0->state.index_bias) {
/* index_bias is implied 0 if !info->indexed (really ?) */ /* index_bias is implied 0 if !info->index_size (really ?) */
/* TODO: can we deactivate it for the VERTEX_BUFFER_FIRST command ? */ /* TODO: can we deactivate it for the VERTEX_BUFFER_FIRST command ? */
PUSH_SPACE(push, 2); PUSH_SPACE(push, 2);
IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0); IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
@ -711,12 +691,13 @@ nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
static void static void
nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten, nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten,
const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count, unsigned mode, unsigned start, unsigned count,
unsigned instance_count, int32_t index_bias) unsigned instance_count, int32_t index_bias,
unsigned index_size)
{ {
struct nouveau_pushbuf *push = nvc0->base.pushbuf; struct nouveau_pushbuf *push = nvc0->base.pushbuf;
unsigned prim; unsigned prim;
const unsigned index_size = nvc0->idxbuf.index_size;
prim = nvc0_prim_gl(mode); prim = nvc0_prim_gl(mode);
@ -729,7 +710,7 @@ nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten,
nvc0->state.index_bias = index_bias; nvc0->state.index_bias = index_bias;
} }
if (nvc0->idxbuf.buffer) { if (!info->has_user_indices) {
PUSH_SPACE(push, 1); PUSH_SPACE(push, 1);
IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim); IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim);
do { do {
@ -745,7 +726,7 @@ nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten,
} while (instance_count); } while (instance_count);
IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0); IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
} else { } else {
const void *data = nvc0->idxbuf.user_buffer; const void *data = info->index.user;
while (instance_count--) { while (instance_count--) {
PUSH_SPACE(push, 2); PUSH_SPACE(push, 2);
@ -841,9 +822,9 @@ nvc0_draw_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
BEGIN_NVC0(push, NVC0_3D(CB_POS), 1); BEGIN_NVC0(push, NVC0_3D(CB_POS), 1);
PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO); PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
if (info->indexed) { if (info->index_size) {
assert(nvc0->idxbuf.buffer); assert(!info->has_user_indices);
assert(nouveau_resource_mapped_by_gpu(nvc0->idxbuf.buffer)); assert(nouveau_resource_mapped_by_gpu(info->index.resource));
size = 5; size = 5;
if (buf_count) if (buf_count)
macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT; macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT;
@ -851,7 +832,7 @@ nvc0_draw_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT; macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT;
} else { } else {
if (nvc0->state.index_bias) { if (nvc0->state.index_bias) {
/* index_bias is implied 0 if !info->indexed (really ?) */ /* index_bias is implied 0 if !info->index_size (really ?) */
IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0); IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0); IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
nvc0->state.index_bias = 0; nvc0->state.index_bias = 0;
@ -940,6 +921,9 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
struct nvc0_screen *screen = nvc0->screen; struct nvc0_screen *screen = nvc0->screen;
int s; int s;
if (info->index_size)
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */ /* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nvc0->vb_elt_first = info->min_index + info->index_bias; nvc0->vb_elt_first = info->min_index + info->index_bias;
nvc0->vb_elt_limit = info->max_index - info->min_index; nvc0->vb_elt_limit = info->max_index - info->min_index;
@ -950,7 +934,7 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
* if index count is larger and we expect repeated vertices, suggest upload. * if index count is larger and we expect repeated vertices, suggest upload.
*/ */
nvc0->vbo_push_hint = nvc0->vbo_push_hint =
!info->indirect && info->indexed && !info->indirect && info->index_size &&
(nvc0->vb_elt_limit >= (info->count * 2)); (nvc0->vb_elt_limit >= (info->count * 2));
/* Check whether we want to switch vertex-submission mode. */ /* Check whether we want to switch vertex-submission mode. */
@ -974,6 +958,23 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
IMMED_NVC0(push, NVC0_3D(PATCH_VERTICES), nvc0->state.patch_vertices); IMMED_NVC0(push, NVC0_3D(PATCH_VERTICES), nvc0->state.patch_vertices);
} }
if (info->index_size && !info->has_user_indices) {
struct nv04_resource *buf = nv04_resource(info->index.resource);
assert(buf);
assert(nouveau_resource_mapped_by_gpu(&buf->base));
PUSH_SPACE(push, 6);
BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
PUSH_DATAh(push, buf->address);
PUSH_DATA (push, buf->address);
PUSH_DATAh(push, buf->address + buf->base.width0 - 1);
PUSH_DATA (push, buf->address + buf->base.width0 - 1);
PUSH_DATA (push, info->index_size >> 1);
BCTX_REFN(nvc0->bufctx_3d, 3D_IDX, buf, RD);
}
nvc0_state_validate_3d(nvc0, ~0); nvc0_state_validate_3d(nvc0, ~0);
if (nvc0->vertprog->vp.need_draw_parameters && !info->indirect) { if (nvc0->vertprog->vp.need_draw_parameters && !info->indirect) {
@ -1046,8 +1047,8 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
nvc0->base.vbo_dirty |= !!nvc0->vtxbufs_coherent; nvc0->base.vbo_dirty |= !!nvc0->vtxbufs_coherent;
if (!nvc0->base.vbo_dirty && nvc0->idxbuf.buffer && if (!nvc0->base.vbo_dirty && info->index_size && !info->has_user_indices &&
nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nvc0->base.vbo_dirty = true; nvc0->base.vbo_dirty = true;
nvc0_update_prim_restart(nvc0, info->primitive_restart, info->restart_index); nvc0_update_prim_restart(nvc0, info->primitive_restart, info->restart_index);
@ -1064,15 +1065,15 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
if (unlikely(info->count_from_stream_output)) { if (unlikely(info->count_from_stream_output)) {
nvc0_draw_stream_output(nvc0, info); nvc0_draw_stream_output(nvc0, info);
} else } else
if (info->indexed) { if (info->index_size) {
bool shorten = info->max_index <= 65535; bool shorten = info->max_index <= 65535;
if (info->primitive_restart && info->restart_index > 65535) if (info->primitive_restart && info->restart_index > 65535)
shorten = false; shorten = false;
nvc0_draw_elements(nvc0, shorten, nvc0_draw_elements(nvc0, shorten, info,
info->mode, info->start, info->count, info->mode, info->start, info->count,
info->instance_count, info->index_bias); info->instance_count, info->index_bias, info->index_size);
} else { } else {
nvc0_draw_arrays(nvc0, nvc0_draw_arrays(nvc0,
info->mode, info->start, info->count, info->mode, info->start, info->count,

View File

@ -83,14 +83,16 @@ nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
} }
static inline void static inline void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0) nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0,
const struct pipe_draw_info *info,
unsigned offset)
{ {
if (nvc0->idxbuf.buffer) { if (!info->has_user_indices) {
struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer); struct nv04_resource *buf = nv04_resource(info->index.resource);
ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base, ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD); buf, offset, NOUVEAU_BO_RD);
} else { } else {
ctx->idxbuf = nvc0->idxbuf.user_buffer; ctx->idxbuf = info->index.user;
} }
} }
@ -499,16 +501,16 @@ nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
*/ */
BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2); BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
PUSH_DATA (ctx.push, 1); PUSH_DATA (ctx.push, 1);
PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index); PUSH_DATA (ctx.push, info->index_size ? 0xffffffff : info->restart_index);
} else } else
if (nvc0->state.prim_restart) { if (nvc0->state.prim_restart) {
IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0); IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
} }
nvc0->state.prim_restart = info->primitive_restart; nvc0->state.prim_restart = info->primitive_restart;
if (info->indexed) { if (info->index_size) {
nvc0_push_map_idxbuf(&ctx, nvc0); nvc0_push_map_idxbuf(&ctx, nvc0, info, info->start * info->index_size);
index_size = nvc0->idxbuf.index_size; index_size = info->index_size;
} else { } else {
if (unlikely(info->count_from_stream_output)) { if (unlikely(info->count_from_stream_output)) {
struct pipe_context *pipe = &nvc0->base.pipe; struct pipe_context *pipe = &nvc0->base.pipe;
@ -583,8 +585,8 @@ nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0); IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
} }
if (info->indexed) if (info->index_size && !info->has_user_indices)
nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer)); nouveau_resource_unmap(nv04_resource(info->index.resource));
for (i = 0; i < nvc0->num_vtxbufs; ++i) for (i = 0; i < nvc0->num_vtxbufs; ++i)
nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource)); nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));
@ -626,7 +628,7 @@ nvc0_push_upload_vertex_ids(struct push_context *ctx,
uint64_t va; uint64_t va;
uint32_t *data; uint32_t *data;
uint32_t format; uint32_t format;
unsigned index_size = nvc0->idxbuf.index_size; unsigned index_size = info->index_size;
unsigned i; unsigned i;
unsigned a = nvc0->vertex->num_elements; unsigned a = nvc0->vertex->num_elements;
@ -639,11 +641,11 @@ nvc0_push_upload_vertex_ids(struct push_context *ctx,
bo); bo);
nouveau_pushbuf_validate(push); nouveau_pushbuf_validate(push);
if (info->indexed) { if (info->index_size) {
if (!info->index_bias) { if (!info->index_bias) {
memcpy(data, ctx->idxbuf, info->count * index_size); memcpy(data, ctx->idxbuf, info->count * index_size);
} else { } else {
switch (nvc0->idxbuf.index_size) { switch (info->index_size) {
case 1: case 1:
copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count); copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
break; break;

View File

@ -591,7 +591,6 @@ struct r300_context {
void *dsa_decompress_zmask; void *dsa_decompress_zmask;
struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
unsigned nr_vertex_buffers; unsigned nr_vertex_buffers;
struct u_upload_mgr *uploader; struct u_upload_mgr *uploader;
@ -733,7 +732,7 @@ void r300_stop_query(struct r300_context *r300);
/* r300_render_translate.c */ /* r300_render_translate.c */
void r300_translate_index_buffer(struct r300_context *r300, void r300_translate_index_buffer(struct r300_context *r300,
struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
struct pipe_resource **out_index_buffer, struct pipe_resource **out_index_buffer,
unsigned *index_size, unsigned index_offset, unsigned *index_size, unsigned index_offset,
unsigned *start, unsigned count); unsigned *start, unsigned count);

View File

@ -501,7 +501,7 @@ static void r300_draw_elements_immediate(struct r300_context *r300,
const uint8_t *ptr1; const uint8_t *ptr1;
const uint16_t *ptr2; const uint16_t *ptr2;
const uint32_t *ptr4; const uint32_t *ptr4;
unsigned index_size = r300->index_buffer.index_size; unsigned index_size = info->index_size;
unsigned i, count_dwords = index_size == 4 ? info->count : unsigned i, count_dwords = index_size == 4 ? info->count :
(info->count + 1) / 2; (info->count + 1) / 2;
CS_LOCALS(r300); CS_LOCALS(r300);
@ -519,7 +519,7 @@ static void r300_draw_elements_immediate(struct r300_context *r300,
switch (index_size) { switch (index_size) {
case 1: case 1:
ptr1 = (uint8_t*)r300->index_buffer.user_buffer; ptr1 = (uint8_t*)info->index.user;
ptr1 += info->start; ptr1 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) | OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
@ -543,7 +543,7 @@ static void r300_draw_elements_immediate(struct r300_context *r300,
break; break;
case 2: case 2:
ptr2 = (uint16_t*)r300->index_buffer.user_buffer; ptr2 = (uint16_t*)info->index.user;
ptr2 += info->start; ptr2 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) | OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
@ -562,7 +562,7 @@ static void r300_draw_elements_immediate(struct r300_context *r300,
break; break;
case 4: case 4:
ptr4 = (uint32_t*)r300->index_buffer.user_buffer; ptr4 = (uint32_t*)info->index.user;
ptr4 += info->start; ptr4 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) | OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
@ -584,8 +584,9 @@ static void r300_draw_elements(struct r300_context *r300,
const struct pipe_draw_info *info, const struct pipe_draw_info *info,
int instance_id) int instance_id)
{ {
struct pipe_resource *indexBuffer = r300->index_buffer.buffer; struct pipe_resource *indexBuffer =
unsigned indexSize = r300->index_buffer.index_size; info->has_user_indices ? NULL : info->index.resource;
unsigned indexSize = info->index_size;
struct pipe_resource* orgIndexBuffer = indexBuffer; struct pipe_resource* orgIndexBuffer = indexBuffer;
unsigned start = info->start; unsigned start = info->start;
unsigned count = info->count; unsigned count = info->count;
@ -600,7 +601,7 @@ static void r300_draw_elements(struct r300_context *r300,
&index_offset); &index_offset);
} }
r300_translate_index_buffer(r300, &r300->index_buffer, &indexBuffer, r300_translate_index_buffer(r300, info, &indexBuffer,
&indexSize, index_offset, &start, count); &indexSize, index_offset, &start, count);
/* Fallback for misaligned ushort indices. */ /* Fallback for misaligned ushort indices. */
@ -621,10 +622,10 @@ static void r300_draw_elements(struct r300_context *r300,
count, (uint8_t*)ptr); count, (uint8_t*)ptr);
} }
} else { } else {
if (r300->index_buffer.user_buffer) if (info->has_user_indices)
r300_upload_index_buffer(r300, &indexBuffer, indexSize, r300_upload_index_buffer(r300, &indexBuffer, indexSize,
&start, count, &start, count,
r300->index_buffer.user_buffer); info->index.user);
} }
/* 19 dwords for emit_draw_elements. Give up if the function fails. */ /* 19 dwords for emit_draw_elements. Give up if the function fails. */
@ -792,7 +793,7 @@ static void r300_draw_vbo(struct pipe_context* pipe,
r300_update_derived_state(r300); r300_update_derived_state(r300);
/* Draw. */ /* Draw. */
if (info.indexed) { if (info.index_size) {
unsigned max_count = r300_max_vertex_count(r300); unsigned max_count = r300_max_vertex_count(r300);
if (!max_count) { if (!max_count) {
@ -807,11 +808,9 @@ static void r300_draw_vbo(struct pipe_context* pipe,
} }
info.max_index = max_count - 1; info.max_index = max_count - 1;
info.start += r300->index_buffer.offset / r300->index_buffer.index_size;
if (info.instance_count <= 1) { if (info.instance_count <= 1) {
if (info.count <= 8 && if (info.count <= 8 && info.has_user_indices) {
r300->index_buffer.user_buffer) {
r300_draw_elements_immediate(r300, &info); r300_draw_elements_immediate(r300, &info);
} else { } else {
r300_draw_elements(r300, &info, -1); r300_draw_elements(r300, &info, -1);
@ -850,6 +849,14 @@ static void r300_swtcl_draw_vbo(struct pipe_context* pipe,
if (!u_trim_pipe_prim(info->mode, (unsigned*)&info->count)) if (!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
return; return;
if (info->index_size) {
draw_set_indexes(r300->draw,
info->has_user_indices ?
info->index.user :
r300_resource(info->index.resource)->malloced_buffer,
info->index_size, ~0);
}
r300_update_derived_state(r300); r300_update_derived_state(r300);
draw_vbo(r300->draw, info); draw_vbo(r300->draw, info);

View File

@ -26,7 +26,7 @@
void r300_translate_index_buffer(struct r300_context *r300, void r300_translate_index_buffer(struct r300_context *r300,
struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
struct pipe_resource **out_buffer, struct pipe_resource **out_buffer,
unsigned *index_size, unsigned index_offset, unsigned *index_size, unsigned index_offset,
unsigned *start, unsigned count) unsigned *start, unsigned count)
@ -41,7 +41,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
&out_offset, out_buffer, &ptr); &out_offset, out_buffer, &ptr);
util_shorten_ubyte_elts_to_userptr( util_shorten_ubyte_elts_to_userptr(
&r300->context, ib, PIPE_TRANSFER_UNSYNCHRONIZED, index_offset, &r300->context, info, PIPE_TRANSFER_UNSYNCHRONIZED, index_offset,
*start, count, ptr); *start, count, ptr);
*index_size = 2; *index_size = 2;
@ -54,7 +54,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
u_upload_alloc(r300->uploader, 0, count * 2, 4, u_upload_alloc(r300->uploader, 0, count * 2, 4,
&out_offset, out_buffer, &ptr); &out_offset, out_buffer, &ptr);
util_rebuild_ushort_elts_to_userptr(&r300->context, ib, util_rebuild_ushort_elts_to_userptr(&r300->context, info,
PIPE_TRANSFER_UNSYNCHRONIZED, PIPE_TRANSFER_UNSYNCHRONIZED,
index_offset, *start, index_offset, *start,
count, ptr); count, ptr);
@ -69,7 +69,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
u_upload_alloc(r300->uploader, 0, count * 4, 4, u_upload_alloc(r300->uploader, 0, count * 4, 4,
&out_offset, out_buffer, &ptr); &out_offset, out_buffer, &ptr);
util_rebuild_uint_elts_to_userptr(&r300->context, ib, util_rebuild_uint_elts_to_userptr(&r300->context, info,
PIPE_TRANSFER_UNSYNCHRONIZED, PIPE_TRANSFER_UNSYNCHRONIZED,
index_offset, *start, index_offset, *start,
count, ptr); count, ptr);

View File

@ -1783,37 +1783,6 @@ static void r300_set_vertex_buffers_swtcl(struct pipe_context* pipe,
} }
} }
static void r300_set_index_buffer_hwtcl(struct pipe_context* pipe,
const struct pipe_index_buffer *ib)
{
struct r300_context* r300 = r300_context(pipe);
if (ib) {
pipe_resource_reference(&r300->index_buffer.buffer, ib->buffer);
memcpy(&r300->index_buffer, ib, sizeof(*ib));
} else {
pipe_resource_reference(&r300->index_buffer.buffer, NULL);
}
}
static void r300_set_index_buffer_swtcl(struct pipe_context* pipe,
const struct pipe_index_buffer *ib)
{
struct r300_context* r300 = r300_context(pipe);
if (ib) {
const void *buf = NULL;
if (ib->user_buffer) {
buf = ib->user_buffer;
} else if (ib->buffer) {
buf = r300_resource(ib->buffer)->malloced_buffer;
}
draw_set_indexes(r300->draw,
(const ubyte *) buf + ib->offset,
ib->index_size, ~0);
}
}
/* Initialize the PSC tables. */ /* Initialize the PSC tables. */
static void r300_vertex_psc(struct r300_vertex_element_state *velems) static void r300_vertex_psc(struct r300_vertex_element_state *velems)
{ {
@ -2125,10 +2094,8 @@ void r300_init_state_functions(struct r300_context* r300)
if (r300->screen->caps.has_tcl) { if (r300->screen->caps.has_tcl) {
r300->context.set_vertex_buffers = r300_set_vertex_buffers_hwtcl; r300->context.set_vertex_buffers = r300_set_vertex_buffers_hwtcl;
r300->context.set_index_buffer = r300_set_index_buffer_hwtcl;
} else { } else {
r300->context.set_vertex_buffers = r300_set_vertex_buffers_swtcl; r300->context.set_vertex_buffers = r300_set_vertex_buffers_swtcl;
r300->context.set_index_buffer = r300_set_index_buffer_swtcl;
} }
r300->context.create_vertex_elements_state = r300_create_vertex_elements_state; r300->context.create_vertex_elements_state = r300_create_vertex_elements_state;

View File

@ -509,9 +509,6 @@ struct r600_context {
* the GPU addresses are updated. */ * the GPU addresses are updated. */
struct list_head texture_buffers; struct list_head texture_buffers;
/* Index buffer. */
struct pipe_index_buffer index_buffer;
/* Last draw state (-1 = unset). */ /* Last draw state (-1 = unset). */
enum pipe_prim_type last_primitive_type; /* Last primitive type used in draw_vbo. */ enum pipe_prim_type last_primitive_type; /* Last primitive type used in draw_vbo. */
enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */ enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */

View File

@ -523,20 +523,6 @@ static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
FREE(shader); FREE(shader);
} }
static void r600_set_index_buffer(struct pipe_context *ctx,
const struct pipe_index_buffer *ib)
{
struct r600_context *rctx = (struct r600_context *)ctx;
if (ib) {
pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
memcpy(&rctx->index_buffer, ib, sizeof(*ib));
r600_context_add_resource_size(ctx, ib->buffer);
} else {
pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
}
}
void r600_vertex_buffers_dirty(struct r600_context *rctx) void r600_vertex_buffers_dirty(struct r600_context *rctx)
{ {
if (rctx->vertex_buffer_state.dirty_mask) { if (rctx->vertex_buffer_state.dirty_mask) {
@ -1702,14 +1688,16 @@ static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info) static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{ {
struct r600_context *rctx = (struct r600_context *)ctx; struct r600_context *rctx = (struct r600_context *)ctx;
struct pipe_index_buffer ib = {}; struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
struct radeon_winsys_cs *cs = rctx->b.gfx.cs; struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off; bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
bool has_user_indices = info->has_user_indices;
uint64_t mask; uint64_t mask;
unsigned num_patches, dirty_tex_counter; unsigned num_patches, dirty_tex_counter, index_offset = 0;
unsigned index_size = info->index_size;
int index_bias; int index_bias;
if (!info->indirect && !info->count && (info->indexed || !info->count_from_stream_output)) { if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) {
return; return;
} }
@ -1747,18 +1735,11 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
: (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] : (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
: info->mode; : info->mode;
if (info->indexed) { if (index_size) {
/* Initialize the index buffer struct. */ index_offset += info->start * index_size;
pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
ib.user_buffer = rctx->index_buffer.user_buffer;
ib.index_size = rctx->index_buffer.index_size;
ib.offset = rctx->index_buffer.offset;
if (!info->indirect) {
ib.offset += info->start * ib.index_size;
}
/* Translate 8-bit indices to 16-bit. */ /* Translate 8-bit indices to 16-bit. */
if (unlikely(ib.index_size == 1)) { if (unlikely(index_size == 1)) {
struct pipe_resource *out_buffer = NULL; struct pipe_resource *out_buffer = NULL;
unsigned out_offset; unsigned out_offset;
void *ptr; void *ptr;
@ -1775,7 +1756,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
PIPE_TRANSFER_READ); PIPE_TRANSFER_READ);
if (data) { if (data) {
data += info->indirect->offset / sizeof(unsigned); data += info->indirect->offset / sizeof(unsigned);
start = data[2] * ib.index_size; start = data[2] * index_size;
count = data[0]; count = data[0];
} }
else { else {
@ -1786,19 +1767,16 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
u_upload_alloc(ctx->stream_uploader, start, count * 2, u_upload_alloc(ctx->stream_uploader, start, count * 2,
256, &out_offset, &out_buffer, &ptr); 256, &out_offset, &out_buffer, &ptr);
if (unlikely(!ptr)) { if (unlikely(!ptr))
pipe_resource_reference(&ib.buffer, NULL);
return; return;
}
util_shorten_ubyte_elts_to_userptr( util_shorten_ubyte_elts_to_userptr(
&rctx->b.b, &ib, 0, 0, ib.offset + start, count, ptr); &rctx->b.b, info, 0, 0, index_offset, count, ptr);
pipe_resource_reference(&ib.buffer, NULL); indexbuf = out_buffer;
ib.user_buffer = NULL; index_offset = out_offset;
ib.buffer = out_buffer; index_size = 2;
ib.offset = out_offset; has_user_indices = false;
ib.index_size = 2;
} }
/* Upload the index buffer. /* Upload the index buffer.
@ -1806,13 +1784,14 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
* and the indices are emitted via PKT3_DRAW_INDEX_IMMD. * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
* Indirect draws never use immediate indices. * Indirect draws never use immediate indices.
* Note: Instanced rendering in combination with immediate indices hangs. */ * Note: Instanced rendering in combination with immediate indices hangs. */
if (ib.user_buffer && (R600_BIG_ENDIAN || info->indirect || if (has_user_indices && (R600_BIG_ENDIAN || info->indirect ||
info->instance_count > 1 || info->instance_count > 1 ||
info->count*ib.index_size > 20)) { info->count*index_size > 20)) {
indexbuf = NULL;
u_upload_data(ctx->stream_uploader, 0, u_upload_data(ctx->stream_uploader, 0,
info->count * ib.index_size, 256, info->count * index_size, 256,
ib.user_buffer, &ib.offset, &ib.buffer); info->index.user, &index_offset, &indexbuf);
ib.user_buffer = NULL; has_user_indices = false;
} }
index_bias = info->index_bias; index_bias = info->index_bias;
} else { } else {
@ -1840,7 +1819,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
evergreen_setup_tess_constants(rctx, info, &num_patches); evergreen_setup_tess_constants(rctx, info, &num_patches);
/* Emit states. */ /* Emit states. */
r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE); r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE);
r600_flush_emit(rctx); r600_flush_emit(rctx);
mask = rctx->dirty_atoms; mask = rctx->dirty_atoms;
@ -1937,21 +1916,21 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
RADEON_PRIO_DRAW_INDIRECT)); RADEON_PRIO_DRAW_INDIRECT));
} }
if (info->indexed) { if (index_size) {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0)); radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
radeon_emit(cs, ib.index_size == 4 ? radeon_emit(cs, index_size == 4 ?
(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) : (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0))); (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
if (ib.user_buffer) { if (has_user_indices) {
unsigned size_bytes = info->count*ib.index_size; unsigned size_bytes = info->count*index_size;
unsigned size_dw = align(size_bytes, 4) / 4; unsigned size_dw = align(size_bytes, 4) / 4;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit)); radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
radeon_emit(cs, info->count); radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE); radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
radeon_emit_array(cs, ib.user_buffer, size_dw); radeon_emit_array(cs, info->index.user, size_dw);
} else { } else {
uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset; uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
if (likely(!info->indirect)) { if (likely(!info->indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit)); radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
@ -1961,12 +1940,12 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA); radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
(struct r600_resource*)ib.buffer, (struct r600_resource*)indexbuf,
RADEON_USAGE_READ, RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER)); RADEON_PRIO_INDEX_BUFFER));
} }
else { else {
uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size; uint32_t max_size = (indexbuf->width0 - index_offset) / index_size;
radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0)); radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, va); radeon_emit(cs, va);
@ -1974,7 +1953,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
(struct r600_resource*)ib.buffer, (struct r600_resource*)indexbuf,
RADEON_USAGE_READ, RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER)); RADEON_PRIO_INDEX_BUFFER));
@ -2064,7 +2043,8 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
rctx->framebuffer.do_update_surf_dirtiness = false; rctx->framebuffer.do_update_surf_dirtiness = false;
} }
pipe_resource_reference(&ib.buffer, NULL); if (index_size && indexbuf != info->index.resource)
pipe_resource_reference(&indexbuf, NULL);
rctx->b.num_draw_calls++; rctx->b.num_draw_calls++;
} }
@ -2971,7 +2951,6 @@ void r600_init_common_state_functions(struct r600_context *rctx)
rctx->b.b.set_sample_mask = r600_set_sample_mask; rctx->b.b.set_sample_mask = r600_set_sample_mask;
rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref; rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers; rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
rctx->b.b.set_index_buffer = r600_set_index_buffer;
rctx->b.b.set_sampler_views = r600_set_sampler_views; rctx->b.b.set_sampler_views = r600_set_sampler_views;
rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy; rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
rctx->b.b.texture_barrier = r600_texture_barrier; rctx->b.b.texture_barrier = r600_texture_barrier;

View File

@ -314,7 +314,6 @@ struct si_context {
/* Vertex and index buffers. */ /* Vertex and index buffers. */
bool vertex_buffers_dirty; bool vertex_buffers_dirty;
bool vertex_buffer_pointer_dirty; bool vertex_buffer_pointer_dirty;
struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS]; struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS];
/* MSAA config state. */ /* MSAA config state. */

View File

@ -3879,24 +3879,6 @@ static void si_set_vertex_buffers(struct pipe_context *ctx,
sctx->vertex_buffers_dirty = true; sctx->vertex_buffers_dirty = true;
} }
static void si_set_index_buffer(struct pipe_context *ctx,
const struct pipe_index_buffer *ib)
{
struct si_context *sctx = (struct si_context *)ctx;
if (ib) {
struct pipe_resource *buf = ib->buffer;
pipe_resource_reference(&sctx->index_buffer.buffer, buf);
memcpy(&sctx->index_buffer, ib, sizeof(*ib));
r600_context_add_resource_size(ctx, buf);
if (buf)
r600_resource(buf)->bind_history |= PIPE_BIND_INDEX_BUFFER;
} else {
pipe_resource_reference(&sctx->index_buffer.buffer, NULL);
}
}
/* /*
* Misc * Misc
*/ */
@ -4051,7 +4033,6 @@ void si_init_state_functions(struct si_context *sctx)
sctx->b.b.bind_vertex_elements_state = si_bind_vertex_elements; sctx->b.b.bind_vertex_elements_state = si_bind_vertex_elements;
sctx->b.b.delete_vertex_elements_state = si_delete_vertex_element; sctx->b.b.delete_vertex_elements_state = si_delete_vertex_element;
sctx->b.b.set_vertex_buffers = si_set_vertex_buffers; sctx->b.b.set_vertex_buffers = si_set_vertex_buffers;
sctx->b.b.set_index_buffer = si_set_index_buffer;
sctx->b.b.texture_barrier = si_texture_barrier; sctx->b.b.texture_barrier = si_texture_barrier;
sctx->b.b.memory_barrier = si_memory_barrier; sctx->b.b.memory_barrier = si_memory_barrier;

View File

@ -552,7 +552,7 @@ static void si_emit_vs_state(struct si_context *sctx,
const struct pipe_draw_info *info) const struct pipe_draw_info *info)
{ {
sctx->current_vs_state &= C_VS_STATE_INDEXED; sctx->current_vs_state &= C_VS_STATE_INDEXED;
sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->indexed); sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);
if (sctx->current_vs_state != sctx->last_vs_state) { if (sctx->current_vs_state != sctx->last_vs_state) {
struct radeon_winsys_cs *cs = sctx->b.gfx.cs; struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
@ -625,7 +625,9 @@ static void si_emit_draw_registers(struct si_context *sctx,
static void si_emit_draw_packets(struct si_context *sctx, static void si_emit_draw_packets(struct si_context *sctx,
const struct pipe_draw_info *info, const struct pipe_draw_info *info,
const struct pipe_index_buffer *ib) struct pipe_resource *indexbuf,
unsigned index_size,
unsigned index_offset)
{ {
struct pipe_draw_indirect_info *indirect = info->indirect; struct pipe_draw_indirect_info *indirect = info->indirect;
struct radeon_winsys_cs *cs = sctx->b.gfx.cs; struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
@ -658,12 +660,12 @@ static void si_emit_draw_packets(struct si_context *sctx,
} }
/* draw packet */ /* draw packet */
if (info->indexed) { if (index_size) {
if (ib->index_size != sctx->last_index_size) { if (index_size != sctx->last_index_size) {
unsigned index_type; unsigned index_type;
/* index type */ /* index type */
switch (ib->index_size) { switch (index_size) {
case 1: case 1:
index_type = V_028A7C_VGT_INDEX_8; index_type = V_028A7C_VGT_INDEX_8;
break; break;
@ -690,15 +692,15 @@ static void si_emit_draw_packets(struct si_context *sctx,
radeon_emit(cs, index_type); radeon_emit(cs, index_type);
} }
sctx->last_index_size = ib->index_size; sctx->last_index_size = index_size;
} }
index_max_size = (ib->buffer->width0 - ib->offset) / index_max_size = (indexbuf->width0 - index_offset) /
ib->index_size; index_size;
index_va = r600_resource(ib->buffer)->gpu_address + ib->offset; index_va = r600_resource(indexbuf)->gpu_address + index_offset;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource *)ib->buffer, (struct r600_resource *)indexbuf,
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER); RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else { } else {
/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE, /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
@ -724,12 +726,12 @@ static void si_emit_draw_packets(struct si_context *sctx,
(struct r600_resource *)indirect->buffer, (struct r600_resource *)indirect->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT); RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
unsigned di_src_sel = info->indexed ? V_0287F0_DI_SRC_SEL_DMA unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX; : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
assert(indirect->offset % 4 == 0); assert(indirect->offset % 4 == 0);
if (info->indexed) { if (index_size) {
radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0)); radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, index_va); radeon_emit(cs, index_va);
radeon_emit(cs, index_va >> 32); radeon_emit(cs, index_va >> 32);
@ -739,7 +741,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
} }
if (!sctx->screen->has_draw_indirect_multi) { if (!sctx->screen->has_draw_indirect_multi) {
radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT
: PKT3_DRAW_INDIRECT, : PKT3_DRAW_INDIRECT,
3, render_cond_bit)); 3, render_cond_bit));
radeon_emit(cs, indirect->offset); radeon_emit(cs, indirect->offset);
@ -760,7 +762,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset; count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
} }
radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI : radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
PKT3_DRAW_INDIRECT_MULTI, PKT3_DRAW_INDIRECT_MULTI,
8, render_cond_bit)); 8, render_cond_bit));
radeon_emit(cs, indirect->offset); radeon_emit(cs, indirect->offset);
@ -782,7 +784,7 @@ static void si_emit_draw_packets(struct si_context *sctx,
radeon_emit(cs, info->instance_count); radeon_emit(cs, info->instance_count);
/* Base vertex and start instance. */ /* Base vertex and start instance. */
base_vertex = info->indexed ? info->index_bias : info->start; base_vertex = index_size ? info->index_bias : info->start;
if (base_vertex != sctx->last_base_vertex || if (base_vertex != sctx->last_base_vertex ||
sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN || sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
@ -800,8 +802,8 @@ static void si_emit_draw_packets(struct si_context *sctx,
sctx->last_sh_base_reg = sh_base_reg; sctx->last_sh_base_reg = sh_base_reg;
} }
if (info->indexed) { if (index_size) {
index_va += info->start * ib->index_size; index_va += info->start * index_size;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit)); radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
radeon_emit(cs, index_max_size); radeon_emit(cs, index_max_size);
@ -1160,11 +1162,12 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{ {
struct si_context *sctx = (struct si_context *)ctx; struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer; struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
const struct pipe_index_buffer *ib = &sctx->index_buffer; struct pipe_resource *indexbuf = info->index.resource;
struct pipe_index_buffer ib_tmp; /* for index buffer uploads only */
unsigned mask, dirty_tex_counter; unsigned mask, dirty_tex_counter;
enum pipe_prim_type rast_prim; enum pipe_prim_type rast_prim;
unsigned num_patches = 0; unsigned num_patches = 0;
unsigned index_size = info->index_size;
unsigned index_offset = info->indirect ? info->start * index_size : 0;
if (likely(!info->indirect)) { if (likely(!info->indirect)) {
/* SI-CI treat instance_count==0 as instance_count==1. There is /* SI-CI treat instance_count==0 as instance_count==1. There is
@ -1176,7 +1179,7 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
/* Handle count == 0. */ /* Handle count == 0. */
if (unlikely(!info->count && if (unlikely(!info->count &&
(info->indexed || !info->count_from_stream_output))) (index_size || !info->count_from_stream_output)))
return; return;
} }
@ -1248,58 +1251,55 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
if (!si_upload_graphics_shader_descriptors(sctx)) if (!si_upload_graphics_shader_descriptors(sctx))
return; return;
ib_tmp.buffer = NULL; if (index_size) {
if (info->indexed) {
/* Translate or upload, if needed. */ /* Translate or upload, if needed. */
/* 8-bit indices are supported on VI. */ /* 8-bit indices are supported on VI. */
if (sctx->b.chip_class <= CIK && ib->index_size == 1) { if (sctx->b.chip_class <= CIK && index_size == 1) {
unsigned start, count, start_offset, size; unsigned start, count, start_offset, size, offset;
void *ptr; void *ptr;
si_get_draw_start_count(sctx, info, &start, &count); si_get_draw_start_count(sctx, info, &start, &count);
start_offset = start * 2; start_offset = start * 2;
size = count * 2; size = count * 2;
indexbuf = NULL;
u_upload_alloc(ctx->stream_uploader, start_offset, u_upload_alloc(ctx->stream_uploader, start_offset,
size, size,
si_optimal_tcc_alignment(sctx, size), si_optimal_tcc_alignment(sctx, size),
&ib_tmp.offset, &ib_tmp.buffer, &ptr); &offset, &indexbuf, &ptr);
if (!ib_tmp.buffer) if (!indexbuf)
return; return;
util_shorten_ubyte_elts_to_userptr(&sctx->b.b, ib, 0, 0, util_shorten_ubyte_elts_to_userptr(&sctx->b.b, info, 0, 0,
ib->offset + start, index_offset + start,
count, ptr); count, ptr);
/* info->start will be added by the drawing code */ /* info->start will be added by the drawing code */
ib_tmp.offset -= start_offset; index_offset = offset - start_offset;
ib_tmp.index_size = 2; index_size = 2;
ib = &ib_tmp; } else if (info->has_user_indices) {
} else if (ib->user_buffer && !ib->buffer) {
unsigned start_offset; unsigned start_offset;
assert(!info->indirect); assert(!info->indirect);
start_offset = info->start * ib->index_size; start_offset = info->start * index_size;
indexbuf = NULL;
u_upload_data(ctx->stream_uploader, start_offset, u_upload_data(ctx->stream_uploader, start_offset,
info->count * ib->index_size, info->count * index_size,
sctx->screen->b.info.tcc_cache_line_size, sctx->screen->b.info.tcc_cache_line_size,
(char*)ib->user_buffer + start_offset, (char*)info->index.user + start_offset,
&ib_tmp.offset, &ib_tmp.buffer); &index_offset, &indexbuf);
if (!ib_tmp.buffer) if (!indexbuf)
return; return;
/* info->start will be added by the drawing code */ /* info->start will be added by the drawing code */
ib_tmp.offset -= start_offset; index_offset -= start_offset;
ib_tmp.index_size = ib->index_size;
ib = &ib_tmp;
} else if (sctx->b.chip_class <= CIK && } else if (sctx->b.chip_class <= CIK &&
r600_resource(ib->buffer)->TC_L2_dirty) { r600_resource(indexbuf)->TC_L2_dirty) {
/* VI reads index buffers through TC L2, so it doesn't /* VI reads index buffers through TC L2, so it doesn't
* need this. */ * need this. */
sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2; sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
r600_resource(ib->buffer)->TC_L2_dirty = false; r600_resource(indexbuf)->TC_L2_dirty = false;
} }
} }
@ -1370,7 +1370,7 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
si_emit_draw_registers(sctx, info, num_patches); si_emit_draw_registers(sctx, info, num_patches);
si_ce_pre_draw_synchronization(sctx); si_ce_pre_draw_synchronization(sctx);
si_emit_draw_packets(sctx, info, ib); si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
si_ce_post_draw_synchronization(sctx); si_ce_post_draw_synchronization(sctx);
if (sctx->trace_buf) if (sctx->trace_buf)
@ -1416,12 +1416,13 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
sctx->framebuffer.do_update_surf_dirtiness = false; sctx->framebuffer.do_update_surf_dirtiness = false;
} }
pipe_resource_reference(&ib_tmp.buffer, NULL);
sctx->b.num_draw_calls++; sctx->b.num_draw_calls++;
if (info->primitive_restart) if (info->primitive_restart)
sctx->b.num_prim_restart_calls++; sctx->b.num_prim_restart_calls++;
if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size)) if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
sctx->b.num_spill_draw_calls++; sctx->b.num_spill_draw_calls++;
if (index_size && indexbuf != info->index.resource)
pipe_resource_reference(&indexbuf, NULL);
} }
void si_trace_emit(struct si_context *sctx) void si_trace_emit(struct si_context *sctx)

View File

@ -793,25 +793,6 @@ rbug_set_vertex_buffers(struct pipe_context *_pipe,
mtx_unlock(&rb_pipe->call_mutex); mtx_unlock(&rb_pipe->call_mutex);
} }
static void
rbug_set_index_buffer(struct pipe_context *_pipe,
const struct pipe_index_buffer *_ib)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
struct pipe_index_buffer unwrapped_ib, *ib = NULL;
if (_ib) {
unwrapped_ib = *_ib;
unwrapped_ib.buffer = rbug_resource_unwrap(_ib->buffer);
ib = &unwrapped_ib;
}
mtx_lock(&rb_pipe->call_mutex);
pipe->set_index_buffer(pipe, ib);
mtx_unlock(&rb_pipe->call_mutex);
}
static void static void
rbug_set_sample_mask(struct pipe_context *_pipe, rbug_set_sample_mask(struct pipe_context *_pipe,
unsigned sample_mask) unsigned sample_mask)
@ -1260,7 +1241,6 @@ rbug_context_create(struct pipe_screen *_screen, struct pipe_context *pipe)
rb_pipe->base.set_viewport_states = rbug_set_viewport_states; rb_pipe->base.set_viewport_states = rbug_set_viewport_states;
rb_pipe->base.set_sampler_views = rbug_set_sampler_views; rb_pipe->base.set_sampler_views = rbug_set_sampler_views;
rb_pipe->base.set_vertex_buffers = rbug_set_vertex_buffers; rb_pipe->base.set_vertex_buffers = rbug_set_vertex_buffers;
rb_pipe->base.set_index_buffer = rbug_set_index_buffer;
rb_pipe->base.set_sample_mask = rbug_set_sample_mask; rb_pipe->base.set_sample_mask = rbug_set_sample_mask;
rb_pipe->base.create_stream_output_target = rbug_create_stream_output_target; rb_pipe->base.create_stream_output_target = rbug_create_stream_output_target;
rb_pipe->base.stream_output_target_destroy = rbug_stream_output_target_destroy; rb_pipe->base.stream_output_target_destroy = rbug_stream_output_target_destroy;

View File

@ -88,7 +88,6 @@ struct softpipe_context {
struct pipe_shader_buffer buffers[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS]; struct pipe_shader_buffer buffers[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS]; struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS];
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
struct pipe_index_buffer index_buffer;
struct pipe_resource *mapped_vs_tex[PIPE_MAX_SHADER_SAMPLER_VIEWS]; struct pipe_resource *mapped_vs_tex[PIPE_MAX_SHADER_SAMPLER_VIEWS];
struct pipe_resource *mapped_gs_tex[PIPE_MAX_SHADER_SAMPLER_VIEWS]; struct pipe_resource *mapped_gs_tex[PIPE_MAX_SHADER_SAMPLER_VIEWS];

View File

@ -96,21 +96,17 @@ softpipe_draw_vbo(struct pipe_context *pipe,
} }
/* Map index buffer, if present */ /* Map index buffer, if present */
if (info->indexed) { if (info->index_size) {
unsigned available_space = ~0; unsigned available_space = ~0;
mapped_indices = sp->index_buffer.user_buffer; mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices) { if (!mapped_indices) {
mapped_indices = softpipe_resource_data(sp->index_buffer.buffer); mapped_indices = softpipe_resource_data(info->index.resource);
if (sp->index_buffer.buffer->width0 > sp->index_buffer.offset) available_space = info->index.resource->width0;
available_space =
(sp->index_buffer.buffer->width0 - sp->index_buffer.offset);
else
available_space = 0;
} }
draw_set_indexes(draw, draw_set_indexes(draw,
(ubyte *) mapped_indices + sp->index_buffer.offset, (ubyte *) mapped_indices,
sp->index_buffer.index_size, available_space); info->index_size, available_space);
} }

View File

@ -97,19 +97,6 @@ softpipe_set_vertex_buffers(struct pipe_context *pipe,
} }
static void
softpipe_set_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *ib)
{
struct softpipe_context *softpipe = softpipe_context(pipe);
if (ib)
memcpy(&softpipe->index_buffer, ib, sizeof(softpipe->index_buffer));
else
memset(&softpipe->index_buffer, 0, sizeof(softpipe->index_buffer));
}
void void
softpipe_init_vertex_funcs(struct pipe_context *pipe) softpipe_init_vertex_funcs(struct pipe_context *pipe)
{ {
@ -118,5 +105,4 @@ softpipe_init_vertex_funcs(struct pipe_context *pipe)
pipe->delete_vertex_elements_state = softpipe_delete_vertex_elements_state; pipe->delete_vertex_elements_state = softpipe_delete_vertex_elements_state;
pipe->set_vertex_buffers = softpipe_set_vertex_buffers; pipe->set_vertex_buffers = softpipe_set_vertex_buffers;
pipe->set_index_buffer = softpipe_set_index_buffer;
} }

View File

@ -268,7 +268,6 @@ struct svga_state
struct svga_geometry_shader *gs; /* derived GS */ struct svga_geometry_shader *gs; /* derived GS */
struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
struct pipe_index_buffer ib;
/** Constant buffers for each shader. /** Constant buffers for each shader.
* The size should probably always match with that of * The size should probably always match with that of
* svga_shader_emitter_v10.num_shader_consts. * svga_shader_emitter_v10.num_shader_consts.

View File

@ -171,13 +171,13 @@ static boolean
need_fallback_prim_restart(const struct svga_context *svga, need_fallback_prim_restart(const struct svga_context *svga,
const struct pipe_draw_info *info) const struct pipe_draw_info *info)
{ {
if (info->primitive_restart && info->indexed) { if (info->primitive_restart && info->index_size) {
if (!svga_have_vgpu10(svga)) if (!svga_have_vgpu10(svga))
return TRUE; return TRUE;
else if (!svga->state.sw.need_swtnl) { else if (!svga->state.sw.need_swtnl) {
if (svga->curr.ib.index_size == 1) if (info->index_size == 1)
return TRUE; /* no device support for 1-byte indexes */ return TRUE; /* no device support for 1-byte indexes */
else if (svga->curr.ib.index_size == 2) else if (info->index_size == 2)
return info->restart_index != 0xffff; return info->restart_index != 0xffff;
else else
return info->restart_index != 0xffffffff; return info->restart_index != 0xffffffff;
@ -196,6 +196,8 @@ svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
unsigned count = info->count; unsigned count = info->count;
enum pipe_error ret = 0; enum pipe_error ret = 0;
boolean needed_swtnl; boolean needed_swtnl;
struct pipe_resource *indexbuf =
info->has_user_indices ? NULL : info->index.resource;
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO); SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO);
@ -206,11 +208,10 @@ svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
goto done; goto done;
/* Upload a user index buffer. */ /* Upload a user index buffer. */
struct pipe_index_buffer ibuffer_saved = {0}; unsigned index_offset = 0;
if (info->indexed && svga->curr.ib.user_buffer && if (info->index_size && info->has_user_indices &&
!util_save_and_upload_index_buffer(pipe, info, &svga->curr.ib, !util_upload_index_buffer(pipe, info, &indexbuf, &index_offset)) {
&ibuffer_saved)) { goto done;
return;
} }
/* /*
@ -229,7 +230,7 @@ svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
if (need_fallback_prim_restart(svga, info)) { if (need_fallback_prim_restart(svga, info)) {
enum pipe_error r; enum pipe_error r;
r = util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info); r = util_draw_vbo_without_prim_restart(pipe, info);
assert(r == PIPE_OK); assert(r == PIPE_OK);
(void) r; (void) r;
goto done; goto done;
@ -258,18 +259,18 @@ svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
/* Avoid leaking the previous hwtnl bias to swtnl */ /* Avoid leaking the previous hwtnl bias to swtnl */
svga_hwtnl_set_index_bias( svga->hwtnl, 0 ); svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
ret = svga_swtnl_draw_vbo( svga, info ); ret = svga_swtnl_draw_vbo(svga, info, indexbuf, index_offset);
} }
else { else {
if (info->indexed && svga->curr.ib.buffer) { if (info->index_size && indexbuf) {
unsigned offset; unsigned offset;
assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0); assert(index_offset % info->index_size == 0);
offset = svga->curr.ib.offset / svga->curr.ib.index_size; offset = index_offset / info->index_size;
ret = retry_draw_range_elements( svga, ret = retry_draw_range_elements( svga,
svga->curr.ib.buffer, indexbuf,
svga->curr.ib.index_size, info->index_size,
info->index_bias, info->index_bias,
info->min_index, info->min_index,
info->max_index, info->max_index,
@ -296,9 +297,8 @@ svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
} }
done: done:
if (info->indexed && ibuffer_saved.user_buffer) if (info->index_size && info->index.resource != indexbuf)
pipe->set_index_buffer(pipe, &ibuffer_saved); pipe_resource_reference(&indexbuf, NULL);
SVGA_STATS_TIME_POP(svga_sws(svga)); SVGA_STATS_TIME_POP(svga_sws(svga));
} }

View File

@ -54,15 +54,6 @@ static void svga_set_vertex_buffers(struct pipe_context *pipe,
} }
static void svga_set_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *ib)
{
struct svga_context *svga = svga_context(pipe);
util_set_index_buffer(&svga->curr.ib, ib);
}
/** /**
* Does the given vertex attrib format need range adjustment in the VS? * Does the given vertex attrib format need range adjustment in the VS?
* Range adjustment scales and biases values from [0,1] to [-1,1]. * Range adjustment scales and biases values from [0,1] to [-1,1].
@ -340,7 +331,6 @@ void svga_cleanup_vertex_state( struct svga_context *svga )
void svga_init_vertex_functions( struct svga_context *svga ) void svga_init_vertex_functions( struct svga_context *svga )
{ {
svga->pipe.set_vertex_buffers = svga_set_vertex_buffers; svga->pipe.set_vertex_buffers = svga_set_vertex_buffers;
svga->pipe.set_index_buffer = svga_set_index_buffer;
svga->pipe.create_vertex_elements_state = svga_create_vertex_elements_state; svga->pipe.create_vertex_elements_state = svga_create_vertex_elements_state;
svga->pipe.bind_vertex_elements_state = svga_bind_vertex_elements_state; svga->pipe.bind_vertex_elements_state = svga_bind_vertex_elements_state;
svga->pipe.delete_vertex_elements_state = svga_delete_vertex_elements_state; svga->pipe.delete_vertex_elements_state = svga_delete_vertex_elements_state;

View File

@ -39,7 +39,9 @@ void svga_destroy_swtnl( struct svga_context *svga );
enum pipe_error enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga, svga_swtnl_draw_vbo(struct svga_context *svga,
const struct pipe_draw_info *info); const struct pipe_draw_info *info,
struct pipe_resource *indexbuf,
unsigned index_offset);
#endif #endif

View File

@ -38,7 +38,9 @@
enum pipe_error enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga, svga_swtnl_draw_vbo(struct svga_context *svga,
const struct pipe_draw_info *info) const struct pipe_draw_info *info,
struct pipe_resource *indexbuf,
unsigned index_offset)
{ {
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = { 0 }; struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = { 0 };
struct pipe_transfer *ib_transfer = NULL; struct pipe_transfer *ib_transfer = NULL;
@ -83,13 +85,14 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
/* Map index buffer, if present */ /* Map index buffer, if present */
map = NULL; map = NULL;
if (info->indexed && svga->curr.ib.buffer) { if (info->index_size && indexbuf) {
map = pipe_buffer_map(&svga->pipe, svga->curr.ib.buffer, map = pipe_buffer_map(&svga->pipe, indexbuf,
PIPE_TRANSFER_READ, PIPE_TRANSFER_READ,
&ib_transfer); &ib_transfer);
map = (ubyte *) map + index_offset;
draw_set_indexes(draw, draw_set_indexes(draw,
(const ubyte *) map + svga->curr.ib.offset, (const ubyte *) map,
svga->curr.ib.index_size, ~0); info->index_size, ~0);
} }
/* Map constant buffers */ /* Map constant buffers */

View File

@ -137,7 +137,6 @@ struct swr_context {
struct pipe_viewport_state viewport; struct pipe_viewport_state viewport;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
struct pipe_index_buffer index_buffer;
struct blitter_context *blitter; struct blitter_context *blitter;

View File

@ -167,7 +167,7 @@ swr_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
feState.bEnableCutIndex = info->primitive_restart; feState.bEnableCutIndex = info->primitive_restart;
SwrSetFrontendState(ctx->swrContext, &feState); SwrSetFrontendState(ctx->swrContext, &feState);
if (info->indexed) if (info->index_size)
SwrDrawIndexedInstanced(ctx->swrContext, SwrDrawIndexedInstanced(ctx->swrContext,
swr_convert_prim_topology(info->mode), swr_convert_prim_topology(info->mode),
info->count, info->count,

View File

@ -590,20 +590,6 @@ swr_set_vertex_buffers(struct pipe_context *pipe,
} }
static void
swr_set_index_buffer(struct pipe_context *pipe,
const struct pipe_index_buffer *ib)
{
struct swr_context *ctx = swr_context(pipe);
if (ib)
memcpy(&ctx->index_buffer, ib, sizeof(ctx->index_buffer));
else
memset(&ctx->index_buffer, 0, sizeof(ctx->index_buffer));
ctx->dirty |= SWR_NEW_VERTEX;
}
static void static void
swr_set_polygon_stipple(struct pipe_context *pipe, swr_set_polygon_stipple(struct pipe_context *pipe,
const struct pipe_poly_stipple *stipple) const struct pipe_poly_stipple *stipple)
@ -749,10 +735,9 @@ swr_update_resource_status(struct pipe_context *pipe,
} }
/* VBO index buffer */ /* VBO index buffer */
if (p_draw_info && p_draw_info->indexed) { if (p_draw_info && p_draw_info->index_size) {
struct pipe_index_buffer *ib = &ctx->index_buffer; if (!p_draw_info->has_user_indices)
if (!ib->user_buffer) swr_resource_read(p_draw_info->index.resource);
swr_resource_read(ib->buffer);
} }
/* transform feedback buffers */ /* transform feedback buffers */
@ -1222,7 +1207,10 @@ swr_update_derived(struct pipe_context *pipe,
/* Set vertex & index buffers */ /* Set vertex & index buffers */
/* (using draw info if called by swr_draw_vbo) */ /* (using draw info if called by swr_draw_vbo) */
if (ctx->dirty & SWR_NEW_VERTEX) { /* TODO: This is always true, because the index buffer comes from
* pipe_draw_info.
*/
if (1 || ctx->dirty & SWR_NEW_VERTEX) {
uint32_t scratch_total; uint32_t scratch_total;
uint8_t *scratch = NULL; uint8_t *scratch = NULL;
@ -1303,20 +1291,19 @@ swr_update_derived(struct pipe_context *pipe,
/* index buffer, if required (info passed in by swr_draw_vbo) */ /* index buffer, if required (info passed in by swr_draw_vbo) */
SWR_FORMAT index_type = R32_UINT; /* Default for non-indexed draws */ SWR_FORMAT index_type = R32_UINT; /* Default for non-indexed draws */
if (info.indexed) { if (info.index_size) {
const uint8_t *p_data; const uint8_t *p_data;
uint32_t size, pitch; uint32_t size, pitch;
struct pipe_index_buffer *ib = &ctx->index_buffer;
pitch = ib->index_size ? ib->index_size : sizeof(uint32_t); pitch = p_draw_info->index_size ? p_draw_info->index_size : sizeof(uint32_t);
index_type = swr_convert_index_type(pitch); index_type = swr_convert_index_type(pitch);
if (!ib->user_buffer) { if (!info.has_user_indices) {
/* VBO /* VBO
* size is based on buffer->width0 rather than info.count * size is based on buffer->width0 rather than info.count
* to prevent having to validate VBO on each draw */ * to prevent having to validate VBO on each draw */
size = ib->buffer->width0; size = info.index.resource->width0;
p_data = swr_resource_data(ib->buffer) + ib->offset; p_data = swr_resource_data(info.index.resource);
} else { } else {
/* Client buffer /* Client buffer
* client memory is one-time use, re-trigger SWR_NEW_VERTEX to * client memory is one-time use, re-trigger SWR_NEW_VERTEX to
@ -1327,14 +1314,14 @@ swr_update_derived(struct pipe_context *pipe,
size = AlignUp(size, 4); size = AlignUp(size, 4);
/* Copy indices to scratch space */ /* Copy indices to scratch space */
const void *ptr = ib->user_buffer; const void *ptr = info.index.user;
ptr = swr_copy_to_scratch_space( ptr = swr_copy_to_scratch_space(
ctx, &ctx->scratch->index_buffer, ptr, size); ctx, &ctx->scratch->index_buffer, ptr, size);
p_data = (const uint8_t *)ptr; p_data = (const uint8_t *)ptr;
} }
SWR_INDEX_BUFFER_STATE swrIndexBuffer; SWR_INDEX_BUFFER_STATE swrIndexBuffer;
swrIndexBuffer.format = swr_convert_index_type(ib->index_size); swrIndexBuffer.format = swr_convert_index_type(p_draw_info->index_size);
swrIndexBuffer.pIndices = p_data; swrIndexBuffer.pIndices = p_data;
swrIndexBuffer.size = size; swrIndexBuffer.size = size;
@ -1852,7 +1839,6 @@ swr_state_init(struct pipe_context *pipe)
pipe->delete_vertex_elements_state = swr_delete_vertex_elements_state; pipe->delete_vertex_elements_state = swr_delete_vertex_elements_state;
pipe->set_vertex_buffers = swr_set_vertex_buffers; pipe->set_vertex_buffers = swr_set_vertex_buffers;
pipe->set_index_buffer = swr_set_index_buffer;
pipe->set_polygon_stipple = swr_set_polygon_stipple; pipe->set_polygon_stipple = swr_set_polygon_stipple;
pipe->set_clip_state = swr_set_clip_state; pipe->set_clip_state = swr_set_clip_state;

View File

@ -1034,24 +1034,6 @@ trace_context_set_vertex_buffers(struct pipe_context *_pipe,
} }
static void
trace_context_set_index_buffer(struct pipe_context *_pipe,
const struct pipe_index_buffer *ib)
{
struct trace_context *tr_ctx = trace_context(_pipe);
struct pipe_context *pipe = tr_ctx->pipe;
trace_dump_call_begin("pipe_context", "set_index_buffer");
trace_dump_arg(ptr, pipe);
trace_dump_arg(index_buffer, ib);
pipe->set_index_buffer(pipe, ib);
trace_dump_call_end();
}
static struct pipe_stream_output_target * static struct pipe_stream_output_target *
trace_context_create_stream_output_target(struct pipe_context *_pipe, trace_context_create_stream_output_target(struct pipe_context *_pipe,
struct pipe_resource *res, struct pipe_resource *res,
@ -1804,7 +1786,6 @@ trace_context_create(struct trace_screen *tr_scr,
TR_CTX_INIT(create_surface); TR_CTX_INIT(create_surface);
TR_CTX_INIT(surface_destroy); TR_CTX_INIT(surface_destroy);
TR_CTX_INIT(set_vertex_buffers); TR_CTX_INIT(set_vertex_buffers);
TR_CTX_INIT(set_index_buffer);
TR_CTX_INIT(create_stream_output_target); TR_CTX_INIT(create_stream_output_target);
TR_CTX_INIT(stream_output_target_destroy); TR_CTX_INIT(stream_output_target_destroy);
TR_CTX_INIT(set_stream_output_targets); TR_CTX_INIT(set_stream_output_targets);

View File

@ -661,27 +661,6 @@ void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state)
} }
void trace_dump_index_buffer(const struct pipe_index_buffer *state)
{
if (!trace_dumping_enabled_locked())
return;
if (!state) {
trace_dump_null();
return;
}
trace_dump_struct_begin("pipe_index_buffer");
trace_dump_member(uint, state, index_size);
trace_dump_member(uint, state, offset);
trace_dump_member(ptr, state, buffer);
trace_dump_member(ptr, state, user_buffer);
trace_dump_struct_end();
}
void trace_dump_vertex_element(const struct pipe_vertex_element *state) void trace_dump_vertex_element(const struct pipe_vertex_element *state)
{ {
if (!trace_dumping_enabled_locked()) if (!trace_dumping_enabled_locked())
@ -792,7 +771,8 @@ void trace_dump_draw_info(const struct pipe_draw_info *state)
trace_dump_struct_begin("pipe_draw_info"); trace_dump_struct_begin("pipe_draw_info");
trace_dump_member(bool, state, indexed); trace_dump_member(uint, state, index_size);
trace_dump_member(uint, state, has_user_indices);
trace_dump_member(uint, state, mode); trace_dump_member(uint, state, mode);
trace_dump_member(uint, state, start); trace_dump_member(uint, state, start);
@ -810,6 +790,7 @@ void trace_dump_draw_info(const struct pipe_draw_info *state)
trace_dump_member(bool, state, primitive_restart); trace_dump_member(bool, state, primitive_restart);
trace_dump_member(uint, state, restart_index); trace_dump_member(uint, state, restart_index);
trace_dump_member(ptr, state, index.resource);
trace_dump_member(ptr, state, count_from_stream_output); trace_dump_member(ptr, state, count_from_stream_output);
if (!state->indirect) { if (!state->indirect) {

View File

@ -74,8 +74,6 @@ void trace_dump_transfer(const struct pipe_transfer *state);
void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state); void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state);
void trace_dump_index_buffer(const struct pipe_index_buffer *state);
void trace_dump_vertex_element(const struct pipe_vertex_element *state); void trace_dump_vertex_element(const struct pipe_vertex_element *state);
void trace_dump_constant_buffer(const struct pipe_constant_buffer *state); void trace_dump_constant_buffer(const struct pipe_constant_buffer *state);

View File

@ -67,7 +67,7 @@
#define VC4_DIRTY_CONSTBUF (1 << 13) #define VC4_DIRTY_CONSTBUF (1 << 13)
#define VC4_DIRTY_VTXSTATE (1 << 14) #define VC4_DIRTY_VTXSTATE (1 << 14)
#define VC4_DIRTY_VTXBUF (1 << 15) #define VC4_DIRTY_VTXBUF (1 << 15)
#define VC4_DIRTY_INDEXBUF (1 << 16)
#define VC4_DIRTY_SCISSOR (1 << 17) #define VC4_DIRTY_SCISSOR (1 << 17)
#define VC4_DIRTY_FLAT_SHADE_FLAGS (1 << 18) #define VC4_DIRTY_FLAT_SHADE_FLAGS (1 << 18)
#define VC4_DIRTY_PRIM_MODE (1 << 19) #define VC4_DIRTY_PRIM_MODE (1 << 19)
@ -377,7 +377,6 @@ struct vc4_context {
struct pipe_viewport_state viewport; struct pipe_viewport_state viewport;
struct vc4_constbuf_stateobj constbuf[PIPE_SHADER_TYPES]; struct vc4_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
struct vc4_vertexbuf_stateobj vertexbuf; struct vc4_vertexbuf_stateobj vertexbuf;
struct pipe_index_buffer indexbuf;
/** @} */ /** @} */
}; };

View File

@ -289,7 +289,6 @@ vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
return; return;
if (info->mode >= PIPE_PRIM_QUADS) { if (info->mode >= PIPE_PRIM_QUADS) {
util_primconvert_save_index_buffer(vc4->primconvert, &vc4->indexbuf);
util_primconvert_save_rasterizer_state(vc4->primconvert, &vc4->rasterizer->base); util_primconvert_save_rasterizer_state(vc4->primconvert, &vc4->rasterizer->base);
util_primconvert_draw_vbo(vc4->primconvert, info); util_primconvert_draw_vbo(vc4->primconvert, info);
perf_debug("Fallback conversion for %d %s vertices\n", perf_debug("Fallback conversion for %d %s vertices\n",
@ -340,23 +339,24 @@ vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
* definitions, up to but not including QUADS. * definitions, up to but not including QUADS.
*/ */
struct vc4_cl_out *bcl = cl_start(&job->bcl); struct vc4_cl_out *bcl = cl_start(&job->bcl);
if (info->indexed) { if (info->index_size) {
uint32_t offset = vc4->indexbuf.offset; uint32_t index_size = info->index_size;
uint32_t index_size = vc4->indexbuf.index_size; uint32_t offset = info->start * index_size;
struct pipe_resource *prsc; struct pipe_resource *prsc;
if (vc4->indexbuf.index_size == 4) { if (info->index_size == 4) {
prsc = vc4_get_shadow_index_buffer(pctx, &vc4->indexbuf, prsc = vc4_get_shadow_index_buffer(pctx, info,
offset,
info->count, &offset); info->count, &offset);
index_size = 2; index_size = 2;
} else { } else {
if (vc4->indexbuf.user_buffer) { if (info->has_user_indices) {
prsc = NULL; prsc = NULL;
u_upload_data(vc4->uploader, 0, u_upload_data(vc4->uploader, 0,
info->count * index_size, 4, info->count * index_size, 4,
vc4->indexbuf.user_buffer, info->index.user,
&offset, &prsc); &offset, &prsc);
} else { } else {
prsc = vc4->indexbuf.buffer; prsc = info->index.resource;
} }
} }
struct vc4_resource *rsc = vc4_resource(prsc); struct vc4_resource *rsc = vc4_resource(prsc);
@ -373,7 +373,7 @@ vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
cl_u32(&bcl, vc4->max_index); cl_u32(&bcl, vc4->max_index);
job->draw_calls_queued++; job->draw_calls_queued++;
if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer) if (info->index_size == 4 || info->has_user_indices)
pipe_resource_reference(&prsc, NULL); pipe_resource_reference(&prsc, NULL);
} else { } else {
uint32_t count = info->count; uint32_t count = info->count;

View File

@ -979,12 +979,13 @@ vc4_update_shadow_baselevel_texture(struct pipe_context *pctx,
*/ */
struct pipe_resource * struct pipe_resource *
vc4_get_shadow_index_buffer(struct pipe_context *pctx, vc4_get_shadow_index_buffer(struct pipe_context *pctx,
const struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
uint32_t offset,
uint32_t count, uint32_t count,
uint32_t *shadow_offset) uint32_t *shadow_offset)
{ {
struct vc4_context *vc4 = vc4_context(pctx); struct vc4_context *vc4 = vc4_context(pctx);
struct vc4_resource *orig = vc4_resource(ib->buffer); struct vc4_resource *orig = vc4_resource(info->index.resource);
perf_debug("Fallback conversion for %d uint indices\n", count); perf_debug("Fallback conversion for %d uint indices\n", count);
void *data; void *data;
@ -995,11 +996,11 @@ vc4_get_shadow_index_buffer(struct pipe_context *pctx,
struct pipe_transfer *src_transfer = NULL; struct pipe_transfer *src_transfer = NULL;
const uint32_t *src; const uint32_t *src;
if (ib->user_buffer) { if (info->has_user_indices) {
src = ib->user_buffer; src = info->index.user;
} else { } else {
src = pipe_buffer_map_range(pctx, &orig->base.b, src = pipe_buffer_map_range(pctx, &orig->base.b,
ib->offset, offset,
count * 4, count * 4,
PIPE_TRANSFER_READ, &src_transfer); PIPE_TRANSFER_READ, &src_transfer);
} }

View File

@ -121,9 +121,10 @@ struct pipe_resource *vc4_resource_create(struct pipe_screen *pscreen,
void vc4_update_shadow_baselevel_texture(struct pipe_context *pctx, void vc4_update_shadow_baselevel_texture(struct pipe_context *pctx,
struct pipe_sampler_view *view); struct pipe_sampler_view *view);
struct pipe_resource *vc4_get_shadow_index_buffer(struct pipe_context *pctx, struct pipe_resource *vc4_get_shadow_index_buffer(struct pipe_context *pctx,
const struct pipe_index_buffer *ib, const struct pipe_draw_info *info,
uint32_t offset,
uint32_t count, uint32_t count,
uint32_t *offset); uint32_t *shadow_offset);
void vc4_dump_surface(struct pipe_surface *psurf); void vc4_dump_surface(struct pipe_surface *psurf);
#endif /* VC4_RESOURCE_H */ #endif /* VC4_RESOURCE_H */

View File

@ -301,24 +301,6 @@ vc4_set_vertex_buffers(struct pipe_context *pctx,
vc4->dirty |= VC4_DIRTY_VTXBUF; vc4->dirty |= VC4_DIRTY_VTXBUF;
} }
static void
vc4_set_index_buffer(struct pipe_context *pctx,
const struct pipe_index_buffer *ib)
{
struct vc4_context *vc4 = vc4_context(pctx);
if (ib) {
pipe_resource_reference(&vc4->indexbuf.buffer, ib->buffer);
vc4->indexbuf.index_size = ib->index_size;
vc4->indexbuf.offset = ib->offset;
vc4->indexbuf.user_buffer = ib->user_buffer;
} else {
pipe_resource_reference(&vc4->indexbuf.buffer, NULL);
}
vc4->dirty |= VC4_DIRTY_INDEXBUF;
}
static void static void
vc4_blend_state_bind(struct pipe_context *pctx, void *hwcso) vc4_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{ {
@ -670,7 +652,6 @@ vc4_state_init(struct pipe_context *pctx)
pctx->set_viewport_states = vc4_set_viewport_states; pctx->set_viewport_states = vc4_set_viewport_states;
pctx->set_vertex_buffers = vc4_set_vertex_buffers; pctx->set_vertex_buffers = vc4_set_vertex_buffers;
pctx->set_index_buffer = vc4_set_index_buffer;
pctx->create_blend_state = vc4_create_blend_state; pctx->create_blend_state = vc4_create_blend_state;
pctx->bind_blend_state = vc4_blend_state_bind; pctx->bind_blend_state = vc4_blend_state_bind;

View File

@ -130,12 +130,13 @@ static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
} }
} }
static void virgl_attach_res_index_buffer(struct virgl_context *vctx) static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
struct virgl_indexbuf *ib)
{ {
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws; struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
struct virgl_resource *res; struct virgl_resource *res;
res = virgl_resource(vctx->index_buffer.buffer); res = virgl_resource(ib->buffer);
if (res) if (res)
vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE); vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
} }
@ -183,7 +184,6 @@ static void virgl_reemit_res(struct virgl_context *vctx)
virgl_attach_res_sampler_views(vctx, shader_type); virgl_attach_res_sampler_views(vctx, shader_type);
virgl_attach_res_uniform_buffers(vctx, shader_type); virgl_attach_res_uniform_buffers(vctx, shader_type);
} }
virgl_attach_res_index_buffer(vctx);
virgl_attach_res_vertex_buffers(vctx); virgl_attach_res_vertex_buffers(vctx);
virgl_attach_res_so_targets(vctx); virgl_attach_res_so_targets(vctx);
} }
@ -404,25 +404,12 @@ static void virgl_set_blend_color(struct pipe_context *ctx,
virgl_encoder_set_blend_color(vctx, color); virgl_encoder_set_blend_color(vctx, color);
} }
static void virgl_set_index_buffer(struct pipe_context *ctx,
const struct pipe_index_buffer *ib)
{
struct virgl_context *vctx = virgl_context(ctx);
if (ib) {
pipe_resource_reference(&vctx->index_buffer.buffer, ib->buffer);
memcpy(&vctx->index_buffer, ib, sizeof(*ib));
} else {
pipe_resource_reference(&vctx->index_buffer.buffer, NULL);
}
}
static void virgl_hw_set_index_buffer(struct pipe_context *ctx, static void virgl_hw_set_index_buffer(struct pipe_context *ctx,
struct pipe_index_buffer *ib) struct virgl_indexbuf *ib)
{ {
struct virgl_context *vctx = virgl_context(ctx); struct virgl_context *vctx = virgl_context(ctx);
virgl_encoder_set_index_buffer(vctx, ib); virgl_encoder_set_index_buffer(vctx, ib);
virgl_attach_res_index_buffer(vctx); virgl_attach_res_index_buffer(vctx, ib);
} }
static void virgl_set_constant_buffer(struct pipe_context *ctx, static void virgl_set_constant_buffer(struct pipe_context *ctx,
@ -590,7 +577,7 @@ static void virgl_draw_vbo(struct pipe_context *ctx,
{ {
struct virgl_context *vctx = virgl_context(ctx); struct virgl_context *vctx = virgl_context(ctx);
struct virgl_screen *rs = virgl_screen(ctx->screen); struct virgl_screen *rs = virgl_screen(ctx->screen);
struct pipe_index_buffer ib = {}; struct virgl_indexbuf ib = {};
struct pipe_draw_info info = *dinfo; struct pipe_draw_info info = *dinfo;
if (!dinfo->count_from_stream_output && !dinfo->indirect && if (!dinfo->count_from_stream_output && !dinfo->indirect &&
@ -599,15 +586,14 @@ static void virgl_draw_vbo(struct pipe_context *ctx,
return; return;
if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) { if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
util_primconvert_save_index_buffer(vctx->primconvert, &vctx->index_buffer);
util_primconvert_draw_vbo(vctx->primconvert, dinfo); util_primconvert_draw_vbo(vctx->primconvert, dinfo);
return; return;
} }
if (info.indexed) { if (info.index_size) {
pipe_resource_reference(&ib.buffer, vctx->index_buffer.buffer); pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
ib.user_buffer = vctx->index_buffer.user_buffer; ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
ib.index_size = vctx->index_buffer.index_size; ib.index_size = dinfo->index_size;
ib.offset = vctx->index_buffer.offset + info.start * ib.index_size; ib.offset = info.start * ib.index_size;
if (ib.user_buffer) { if (ib.user_buffer) {
u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256, u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256,
@ -620,7 +606,7 @@ static void virgl_draw_vbo(struct pipe_context *ctx,
vctx->num_draws++; vctx->num_draws++;
virgl_hw_set_vertex_buffers(ctx); virgl_hw_set_vertex_buffers(ctx);
if (info.indexed) if (info.index_size)
virgl_hw_set_index_buffer(ctx, &ib); virgl_hw_set_index_buffer(ctx, &ib);
virgl_encoder_draw_vbo(vctx, &info); virgl_encoder_draw_vbo(vctx, &info);
@ -905,7 +891,6 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state; vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state; vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
vctx->base.set_vertex_buffers = virgl_set_vertex_buffers; vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
vctx->base.set_index_buffer = virgl_set_index_buffer;
vctx->base.set_constant_buffer = virgl_set_constant_buffer; vctx->base.set_constant_buffer = virgl_set_constant_buffer;
vctx->base.create_vs_state = virgl_create_vs_state; vctx->base.create_vs_state = virgl_create_vs_state;

View File

@ -58,7 +58,6 @@ struct virgl_context {
struct slab_child_pool texture_transfer_pool; struct slab_child_pool texture_transfer_pool;
struct pipe_index_buffer index_buffer;
struct u_upload_mgr *uploader; struct u_upload_mgr *uploader;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS]; struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];

View File

@ -398,7 +398,7 @@ int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
} }
int virgl_encoder_set_index_buffer(struct virgl_context *ctx, int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
const struct pipe_index_buffer *ib) const struct virgl_indexbuf *ib)
{ {
int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib); int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
struct virgl_resource *res = NULL; struct virgl_resource *res = NULL;
@ -409,7 +409,7 @@ int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
virgl_encoder_write_res(ctx, res); virgl_encoder_write_res(ctx, res);
if (ib) { if (ib) {
virgl_encoder_write_dword(ctx->cbuf, ib->index_size); virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
virgl_encoder_write_dword(ctx->cbuf, ib->offset); virgl_encoder_write_dword(ctx->cbuf, 0);
} }
return 0; return 0;
} }
@ -421,7 +421,7 @@ int virgl_encoder_draw_vbo(struct virgl_context *ctx,
virgl_encoder_write_dword(ctx->cbuf, info->start); virgl_encoder_write_dword(ctx->cbuf, info->start);
virgl_encoder_write_dword(ctx->cbuf, info->count); virgl_encoder_write_dword(ctx->cbuf, info->count);
virgl_encoder_write_dword(ctx->cbuf, info->mode); virgl_encoder_write_dword(ctx->cbuf, info->mode);
virgl_encoder_write_dword(ctx->cbuf, info->indexed); virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
virgl_encoder_write_dword(ctx->cbuf, info->instance_count); virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
virgl_encoder_write_dword(ctx->cbuf, info->index_bias); virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
virgl_encoder_write_dword(ctx->cbuf, info->start_instance); virgl_encoder_write_dword(ctx->cbuf, info->start_instance);

View File

@ -39,6 +39,13 @@ struct virgl_surface {
uint32_t handle; uint32_t handle;
}; };
struct virgl_indexbuf {
unsigned offset;
unsigned index_size; /**< size of an index, in bytes */
struct pipe_resource *buffer; /**< the actual buffer */
const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
};
static inline struct virgl_surface *virgl_surface(struct pipe_surface *surf) static inline struct virgl_surface *virgl_surface(struct pipe_surface *surf)
{ {
return (struct virgl_surface *)surf; return (struct virgl_surface *)surf;
@ -167,7 +174,7 @@ int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
uint32_t *handles); uint32_t *handles);
int virgl_encoder_set_index_buffer(struct virgl_context *ctx, int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
const struct pipe_index_buffer *ib); const struct virgl_indexbuf *ib);
uint32_t virgl_object_assign_handle(void); uint32_t virgl_object_assign_handle(void);

View File

@ -53,7 +53,6 @@ struct pipe_grid_info;
struct pipe_fence_handle; struct pipe_fence_handle;
struct pipe_framebuffer_state; struct pipe_framebuffer_state;
struct pipe_image_view; struct pipe_image_view;
struct pipe_index_buffer;
struct pipe_query; struct pipe_query;
struct pipe_poly_stipple; struct pipe_poly_stipple;
struct pipe_rasterizer_state; struct pipe_rasterizer_state;
@ -354,9 +353,6 @@ struct pipe_context {
unsigned num_buffers, unsigned num_buffers,
const struct pipe_vertex_buffer * ); const struct pipe_vertex_buffer * );
void (*set_index_buffer)( struct pipe_context *pipe,
const struct pipe_index_buffer * );
/*@}*/ /*@}*/
/** /**

View File

@ -628,19 +628,6 @@ struct pipe_vertex_element
}; };
/**
* An index buffer. When an index buffer is bound, all indices to vertices
* will be looked up in the buffer.
*/
struct pipe_index_buffer
{
unsigned index_size; /**< size of an index, in bytes */
unsigned offset; /**< offset to start of data in buffer, in bytes */
struct pipe_resource *buffer; /**< the actual buffer */
const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
};
struct pipe_draw_indirect_info struct pipe_draw_indirect_info
{ {
unsigned offset; /**< must be 4 byte aligned */ unsigned offset; /**< must be 4 byte aligned */
@ -650,7 +637,7 @@ struct pipe_draw_indirect_info
/* Indirect draw parameters resource is laid out as follows: /* Indirect draw parameters resource is laid out as follows:
* *
* if indexed is TRUE: * if using indexed drawing:
* struct { * struct {
* uint32_t count; * uint32_t count;
* uint32_t instance_count; * uint32_t instance_count;
@ -680,12 +667,18 @@ struct pipe_draw_indirect_info
*/ */
struct pipe_draw_info struct pipe_draw_info
{ {
boolean indexed; /**< use index buffer */ ubyte index_size; /**< if 0, the draw is not indexed. */
enum pipe_prim_type mode:8; /**< the mode of the primitive */ enum pipe_prim_type mode:8; /**< the mode of the primitive */
boolean primitive_restart; unsigned primitive_restart:1;
unsigned has_user_indices:1; /**< if true, use index.user_buffer */
ubyte vertices_per_patch; /**< the number of vertices per patch */ ubyte vertices_per_patch; /**< the number of vertices per patch */
unsigned start; /**< the index of the first vertex */ /**
* Direct draws: start is the index of the first vertex
* Non-indexed indirect draws: not used
* Indexed indirect draws: start is added to the indirect start.
*/
unsigned start;
unsigned count; /**< number of vertices */ unsigned count; /**< number of vertices */
unsigned start_instance; /**< first instance id */ unsigned start_instance; /**< first instance id */
@ -707,6 +700,17 @@ struct pipe_draw_info
/* Pointers must be at the end for an optimal structure layout on 64-bit. */ /* Pointers must be at the end for an optimal structure layout on 64-bit. */
/**
* An index buffer. When an index buffer is bound, all indices to vertices
* will be looked up from the buffer.
*
* If has_user_indices, use index.user, else use index.resource.
*/
union {
struct pipe_resource *resource; /**< real buffer */
const void *user; /**< pointer to a user buffer */
} index;
struct pipe_draw_indirect_info *indirect; /**< Indirect draw. */ struct pipe_draw_indirect_info *indirect; /**< Indirect draw. */
/** /**

Some files were not shown because too many files have changed in this diff Show More