i965: Rename brw->no_batch_wrap to intel_batchbuffer::no_wrap
This really makes more sense in the intel_batchbuffer struct.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
parent
d22bc4ba52
commit
77d3d71f23
|
@ -187,12 +187,12 @@ brw_dispatch_compute_common(struct gl_context *ctx)
|
||||||
intel_batchbuffer_save_state(brw);
|
intel_batchbuffer_save_state(brw);
|
||||||
|
|
||||||
retry:
|
retry:
|
||||||
brw->no_batch_wrap = true;
|
brw->batch.no_wrap = true;
|
||||||
brw_upload_compute_state(brw);
|
brw_upload_compute_state(brw);
|
||||||
|
|
||||||
brw_emit_gpgpu_walker(brw);
|
brw_emit_gpgpu_walker(brw);
|
||||||
|
|
||||||
brw->no_batch_wrap = false;
|
brw->batch.no_wrap = false;
|
||||||
|
|
||||||
if (!brw_batch_has_aperture_space(brw, 0)) {
|
if (!brw_batch_has_aperture_space(brw, 0)) {
|
||||||
if (!fail_next) {
|
if (!fail_next) {
|
||||||
|
|
|
@ -465,6 +465,7 @@ struct intel_batchbuffer {
|
||||||
bool use_batch_first;
|
bool use_batch_first;
|
||||||
bool needs_sol_reset;
|
bool needs_sol_reset;
|
||||||
bool state_base_address_emitted;
|
bool state_base_address_emitted;
|
||||||
|
bool no_wrap;
|
||||||
|
|
||||||
struct brw_reloc_list batch_relocs;
|
struct brw_reloc_list batch_relocs;
|
||||||
struct brw_reloc_list state_relocs;
|
struct brw_reloc_list state_relocs;
|
||||||
|
@ -695,7 +696,6 @@ struct brw_context
|
||||||
uint32_t reset_count;
|
uint32_t reset_count;
|
||||||
|
|
||||||
struct intel_batchbuffer batch;
|
struct intel_batchbuffer batch;
|
||||||
bool no_batch_wrap;
|
|
||||||
|
|
||||||
struct {
|
struct {
|
||||||
struct brw_bo *bo;
|
struct brw_bo *bo;
|
||||||
|
|
|
@ -792,13 +792,13 @@ retry:
|
||||||
* brw->ctx.NewDriverState.
|
* brw->ctx.NewDriverState.
|
||||||
*/
|
*/
|
||||||
if (brw->ctx.NewDriverState) {
|
if (brw->ctx.NewDriverState) {
|
||||||
brw->no_batch_wrap = true;
|
brw->batch.no_wrap = true;
|
||||||
brw_upload_render_state(brw);
|
brw_upload_render_state(brw);
|
||||||
}
|
}
|
||||||
|
|
||||||
brw_emit_prim(brw, prim, brw->primitive, xfb_obj, stream);
|
brw_emit_prim(brw, prim, brw->primitive, xfb_obj, stream);
|
||||||
|
|
||||||
brw->no_batch_wrap = false;
|
brw->batch.no_wrap = false;
|
||||||
|
|
||||||
if (!brw_batch_has_aperture_space(brw, 0)) {
|
if (!brw_batch_has_aperture_space(brw, 0)) {
|
||||||
if (!fail_next) {
|
if (!fail_next) {
|
||||||
|
|
|
@ -224,7 +224,7 @@ retry:
|
||||||
intel_batchbuffer_require_space(brw, 1400, RENDER_RING);
|
intel_batchbuffer_require_space(brw, 1400, RENDER_RING);
|
||||||
brw_require_statebuffer_space(brw, 600);
|
brw_require_statebuffer_space(brw, 600);
|
||||||
intel_batchbuffer_save_state(brw);
|
intel_batchbuffer_save_state(brw);
|
||||||
brw->no_batch_wrap = true;
|
brw->batch.no_wrap = true;
|
||||||
|
|
||||||
#if GEN_GEN == 6
|
#if GEN_GEN == 6
|
||||||
/* Emit workaround flushes when we switch from drawing to blorping. */
|
/* Emit workaround flushes when we switch from drawing to blorping. */
|
||||||
|
@ -252,7 +252,7 @@ retry:
|
||||||
|
|
||||||
blorp_exec(batch, params);
|
blorp_exec(batch, params);
|
||||||
|
|
||||||
brw->no_batch_wrap = false;
|
brw->batch.no_wrap = false;
|
||||||
|
|
||||||
/* Check if the blorp op we just did would make our batch likely to fail to
|
/* Check if the blorp op we just did would make our batch likely to fail to
|
||||||
* map all the BOs into the GPU at batch exec time later. If so, flush the
|
* map all the BOs into the GPU at batch exec time later. If so, flush the
|
||||||
|
|
|
@ -371,7 +371,7 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
|
||||||
|
|
||||||
const unsigned batch_used = USED_BATCH(*batch) * 4;
|
const unsigned batch_used = USED_BATCH(*batch) * 4;
|
||||||
if (batch_used + sz >= BATCH_SZ) {
|
if (batch_used + sz >= BATCH_SZ) {
|
||||||
if (!brw->no_batch_wrap) {
|
if (!batch->no_wrap) {
|
||||||
intel_batchbuffer_flush(brw);
|
intel_batchbuffer_flush(brw);
|
||||||
} else {
|
} else {
|
||||||
const unsigned new_size =
|
const unsigned new_size =
|
||||||
|
@ -631,7 +631,7 @@ brw_finish_batch(struct brw_context *brw)
|
||||||
{
|
{
|
||||||
const struct gen_device_info *devinfo = &brw->screen->devinfo;
|
const struct gen_device_info *devinfo = &brw->screen->devinfo;
|
||||||
|
|
||||||
brw->no_batch_wrap = true;
|
brw->batch.no_wrap = true;
|
||||||
|
|
||||||
/* Capture the closing pipeline statistics register values necessary to
|
/* Capture the closing pipeline statistics register values necessary to
|
||||||
* support query objects (in the non-hardware context world).
|
* support query objects (in the non-hardware context world).
|
||||||
|
@ -675,7 +675,7 @@ brw_finish_batch(struct brw_context *brw)
|
||||||
intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
|
intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
|
||||||
}
|
}
|
||||||
|
|
||||||
brw->no_batch_wrap = false;
|
brw->batch.no_wrap = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
|
@ -891,7 +891,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* Check that we didn't just wrap our batchbuffer at a bad time. */
|
/* Check that we didn't just wrap our batchbuffer at a bad time. */
|
||||||
assert(!brw->no_batch_wrap);
|
assert(!brw->batch.no_wrap);
|
||||||
|
|
||||||
brw_finish_batch(brw);
|
brw_finish_batch(brw);
|
||||||
intel_upload_finish(brw);
|
intel_upload_finish(brw);
|
||||||
|
@ -1048,7 +1048,7 @@ brw_state_batch(struct brw_context *brw,
|
||||||
uint32_t offset = ALIGN(batch->state_used, alignment);
|
uint32_t offset = ALIGN(batch->state_used, alignment);
|
||||||
|
|
||||||
if (offset + size >= STATE_SZ) {
|
if (offset + size >= STATE_SZ) {
|
||||||
if (!brw->no_batch_wrap) {
|
if (!batch->no_wrap) {
|
||||||
intel_batchbuffer_flush(brw);
|
intel_batchbuffer_flush(brw);
|
||||||
offset = ALIGN(batch->state_used, alignment);
|
offset = ALIGN(batch->state_used, alignment);
|
||||||
} else {
|
} else {
|
||||||
|
|
Loading…
Reference in New Issue