i965: Reorganize batch/state BO fields into a 'brw_growing_bo' struct.

We're about to add more of them, and need to pass the whole lot of them
around together when growing them. Putting them in a struct makes this
much easier.

brw->batch.batch.bo is a bit of a mouthful, but it's nice to have things
labeled 'batch' and 'state' now that we have multiple buffers.

Fixes: 2dfc119f22 ("i965: Grow the batch/state buffers if we need space and can't flush.")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103101
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
commit 74e38739ca (parent ca43616586)

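For readers skimming the diff: the new struct simply bundles a buffer
object with its two possible mappings, so that everything a growing
buffer needs travels behind one pointer. A minimal sketch of the idea
(the grow_buffer_one_handle() helper below is hypothetical; in this
commit, grow_buffer() still takes the fields individually):

struct brw_growing_bo {
   struct brw_bo *bo;   /* kernel buffer object backing the buffer */
   uint32_t *map;       /* mapping currently being written through */
   uint32_t *cpu_map;   /* malloc'd shadow copy on non-LLC systems */
};

/* Hypothetical follow-up helper: with the fields grouped, growing a
 * buffer needs a single in/out parameter instead of three.
 */
static void
grow_buffer_one_handle(struct brw_context *brw, struct brw_growing_bo *grow,
                       unsigned existing_bytes, unsigned new_size)
{
   /* ...allocate a bigger grow->bo, copy existing_bytes over, then
    * update grow->map / grow->cpu_map to point at the new storage... */
}
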
@@ -469,22 +469,25 @@ struct brw_reloc_list {
    int reloc_array_size;
 };
 
+struct brw_growing_bo {
+   struct brw_bo *bo;
+   uint32_t *map;
+   uint32_t *cpu_map;
+};
+
 struct intel_batchbuffer {
    /** Current batchbuffer being queued up. */
-   struct brw_bo *bo;
-   /** Last BO submitted to the hardware. Used for glFinish(). */
-   struct brw_bo *last_bo;
+   struct brw_growing_bo batch;
    /** Current statebuffer being queued up. */
-   struct brw_bo *state_bo;
+   struct brw_growing_bo state;
+
+   /** Last batchbuffer submitted to the hardware. Used for glFinish(). */
+   struct brw_bo *last_bo;
 
 #ifdef DEBUG
    uint16_t emit, total;
 #endif
    uint32_t *map_next;
-   uint32_t *map;
-   uint32_t *batch_cpu_map;
-   uint32_t *state_cpu_map;
-   uint32_t *state_map;
    uint32_t state_used;
 
    enum brw_gpu_ring ring;

@@ -65,15 +65,15 @@ upload_pipelined_state_pointers(struct brw_context *brw)
 
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
-   OUT_RELOC(brw->batch.state_bo, 0, brw->vs.base.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
    if (brw->ff_gs.prog_active)
-      OUT_RELOC(brw->batch.state_bo, 0, brw->ff_gs.state_offset | 1);
+      OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
    else
       OUT_BATCH(0);
-   OUT_RELOC(brw->batch.state_bo, 0, brw->clip.state_offset | 1);
-   OUT_RELOC(brw->batch.state_bo, 0, brw->sf.state_offset);
-   OUT_RELOC(brw->batch.state_bo, 0, brw->wm.base.state_offset);
-   OUT_RELOC(brw->batch.state_bo, 0, brw->cc.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
    ADVANCE_BATCH();
 
    brw->ctx.NewDriverState |= BRW_NEW_PSP;

@@ -629,9 +629,9 @@ brw_upload_state_base_address(struct brw_context *brw)
       OUT_BATCH(0);
       OUT_BATCH(mocs_wb << 16);
       /* Surface state base address: */
-      OUT_RELOC64(brw->batch.state_bo, 0, mocs_wb << 4 | 1);
+      OUT_RELOC64(brw->batch.state.bo, 0, mocs_wb << 4 | 1);
       /* Dynamic state base address: */
-      OUT_RELOC64(brw->batch.state_bo, 0, mocs_wb << 4 | 1);
+      OUT_RELOC64(brw->batch.state.bo, 0, mocs_wb << 4 | 1);
       /* Indirect object base address: MEDIA_OBJECT data */
       OUT_BATCH(mocs_wb << 4 | 1);
       OUT_BATCH(0);

@@ -664,7 +664,7 @@ brw_upload_state_base_address(struct brw_context *brw)
        * BINDING_TABLE_STATE
        * SURFACE_STATE
        */
-      OUT_RELOC(brw->batch.state_bo, 0, 1);
+      OUT_RELOC(brw->batch.state.bo, 0, 1);
       /* Dynamic state base address:
        * SAMPLER_STATE
        * SAMPLER_BORDER_COLOR_STATE

@@ -675,7 +675,7 @@ brw_upload_state_base_address(struct brw_context *brw)
        * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
        * Disable is clear, which we rely on)
        */
-      OUT_RELOC(brw->batch.state_bo, 0, 1);
+      OUT_RELOC(brw->batch.state.bo, 0, 1);
 
       OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
 

@@ -696,7 +696,7 @@ brw_upload_state_base_address(struct brw_context *brw)
       BEGIN_BATCH(8);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
       OUT_BATCH(1); /* General state base address */
-      OUT_RELOC(brw->batch.state_bo, 0, 1); /* Surface state base address */
+      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
       OUT_BATCH(1); /* Indirect object base address */
       OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
       OUT_BATCH(0xfffff001); /* General state upper bound */

@@ -707,7 +707,7 @@ brw_upload_state_base_address(struct brw_context *brw)
       BEGIN_BATCH(6);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
       OUT_BATCH(1); /* General state base address */
-      OUT_RELOC(brw->batch.state_bo, 0, 1); /* Surface state base address */
+      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
       OUT_BATCH(1); /* Indirect object base address */
       OUT_BATCH(1); /* General state upper bound */
       OUT_BATCH(1); /* Indirect object upper bound */

@@ -146,7 +146,7 @@ brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
    assert(!fence->batch_bo);
    assert(!fence->signalled);
 
-   fence->batch_bo = brw->batch.bo;
+   fence->batch_bo = brw->batch.batch.bo;
    brw_bo_reference(fence->batch_bo);
 
    if (intel_batchbuffer_flush(brw) < 0) {

@@ -28,7 +28,7 @@ dynamic_state_address(struct blorp_batch *batch, uint32_t offset)
    struct brw_context *brw = batch->driver_batch;
 
    return (struct blorp_address) {
-      .buffer = brw->batch.state_bo,
+      .buffer = brw->batch.state.bo,
       .offset = offset,
    };
 }

@@ -60,7 +60,7 @@ blorp_emit_reloc(struct blorp_batch *batch,
    uint32_t offset;
 
    if (GEN_GEN < 6 && brw_ptr_in_state_buffer(&brw->batch, location)) {
-      offset = (char *)location - (char *)brw->batch.state_map;
+      offset = (char *)location - (char *)brw->batch.state.map;
       return brw_state_reloc(&brw->batch, offset,
                              address.buffer, address.offset + delta,
                              address.reloc_flags);

@@ -68,7 +68,7 @@ blorp_emit_reloc(struct blorp_batch *batch,
 
    assert(!brw_ptr_in_state_buffer(&brw->batch, location));
 
-   offset = (char *)location - (char *)brw->batch.map;
+   offset = (char *)location - (char *)brw->batch.batch.map;
    return brw_batch_reloc(&brw->batch, offset,
                           address.buffer, address.offset + delta,
                           address.reloc_flags);

@@ -86,7 +86,7 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
       brw_state_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
                       address.reloc_flags);
 
-   void *reloc_ptr = (void *)brw->batch.state_map + ss_offset;
+   void *reloc_ptr = (void *)brw->batch.state.map + ss_offset;
 #if GEN_GEN >= 8
    *(uint64_t *)reloc_ptr = reloc_val;
 #else

@@ -101,7 +101,7 @@ blorp_get_surface_base_address(struct blorp_batch *batch)
    assert(batch->blorp->driver_ctx == batch->driver_batch);
    struct brw_context *brw = batch->driver_batch;
    return (struct blorp_address) {
-      .buffer = brw->batch.state_bo,
+      .buffer = brw->batch.state.bo,
       .offset = 0,
    };
 }

@@ -163,7 +163,7 @@ blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
    void *data = brw_state_batch(brw, size, 64, &offset);
 
    *addr = (struct blorp_address) {
-      .buffer = brw->batch.state_bo,
+      .buffer = brw->batch.state.bo,
       .offset = offset,
 
 #if GEN_GEN == 10

@@ -89,7 +89,7 @@ __gen_combine_address(struct brw_context *brw, void *location,
       return address.offset + delta;
    } else {
       if (GEN_GEN < 6 && brw_ptr_in_state_buffer(batch, location)) {
-         offset = (char *) location - (char *) brw->batch.state_map;
+         offset = (char *) location - (char *) brw->batch.state.map;
          return brw_state_reloc(batch, offset, address.bo,
                                 address.offset + delta,
                                 address.reloc_flags);

@@ -97,7 +97,7 @@ __gen_combine_address(struct brw_context *brw, void *location,
 
       assert(!brw_ptr_in_state_buffer(batch, location));
 
-      offset = (char *) location - (char *) brw->batch.map;
+      offset = (char *) location - (char *) brw->batch.batch.map;
       return brw_batch_reloc(batch, offset, address.bo,
                              address.offset + delta,
                              address.reloc_flags);

@@ -1279,7 +1279,7 @@ genX(upload_clip_state)(struct brw_context *brw)
       clip.GuardbandClipTestEnable = true;
 
       clip.ClipperViewportStatePointer =
-         ro_bo(brw->batch.state_bo, brw->clip.vp_offset);
+         ro_bo(brw->batch.state.bo, brw->clip.vp_offset);
 
       clip.ScreenSpaceViewportXMin = -1;
       clip.ScreenSpaceViewportXMax = 1;

@@ -1496,7 +1496,7 @@ genX(upload_sf)(struct brw_context *brw)
        * domain.
        */
       sf.SetupViewportStateOffset =
-         ro_bo(brw->batch.state_bo, brw->sf.vp_offset);
+         ro_bo(brw->batch.state.bo, brw->sf.vp_offset);
 
       sf.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
 

@@ -1800,7 +1800,7 @@ genX(upload_wm)(struct brw_context *brw)
 
       if (stage_state->sampler_count)
          wm.SamplerStatePointer =
-            ro_bo(brw->batch.state_bo, stage_state->sampler_offset);
+            ro_bo(brw->batch.state.bo, stage_state->sampler_offset);
 #if GEN_GEN == 5
       if (wm_prog_data->prog_offset_2)
          wm.GRFRegisterCount2 = wm_prog_data->reg_blocks_2;

@@ -2093,7 +2093,7 @@ genX(upload_vs_state)(struct brw_context *brw)
 
       vs.StatisticsEnable = false;
       vs.SamplerStatePointer =
-         ro_bo(brw->batch.state_bo, stage_state->sampler_offset);
+         ro_bo(brw->batch.state.bo, stage_state->sampler_offset);
 #endif
 
 #if GEN_GEN == 5

@@ -3332,7 +3332,7 @@ genX(upload_color_calc_state)(struct brw_context *brw)
       cc.StatisticsEnable = brw->stats_wm;
 
       cc.CCViewportStatePointer =
-         ro_bo(brw->batch.state_bo, brw->cc.vp_offset);
+         ro_bo(brw->batch.state.bo, brw->cc.vp_offset);
 #else
       /* _NEW_COLOR */
       cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];

@@ -5083,7 +5083,7 @@ genX(update_sampler_state)(struct brw_context *brw,
    }
 #if GEN_GEN < 6
    samp_st.BorderColorPointer =
-      ro_bo(brw->batch.state_bo, border_color_offset);
+      ro_bo(brw->batch.state.bo, border_color_offset);
 #else
    samp_st.BorderColorPointer = border_color_offset;
 #endif

@@ -84,11 +84,11 @@ intel_batchbuffer_init(struct brw_context *brw)
    const struct gen_device_info *devinfo = &screen->devinfo;
 
    if (!devinfo->has_llc) {
-      batch->batch_cpu_map = malloc(BATCH_SZ);
-      batch->map = batch->batch_cpu_map;
-      batch->map_next = batch->map;
-      batch->state_cpu_map = malloc(STATE_SZ);
-      batch->state_map = batch->state_cpu_map;
+      batch->batch.cpu_map = malloc(BATCH_SZ);
+      batch->batch.map = batch->batch.cpu_map;
+      batch->map_next = batch->batch.map;
+      batch->state.cpu_map = malloc(STATE_SZ);
+      batch->state.map = batch->state.cpu_map;
    }
 
    init_reloc_list(&batch->batch_relocs, 250);

@@ -171,20 +171,21 @@ intel_batchbuffer_reset(struct brw_context *brw)
       brw_bo_unreference(batch->last_bo);
       batch->last_bo = NULL;
    }
-   batch->last_bo = batch->bo;
+   batch->last_bo = batch->batch.bo;
 
-   batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
-   if (!batch->batch_cpu_map) {
-      batch->map = brw_bo_map(brw, batch->bo, MAP_READ | MAP_WRITE);
+   batch->batch.bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+   if (!batch->batch.cpu_map) {
+      batch->batch.map =
+         brw_bo_map(brw, batch->batch.bo, MAP_READ | MAP_WRITE);
    }
-   batch->map_next = batch->map;
+   batch->map_next = batch->batch.map;
 
-   batch->state_bo = brw_bo_alloc(bufmgr, "statebuffer", STATE_SZ, 4096);
-   batch->state_bo->kflags =
+   batch->state.bo = brw_bo_alloc(bufmgr, "statebuffer", STATE_SZ, 4096);
+   batch->state.bo->kflags =
       can_do_exec_capture(screen) ? EXEC_OBJECT_CAPTURE : 0;
-   if (!batch->state_cpu_map) {
-      batch->state_map =
-         brw_bo_map(brw, batch->state_bo, MAP_READ | MAP_WRITE);
+   if (!batch->state.cpu_map) {
+      batch->state.map =
+         brw_bo_map(brw, batch->state.bo, MAP_READ | MAP_WRITE);
    }
 
    /* Avoid making 0 a valid state offset - otherwise the decoder will try

@@ -192,8 +193,8 @@ intel_batchbuffer_reset(struct brw_context *brw)
     */
    batch->state_used = 1;
 
-   add_exec_bo(batch, batch->bo);
-   assert(batch->bo->index == 0);
+   add_exec_bo(batch, batch->batch.bo);
+   assert(batch->batch.bo->index == 0);
 
    batch->needs_sol_reset = false;
    batch->state_base_address_emitted = false;

@@ -242,8 +243,8 @@ intel_batchbuffer_reset_to_saved(struct brw_context *brw)
 void
 intel_batchbuffer_free(struct intel_batchbuffer *batch)
 {
-   free(batch->batch_cpu_map);
-   free(batch->state_cpu_map);
+   free(batch->batch.cpu_map);
+   free(batch->state.cpu_map);
 
    for (int i = 0; i < batch->exec_count; i++) {
       brw_bo_unreference(batch->exec_bos[i]);

@@ -254,8 +255,8 @@ intel_batchbuffer_free(struct intel_batchbuffer *batch)
    free(batch->validation_list);
 
    brw_bo_unreference(batch->last_bo);
-   brw_bo_unreference(batch->bo);
-   brw_bo_unreference(batch->state_bo);
+   brw_bo_unreference(batch->batch.bo);
+   brw_bo_unreference(batch->state.bo);
    if (batch->state_batch_sizes)
       _mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
 }

@@ -367,13 +368,14 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
    const unsigned batch_used = USED_BATCH(*batch) * 4;
    if (batch_used + sz >= BATCH_SZ && !batch->no_wrap) {
       intel_batchbuffer_flush(brw);
-   } else if (batch_used + sz >= batch->bo->size) {
+   } else if (batch_used + sz >= batch->batch.bo->size) {
       const unsigned new_size =
-         MIN2(batch->bo->size + batch->bo->size / 2, MAX_BATCH_SIZE);
-      grow_buffer(brw, &batch->bo, &batch->map, &batch->batch_cpu_map,
-                  batch_used, new_size);
-      batch->map_next = (void *) batch->map + batch_used;
-      assert(batch_used + sz < batch->bo->size);
+         MIN2(batch->batch.bo->size + batch->batch.bo->size / 2,
+              MAX_BATCH_SIZE);
+      grow_buffer(brw, &batch->batch.bo, &batch->batch.map,
+                  &batch->batch.cpu_map, batch_used, new_size);
+      batch->map_next = (void *) batch->batch.map + batch_used;
+      assert(batch_used + sz < batch->batch.bo->size);
    }
 
    /* The intel_batchbuffer_flush() calls above might have changed

@@ -430,16 +432,16 @@ do_batch_dump(struct brw_context *brw)
    if (batch->ring != RENDER_RING)
      return;
 
-   uint32_t *batch_data = brw_bo_map(brw, batch->bo, MAP_READ);
-   uint32_t *state = brw_bo_map(brw, batch->state_bo, MAP_READ);
+   uint32_t *batch_data = brw_bo_map(brw, batch->batch.bo, MAP_READ);
+   uint32_t *state = brw_bo_map(brw, batch->state.bo, MAP_READ);
    if (batch_data == NULL || state == NULL) {
       fprintf(stderr, "WARNING: failed to map batchbuffer/statebuffer\n");
       return;
    }
 
    uint32_t *end = batch_data + USED_BATCH(*batch);
-   uint32_t batch_gtt_offset = batch->bo->gtt_offset;
-   uint32_t state_gtt_offset = batch->state_bo->gtt_offset;
+   uint32_t batch_gtt_offset = batch->batch.bo->gtt_offset;
+   uint32_t state_gtt_offset = batch->state.bo->gtt_offset;
    int length;
 
    bool color = INTEL_DEBUG & DEBUG_COLOR;

@@ -584,8 +586,8 @@ do_batch_dump(struct brw_context *brw)
       }
    }
 
-   brw_bo_unmap(batch->bo);
-   brw_bo_unmap(batch->state_bo);
+   brw_bo_unmap(batch->batch.bo);
+   brw_bo_unmap(batch->state.bo);
 }
 #else
 static void do_batch_dump(struct brw_context *brw) { }

@@ -607,7 +609,7 @@ brw_new_batch(struct brw_context *brw)
    brw->batch.exec_count = 0;
    brw->batch.aperture_space = 0;
 
-   brw_bo_unreference(brw->batch.state_bo);
+   brw_bo_unreference(brw->batch.state.bo);
 
    /* Create a new batchbuffer and reset the associated state: */
    intel_batchbuffer_reset_and_clear_render_cache(brw);

@@ -801,18 +803,18 @@ submit_batch(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
    struct intel_batchbuffer *batch = &brw->batch;
    int ret = 0;
 
-   if (batch->batch_cpu_map) {
-      void *bo_map = brw_bo_map(brw, batch->bo, MAP_WRITE);
-      memcpy(bo_map, batch->batch_cpu_map, 4 * USED_BATCH(*batch));
+   if (batch->batch.cpu_map) {
+      void *bo_map = brw_bo_map(brw, batch->batch.bo, MAP_WRITE);
+      memcpy(bo_map, batch->batch.cpu_map, 4 * USED_BATCH(*batch));
    }
 
-   if (batch->state_cpu_map) {
-      void *bo_map = brw_bo_map(brw, batch->state_bo, MAP_WRITE);
-      memcpy(bo_map, batch->state_cpu_map, batch->state_used);
+   if (batch->state.cpu_map) {
+      void *bo_map = brw_bo_map(brw, batch->state.bo, MAP_WRITE);
+      memcpy(bo_map, batch->state.cpu_map, batch->state_used);
    }
 
-   brw_bo_unmap(batch->bo);
-   brw_bo_unmap(batch->state_bo);
+   brw_bo_unmap(batch->batch.bo);
+   brw_bo_unmap(batch->state.bo);
 
    if (!brw->screen->no_hw) {
       /* The requirement for using I915_EXEC_NO_RELOC are:

@@ -840,19 +842,19 @@ submit_batch(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
       uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;
 
       /* Set statebuffer relocations */
-      const unsigned state_index = batch->state_bo->index;
+      const unsigned state_index = batch->state.bo->index;
       if (state_index < batch->exec_count &&
-          batch->exec_bos[state_index] == batch->state_bo) {
+          batch->exec_bos[state_index] == batch->state.bo) {
          struct drm_i915_gem_exec_object2 *entry =
            &batch->validation_list[state_index];
-         assert(entry->handle == batch->state_bo->gem_handle);
+         assert(entry->handle == batch->state.bo->gem_handle);
         entry->relocation_count = batch->state_relocs.reloc_count;
         entry->relocs_ptr = (uintptr_t) batch->state_relocs.relocs;
      }
 
      /* Set batchbuffer relocations */
      struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
-      assert(entry->handle == batch->bo->gem_handle);
+      assert(entry->handle == batch->batch.bo->gem_handle);
      entry->relocation_count = batch->batch_relocs.reloc_count;
      entry->relocs_ptr = (uintptr_t) batch->batch_relocs.relocs;
 

@@ -914,7 +916,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
    intel_upload_finish(brw);
 
    if (brw->throttle_batch[0] == NULL) {
-      brw->throttle_batch[0] = brw->batch.bo;
+      brw->throttle_batch[0] = brw->batch.batch.bo;
       brw_bo_reference(brw->throttle_batch[0]);
    }
 

@@ -936,7 +938,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
 
    if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
       fprintf(stderr, "waiting for idle\n");
-      brw_bo_wait_rendering(brw->batch.bo);
+      brw_bo_wait_rendering(brw->batch.batch.bo);
    }
 
    /* Start a new batch buffer. */

@@ -1009,7 +1011,7 @@ brw_batch_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
                 struct brw_bo *target, uint32_t target_offset,
                 unsigned int reloc_flags)
 {
-   assert(batch_offset <= batch->bo->size - sizeof(uint32_t));
+   assert(batch_offset <= batch->batch.bo->size - sizeof(uint32_t));
 
    return emit_reloc(batch, &batch->batch_relocs, batch_offset,
                      target, target_offset, reloc_flags);

@@ -1020,7 +1022,7 @@ brw_state_reloc(struct intel_batchbuffer *batch, uint32_t state_offset,
                 struct brw_bo *target, uint32_t target_offset,
                 unsigned int reloc_flags)
 {
-   assert(state_offset <= batch->state_bo->size - sizeof(uint32_t));
+   assert(state_offset <= batch->state.bo->size - sizeof(uint32_t));
 
    return emit_reloc(batch, &batch->state_relocs, state_offset,
                      target, target_offset, reloc_flags);

@@ -1060,20 +1062,20 @@ brw_state_batch(struct brw_context *brw,
 {
    struct intel_batchbuffer *batch = &brw->batch;
 
-   assert(size < batch->bo->size);
+   assert(size < batch->state.bo->size);
 
    uint32_t offset = ALIGN(batch->state_used, alignment);
 
    if (offset + size >= STATE_SZ && !batch->no_wrap) {
       intel_batchbuffer_flush(brw);
       offset = ALIGN(batch->state_used, alignment);
-   } else if (offset + size >= batch->state_bo->size) {
+   } else if (offset + size >= batch->state.bo->size) {
       const unsigned new_size =
-         MIN2(batch->state_bo->size + batch->state_bo->size / 2,
+         MIN2(batch->state.bo->size + batch->state.bo->size / 2,
               MAX_STATE_SIZE);
-      grow_buffer(brw, &batch->state_bo, &batch->state_map,
-                  &batch->state_cpu_map, batch->state_used, new_size);
-      assert(offset + size < batch->state_bo->size);
+      grow_buffer(brw, &batch->state.bo, &batch->state.map,
+                  &batch->state.cpu_map, batch->state_used, new_size);
+      assert(offset + size < batch->state.bo->size);
    }
 
    if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {

@@ -1085,7 +1087,7 @@ brw_state_batch(struct brw_context *brw,
    batch->state_used = offset + size;
 
    *out_offset = offset;
-   return batch->state_map + (offset >> 2);
+   return batch->state.map + (offset >> 2);
 }
 
 void

@@ -64,7 +64,8 @@ uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
                          uint32_t target_offset,
                          unsigned flags);
 
-#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
+#define USED_BATCH(_batch) \
+   ((uintptr_t)((_batch).map_next - (_batch).batch.map))
 
 static inline uint32_t float_as_int(float f)
 {

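A note on the USED_BATCH() hunk above: the parameter rename from 'batch'
to '_batch' is load-bearing, not cosmetic. C macro replacement is purely
token-based, so with the old name the parameter would also replace the
'batch' token in the new member access '.batch.map'. An illustration
(not from the source):

/* #define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).batch.map))
 * USED_BATCH(*b) would expand to ((uintptr_t)((*b).map_next - (*b).*b.map)),
 * which does not compile. Renaming the parameter to _batch avoids the
 * collision with the new 'batch' field name.
 */
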
@@ -122,8 +123,8 @@ intel_batchbuffer_advance(struct brw_context *brw)
 static inline bool
 brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
 {
-   return (char *) p >= (char *) batch->state_map &&
-          (char *) p < (char *) batch->state_map + batch->state_bo->size;
+   return (char *) p >= (char *) batch->state.map &&
+          (char *) p < (char *) batch->state.map + batch->state.bo->size;
 }
 
 #define BEGIN_BATCH(n) do { \

@@ -140,7 +141,7 @@ brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
 #define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
 
 #define OUT_RELOC(buf, flags, delta) do { \
-   uint32_t __offset = (__map - brw->batch.map) * 4; \
+   uint32_t __offset = (__map - brw->batch.batch.map) * 4; \
    uint32_t reloc = \
       brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
    OUT_BATCH(reloc); \

@@ -148,7 +149,7 @@ brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
 
 /* Handle 48-bit address relocations for Gen8+ */
 #define OUT_RELOC64(buf, flags, delta) do { \
-   uint32_t __offset = (__map - brw->batch.map) * 4; \
+   uint32_t __offset = (__map - brw->batch.batch.map) * 4; \
    uint64_t reloc64 = \
       brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
    OUT_BATCH(reloc64); \

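Taken together, the non-LLC path now reads directly off the struct:
writes land in cpu_map and are copied into bo at submit time, as the
submit_batch() hunk shows. A condensed sketch of that flow (hypothetical
helper name; simplified from the diff, not verbatim driver code):

static void
upload_growing_bo(struct brw_context *brw, struct brw_growing_bo *grow,
                  unsigned used_bytes)
{
   /* On non-LLC systems the malloc'd shadow holds the CPU writes... */
   if (grow->cpu_map) {
      void *bo_map = brw_bo_map(brw, grow->bo, MAP_WRITE);
      memcpy(bo_map, grow->cpu_map, used_bytes);
   }
   /* ...and the BO is unmapped before being handed to execbuf. */
   brw_bo_unmap(grow->bo);
}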