i965: Add and use USED_BATCH macro.
The next patch will replace the .used field with an on-demand calculation of
batchbuffer usage.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
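
Routing every read of brw->batch.used through one macro means the follow-up
change only has to touch a single definition. A minimal sketch of the two
stages (the map_next write cursor is an assumption about the follow-up patch,
not something this commit adds):

    /* This commit: USED_BATCH is a thin wrapper over the counter. */
    #define USED_BATCH(batch) ((batch).used)

    /* Follow-up sketch: compute usage on demand as the distance the write
     * cursor has advanced from the start of the batch map (map_next is a
     * hypothetical field name here, not part of this commit). */
    #define USED_BATCH(batch) ((uint32_t)((batch).map_next - (batch).map))

Since map and a map_next cursor would both be uint32_t pointers, the
subtraction yields a DWORD count, matching the units of the old .used field.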
parent 09348c12fc
commit 131573df7a

src/mesa/drivers/dri/i965/brw_blorp.cpp

@@ -226,7 +226,7 @@ retry:
    intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
    intel_batchbuffer_save_state(brw);
    drm_intel_bo *saved_bo = brw->batch.bo;
-   uint32_t saved_used = brw->batch.used;
+   uint32_t saved_used = USED_BATCH(brw->batch);
    uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;
 
    switch (brw->gen) {

@@ -245,7 +245,7 @@ retry:
     * reserved enough space that a wrap will never happen.
     */
    assert(brw->batch.bo == saved_bo);
-   assert((brw->batch.used - saved_used) * 4 +
+   assert((USED_BATCH(brw->batch) - saved_used) * 4 +
           (saved_state_batch_offset - brw->batch.state_batch_offset) <
           estimated_max_batch_usage);
    /* Shut up compiler warnings on release build */

src/mesa/drivers/dri/i965/brw_performance_monitor.c

@@ -710,7 +710,7 @@ emit_mi_report_perf_count(struct brw_context *brw,
    /* Make sure the commands to take a snapshot fits in a single batch. */
    intel_batchbuffer_require_space(brw, MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4,
                                    RENDER_RING);
-   int batch_used = brw->batch.used;
+   int batch_used = USED_BATCH(brw->batch);
 
    /* Reports apparently don't always get written unless we flush first. */
    brw_emit_mi_flush(brw);

@@ -754,7 +754,7 @@ emit_mi_report_perf_count(struct brw_context *brw,
    brw_emit_mi_flush(brw);
 
    (void) batch_used;
-   assert(brw->batch.used - batch_used <= MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4);
+   assert(USED_BATCH(brw->batch) - batch_used <= MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4);
 }
 
 /**

@@ -1386,7 +1386,7 @@ void
 brw_perf_monitor_new_batch(struct brw_context *brw)
 {
    assert(brw->batch.ring == RENDER_RING);
-   assert(brw->gen < 6 || brw->batch.used == 0);
+   assert(brw->gen < 6 || USED_BATCH(brw->batch) == 0);
 
    if (brw->perfmon.oa_users == 0)
       return;

src/mesa/drivers/dri/i965/brw_state_batch.c

@@ -87,7 +87,7 @@ brw_annotate_aub(struct brw_context *brw)
    drm_intel_aub_annotation annotations[annotation_count];
    int a = 0;
    make_annotation(&annotations[a++], AUB_TRACE_TYPE_BATCH, 0,
-                   4*brw->batch.used);
+                   4 * USED_BATCH(brw->batch));
    for (int i = brw->state_batch_count; i-- > 0; ) {
       uint32_t type = brw->state_batch_list[i].type;
       uint32_t start_offset = brw->state_batch_list[i].offset;

@@ -136,7 +136,7 @@ __brw_state_batch(struct brw_context *brw,
     * space, then flush and try again.
     */
    if (batch->state_batch_offset < size ||
-       offset < 4*batch->used + batch->reserved_space) {
+       offset < 4 * USED_BATCH(*batch) + batch->reserved_space) {
       intel_batchbuffer_flush(brw);
       offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
    }

src/mesa/drivers/dri/i965/brw_urb.c

@@ -249,8 +249,8 @@ void brw_upload_urb_fence(struct brw_context *brw)
    uf.bits1.cs_fence = brw->urb.size;
 
    /* erratum: URB_FENCE must not cross a 64byte cacheline */
-   if ((brw->batch.used & 15) > 12) {
-      int pad = 16 - (brw->batch.used & 15);
+   if ((USED_BATCH(brw->batch) & 15) > 12) {
+      int pad = 16 - (USED_BATCH(brw->batch) & 15);
       do
          brw->batch.map[brw->batch.used++] = MI_NOOP;
       while (--pad);

src/mesa/drivers/dri/i965/intel_batchbuffer.c

@@ -94,7 +94,7 @@ intel_batchbuffer_reset_to_saved(struct brw_context *brw)
    drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
 
    brw->batch.used = brw->batch.saved.used;
-   if (brw->batch.used == 0)
+   if (USED_BATCH(brw->batch) == 0)
       brw->batch.ring = UNKNOWN_RING;
 }
 

@@ -122,7 +122,7 @@ do_batch_dump(struct brw_context *brw)
       drm_intel_decode_set_batch_pointer(decode,
                                          batch->bo->virtual,
                                          batch->bo->offset64,
-                                         batch->used);
+                                         USED_BATCH(*batch));
    } else {
       fprintf(stderr,
               "WARNING: failed to map batchbuffer (%s), "

@@ -131,7 +131,7 @@ do_batch_dump(struct brw_context *brw)
       drm_intel_decode_set_batch_pointer(decode,
                                          batch->map,
                                          batch->bo->offset64,
-                                         batch->used);
+                                         USED_BATCH(*batch));
    }
 
    drm_intel_decode_set_output_file(decode, stderr);

@@ -289,7 +289,7 @@ do_flush_locked(struct brw_context *brw)
    if (brw->has_llc) {
       drm_intel_bo_unmap(batch->bo);
    } else {
-      ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+      ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
       if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
          ret = drm_intel_bo_subdata(batch->bo,
                                     batch->state_batch_offset,

@@ -314,11 +314,11 @@ do_flush_locked(struct brw_context *brw)
          brw_annotate_aub(brw);
 
       if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
-         ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
-                                     flags);
+         ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
+                                     NULL, 0, 0, flags);
       } else {
          ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
-                                             4 * batch->used, flags);
+                                             4 * USED_BATCH(*batch), flags);
       }
    }
 

@@ -342,7 +342,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
 {
    int ret;
 
-   if (brw->batch.used == 0)
+   if (USED_BATCH(brw->batch) == 0)
       return 0;
 
    if (brw->throttle_batch[0] == NULL) {

@@ -351,7 +351,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
    }
 
    if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
-      int bytes_for_commands = 4 * brw->batch.used;
+      int bytes_for_commands = 4 * USED_BATCH(brw->batch);
       int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
       int total_bytes = bytes_for_commands + bytes_for_state;
       fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "

@@ -367,7 +367,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
 
    /* Mark the end of the buffer. */
    intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
-   if (brw->batch.used & 1) {
+   if (USED_BATCH(brw->batch) & 1) {
       /* Round batchbuffer usage to 2 DWORDs. */
       intel_batchbuffer_emit_dword(brw, MI_NOOP);
    }

src/mesa/drivers/dri/i965/intel_batchbuffer.h

@@ -67,6 +67,9 @@ uint64_t intel_batchbuffer_reloc64(struct brw_context *brw,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t offset);
+
+#define USED_BATCH(batch) ((batch).used)
+
 static inline uint32_t float_as_int(float f)
 {
    union {

@@ -87,7 +90,7 @@ static inline unsigned
 intel_batchbuffer_space(struct brw_context *brw)
 {
    return (brw->batch.state_batch_offset - brw->batch.reserved_space)
-      - brw->batch.used*4;
+      - USED_BATCH(brw->batch) * 4;
 }
 
 

@@ -139,7 +142,7 @@ intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
    intel_batchbuffer_require_space(brw, n * 4, ring);
 
 #ifdef DEBUG
-   brw->batch.emit = brw->batch.used;
+   brw->batch.emit = USED_BATCH(brw->batch);
    brw->batch.total = n;
 #endif
 }

@@ -149,7 +152,7 @@ intel_batchbuffer_advance(struct brw_context *brw)
 {
 #ifdef DEBUG
    struct intel_batchbuffer *batch = &brw->batch;
-   unsigned int _n = batch->used - batch->emit;
+   unsigned int _n = USED_BATCH(*batch) - batch->emit;
    assert(batch->total != 0);
    if (_n != batch->total) {
       fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",