i965: Split brw_emit_reloc into brw_batch_reloc and brw_state_reloc.
brw_batch_reloc emits a relocation from the batchbuffer to elsewhere. brw_state_reloc emits a relocation from the statebuffer to elsewhere. For now, they do the same thing, but when we actually split the two buffers, we'll change brw_state_reloc to use the state buffer. Reviewed-by: Matt Turner <mattst88@gmail.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
parent
1674a0bcbc
commit
e723255901
|
@@ -176,9 +176,9 @@ brw_emit_surface_state(struct brw_context *brw,
|
|||
surf_offset);
|
||||
|
||||
isl_surf_fill_state(&brw->isl_dev, state, .surf = &mt->surf, .view = &view,
|
||||
.address = brw_emit_reloc(&brw->batch,
|
||||
*surf_offset + brw->isl_dev.ss.addr_offset,
|
||||
mt->bo, offset, reloc_flags),
|
||||
.address = brw_state_reloc(&brw->batch,
|
||||
*surf_offset + brw->isl_dev.ss.addr_offset,
|
||||
mt->bo, offset, reloc_flags),
|
||||
.aux_surf = aux_surf, .aux_usage = aux_usage,
|
||||
.aux_address = aux_offset,
|
||||
.mocs = mocs, .clear_color = clear_color,
|
||||
|
@@ -194,11 +194,11 @@ brw_emit_surface_state(struct brw_context *brw,
|
|||
*/
|
||||
assert((aux_offset & 0xfff) == 0);
|
||||
uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
|
||||
*aux_addr = brw_emit_reloc(&brw->batch,
|
||||
*surf_offset +
|
||||
brw->isl_dev.ss.aux_addr_offset,
|
||||
aux_bo, *aux_addr,
|
||||
reloc_flags);
|
||||
*aux_addr = brw_state_reloc(&brw->batch,
|
||||
*surf_offset +
|
||||
brw->isl_dev.ss.aux_addr_offset,
|
||||
aux_bo, *aux_addr,
|
||||
reloc_flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -607,10 +607,10 @@ brw_emit_buffer_surface_state(struct brw_context *brw,
|
|||
|
||||
isl_buffer_fill_state(&brw->isl_dev, dw,
|
||||
.address = !bo ? buffer_offset :
|
||||
brw_emit_reloc(&brw->batch,
|
||||
*out_offset + brw->isl_dev.ss.addr_offset,
|
||||
bo, buffer_offset,
|
||||
reloc_flags),
|
||||
brw_state_reloc(&brw->batch,
|
||||
*out_offset + brw->isl_dev.ss.addr_offset,
|
||||
bo, buffer_offset,
|
||||
reloc_flags),
|
||||
.size = buffer_size,
|
||||
.format = surface_format,
|
||||
.stride = pitch,
|
||||
|
@@ -777,8 +777,8 @@ brw_update_sol_surface(struct brw_context *brw,
|
|||
BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
|
||||
surface_format << BRW_SURFACE_FORMAT_SHIFT |
|
||||
BRW_SURFACE_RC_READ_WRITE;
|
||||
surf[1] = brw_emit_reloc(&brw->batch,
|
||||
*out_offset + 4, bo, offset_bytes, RELOC_WRITE);
|
||||
surf[1] = brw_state_reloc(&brw->batch,
|
||||
*out_offset + 4, bo, offset_bytes, RELOC_WRITE);
|
||||
surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
|
||||
height << BRW_SURFACE_HEIGHT_SHIFT);
|
||||
surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
|
||||
|
@@ -870,9 +870,9 @@ emit_null_surface_state(struct brw_context *brw,
|
|||
|
||||
surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
|
||||
ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
|
||||
surf[1] = brw_emit_reloc(&brw->batch, *out_offset + 4,
|
||||
brw->wm.multisampled_null_render_target_bo,
|
||||
0, RELOC_WRITE);
|
||||
surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
|
||||
brw->wm.multisampled_null_render_target_bo,
|
||||
0, RELOC_WRITE);
|
||||
|
||||
surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
|
||||
(height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
|
||||
|
@@ -940,12 +940,12 @@ gen4_update_renderbuffer_surface(struct brw_context *brw,
|
|||
|
||||
/* reloc */
|
||||
assert(mt->offset % mt->cpp == 0);
|
||||
surf[1] = brw_emit_reloc(&brw->batch, offset + 4, mt->bo,
|
||||
mt->offset +
|
||||
intel_renderbuffer_get_tile_offsets(irb,
|
||||
&tile_x,
|
||||
&tile_y),
|
||||
RELOC_WRITE);
|
||||
surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
|
||||
mt->offset +
|
||||
intel_renderbuffer_get_tile_offsets(irb,
|
||||
&tile_x,
|
||||
&tile_y),
|
||||
RELOC_WRITE);
|
||||
|
||||
surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
|
||||
(rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
|
||||
|
|
|
@@ -59,9 +59,9 @@ blorp_emit_reloc(struct blorp_batch *batch,
|
|||
struct brw_context *brw = batch->driver_batch;
|
||||
|
||||
uint32_t offset = (char *)location - (char *)brw->batch.map;
|
||||
return brw_emit_reloc(&brw->batch, offset,
|
||||
address.buffer, address.offset + delta,
|
||||
address.reloc_flags);
|
||||
return brw_batch_reloc(&brw->batch, offset,
|
||||
address.buffer, address.offset + delta,
|
||||
address.reloc_flags);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@@ -73,8 +73,8 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
|
|||
struct brw_bo *bo = address.buffer;
|
||||
|
||||
uint64_t reloc_val =
|
||||
brw_emit_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
|
||||
address.reloc_flags);
|
||||
brw_state_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
|
||||
address.reloc_flags);
|
||||
|
||||
void *reloc_ptr = (void *)brw->batch.map + ss_offset;
|
||||
#if GEN_GEN >= 8
|
||||
|
|
|
@@ -87,9 +87,10 @@ __gen_combine_address(struct brw_context *brw, void *location,
|
|||
} else {
|
||||
uint32_t offset = (char *) location - (char *) brw->batch.map;
|
||||
|
||||
return brw_emit_reloc(&brw->batch, offset, address.bo,
|
||||
address.offset + delta,
|
||||
address.reloc_flags);
|
||||
/* TODO: Use brw_state_reloc for some things on Gen4-5 */
|
||||
return brw_batch_reloc(&brw->batch, offset, address.bo,
|
||||
address.offset + delta,
|
||||
address.reloc_flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -794,13 +794,12 @@ brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
|
|||
|
||||
/* This is the only way buffers get added to the validate list.
|
||||
*/
|
||||
uint64_t
|
||||
brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
|
||||
struct brw_bo *target, uint32_t target_offset,
|
||||
unsigned int reloc_flags)
|
||||
static uint64_t
|
||||
emit_reloc(struct intel_batchbuffer *batch,
|
||||
struct brw_reloc_list *rlist, uint32_t offset,
|
||||
struct brw_bo *target, uint32_t target_offset,
|
||||
unsigned int reloc_flags)
|
||||
{
|
||||
struct brw_reloc_list *rlist = &batch->batch_relocs;
|
||||
|
||||
assert(target != NULL);
|
||||
|
||||
if (rlist->reloc_count == rlist->reloc_array_size) {
|
||||
|
@@ -810,9 +809,6 @@ brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
|
|||
sizeof(struct drm_i915_gem_relocation_entry));
|
||||
}
|
||||
|
||||
/* Check args */
|
||||
assert(batch_offset <= batch->bo->size - sizeof(uint32_t));
|
||||
|
||||
unsigned int index = add_exec_bo(batch, target);
|
||||
struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
|
||||
|
||||
|
@@ -821,7 +817,7 @@ brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
|
|||
|
||||
rlist->relocs[rlist->reloc_count++] =
|
||||
(struct drm_i915_gem_relocation_entry) {
|
||||
.offset = batch_offset,
|
||||
.offset = offset,
|
||||
.delta = target_offset,
|
||||
.target_handle = batch->use_batch_first ? index : target->gem_handle,
|
||||
.presumed_offset = entry->offset,
|
||||
|
@@ -834,6 +830,29 @@ brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
|
|||
return entry->offset + target_offset;
|
||||
}
|
||||
|
||||
uint64_t
|
||||
brw_batch_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
|
||||
struct brw_bo *target, uint32_t target_offset,
|
||||
unsigned int reloc_flags)
|
||||
{
|
||||
assert(batch_offset <= batch->bo->size - sizeof(uint32_t));
|
||||
|
||||
return emit_reloc(batch, &batch->batch_relocs, batch_offset,
|
||||
target, target_offset, reloc_flags);
|
||||
}
|
||||
|
||||
uint64_t
|
||||
brw_state_reloc(struct intel_batchbuffer *batch, uint32_t state_offset,
|
||||
struct brw_bo *target, uint32_t target_offset,
|
||||
unsigned int reloc_flags)
|
||||
{
|
||||
assert(state_offset <= batch->bo->size - sizeof(uint32_t));
|
||||
|
||||
return emit_reloc(batch, &batch->batch_relocs, state_offset,
|
||||
target, target_offset, reloc_flags);
|
||||
}
|
||||
|
||||
|
||||
uint32_t
|
||||
brw_state_batch_size(struct brw_context *brw, uint32_t offset)
|
||||
{
|
||||
|
|
|
@@ -71,11 +71,16 @@ bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
|
|||
|
||||
#define RELOC_WRITE EXEC_OBJECT_WRITE
|
||||
#define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
|
||||
uint64_t brw_emit_reloc(struct intel_batchbuffer *batch,
|
||||
uint32_t batch_offset,
|
||||
struct brw_bo *target,
|
||||
uint32_t target_offset,
|
||||
unsigned flags);
|
||||
uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
|
||||
uint32_t batch_offset,
|
||||
struct brw_bo *target,
|
||||
uint32_t target_offset,
|
||||
unsigned flags);
|
||||
uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
|
||||
uint32_t batch_offset,
|
||||
struct brw_bo *target,
|
||||
uint32_t target_offset,
|
||||
unsigned flags);
|
||||
|
||||
#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
|
||||
|
||||
|
@@ -164,7 +169,7 @@ intel_batchbuffer_advance(struct brw_context *brw)
|
|||
#define OUT_RELOC(buf, flags, delta) do { \
|
||||
uint32_t __offset = (__map - brw->batch.map) * 4; \
|
||||
uint32_t reloc = \
|
||||
brw_emit_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
|
||||
brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
|
||||
OUT_BATCH(reloc); \
|
||||
} while (0)
|
||||
|
||||
|
@@ -172,7 +177,7 @@ intel_batchbuffer_advance(struct brw_context *brw)
|
|||
#define OUT_RELOC64(buf, flags, delta) do { \
|
||||
uint32_t __offset = (__map - brw->batch.map) * 4; \
|
||||
uint64_t reloc64 = \
|
||||
brw_emit_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
|
||||
brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
|
||||
OUT_BATCH(reloc64); \
|
||||
OUT_BATCH(reloc64 >> 32); \
|
||||
} while (0)
|
||||
|
|
Loading…
Reference in New Issue