i965: Make intel_batchbuffer_reloc() take a batchbuffer argument
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
This commit is contained in:
parent
4b7dfd8812
commit
f03bac1fc7
|
@ -55,12 +55,12 @@ blorp_emit_reloc(struct blorp_batch *batch,
|
|||
|
||||
uint32_t offset = (char *)location - (char *)brw->batch.map;
|
||||
if (brw->gen >= 8) {
|
||||
return intel_batchbuffer_reloc64(brw, address.buffer, offset,
|
||||
return intel_batchbuffer_reloc64(&brw->batch, address.buffer, offset,
|
||||
address.read_domains,
|
||||
address.write_domain,
|
||||
address.offset + delta);
|
||||
} else {
|
||||
return intel_batchbuffer_reloc(brw, address.buffer, offset,
|
||||
return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
|
||||
address.read_domains,
|
||||
address.write_domain,
|
||||
address.offset + delta);
|
||||
|
|
|
@ -432,14 +432,14 @@ _intel_batchbuffer_flush(struct brw_context *brw,
|
|||
/* This is the only way buffers get added to the validate list.
|
||||
*/
|
||||
uint32_t
|
||||
intel_batchbuffer_reloc(struct brw_context *brw,
|
||||
intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
|
||||
drm_intel_bo *buffer, uint32_t offset,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = drm_intel_bo_emit_reloc(brw->batch.bo, offset,
|
||||
ret = drm_intel_bo_emit_reloc(batch->bo, offset,
|
||||
buffer, delta,
|
||||
read_domains, write_domain);
|
||||
assert(ret == 0);
|
||||
|
@ -453,12 +453,12 @@ intel_batchbuffer_reloc(struct brw_context *brw,
|
|||
}
|
||||
|
||||
uint64_t
|
||||
intel_batchbuffer_reloc64(struct brw_context *brw,
|
||||
intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
|
||||
drm_intel_bo *buffer, uint32_t offset,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta)
|
||||
{
|
||||
int ret = drm_intel_bo_emit_reloc(brw->batch.bo, offset,
|
||||
int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
|
||||
buffer, delta,
|
||||
read_domains, write_domain);
|
||||
assert(ret == 0);
|
||||
|
|
|
@ -62,13 +62,13 @@ void intel_batchbuffer_data(struct brw_context *brw,
|
|||
const void *data, GLuint bytes,
|
||||
enum brw_gpu_ring ring);
|
||||
|
||||
uint32_t intel_batchbuffer_reloc(struct brw_context *brw,
|
||||
uint32_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
|
||||
drm_intel_bo *buffer,
|
||||
uint32_t offset,
|
||||
uint32_t read_domains,
|
||||
uint32_t write_domain,
|
||||
uint32_t delta);
|
||||
uint64_t intel_batchbuffer_reloc64(struct brw_context *brw,
|
||||
uint64_t intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
|
||||
drm_intel_bo *buffer,
|
||||
uint32_t offset,
|
||||
uint32_t read_domains,
|
||||
|
@ -159,18 +159,18 @@ intel_batchbuffer_advance(struct brw_context *brw)
|
|||
#define OUT_BATCH(d) *__map++ = (d)
|
||||
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
|
||||
|
||||
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
|
||||
uint32_t __offset = (__map - brw->batch.map) * 4; \
|
||||
OUT_BATCH(intel_batchbuffer_reloc(brw, (buf), __offset, \
|
||||
(read_domains), \
|
||||
(write_domain), \
|
||||
(delta))); \
|
||||
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
|
||||
uint32_t __offset = (__map - brw->batch.map) * 4; \
|
||||
OUT_BATCH(intel_batchbuffer_reloc(&brw->batch, (buf), __offset, \
|
||||
(read_domains), \
|
||||
(write_domain), \
|
||||
(delta))); \
|
||||
} while (0)
|
||||
|
||||
/* Handle 48-bit address relocations for Gen8+ */
|
||||
#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
|
||||
uint32_t __offset = (__map - brw->batch.map) * 4; \
|
||||
uint64_t reloc64 = intel_batchbuffer_reloc64(brw, (buf), __offset, \
|
||||
uint64_t reloc64 = intel_batchbuffer_reloc64(&brw->batch, (buf), __offset, \
|
||||
(read_domains), \
|
||||
(write_domain), \
|
||||
(delta)); \
|
||||
|
|
Loading…
Reference in New Issue